code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="3df1f9ec-6a91-44b5-a1bb-e4766c6a34d7" _uuid="252baffae3c5eb5eb068db02dae43be6c705f896"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
from scipy import stats #to call a function that removes anomalies
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# -
# Hello,
#
# So I analysed certain factors to see if they had any relationships with house prices and the factors that had the most relationships were number of bathrooms, grade and sqft_living.
#
# The coefficient result was quite interesting and unexpected, you should definitely check it out.
#
# I'm still new at this and so all feedback is greatly appreciated.
#
# Cheers!
#
# Fayomi
# + _cell_guid="a84a3938-bd99-4f5c-bb3f-b291da46fe8e" _uuid="ae5afe6b69b2e446194fd0c78ec35b4a8fb38ff2"
df = pd.read_csv('../input/kc_house_data.csv')  # King County house-sales dataset
# + _cell_guid="c0a81bcd-3971-4f04-a261-22d39ef89032" _uuid="27047b563a5650e0a34ae8dd7fea161ecd2c3d52"
df.head()
# -
# Drop identifier, date, location and derived-area columns that are not used in this analysis.
df.drop(['id','date','sqft_lot','sqft_above','lat', 'long','zipcode', 'sqft_living15', 'sqft_lot15','waterfront','view'],axis=1,inplace=True)
# Keep only rows whose every column lies within 3 standard deviations of its mean (z-score outlier filter).
df = df[(np.abs(stats.zscore(df)) < 3).all(axis=1)] #to remove anomalies
df.head()
df.info()
# Distribution of sale prices (raw).
plt.figure(figsize=(16,6))
sns.distplot(df['price'],kde=False,bins=50)
# + _cell_guid="04b60e59-c6d2-4166-bc4d-bb0671d45c4c" _uuid="32cb6d976e71fa6019fab61757ca106beb6f33de"
# Same distribution with missing prices dropped.
plt.figure(figsize=(16,6))
sns.distplot(df['price'].dropna(),kde=False,bins=50)
# + _cell_guid="1c3a4401-ac05-48c7-99ee-cec332102731" _uuid="c5fab5b04f6746a04d37a565be363a799aa8ccb9"
# How many listings have each bedroom count.
plt.figure(figsize=(16,6))
sns.countplot(df['bedrooms'])
# + _cell_guid="81323caa-1e4c-475b-9444-d4b03d8976d7" _uuid="0201d832b284662a8b6f3be39a198226fe7869be"
plt.figure(figsize=(16,6))
sns.countplot(df['bathrooms'])
# + _cell_guid="d1ea1a64-2660-4f72-af06-dfd666b23584" _uuid="91542cdecbbf75ff3277b45c0710bd823108c02c"
plt.figure(figsize=(16,6))
sns.distplot(df['sqft_living'].dropna(),kde=False,bins=50)
# -
# Pairwise scatter plots of all remaining columns (slow for wide frames).
sns.pairplot(df)
# Joint plots of each candidate feature against price; kind='reg' overlays a linear fit.
sns.jointplot(x='bedrooms',y='price',data=df)
sns.jointplot(x='price',y='sqft_living',data=df,kind='reg')
sns.jointplot(x='floors',y='price',data=df)
sns.jointplot(x='grade',y='price',data=df, kind='reg')
sns.jointplot(x='yr_built',y='price',data=df)
sns.jointplot(x='sqft_basement',y='price',data=df)
sns.jointplot(x='bathrooms',y='price',data=df, kind='reg')
sns.jointplot(x='condition',y='price',data=df)
# the features most correlated with price are: bathrooms, grade, sqft_living (and maybe bedrooms)
# + _cell_guid="76840a6e-a92e-4833-b638-9aed174f5fff" _uuid="71a215a83d487f1861261c46ff5a67d25a42e334"
# Correlation heatmap to confirm the relationships observed above.
sns.heatmap(df.corr(),cmap='coolwarm', annot=True)
# + [markdown] _cell_guid="5913a8ed-136c-4b30-85cb-9a705587e27a" _uuid="adbe7b4846ab0c428473d1371de408328a3647c6"
#
# TIME TO FORMAT DATA FOR ML
# + _cell_guid="ca19d98a-66bc-4dfa-8ca5-732e89354b01" _uuid="28faf2b752a6e71fcf9597707a2734e75c8482a3"
df.columns
# + _cell_guid="f32e6608-7f4c-4022-b8b1-ebc764e7ee03" _uuid="bc75fd96603a343ca741749d541c2c1922328071"
# Selected input features: the three columns most correlated with price.
x = df[['bathrooms','grade','sqft_living']]
# Expected output (target variable).
y = df['price']
# + _cell_guid="45d94e35-800c-4cab-8cb7-6130eb4b3758" _uuid="49f90f09f7ab6cddc4eda1e4dc9bc5272fb69208"
# Fixed: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
# + _cell_guid="b23af66a-d78e-459c-ae32-f2c14261a5a7" _uuid="e8f72124936e3da5420661fea3eb652ad63777c5"
# 70/30 train/test split; random_state pins the shuffle for reproducibility.
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=101)
# + _cell_guid="4d71ee06-e964-4135-bd46-9b81a49a67e0" _uuid="9b7c60f5e03982738f8ef948344434eaf53e1205"
from sklearn.linear_model import LinearRegression
# + _cell_guid="4f297b52-eb0a-4b02-aa9f-96490ff1d441" _uuid="d214e92b6d5a52d62ff78e32a22785b48dad76ec"
lm = LinearRegression()
# -
# Train the model on the training split.
lm.fit(x_train,y_train)
# Per-feature coefficients of the fitted model.
lm.coef_
# Table of coefficients indexed by feature name.
cdf = pd.DataFrame(lm.coef_,x.columns,columns=['coefs'])
cdf
# Predictions for the held-out test set.
pred = lm.predict(x_test)
# Plot predicted vs. actual prices; points near the diagonal indicate accurate prediction.
plt.scatter(y_test, pred)
| downloaded_kernels/house_sales/kernel_18.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="-3yqgUicRe6j"
# # Pre-Processing Exercises FIFA project
#
# Try the exercises below to practice data pre-processing with *pandas*. To edit and run the code, open the notebook in "playground mode" using the button in the upper right corner. Be sure to add it to your Drive to save your work.
# + [markdown] id="z4z9TRqMYL4E"
# ## Setup
# + [markdown] id="1zYQA5YLYQ5v"
# Run the cell below to import necessary modules.
# + id="5hfACKUCj78R"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from google.colab import drive
# + id="m5LEAr53Yes4" colab={"base_uri": "https://localhost:8080/"} outputId="ec8536c9-2c8e-4209-f309-4bec4a59a8de"
prefix = '/content/drive'  # mount point for Google Drive inside the Colab VM
from google.colab import drive
drive.mount(prefix, force_remount=True)
# + id="WAMoVERSZmlS" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="52091bef-5662-4f3d-a189-e3e5a51e3ecd"
# NOTE(review): this path points at Colab's /content/sample_data, not the
# mounted Drive — confirm that is intended.
player_path = '/content/sample_data/WorldCupPlayers.csv' # Path to the players dataset in your drive here
player = pd.read_csv(player_path)
player.head()
# + [markdown] id="B_E2ScJdaiOl"
# Finally, run the cell below to initialize several functions that will spot-check the correctness of your solutions as you complete the exercises.
# + id="t6kG8zyeak7t"
def test_1(df):
assert len(df.index) == 37784
assert len(df.columns) == 9
def test_2(df):
assert not df['RoundID'].isna().any()
assert not df['MatchID'].isna().any()
test_1(player)
test_2(player)
# + [markdown] id="BTfRDCQilp_V"
# ## Missing Values
#
# As you saw in the EDA exercises, several columns in the `player` dataset have missing values. Below, you'll remove or replace these.
# + [markdown] id="Bo7y9MnbqR7m"
# First, remove all rows in `player` missing `Coach Name`, `Player Name`, `Team Initials`, or `Line-up`.
# + id="lvjW__qglpgJ" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="9954a999-6275-4eec-87f0-2828cec29e44"
# The key columns mark missing data with the literal string '?' rather than
# NaN (apparent dataset convention — confirm against the raw CSV), so flag
# every row containing '?' in any of these columns and drop those rows.
cols = ["Coach Name", "Player Name", "Team Initials", "Line-up"]
missing = player[cols].copy()
for col in cols:
    # regex=False: match '?' literally (it is a regex metacharacter otherwise).
    missing[col] = missing[col].str.contains('?', regex=False)
missing = missing.any(axis=1)      # True for rows flagged in any column
missing = missing.index[missing]   # index labels of the flagged rows
player = player.drop(labels=missing)
player.head()
# + [markdown] id="vlTiMTsEnID-"
# Replace all missing values in `RoundID` and `MatchID` with the median of the remaining values in each column.
# + id="btc3s3Ysnecf" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="4547f88f-19c3-4669-fc07-056c0472006e"
# Replace missing RoundID/MatchID values with each column's median.
cols = ["RoundID", "MatchID"]
for col in cols:
    col_median = player[col].median(skipna=True)   # median of the non-missing values
    missing = player.index[player[col].isna()]     # row labels that need filling
    player.loc[missing, col] = col_median
test_2(player)   # spot-check: no NaNs remain in the ID columns
player.head()
# + [markdown] id="ne7KYSHYq-Ty"
# ## Entity Resolution
# Now, you'll perform simple entity resolution between the two datasets, using car makes (e.g. BMW, Toyota) as the shared entities.
# + [markdown] id="22R9TNFfrSuo"
# First, compile an alphabetically sorted list of all makes found in either dataset. What appears to be the primary source of resolution problems in this list?
# + id="9jYXNsLDrQTk" colab={"base_uri": "https://localhost:8080/"} outputId="5da2b6b7-26be-40d7-ebed-a8df5465d8a6"
# List the unique player names, alphabetically sorted (case-insensitive),
# to eyeball entity-resolution problems.
player_name = player["Player Name"].unique().tolist()
sorted(player_name, key=str.lower)
# + [markdown] id="q2ZHuVQprti4"
# Address the major entity resolution problem you identified above using built-in *pandas* functions.
#
# *Your solution should simultaneously address all names. You shouldn't be fixing the names on a case-by-case basis at this stage.*
# + id="8POITU8iFsvO"
# Normalize names in bulk: trim surrounding whitespace and lower-case everything.
player["Player Name"] = player["Player Name"].str.strip().str.lower()
# + [markdown] id="EJcI9YQAGjXn"
# Output a list of all the remaining names, then inspect it for lingering problems.
# + id="YhdYU_z1sHY7" colab={"base_uri": "https://localhost:8080/"} outputId="0599d88d-4d56-417e-b792-bc826fbbd265"
player_name = player["Player Name"].unique().tolist()
sorted(player_name, key=str.lower)
# + [markdown] id="iEQ6f9kuN3UI"
# Check what fraction of rows have a unique player name (i.e. whether the
# column could serve as an index on its own).
# + id="HMVDzimGQXeN" colab={"base_uri": "https://localhost:8080/"} outputId="885d6a74-0a72-4d9b-abd0-eb7e995cfcea"
len(player["Player Name"].unique()) / len(player.index)
# + [markdown] id="XRo7OszXQitF"
# # parsing Events
# + colab={"base_uri": "https://localhost:8080/"} id="KKQ3RAuPOeb7" outputId="c2ce1ea5-e63e-454c-98ba-ed2af0853d06"
def count_goal(strs):
    """Count goal events in a player's Event string.

    Parameters
    ----------
    strs : str or float
        Space-separated event codes (e.g. "G40' G87'"); NaN for players
        with no recorded events.

    Returns
    -------
    int
        Number of event tokens starting with "G" (goals).
    """
    # NaN is the only value not equal to itself — treat a missing Event as 0 goals.
    if strs != strs:
        return 0
    count = 0
    for word in strs.split(" "):
        # startswith guards against empty tokens produced by leading or
        # consecutive spaces (the original word[0] raised IndexError there),
        # and the per-row debug print was removed.
        if word.startswith("G"):
            count += 1
    return count
# Derive a per-row goal count from the Event string.
player["goals_num"] = player["Event"].apply(count_goal)
# + id="5EBvSpKFSNz7"
out_path = '/content/sample_data/WorldCupPlayers_cleaned.csv' # Destination path for the cleaned dataset
player.to_csv(out_path, index=False)
# + id="zORr1GFSTySQ"
| Processing_FIFAProject.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import matplotlib.pyplot as plt
import cv2
import numpy as np
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#for loading and visualizing audio files
import librosa
import librosa.display
#to play audio
import IPython.display as ipd
# Directory holding the .wav training clips; update this path per machine.
audio_fpath = "/Users/Tejas/Downloads/135-TrainingSet/data-audio/"
audio_clips = os.listdir(audio_fpath)
print("No. of .wav files in audio folder = ",len(audio_clips))
print(len(audio_clips))
# +
# Filename layout (characters 6:8 hold the two-digit emotion code — presumably
# a RAVDESS-style naming scheme; confirm against the dataset docs):
# 3-1-3-1-(1 or 2)-1-(01-24) (Happy)
# 3-1-4-1-(1 or 2)-1-(01-24) (Sad)
# 3-1-5-1-(1 or 2)-1-(01-24) (Angry)
sad_arr = []
happy_arr = []
angry_arr = []
# Partition the clips into emotion buckets by the code in the filename.
for filename in audio_clips:
    if filename[6:8] == '04':
        sad_arr.append(filename)
    if filename[6:8] == '03':
        happy_arr.append(filename)
    if filename[6:8] == '05':
        angry_arr.append(filename)
len(sad_arr+happy_arr+angry_arr)   # total clips kept across the three emotions
# -
def array_to_dB(audio_clips):
    """Render each clip's dB spectrogram to an image and collect the images.

    For every filename in *audio_clips* (resolved against the global
    ``audio_fpath``): load the audio at 44.1 kHz, compute an STFT, convert its
    magnitude to decibels, draw it with librosa, save the figure to a scratch
    PNG, and read that PNG back with OpenCV. Returns a list of image arrays
    (one per clip, in OpenCV's default BGR channel order).

    NOTE(review): every iteration overwrites the same 'pre_step.png' scratch
    file, so this function is not safe to run concurrently.
    """
    dB_arr =[ ]
    for i in audio_clips:
        x, sr = librosa.load(audio_fpath+i, sr=44100)  # resample to 44.1 kHz
        plt.figure(figsize=(14, 5))
        X = librosa.stft(x)
        Xdb = librosa.amplitude_to_db(abs(X))          # magnitude -> decibels
        librosa.display.specshow(Xdb, sr=sr, x_axis='time', y_axis='hz')
        # plt.colorbar()
        plt.savefig('pre_step.png')                    # rasterize the spectrogram
        img = cv2.imread("pre_step.png")               # read it back as a pixel array
        plt.close()                                    # free the figure to avoid leaking memory
        # print("Xdb shape: ", np.array(Xdb).shape)
        # print("img shape: ", img.shape)
        # break;
        dB_arr.append(img)
    return dB_arr
# sad_dB = array_to_dB(sad_arr)
# angry_dB = array_to_dB(angry_arr)
# happy_dB = array_to_dB(happy_arr)
# +
# wav = array_to_dB(angry_arr) #image ---> IMREAD ON THE IMAGE--> RESIZE
# wav = cv2.imdecode(wav,IMREAD_UNCHANGED)
# res = cv2.resize(wav, dsize=(1025, 337), interpolation=cv2.INTER_CUBIC)
# -
#function to determine data set
# +
# Found the implementation of AlexNet online.
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
# Fixed: `keras.layers.normalization` is no longer importable in Keras 2.4+;
# BatchNormalization has always been exposed from keras.layers directly.
from keras.layers import BatchNormalization
np.random.seed(1000)
#Instantiation
# NOTE(review): input_shape=(32,32,3) assumes the spectrogram images are
# resized to 32x32 RGB before training — confirm against the data pipeline,
# which currently produces full-size matplotlib PNG renders.
AlexNet = Sequential()
#1st Convolutional Layer
AlexNet.add(Conv2D(filters=96, input_shape=(32,32,3), kernel_size=(11,11), strides=(4,4), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'))
#2nd Convolutional Layer
AlexNet.add(Conv2D(filters=256, kernel_size=(5, 5), strides=(1,1), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'))
#3rd Convolutional Layer
AlexNet.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
#4th Convolutional Layer
AlexNet.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
#5th Convolutional Layer
AlexNet.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'))
#Passing it to a Fully Connected layer
AlexNet.add(Flatten())
# 1st Fully Connected Layer (the redundant input_shape argument was dropped:
# Keras ignores it on any layer other than the first)
AlexNet.add(Dense(4096))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
# Add Dropout to prevent overfitting
AlexNet.add(Dropout(0.4))
#2nd Fully Connected Layer
AlexNet.add(Dense(4096))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
#Add Dropout
AlexNet.add(Dropout(0.4))
#3rd Fully Connected Layer
AlexNet.add(Dense(1000))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
#Add Dropout
AlexNet.add(Dropout(0.4))
#Output Layer
# Fixed: the labels are one-hot encoded into 3 classes (happy/sad/angry) later
# in this notebook, so the output layer needs 3 units; the original Dense(10)
# (left over from the CIFAR-10 example this was adapted from) made
# categorical_crossentropy fail at fit time.
AlexNet.add(Dense(3))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('softmax'))
#Model Summary
AlexNet.summary()
# -
AlexNet.compile(loss = keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy'])
# +
# Convert each emotion's audio clips to spectrogram images
# (slow: one matplotlib figure is rendered per clip).
happy_dB = array_to_dB(happy_arr)
sad_dB = array_to_dB(sad_arr)
angry_dB = array_to_dB(angry_arr)
# -
# Combine the three classes into one dataset; list concatenation keeps the
# ordering happy, sad, angry.
data_dB = happy_dB + sad_dB + angry_dB
len(data_dB)
# Labels: 0=happy, 1=sad, 2=angry. Fixed: derive the counts from the actual
# list lengths instead of the hard-coded 50/48/45, so the labels stay aligned
# with the data if the clip collection changes.
y_vals = [0] * len(happy_dB) + [1] * len(sad_dB) + [2] * len(angry_dB)
# y_vals = np.full(143, y_vals)
# +
from sklearn.model_selection import train_test_split
# Fixed: train_test_split on plain Python lists returns lists, but the
# ImageDataGenerator.fit calls and the .shape accesses below require
# numpy arrays — convert once up front.
x_all = np.asarray(data_dB)
y_all = np.asarray(y_vals)
# Train/validation/test split: 50% train, then the remainder halved -> 50/25/25.
x_train, x_val, y_train, y_val = train_test_split(x_all, y_all, test_size=.5)
x_val, x_test, y_val, y_test = train_test_split(x_val, y_val, test_size=.5)
# One-hot encode the labels: shape goes from (n,) to (n, 3) for the 3 classes.
from keras.utils import to_categorical
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
y_test = to_categorical(y_test)
# Image data augmentation: small rotations, horizontal flips and zooms.
from keras.preprocessing.image import ImageDataGenerator
train_generator = ImageDataGenerator(rotation_range=2, horizontal_flip=True, zoom_range=.1)
val_generator = ImageDataGenerator(rotation_range=2, horizontal_flip=True, zoom_range=.1)
test_generator = ImageDataGenerator(rotation_range=2, horizontal_flip=True, zoom_range=.1)
# Fit the augmentation statistics to each split.
train_generator.fit(x_train)
val_generator.fit(x_val)
test_generator.fit(x_test)
# -
# Learning-rate annealer: shrink the LR when validation accuracy plateaus.
# NOTE(review): the metric name is 'val_acc' in older Keras but 'val_accuracy'
# in newer releases — confirm against the installed version.
from keras.callbacks import ReduceLROnPlateau
lrr = ReduceLROnPlateau(monitor='val_acc', factor=.01, patience=3, min_lr=1e-5)
# Training parameters (the unused learn_rate variable was removed).
batch_size = 20
epochs = 10
# Train. Fixed: validation_steps is derived from the validation-set size —
# the original hard-coded 250 steps cycled the small validation set many
# times per epoch for no benefit.
AlexNet.fit_generator(train_generator.flow(x_train, y_train, batch_size=batch_size),
                      epochs=epochs,
                      steps_per_epoch=x_train.shape[0]//batch_size,
                      validation_data=val_generator.flow(x_val, y_val, batch_size=batch_size),
                      validation_steps=max(1, x_val.shape[0]//batch_size),
                      callbacks=[lrr],
                      verbose=1)
| sound_to_image_converter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# # Homework 1: Problems
# ### PHYS T480/580, Fall 2018
# https://github.com/gtrichards/PHYS_T480_F18/
#
#
# ## Problem 1
#
# Practice your git and github skills by submitting this homework via github:
#
# * Clone the class github repository to your computer, the one at https://github.com/gtrichards/PHYS_T480_F18/ (should already have done this).
# * Click the Github Classroom link you received via the mailing list. This will create a private github repository through which you'll be submitting your homeworks. Clone that repository to your computer.
# * Copy this notebook from the class github repository to your private homework submission repository. IMPORTANT: rename it to `<filename>-FirstLast.ipynb` once you copy it, where `<filename>` is the existing filename and `FirstLast` are your first and last name. Example: `PHYST480-F18-HW1-GordonRichards.ipynb`.
# * Solve problems #2, #3, and #4 by filling in the missing cells in the copied notebook.
# * Commit the notebook to your repository, and `git push` it upstream.
#
#
# ## Problem 2
# Generate a sample of 10,000 data values drawn from N($\mu$=1.0, $\sigma$=0.2) and
# draw a pretty histogram, with the bin size determined using the Freedman-Diaconis
# rule. Overplot the true distribution.
#
# ## Problem 3
# Repeat the problem 2, but now add to the Gaussian sample (concatenate arrays with `np.concatenate()`)
# another sample of 10,000 data values drawn from a `cauchy` distribution with
# $\mu=2.0$ and $\gamma=0.5$. Do it twice: once with the bin size determined
# using the Freedman-Diaconis rule and once using the Scott's rule. Comment.
#
#
# ## Problem 4
# Follow the example from the Central Limit Theorem cells in BasicStats2.ipynb and simulate the distribution of 1,000,000 $\mathscr{N}(0,1)$ draws of $\chi^2$ for `N=2` and `N=5`. Overplot the theoretical pdf (it will help to use `scipy.special.gamma()`).
# ### Some useful definitions and functions
import numpy as np
from matplotlib import pyplot as plt
from scipy import optimize
from scipy.stats import cauchy, norm
from astroML.plotting import setup_text_plots
from astroML.stats import sigmaG
from astroML.plotting import hist as fancyhist
# Apply astroML's matplotlib defaults; usetex=True requires a LaTeX install
# (see the note below).
setup_text_plots(fontsize=14, usetex=True)
# %matplotlib inline
# This astroML function adjusts matplotlib settings for a uniform feel in the
# textbook. Note that with `usetex=True`, fonts are rendered with $\LaTeX$. This
# may result in an error if $\LaTeX$ is not installed on your system. In that
# case, you can set usetex to `False`.
| homework/PHYST480-F18-HW1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# #### - <NAME>
# ####
# #### - 1/18/2022
# ####
# #### - PR - EX03 - Q5 - All Parts
import pandas as pd
from PIL import Image
import numpy as np
import cv2
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
# Load the digits dataset (rows are flattened 8x8 images — 64 pixel columns,
# as implied by the reshape(20, 8, 8) calls below).
dataset = pd.read_csv('./inputs/P5/digits.csv')
dataset.head()
dataset.info()
# ### Part A.
# Project the data onto its top 15 principal components.
pca = PCA(n_components=15)
trans = pca.fit_transform(dataset)
# Eigenvectors (principal directions), one component per row.
pca_eigenvecs = pd.DataFrame(pca.components_)
pca_eigenvecs
# Eigenvalues: variance explained by each component.
pca_eigenvals = pd.DataFrame(pca.explained_variance_)
pca_eigenvals.T
# ### Part B.
# Cross-validated grid search for the KDE bandwidth over [1.05, 6) in 0.05 steps,
# run on the 15-component PCA projection.
params = {"bandwidth": np.arange(1.05, 6, 0.05)}
grid = GridSearchCV(KernelDensity(), params)
pca_dataset = pd.DataFrame(trans)
grid.fit(pca_dataset)
bandwidth = grid.best_estimator_.bandwidth
print("bandwidth: {:.2f}".format(bandwidth))
# ### Part C.
# +
# Fit a KDE with the tuned bandwidth on the PCA-projected data, draw 20
# samples, and map them back to the original 64-pixel space for display.
kde = KernelDensity(bandwidth=bandwidth)
kde.fit(pca_dataset)
samples = kde.sample(n_samples=20, random_state=0)
samples = pca.inverse_transform(samples)
samples = samples.reshape(20, 8, 8)
samples.shape
# +
# Show the 20 generated "digits" in a 4x5 grid with the axes hidden.
fig, ax = plt.subplots(4, 5, figsize=(10, 10))
[axi.set_axis_off() for axi in ax.ravel()]
for i, ax in enumerate(fig.axes):
    ax.imshow(samples[i], cmap=plt.cm.binary)
# -
# ### Part D.
# Part D: repeat the bandwidth search and sampling on the raw 64-dimensional
# data, without PCA.
params = {"bandwidth": np.arange(1.05, 6, 0.05)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(dataset)
bandwidth = grid.best_estimator_.bandwidth
print("bandwidth: {:.2f}".format(bandwidth))
# +
kde = KernelDensity(bandwidth=bandwidth)
# Fixed: the bandwidth above was tuned on the full-dimensional `dataset`, but
# the KDE was then fitted on the 15-D `pca_dataset` (copy-paste from Part C).
# Fit on the raw data instead; its samples are already 64-pixel images, so no
# pca.inverse_transform is needed before reshaping.
kde.fit(dataset)
samples = kde.sample(n_samples=20, random_state=0)
samples = samples.reshape(20, 8, 8)
samples.shape
# +
fig, ax = plt.subplots(4, 5, figsize=(10, 10))
[axi.set_axis_off() for axi in ax.ravel()]
for i, ax in enumerate(fig.axes):
    ax.imshow(samples[i], cmap=plt.cm.binary)
# -
# #### FINITO
| SPR_HW3/EX03_Q5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Model Monitoring Overview (Beta)
# + [markdown] pycharm={"name": "#%% md\n"}
# ```{note}
# Model Monitoring is based on Iguazio's streaming technology. Contact Iguazio to enable this feature.
# ```
#
# ## Introduction
# MLRun provides a model monitoring service that tracks the performance of models in production to help identify
# potential issues with concept drift and prediction accuracy before they impact business goals.
# Typically, Model monitoring is used by devops for tracking model performance and by data scientists to track model drift.
# Two monitoring types are supported:
# 1. Model operational performance (latency, requests per second, etc..)
# 2. Drift detection - identifies potential issues with the model. See [Drift Analysis](#drift-analysis) for more details.
#
# Model Monitoring provides warning alerts that can be sent to stakeholders for processing.
#
# The Model Monitoring data can be viewed using Iguazio's user interface or through Grafana Dashboards. Grafana is an interactive web application, visualization tool
# that can be added as a service in the Iguazio platform. See [Model Monitoring Using Grafana Dashboards](#model-monitoring-using-grafana-dashboards) for more details.
#
# ## Architecture
# The Model Monitoring process flow starts with collecting operational data. The operational data are converted to vectors which are posted to the Model Server.
# The Model Server is then wrapped around a machine learning model that uses a function to calculate predictions based on the available vectors.
# Next, the Model Server creates a log for the input and output of the vectors, and the entries are written to the production data stream (a [v3io stream](#https://nuclio.io/docs/latest/reference/triggers/v3iostream/)).
# While the Model Server is processing the vectors, a Nuclio operation monitors the log of the data stream and is triggered when a new log entry is detected.
# The Nuclio function examines the log entry, processes it in to statistics which are then written to the statistics databases (parquet file, time series database and key value database).
# In parallel, a scheduled MLRun job runs reading the parquet files, performing drift analysis. The drift analysis data is stored so that
# the user can retrieve it in the Iguazio UI or in a Grafana dashboard.
#
# 
#
# ### Drift Analysis
# The Model Monitoring feature provides drift analysis monitoring.
# Model Drift in machine learning is a situation where the statistical properties of the target variable (what the model is trying to predict) change over time.
# In other words, the production data has changed significantly over the course of time and no longer matches the input data used to train the model.
# So, for this new data, accuracy of the model predictions is low. Drift analysis statistics are computed once an hour.
# For more information see <a href="https://www.iguazio.com/glossary/concept-drift/" target="_blank">Concept Drift</a>.
#
# ### Common Terminology
# The following are terms you will see in all the model monitoring screens:
#
# * **Total Variation Distance** (TVD)—this is the statistical difference between the actual predictions, and the model's trained predictions
# * **Hellinger Distance**—this is a type of f-divergence that quantifies the similarity between the actual predictions, and the model's trained predictions.
# * **Kullback–Leibler Divergence** (KLD)—this is the measure of how the probability distribution of actual predictions is different from the second, model's trained reference probability distribution.
# * **Model Endpoint**— a combination of a deployed nuclio function and the models themselves. One function can run multiple endpoints; however, statistics are saved per endpoint.
#
# ## Model Monitoring Using the Iguazio Platform Interface
# Iguazio's Model Monitoring data is available for viewing through the regular platform interface.
# The platform provides four information screens with model monitoring data.
# * [Model Endpoint Summary List](#model-endpoint-summary-list)
# * [Model Endpoint Overview](#model-endpoint-overview)
# * [Model Drift Analysis](#model-drift-analysis)
# * [Model Features Analysis](#model-features-analysis)
#
# Select a project from the project tiles screen.
# From the project dashboard, press the **Models** tile to view the models currently deployed .
# Click **Model Endpoints** from the menu to display a list of monitored endpoints.
# If the Model Monitoring feature is not enabled, the endpoints list will be empty.
#
# ### Model Endpoint Summary List
# The Model Endpoints summary list provides a quick view of the Model Monitoring data.
#
# 
#
# The summary page contains the following fields:
# * **Name**—the name of the model endpoint
# * **Version**—user configured version taken from model deployment
# * **Class**—the implementation class that is used by the endpoint
# * **Model**—user defined name for the model
# * **Labels**—user configurable tags that are searchable
# * **Uptime**—first request for production data
# * **Last Prediction**—most recent request for production data
# * **Error Count**—includes prediction process errors such as operational issues (For example, a function in a failed state), as well as data processing errors
# (For example, invalid timestamps, request ids, type mismatches etc.)
# * **Drift**—indication of drift status (no drift (green), possible drift (yellow), drift detected (red))
# * **Accuracy**—a numeric value representing the accuracy of model predictions (N/A)
#
# ```{note}
# Model Accuracy is currently under development.
# ```
#
# ### Model Endpoint Overview
# The Model Endpoints Overview screen displays general information about the selected model.
#
# 
#
# The Overview page contains the following fields:
# * **UUID**—the ID of the deployed model
# * **Model Class**—the implementation class that is used by the endpoint
# * **Model Artifact**—reference to the model's file location
# * **Function URI**—the MLRun function to access the model
# * **Last Prediction**—most recent request for production data
# * **Error Count**—includes prediction process errors such as operational issues (For example, a function in a failed state), as well as data processing errors
# (For example, invalid timestamps, request ids, type mismatches etc.)
# * **Accuracy**—a numeric value representing the accuracy of model predictions (N/A)
# * **Stream path**—the input and output stream of the selected model
#
# Use the ellipsis to view the YAML resource file for details about the monitored resource.
#
# ### Model Drift Analysis
# The Model Endpoints Drift Analysis screen provides performance statistics for the currently selected model.
#
# 
#
# Each of the following fields has both sum and mean numbers displayed. For definitions of the terms see [Common Terminology](#common-terminology)
# * **TVD**
# * **Hellinger**
# * **KLD**
#
# Use the ellipsis to view the YAML resource file for details about the monitored resource.
#
# ### Model Features Analysis
# The Features Analysis pane provides details of the drift analysis in a table format with each feature in the selected model on its own line.
#
# 
#
# The table is broken down into columns with both expected, and actual performance results. The expected column displays the results from the model training phase, and the actual column
# displays the results that came from live production data. The following fields are available:
# * **Mean**
# * **STD** (Standard deviation)
# * **Min**
# * **Max**
# * **TVD**
# * **Hellinger**
# * **KLD**
# * **Histograms**—the approximate representation of the distribution of the data. Hover over the bars in the graph for details.
#
# Use the ellipsis to view the YAML resource file for details about the monitored resource.
#
# ## Model Monitoring Using Grafana Dashboards
# You can deploy a Grafana service in your Iguazio instance and use Grafana Dashboards to view Model Monitoring details.
# There are three dashboards available:
# * [Overview Dashboard](#model-endpoints-overview-dashboard)
# * [Details Dashboard](#model-endpoint-details-dashboard)
# * [Performance Dashboard](#model-endpoint-performance-dashboard)
#
# ### Model Endpoints Overview Dashboard
# The Overview dashboard will display the model endpoint IDs of a specific project. Only deployed models with Model Monitoring option enabled are displayed.
# Endpoint IDs are URIs used to provide access to performance data and drift detection statistics of a deployed model.
#
# 
#
# The Overview screen provides details about the performance of all the deployed and monitored models within a project. You can change projects by choosing a new project from the
# **Project** dropdown. The Overview dashboard displays the number of endpoints in the project, the average predictions per second (using a 5-minute rolling average),
# the average latency (using a 1-hour rolling average), and the total error count in the project.
#
# Additional details include:
# * **Endpoint ID**—the ID of the deployed model. Use this link to drill down to the model performance and details screens.
# * **Function**—the MLRun function to access the model
# * **Model**—user defined name for the model
# * **Model Class**—the implementation class that is used by the endpoint
# * **First Request**—first request for production data
# * **Last Request**—most recent request for production data
# * **Error Count**—includes prediction process errors such as operational issues (For example, a function in a failed state), as well as data processing errors
# (For example, invalid timestamps, request ids, type mismatches etc.)
# * **Accuracy**—a numeric value representing the accuracy of model predictions (N/A)
# * **Drift Status**—no drift (green), possible drift (yellow), drift detected (red)
#
# At the bottom of the dashboard are heat maps for the Predictions per second, Average Latency and Errors. The heat maps display data based on 15 minute intervals.
# See [How to Read a Heat Map](#how-to-read-a-heat-map) for more details.
#
# Click an endpoint ID to drill down the performance details of that model.
#
# #### How to Read a Heat Map
# Heat maps are used to analyze trends and to instantly transform and enhance data through visualizations. This helps identify areas of interest quickly,
# and empower users to explore the data in order to pinpoint where there may be potential issues. A heat map uses a matrix layout with colour and shading to show the relationship between
# two categories of values (x and y axes), so the darker the cell, the higher the value. The values presented along each axis correspond to a cell which is then colour-coded to represent the relationship between
# the two categories. The Predictions per second heatmap shows the relationship between time, and the predictions per second, and the Average Latency per hour shows the relationship between
# time and the latency.
#
# To properly read the heat maps, follow the hierarchy of shades from the darkest (the highest values) to the lightest shades (the lowest values).
#
# ```{note}
# The exact quantitative values represented by the colors may be difficult to determine. Use the [Performance Dashboard](#model-endpoint-performance-dashboard) to see detailed results.
# ```
#
# ### Model Endpoint Details Dashboard
# The model endpoint details dashboard displays the real time performance data of the selected model in detail.
# Model performance data provided is rich and is used to fine tune or diagnose potential performance issues that may affect business goals.
# The data in this dashboard changes based on the selection of the project and model.
#
# This dashboard is broken down into three panes:
#
# 1. [Project and model summary](#project-and-model-summary)
# 2. [Analysis panes](#analysis-panes)
# 1. Overall drift analysis
# 2. Features analysis
# 3. [Incoming features graph](#incoming-features-graph)
#
# 
#
# #### Project and Model Summary
# Use the dropdown to change the project and model. The dashboard presents the following information about the project:
# * **Endpoint ID**—the ID of the deployed model
# * **Model**—user defined name for the model
# * **Function URI**—the MLRun function to access the model
# * **Model Class**—the implementation class that is used by the endpoint
# * **Prediction/s**—the average number of predictions per second over a rolling 5-minute period
# * **Average Latency**—the average latency over a rolling 1-hour period
# * **First Request**—first request for production data
# * **Last Request**—most recent request for production data
#
#
# Use the [Performance](#model-endpoint-performance-dashboard) and [Overview](#model-endpoints-overview-dashboard) buttons to view those dashboards.
#
# #### Analysis Panes
# This pane is broken down into sections: Overall Drift Analysis and Features Analysis.
# The Overall Drift Analysis pane provides performance statistics for the currently selected model.
# * **TVD** (sum and mean)
# * **Hellinger** (sum and mean)
# * **KLD** (sum and mean)
#
#
# The Features Analysis pane provides details of the drift analysis for each feature in the selected model.
# This pane includes five types of statistics:
# * **Actual** (min, mean and max)—results based on actual live data stream
# * **Expected** (min, mean and max)—results based on training data
# * **TVD**
# * **Hellinger**
# * **KLD**
#
# #### Incoming Features Graph
# This graph displays the performance of the features that are in the selected model based on sampled data points from actual feature production data.
# The graph displays the values of the features in the model over time.
#
# ### Model Endpoint Performance Dashboard
# Model endpoint performance displays performance details in graphical format.
#
# 
#
# This dashboard is broken down into 5 graphs:
# * **Drift Measures**—the overall drift over time for each of the endpoints in the selected model
# * **Average Latency**—the average latency of the model in 5 minute intervals, for 5 minutes and 1 hour rolling windows
# * **Predictions/s**—the model predictions per second displayed in 5 second intervals for 5 minutes (rolling)
# * **Predictions Count**—the number of predictions the model makes for 5 minutes and 1 hour rolling windows
#
# ### Configuring Grafana Dashboards
# You will need to make sure you have a Grafana service running in your Iguazio instance.
# If you do not have a Grafana service running,
# see <a href="https://www.iguazio.com/docs/latest-release/services/fundamentals/#create-new-service" target="_blank">Creating a New Service</a> to create and configure it.
#
# 1. Make sure you have the `mlrun-api` as a Grafana data source configured in your Grafana service. If not,
# add it by:
# 1. Open your grafana service.
# 2. Navigate to `Configuration -> Data Sources`.
# 3. Press `Add data source`.
# 4. Select the `SimpleJson` datasource and configure the following parameters.
# ```Name: mlrun-api
# URL: http://mlrun-api:8080/api/grafana-proxy/model-endpoints
# Access: Server (default)
#
# ## Add a custom header of:
# if working with Iguazio 3.0.x:
# X-V3io-Session-Key: <YOUR ACCESS KEY>
# if working with Iguazio 3.2.x:
# cookie: session=j:{"sid": "<YOUR ACCESS KEY>"}
# ```
# 5. Press `Save & Test` for verification. You will receive a confirmation with either a success, or a failure message.
#
# 2. Download the following monitoring dashboards:
# * {download}`Overview <./dashboards/overview.json>`
# * {download}`Details <./dashboards/details.json>`
# * {download}`Performance <./dashboards/performance.json>`
#
# 3. Import the downloaded dashboards to your Grafana service.
# To import the dashboards into your Grafana service:
# 1. Navigate to your Grafana service in the Services list and press on it
# 2. Press the dashboards icon in left menu
# 3. In the dashboard management screen press the IMPORT button, and select one file to import. Repeat this step for each dashboard.
#
# ```{note}
# You will need to train and deploy a model to see results in the dashboards.
# The dashboards will immediately display data if you already have a model trained and running with production data.
# ```
| docs/model_monitoring/model-monitoring-deployment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fit double components
#
# In this notebook we will create a synthetic SN3 spectrum with two Halpha components. We will then fit the spectrum and plot the fit.
#
# Imports
import sys
sys.path.insert(0, '/home/carterrhea/Documents/LUCI/') # Location of Luci
from LUCI.LuciSim import Spectrum
import matplotlib.pyplot as plt
from astropy.io import fits
import numpy as np
from scipy import interpolate
import LUCI.LuciFit as lfit
import keras
# +
# Create first spectrum
# Build a synthetic SN3-filter spectrum with five emission lines using
# LUCI's simulator, modelled with a sinc-convolved Gaussian profile.
lines = ['Halpha', 'NII6583', 'NII6548', 'SII6716', 'SII6731']
fit_function = 'sincgauss'
ampls = [2, 1, 0.3, 0.15, 0.25] # relative line amplitudes — just randomly chosen
velocity = 0 # km/s
broadening = 20 # km/s (line broadening)
filter_ = 'SN3'
resolution = 5000
snr = 50
# NOTE(review): Spectrum is a LUCI class; assumed to return (wavenumber axis, flux) — confirm.
spectrum_axis, spectrum = Spectrum(lines, fit_function, ampls, velocity, broadening, filter_, resolution, snr).create_spectrum()
# +
# Now create the second spectrum and add them together
# A single Halpha component shifted by 200 km/s, summed with the first
# spectrum to produce a double-component Halpha line.
lines = ['Halpha']
ampls = [1] # Just randomly chosen
velocity = 200 # km/s (offset from the first component at 0 km/s)
spectrum_axis2, spectrum2 = Spectrum(lines, fit_function, ampls, velocity, broadening, filter_, resolution, snr).create_spectrum()
# Add them together
spectrum += spectrum2
# +
# And we can now take a look
# +
plt.figure(figsize=(10,6))
plt.plot(spectrum_axis, spectrum, color='black', label='Spectrum')
plt.xlim(14750, 15400)
plt.xlabel('Wavelength (cm-1)', fontsize=14)
plt.ylabel('Amplitude', fontsize=14)
# Mark the rest wavenumber (1e7 / wavelength-in-nm) of each line in the model.
plt.axvline(1e7/656.3, label='Halpha', color='blue', linestyle='--')
plt.axvline(1e7/658.3, label='NII6583', color='teal', linestyle='--')
plt.axvline(1e7/654.8, label='NII6548', color='green', linestyle='--')
# Bug fix: 671.6 nm and 673.1 nm are the [SII] doublet, not [NII] — they were
# created as 'SII6716'/'SII6731' in the `lines` list above, but the legend
# labels previously said 'NII6716'/'NII6731'.
plt.axvline(1e7/671.6, label='SII6716', color='magenta', linestyle='--')
plt.axvline(1e7/673.1, label='SII6731', color='violet', linestyle='--')
plt.legend(ncol=2)
plt.show()
# -
# We can clearly see that something is up with the Halpha line (because we threw a second one there of course). Next, we can fit the components.
# Machine Learning Reference Spectrum
# Load the reference wavenumber axis the ML model was trained on, then
# interpolate the synthetic spectrum onto that axis and normalise it.
ref_spec = fits.open('/home/carterrhea/Documents/LUCI/ML/Reference-Spectrum-R5000-SN3.fits')[1].data
channel = []
counts = []
for chan in ref_spec: # each record: (wavenumber, complex flux) — keep the real part
    channel.append(chan[0])
    counts.append(np.real(chan[1]))
# Restrict to the SN3 band (~14700-15600 cm-1): find the nearest channels.
min_ = np.argmin(np.abs(np.array(channel)-14700))
max_ = np.argmin(np.abs(np.array(channel)-15600))
wavenumbers_syn = channel[min_:max_]
# Piecewise-linear interpolation onto the reference axis, then scale so the
# peak amplitude is 1 (the ML predictor expects normalised input).
f = interpolate.interp1d(spectrum_axis, spectrum, kind='slinear')
sky_corr = (f(wavenumbers_syn))
sky_corr_scale = np.max(sky_corr)
sky_corr = sky_corr/sky_corr_scale
# Fit six components: the five physical lines plus a second Halpha.
# The two integer lists group lines by shared velocity / broadening —
# group 2 lets the extra Halpha move independently of group 1.
fit = lfit.Fit(spectrum, spectrum_axis, wavenumbers_syn, 'sincgauss',
               ['Halpha', 'NII6583', 'NII6548','SII6716', 'SII6731', 'Halpha'],
               [1,1,1,1,1,2], [1,1,1,1,1,2],
                # single-component alternative kept for comparison:
                #['Halpha', 'NII6583', 'NII6548','SII6716', 'SII6731'],
                #[1,1,1,1,1], [1,1,1,1,1],
               keras.models.load_model('/home/carterrhea/Documents/LUCI/ML/R5000-PREDICTOR-I-SN3'),
               bayes_bool=True
              )
fit_dict = fit.fit()
# Recovered velocities for each component group (input was 0 and 200 km/s).
fit_dict['velocities']
# Overlay the fitted model on the synthetic spectrum.
plt.plot(spectrum_axis, spectrum, label='spectrum')
plt.plot(spectrum_axis, fit_dict['fit_vector'], label='fit vector', linestyle='--')
plt.xlim(14800, 15300)
plt.legend()
fit_dict
| Examples/Double-Component-Fit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: twitter
# language: python
# name: twitter
# ---
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
import warnings; warnings.filterwarnings('ignore')
# Load the cleaned Benioff tweet data and the raw Salesforce (CRM) prices.
b_df = pd.read_csv('data/processed/benioff_cleaned.csv')
dfs = pd.read_csv('data/raw/CRM.csv')
# + jupyter={"outputs_hidden": true} tags=[]
b_df['Date'] = pd.to_datetime(b_df['Date'])
dfs.rename(columns={'Date':'date'}, inplace=True)
# -
b_df.head(20)
b_df['Close'].std() # Calculating standard deviation (volatility) of Salesforce.com stock price
b_df['Tweets'].count() # Total count of Tweets for Benioff
b_df['Polarity_mean'].describe()
# NOTE(review): chained assignment — prefer b_df.loc[0, 'Return'] = 0; under
# newer pandas this may warn or silently fail to modify b_df. Confirm.
b_df['Return'][0] = 0
# NOTE(review): `df` is never defined in this notebook — likely a leftover
# cell from another session; this line raises NameError as-is.
df
# +
fig = go.Figure()
fig = px.bar(b_df, x='Date', y='Return', color='Polarity_chg')
fig.show()
# +
fig = go.Figure()
fig = px.bar(b_df, x='Date', y='Return', color='Polarity_mean')
fig.show()
# -
# ### Sentiment Analysis using VADER
# + jupyter={"outputs_hidden": true} tags=[]
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
sia = SIA()
results = []
for tweet in df1['full_text']:
score = sia.polarity_scores(tweet)
score['headline'] = tweet
results.append(score)
df1['compound'] = pd.DataFrame(results)['compound'] # Creating the 'compound' column to store the results from the sentiment analysis
# + jupyter={"outputs_hidden": true} tags=[]
df['Tweets']
# -
plt.scatter('date', 'compound', data=df1)
# ### Sentiment Analysis
# +
import re
# Clean The Data
def cleantext(text):
text = re.sub(r"@[A-Za-z0-9]+", "", text) # Remove Mentions
text = re.sub(r"#", "", text) # Remove Hashtags Symbol
text = re.sub(r"RT[\s]+", "", text) # Remove Retweets
text = re.sub(r"https?:\/\/\S+", "", text) # Remove The Hyper Link
return text
# -
# Clean The Text
df1['full_text'] = df1['full_text'].apply(cleantext)
df1.head(10)
# +
from textblob import TextBlob
# Get The Subjectivity
def sentiment_analysis(ds):
sentiment = TextBlob(ds['full_text']).sentiment
return pd.Series([sentiment.subjectivity, sentiment.polarity])
# Adding Subjectivity & Polarity
df1[["subjectivity", "polarity"]] = df1.apply(sentiment_analysis, axis=1)
df1
# -
# Polarity Change: ratio of each row's polarity to the next row's polarity.
# NOTE(review): `df1` is not defined in this notebook chunk — presumably the
# tweet DataFrame from an earlier session; verify before running.
df1["polarity_chg"] = df1["polarity"]
for i in range(len(df1)-1):
    # NOTE(review): chained `.iloc[i] =` assignment is fragile in modern
    # pandas (SettingWithCopy); also divides by zero when the next row's
    # polarity is 0, yielding inf — confirm intended.
    df1["polarity_chg"].iloc[i]= (df1["polarity"].iloc[i]/df1["polarity"].iloc[i+1])
df1
# +
import plotly.graph_objects as go

# Bubble chart: tweet polarity (x) vs. stock price change (y); bubble area
# is proportional to the tweet's like count.
fig = go.Figure()
_ = fig.add_trace(go.Scatter(
    x= df1["polarity"],
    y= dfs['var'],
    mode="markers",
    marker={"size": df1['favorite_count'], "sizemode": "area",
            "sizeref": 2*max(df1['favorite_count']/1000)},
    hovertemplate= "Polarity of Tweets: %{x:.1f}<br>" +
                   "Stock Price Change: %{y:.1f}<br>" +
                   "Number of Likes: %{marker.size:,}" +
                   "<extra></extra>"
))
fig.update_layout(
    plot_bgcolor="white",
    xaxis={"title": {"text": "Sentiment Change", "font": {"size": 16}}},
    yaxis={"title": {"text": "Stock Price Change", "font": {"size": 16}}},
    # Bug fix: `dfs` holds CRM (Salesforce) prices (loaded from data/raw/CRM.csv
    # above), but the title previously said "Dropbox's Stock Price Change".
    title={'text': "<NAME> Tweets Sentiment Change vs Salesforce's Stock Price Change", "font": {"size": 16}}
)
fig.show()
# + jupyter={"outputs_hidden": true} tags=[]
import matplotlib.pyplot as plt
from wordcloud import WordCloud
allwords = " ".join([twts for twts in df['Tweets']])
wordCloud = WordCloud(width = 1000, height = 1000, random_state = 21, max_font_size = 119).generate(allwords)
plt.figure(figsize=(20, 20), dpi=80)
plt.imshow(wordCloud, interpolation = "bilinear")
plt.axis("off")
plt.show()
# -
df.dtypes
| notebooks/drafts/benioff_nb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <center> NumPy <center>
# <img src = "https://github.com/saeed-saffari/Python-for-Economics-2021-ATU/blob/main/MSc%20Planning%20Economics/Numpy.png?raw=true">
# ## Installation
#
# - Conda install numpy
# - pip install numpy
# - pip install --upgrade numpy
# ## Import
from numpy import sin
x = 3
sin(x)
import numpy
numpy.sin(x)
import numpy as np
np.sin(x)
np.cos(x)
np.tan(np.pi/2)
# ## Specification
x = [1,2,3,4,5,6]
print(x)
print(x*2)
type(x)
y = np.array(x)
print(y)
type(y)
y*2
# +
y2 = np.array([
[1,2,3],
[4,5,6],
[7,8,9]
])
print(y2)
# -
a = np.arange(1,12, 0.5)
a
np.shape(y2)
np.size(y2)
np.arange(1,11)
np.arange(1,11).reshape(5,2)
b = np.linspace(0,100, 5)
print(b)
c = np.linspace(-5,5, 100)
print(c)
c.round(3)
c = np.zeros((5,5))
print(c)
d = np.ones((6,6))
print(d)
e = np.eye(10)
print(e)
f = np.diag([2,3,7,15])
print(f)
g = np.full((4,3), 7)
print(g)
print(y2)
# 3x4 matrix of consecutive integers, reshaped from a 1-D range.
y3 = np.arange(-5,7).reshape((3,4))
print(y3)
# Matrix product: (3x3) @ (3x4) -> (3x4).
times_1 = np.matmul(y2, y3)
print(times_1)
# The @ operator is equivalent to np.matmul for 2-D arrays.
times2 = y2 @ y3
print(times2)
# Element-wise arithmetic broadcasts over the whole array.
y2*2
y2 = y2*2 +3
print(y2)
y3
y_tra = np.transpose(y3)
print(y_tra)
# NOTE(review): y2's rows are linearly dependent (its determinant is 0), so
# np.linalg.inv may raise LinAlgError or return numerically meaningless
# values — confirm this example is intentional.
y_inv = np.linalg.inv(y2)
print(y_inv)
y_det = np.linalg.det(y2)
print(y_det)
h = np.random.randint(1, 100, (10,10))
print(h)
i = np.random.normal(0, 1, 1000000)
print(i)
import matplotlib.pyplot as plt
plt.hist(i, 100);
np.mean(i)
np.var(i)
np.std(i)
np.mean(i, axis=0)
a = [1,2,3,45,6]
a
a[2]
print(y3)
y3[:,3]
y3[2,:]
y3[:,1:3]
v = np.full((5,5),2)
print(v)
zro = np.zeros((2,2))
print(zro)
v[0:2,1:3] = zro
print(v)
v[3,3] = 15
print(v)
| MSc Planning Economics/3. NumPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
# default_exp core
# -
#hide
pip install nbd_colab
#hide
from nbdev.showdoc import *
#export
def display_all(df,Nrows=5,Ncolumns=1000):
    """Display *df* in the notebook without pandas truncating rows or columns.

    Parameters
    ----------
    df : pandas.DataFrame (or any IPython-displayable object)
    Nrows : int, maximum number of rows to show (default 5)
    Ncolumns : int, maximum number of columns to show (default 1000)
    """
    # Imports are kept local so the nbdev-exported module stays import-light.
    import pandas as pd
    from IPython.display import display
    # option_context accepts several option/value pairs at once; the original
    # used two nested context managers to the same effect.
    with pd.option_context("display.max_rows", Nrows,
                           "display.max_columns", Ncolumns):
        display(df)
#export
def Hello_World():
    """Print the canonical greeting to stdout; implicitly returns None."""
    greeting = 'Hello World'
    print(greeting)
#export
def Hello_to(name):
    """Return a personalised greeting string for *name*."""
    return 'Hello {}!'.format(name)
| 00_core.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# +
# load text file, read it into strings and then split on new lines
# Use a context manager so the file handle is closed deterministically —
# the original open() left the handle open for the life of the kernel.
with open(r'Birth Nat 2016.txt') as file:
    lineReader = file.read()
lr2016 = lineReader.split('\n')
# -
def nat2016Parser( line ):
    """Parse one fixed-width record from the 2016 US natality file.

    Each field is extracted by its 0-based character-slice position; values
    are returned as raw strings (no type conversion) in a pandas Series
    keyed by a readable field name. Upper-case names in the inline comments
    are the variable names from the original record layout.

    NOTE(review): the slice offsets are reproduced from the original author
    — verify against the official NCHS 2016 record layout before reuse.
    """
    ret_dict = dict(
        birth_year = line[8:12],
        birth_month = line[12:14],
        birth_time = line[18:22],
        birth_day_of_wk = line[22:23],
        birth_place = line[31:32],
        mothers_age_imputed = line[72:73],
        mothers_age = line[74:76],
        mothers_nativity = line[83:84],
        mothers_residence_status = line[103:104],
        mothers_race = line[109:110], # MBRACE
        mothers_race_imputed = line[110:111],
        mothers_hispanic_origin = line[114:115], # re-code to 0 for non-hispanic and 1 for hispanic (rest/all)
        mothers_hispanic_origin2 = line[116:117],
        paternity_acknow = line[118:119],
        mothers_marital_status = line[119:120],
        mothers_maristat_imputed = line[120:121],
        mothers_education = line[123:124],
        fathers_age = line[146:148],
        fathers_race = line[155:156], # FBRACE
        fathers_hispanic_origin = line[159:160], # FHISP_R
        fathers_education = line[162:163],
        prior_living_births = line[170:172], # PRIORLIVE
        prior_dead_births = line[172:174], # PRIORDEAD
        prior_terminations = line[174:176], # PRIORTERM
        mo_since_last_live_birth = line[197:200], # ILLB_R
        mo_prenatal_care_began = line[223:225], # PRECARE
        n_prenatal_visits = line[237:239],
        wic = line[250:251],
        cigs_before = line[252:254],
        cigs_tri1 = line[254:256],
        cigs_tri2 = line[256:258],
        cigs_tri3 = line[258:260],
        mothers_height = line[279:281],
        mothers_height_reporting = line[281:282],
        mothers_bmi = line[282:286],
        mothers_bmi_recode = line[286:287],
        pre_preg_lbs = line[291:294],
        pre_preg_lbs_reporting = line[294:295],
        delivery_lbs = line[298:301],
        delivery_lbs_reporting = line[302:303],
        weight_gain = line[303:305], # WTGAIN
        weight_gain_reporting = line[306:307], # F_WTGAIN
        pre_preg_diab = line[312:313],
        pre_preg_diab_reporting = line[318:319],
        gest_diab = line[313:314],
        gest_diab_reporting = line[319:320],
        pre_preg_hypten = line[314:315],
        pre_preg_hypten_reporting = line[320:321],
        gest_hypten = line[315:316],
        gest_hypten_reporting = line[321:322],
        hypten_ecl = line[316:317],
        hypten_ecl_reporting = line[322:323],
        prev_preterm_birth = line[317:318],
        prev_preterm_birth_reporting = line[323:324],
        infertility_treatment = line[324:325],
        fertil_enhance = line[325:326],
        asst_repro_tech = line[326:327],
        n_prev_cesar = line[331:333],
        no_risk_reported = line[336:337],
        gonorrhea = line[342:343],
        syphilis = line[343:344],
        chlamydia = line[344:345],
        hepB = line[345:346],
        hepC = line[346:347],
        no_infection_reported = line[352:353],
        success_ext_cep = line[359:360],
        fail_ext_cep = line[360:361],
        induced_labor = line[382:383],
        aug_labor = line[383:384],
        steriods = line[384:385],
        antibiotics = line[385:386],
        chorioamnionitis = line[386:387],
        anesthesia = line[387:388],
        fetal_present_at_birth = line[400:401],
        final_delivery_method = line[401:402],
        trial_of_labor_attempt = line[402:403],
        maternal_transfusion = line[414:415],
        perineal_laceration = line[415:416],
        rupt_uterus = line[416:417],
        unplanned_hyster = line[417:418],
        admit_to_IC = line[418:419],
        attendant_at_birth = line[432:433],
        mother_transferred = line[433:434],
        delivery_payment_source = line[434:435],
        APGAR_score_5min = line[443:445],
        APGAR_score_10min = line[447:449],
        plurality = line[453:454],
        pluarlity_imputed = line[455:456],
        sex_of_infant = line[474:475],
        sex_of_infant_imputed = line[475:476],
        last_norm_menses_mo = line[476:478],
        last_norm_menses_yr = line[480:484],
        combined_gestation_imputed = line[487:488],
        obst_est_of_gestation_used = line[488:489],
        combined_gestation_wk = line[489:491],
        obst_est_edit_wk = line[498:500],
        birth_weight_gm = line[503:507],
        assist_vent_immed = line[516:517],
        assist_vent_after6 = line[517:518],
        admit_NICU = line[518:519],
        surfactant = line[519:520],
        antibiotics_for_newborn = line[520:521],
        seizures = line[521:522],
        anencephaly = line[536:537],
        meningo_spina_bif = line[537:538],
        cyn_cong_heart_disease = line[538:539],
        cong_diaph_hernia = line[539:540],
        omphalocele = line[540:541],
        gastroschisis = line[541:542],
        limb_reduc_defect = line[548:549],
        cleft_lip_or_palate = line[549:550],
        cleft_palate_only = line[550:551],
        down_syndr = line[551:552],
        suspect_chromo_disorder = line[552:553],
        hypospadias = line[553:554],
        no_cong_anamolies_checked = line[560:561],
        infant_transferred = line[566:567],
        infant_living_at_report = line[567:568],
        infant_breastfed_at_discharge = line[568:569])
    return pd.Series( ret_dict )
# %time df2016_P1 = pd.DataFrame(list(map(lambda x: nat2016Parser(lr2016[x]),range(0, int(len(lr2016)/4)))))
# %time df2016_P2 = pd.DataFrame(list(map(lambda x: nat2016Parser(lr2016[x]),range(int(len(lr2016)/4), int(len(lr2016)/2)))))
# %time df2016_P3 = pd.DataFrame(list(map(lambda x: nat2016Parser(lr2016[x]),range(int(len(lr2016)/2), int(len(lr2016)*3/4)))))
# %time df2016_P4 = pd.DataFrame(list(map(lambda x: nat2016Parser(lr2016[x]),range(int(len(lr2016)*3/4), len(lr2016)))))
df2016 = pd.concat([df2016_P1, df2016_P2, df2016_P3, df2016_P4])
df2016.to_csv('2016_data', index = False)
# ---------------
| notebooks/Mo Parser 2016.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install influxdb-client
# +
from datetime import datetime
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
# You can generate a Token from the "Tokens Tab" in the UI
# NOTE(review): "<PASSWORD>==" is a redacted placeholder — supply a real
# token (ideally from an environment variable, not source control).
token = "<PASSWORD>=="
org = "usde"
bucket = "training"
client = InfluxDBClient(url="http://influxdb:8086", token=token)
# SYNCHRONOUS write mode: each write() call blocks until the point is sent.
write_api = client.write_api(write_options=SYNCHRONOUS)
# -
# ## run the following cell to demonstrate that fire is not detected
import time
# Emit a "no smoke" event every 10 seconds, forever — interrupt the kernel
# to stop this cell before running the next one.
while True:
    point = Point("SmokeSensorEvent").tag("sensor", "S1").field("smoke", False)
    print(point)
    write_api.write(bucket, org, point)
    time.sleep(10)
# ## run the following cell to demonstrate detecting fire
import time
# Emit a "smoke detected" event every 10 seconds, forever — interrupt the
# kernel to stop.
while True:
    point = Point("SmokeSensorEvent").tag("sensor", "S1").field("smoke", True)
    print(point)
    write_api.write(bucket, org, point)
    time.sleep(10)
# ## close client
# Close the client through its public API rather than invoking the __del__
# dunder directly — close() releases the HTTP session and won't be run a
# second time by garbage collection.
client.close()
| flux_firealarm/datagen/smoke_sensor_simulator.ipynb |
# ---
# title: "Selecting The Best Alpha Value In Ridge Regression"
# author: "<NAME>"
# date: 2017-12-20T11:53:49-07:00
# description: "How to select the best alpha value when conduct in ridge regression in scikit-learn for machine learning in Python."
# type: technical_note
# draft: false
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# ## Preliminaries
# Load libraries
from sklearn.linear_model import RidgeCV
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler
# ## Load Boston Housing Dataset
# Load data
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed
# in 1.2; this cell only runs on older scikit-learn versions. Consider the
# California housing dataset as a replacement.
boston = load_boston()
X = boston.data
y = boston.target
# ## Standardize Features
#
# Note: Because in linear regression the value of the coefficients is partially determined by the scale of the feature, and in regularized models all coefficients are summed together, we must make sure to standardize the feature prior to training.
# Standarize features
scaler = StandardScaler()
X_std = scaler.fit_transform(X)
# ## Create Ridge Regression With Candidate Alpha Values
# Create ridge regression with three possible alpha values
regr_cv = RidgeCV(alphas=[0.1, 1.0, 10.0])
# ## Fit Ridge Regression
#
# scikit-learn includes a `RidgeCV` method that allows us select the ideal value for $\alpha$:
# Fit the linear regression
model_cv = regr_cv.fit(X_std, y)
# ## View Best Model's Alpha Value
# View alpha
model_cv.alpha_
| docs/machine_learning/linear_regression/selecting_best_alpha_value_in_ridge_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PyCharm (scripts)
# language: python
# name: pycharm-268fb1dd
# ---
# + pycharm={"name": "#%%\n"}
import matplotlib
import pandas as pd
from sklearn.preprocessing import StandardScaler
from common.clustering import perform_clustering_score_analysis, k_means_multiple_dim_silhouette, \
hierarchical_multiple_dim_silhouette, plot_clustering_scores, hierarchical_multiple_dim_calinski_harabasz, \
k_means_multiple_dim_calinski_harabasz
from common.outliers import find_outliers_in_multiple_columns, drop_outliers_from_dataset
matplotlib.rcParams['figure.figsize'] = [7, 7]
# -
# ### Constants and settings
# + pycharm={"name": "#%%\n"}
numbers_of_clusters = range(2, 20)
columns_used_for_clustering = ["User_Score", "Critic_Score", "EU_Sales", "NA_Sales", "JP_Sales", "Year_of_Release"]
# -
# ### Load and prepare dataset
# Please note that the `_preprocessed` file is required. To use this notebook
# properly, please execute the preprocessing one first to generate proper dataset
# file.
# + pycharm={"name": "#%%\n"}
# load dataset from file, remove unused columns
# Requires the "_preprocessed" workbook produced by the preprocessing notebook.
data = pd.read_excel("../data/games_sales_2016_preprocessed.xlsx", index_col=0)
data = data.filter(items=columns_used_for_clustering)
# standardization — zero mean / unit variance so no feature dominates the
# clustering distance metric
standardization = StandardScaler().fit(data[columns_used_for_clustering])
data[columns_used_for_clustering] = standardization.transform(data[columns_used_for_clustering])
# outliers detection
# NOTE(review): find_outliers_in_multiple_columns is a project helper;
# presumably a k-NN-based criterion with k=5 neighbours and a 3% threshold
# — confirm against common/outliers.py.
k_neighbours = 5
r_percentage = 0.03
outliers, non_outliers = find_outliers_in_multiple_columns(data, columns_used_for_clustering, k_neighbours, r_percentage)
# remove outliers
data = drop_outliers_from_dataset(data, outliers)
# -
# ## Clustering - analysis
# Used variables:
# - Critic_Score
# - User_Score
# - EU_Sales
# - NA_Sales
# - JP_Sales
# - Year_of_Release
# ### Silhouette score analysis
# First, we perform clustering analysis using the **silhouette** coefficient, to get the optimal number of clusters.
# This will take some time ...
# + pycharm={"name": "#%%\n"}
print("Performing Silhouette score analysis ... ")
print("\nUsed method: K-Means")
k_means_scores, k_means_errors = \
perform_clustering_score_analysis(data, columns_used_for_clustering, numbers_of_clusters,
k_means_multiple_dim_silhouette, 10)
print("\nUsed method: Hierarchical")
hierarchical_scores, hierarchical_errors = \
perform_clustering_score_analysis(data, columns_used_for_clustering, numbers_of_clusters,
hierarchical_multiple_dim_silhouette, 2)
scores = [k_means_scores, hierarchical_scores]
errors = [k_means_errors, hierarchical_errors]
methods = ["K-Means", "Hierarchical"]
plot_clustering_scores(numbers_of_clusters, scores, errors, methods, "Silhouette")
# -
# ### Caliński-Harabasz score analysis
# We also perform clustering analysis using different score calculating technique - **Calinski-Harabasz** score.
# This will also take some time...
# + pycharm={"name": "#%%\n"}
print("Performing Calinski-Harabasz score analysis ... ")
print("\nUsed method: K-Means")
k_means_scores, k_means_errors = \
perform_clustering_score_analysis(data, columns_used_for_clustering, numbers_of_clusters,
k_means_multiple_dim_calinski_harabasz, 10)
print("\nUsed method: Hierarchical")
hierarchical_scores, hierarchical_errors = \
perform_clustering_score_analysis(data, columns_used_for_clustering, numbers_of_clusters,
hierarchical_multiple_dim_calinski_harabasz, 2)
scores = [k_means_scores, hierarchical_scores]
errors = [k_means_errors, hierarchical_errors]
methods = ["K-Means", "Hierarchical"]
plot_clustering_scores(numbers_of_clusters, scores, errors, methods, "Calinski-Harabasz")
| notebooks/whole_dataset_clustering_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib
import matplotlib.pyplot as plt
import scipy
import scipy.ndimage
from keras.utils import np_utils
import numpy as np
from keras.datasets import mnist
# -
# # Mnist: classification de chiffres manuscrits
#
# Dans ce TD, nous allons tenter de reconnaître des chiffres manuscrits avec différents algorithmes de machine learning, à l'aide de la librairie python sklearn.
# ## Téléchargement des données
(X_train_base, y_train_base), (X_test_base, y_test_base) = mnist.load_data()
# ## Aperçu des données
# +
print("nombre d'image en entrée ",X_train_base.shape,X_test_base.shape)
print("taille sortie",y_train_base.shape,y_test_base.shape)
print(y_train_base[0:10],"les sorties des 10 premieres images")
# -
# Et pour la première image :
plt.imshow(X_train_base[0])
# ## Visualisation des données
# +
def plot_10_by_10_images(images):
    """Plot the first 100 MNIST images in a 10 by 10 table.

    The images are cropped (rows/cols 3..24) so that they appear
    reasonably close together.
    """
    fig = plt.figure()
    images = [image[3:25, 3:25] for image in images]
    for x in range(10):
        for y in range(10):
            # subplot positions are 1-based, hence the +1 here ...
            ax = fig.add_subplot(10, 10, 10*y+x+1)
            # ... but image indices are 0-based. Bug fix: the original used
            # images[10*y+x+1], which skipped image 0 and read one image
            # past the grid (index 100).
            ax.matshow(images[10*y+x], cmap = matplotlib.cm.binary)
            plt.xticks(np.array([]))
            plt.yticks(np.array([]))
    plt.show()

plot_10_by_10_images(X_train_base)
# -
# ## Aplatir les images
# Les modèles de ML classiques ne prennent pas en compte l'aspect spatial des données. Il est nécessaire d'envoyer l'image sous forme de vecteur pour qu'elle soit reconnue à l'entrée du modèle
# Flatten each 28x28 image into a 784-element vector (classical ML models
# take flat feature vectors, not 2-D images) and scale pixels to [0, 1].
nb_classes=10
X_train = X_train_base.reshape(60000, 784)
X_test = X_test_base.reshape(10000, 784)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
y_train = y_train_base
y_test = y_test_base
print(X_train_base.shape)
print(X_train.shape)
# ## Création d'un modèle linéaire
# +
import sklearn.linear_model
model=sklearn.linear_model.LogisticRegression()
model.fit(X_train,y_train) #trou
# -
predictions=model.predict(X_test) #trou
predictions
from sklearn.metrics import accuracy_score
accuracy=accuracy_score(predictions,y_test)
print(accuracy)
# Notre baseline est de 92% de taux de bonnes predictions.
# ## Test du modèle Random Forest
from sklearn.ensemble import RandomForestClassifier
model=RandomForestClassifier(n_estimators=7,verbose=1,max_features=10)
model.fit(X_train,y_train)
predictions=model.predict(X_test)
accuracy=accuracy_score(predictions,y_test) #trou
print(accuracy)
# - A faire : modifier les paramètres de RandomForestClassifier pour améliorer le score. Il est au moins possible d'arriver à 97% en changeant juste les valeurs de n_estimators et max_features.
# La documentation est disponible à cette adresse :
# http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
# ## Test du Support Vector Machine(svm)
#
# - présentation du svm
from sklearn import svm
model=svm.SVC(C=1.0, kernel="rbf", degree=3)
model.fit(X_train,y_train)
predictions=model.predict(X_test)
accuracy=accuracy_score(predictions,y_test)
print(accuracy)
# - A faire : modifier les paramètres de svm.SVC pour améliorer le score. Il est au moins possible d'arriver à 95%.
# La documentation est disponible à cette adresse :
# http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
# ## Conclusion
# - bonnes predictions pour les modèles de ML classiques
# - scores toujours en dessous d'un être humain
# - score qui continuera à descendre plus on augmente la taille des images
# ## Bonus
# - visualiser les échantillons où le modèle s'est trompé de beaucoup
| formation/TP-02-Reconnaissance de chiffres manuscrits/ML_mnist_correction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # **트레이딩 전략과 구현 6장 | 볼린저 밴드 매매2 | 반전매매 기법**
# **[파이썬 증권 데이터 분석 6장 | nbviewer](https://nbviewer.jupyter.org/github/INVESTAR/StockAnalysisInPython/tree/master/06_Trading_Strategy/)**
#
# # **1 반전 (Reversals) 매매기법**
# - **<span style="color:orange">일중 강도율 (Intraday Intensity)</span>** 와 **<span style="color:orange">매집 분산율 (AD%)</span>** 을 기술적 지표로 활용
# - 둘 중 **<span style="color:orange">일중 강도율(II%)</span>** 하나만 활용하여 매매한다
# - 매수 : 주가 하단밴드 부근에서 W패턴을 나타내고, 강제 지표가 확증할 때(%b가 0.05보다 작고,II% 가 0보다 클 때)
# - 매도 : 상단 밴드 부근에서 일련으 주가 Tag가 일어나며, 약세 지표가 확증할 때 (%b가 0.95보다 크고, II%가 0보다 작을 때)
#
# ## **01 데이터 불러오기 + 볼린저 밴드 추가**
# +
import os
import pandas as pd
# Load SK Hynix (ticker 000660) daily OHLCV data, using a local CSV cache when present.
file_save = "data/boillingeSkHy.csv"
if os.path.isfile(file_save):
    data_df = pd.read_csv(file_save)
    data_df['Date'] = pd.to_datetime(data_df['Date'])  # restore the datetime dtype lost in the CSV round-trip
    data_df = data_df.set_index('Date')
else:
    from FinanceDataReader import DataReader as fdr
    data_df = fdr("000660", "2019-01-01")
    data_df.to_csv(file_save)  # populate the cache for subsequent runs
data_df.columns = [_.lower() for _ in data_df.columns]  # normalize column names to lowercase
data_raw = data_df.copy()  # keep an untouched copy before indicator columns are added
data_df.tail(3)
# -
# ## **02 일중 강도율**
# 거래 범위 내에서 **<span style="color:orange">종가의 위치</span>** 를 토대로 **<span style="color:orange">종목내 자금 흐름</span>** 을 설명한다
#
# $$ 일중강도 = {\frac{2 \times close-high-low}{high-low}} \times volume $$
# $$ 일중 강도율 = {\frac{ 일중강도 (21일) 합계 }{ 거래량 (21일) 합계 }} \times 100 $$
# +
# Compute the 20-day Bollinger Bands.
data_df['MA20'] = data_df['close'].rolling(window=20).mean()
data_df['stddev'] = data_df['close'].rolling(window=20).std()
data_df['upper'] = data_df['MA20'] + (data_df['stddev'] * 2)
data_df['lower'] = data_df['MA20'] - (data_df['stddev'] * 2)
data_df['PB'] = (data_df['close'] - data_df['lower'])\
    / (data_df['upper'] - data_df['lower'])  # %b: position of the close within the band (0 = lower band, 1 = upper band)

# Intraday Intensity (II): money-flow proxy based on where the close sits in the day's range.
data_df['II'] = (2 * data_df['close'] - data_df['high'] - data_df['low'])\
    / (data_df['high'] - data_df['low']) * data_df['volume']
# II% over 21 days: rolling II sum normalized by rolling volume, as a percentage.
data_df['IIP21'] = data_df['II'].rolling(window=21).sum()\
    / data_df['volume'].rolling(window=21).sum()*100
data_df = data_df.dropna()  # drop warm-up rows where the rolling windows are undefined
data_df.tail(3)
# -
# ## **03 Visualization**
# 결과값 시각화 작업
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'

# Loading ... Visualization Modules
import matplotlib.pyplot as plt
import warnings
plt.style.use('seaborn')
warnings.simplefilter(action='ignore', category=FutureWarning)

from matplotlib.font_manager import fontManager as fm  # to check which fonts are installed

# Pick the first Korean-capable font that is actually installed, so Hangul
# labels render. The original built a filtered list whose length was either
# 0 or len(font_list_check) and tested "> 1" — a roundabout membership test
# that also reused "_" for three different things. A set membership check is
# equivalent and direct.
font_list_check = ['D2Coding', 'NanumGothicCoding', 'NanumGothic']
installed_fonts = {f.name for f in fm.ttflist}  # build the name set once, outside the loop
for font_name in font_list_check:
    if font_name in installed_fonts:
        print(f"found : {font_name}")
        plt.rc('font', family=font_name)
        break
# +
plt.figure(figsize=(20, 8))
plt.subplot(3, 1, 1)  # top panel: Bollinger Band
plt.title('SK Hynix Bollinger Band(20 day, 2 std) - Reversals')
plt.plot(data_df.index, data_df['close'], 'b', label='Close')
plt.plot(data_df.index, data_df['upper'], 'r--', label ='Upper band')
plt.plot(data_df.index, data_df['MA20'], 'k--', label='Moving average 20')
plt.plot(data_df.index, data_df['lower'], 'c--', label ='Lower band')
plt.fill_between(data_df.index, data_df['upper'], data_df['lower'], color='0.9')
plt.legend(loc='best')
plt.subplot(3, 1, 2)  # middle panel: %b (position of close within the band)
plt.plot(data_df.index, data_df['PB'], 'b', label='%b')
plt.grid(True)
plt.legend(loc='best')
plt.subplot(3, 1, 3)  # bottom panel: 21-day Intraday Intensity %
plt.bar(data_df.index, data_df['IIP21'], color='g', label='II% 21day')  # ④
plt.grid(True)
plt.legend(loc='best')
plt.show()
# -
# ## **04 Visualization2 | 반전매매 구현하기**
# - 매수시점 : $ \%b < 0.05 $ , $ (21일) 일중강도율 > 0 $
# - 매도시점 : $ \%b > 0.95 $ , $ (21일) 일중강도율 < 0 $
# +
plt.figure(figsize=(20, 8))
plt.subplot(3, 1, 1)  # top panel: Bollinger Band with buy/sell markers
plt.title('SK Hynix Bollinger Band(20 day, 2 std) - Reversals')
plt.plot(data_df.index, data_df['close'], label='Close', color="orange")
plt.plot(data_df.index, data_df['upper'], 'r--', label ='Upper band')
plt.plot(data_df.index, data_df['MA20'], 'k--', label='Moving average 20')
plt.plot(data_df.index, data_df['lower'], 'c--', label ='Lower band')
plt.fill_between(data_df.index, data_df['upper'], data_df['lower'], color='0.9')
for i in range(0, len(data_df.close)):
    if data_df.PB.values[i] < 0.05 and data_df.IIP21.values[i] > 0:  # buy signal: %b < 0.05 with positive II%
        plt.plot(data_df.index.values[i], data_df.close.values[i], 'r^')
    elif data_df.PB.values[i] > 0.95 and data_df.IIP21.values[i] < 0:  # sell signal: %b > 0.95 with negative II%
        plt.plot(data_df.index.values[i], data_df.close.values[i], 'bv')
plt.legend(loc='best')
plt.subplot(3, 1, 2)  # middle panel: %b
plt.plot(data_df.index, data_df['PB'], 'b', label='%b')
plt.grid(True)
plt.legend(loc='best')
plt.subplot(3, 1, 3)  # bottom panel: 21-day Intraday Intensity %
plt.bar(data_df.index, data_df['IIP21'], color='g', label='II% 21day')
for i in range(0, len(data_df.close)):
    if data_df.PB.values[i] < 0.05 and data_df.IIP21.values[i] > 0:
        plt.plot(data_df.index.values[i], 0, 'r^')  # mark buy points on the II% axis too
    elif data_df.PB.values[i] > 0.95 and data_df.IIP21.values[i] < 0:
        plt.plot(data_df.index.values[i], 0, 'bv')  # mark sell points
plt.grid(True)
plt.legend(loc='best')
plt.show()
| stock-books/T2_boll2reverse.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Итераторы
#
# Использование итератора.
#
# `range` возвращает не готовый список, а генератор по которому итерируется цикл `for`:
for i in range(5):
    print(i & 1)  # parity of i: 0 for even, 1 for odd
for i in 'python':
    print(ord(i))  # Unicode code point of each character
# The simplest iterator can be created with the builtin iter():
iterator = iter([1, 2, 3])
# next() returns the next element:
next(iterator)
next(iterator)
next(iterator)
# When the elements run out, a StopIteration exception is raised:
next(iterator)
# ## Собственный итератор
class SquareIterator():
    """Iterator yielding the squares of the integers in [start, end)."""

    def __init__(self, start, end):
        self.current = start
        self.end = end

    def __iter__(self):
        # The object is its own iterator.
        return self

    def __next__(self):
        # Exhausted: tell the for-loop to stop. (Resetting self.current here
        # instead would produce an endless, cycling iterator.)
        if self.current >= self.end:
            raise StopIteration
        # Produce the next value; in real code this would typically come from
        # some underlying collection.
        value = self.current ** 2
        self.current += 1  # advance and remember the iteration state
        return value
# Three iterations in total. SquareIterator(1, 4) returns an anonymous object that supports iteration:
for i in SquareIterator(1, 4):
    print(i)  # prints 1, 4, 9
# Еще один пример:
# +
import random
class SquareIteratorRand(SquareIterator):
    """SquareIterator over a randomly chosen [start, end) range."""

    def __init__(self):
        # Pick start in [3, 10], then an end at least 3 above it (at most 15).
        lo = random.randint(3, 10)
        hi = random.randint(lo + 3, 15)
        super().__init__(lo, hi)
s = SquareIteratorRand()
print(s.__dict__)  # show the randomly chosen 'current' and 'end' attributes
for i in s:
    print(i)
# -
# ## Собственный итератор без реализации методов `__iter__` и `__next__`
#
# Можно итерироваться по объекту не определяя методы `__iter__` и `__next__`, это можно сделать, реализовав метод `__getitem__`:
# +
class IndexIterable():
    """Iterable via the sequence protocol.

    Defining __getitem__ alone is enough for for-loops and iter(): Python
    probes indices 0, 1, 2, ... until an IndexError is raised. Production
    code usually implements __iter__/__next__ instead.
    """

    def __init__(self, obj):
        self.obj = obj

    def __getitem__(self, index):
        # Delegate indexing straight to the wrapped sequence.
        return self.obj[index]
for letter in IndexIterable('str'):
    print(letter)  # works via __getitem__: indices 0, 1, 2, ... until IndexError
| 4/4.1.2_iterators.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visual exploratory data analysis
# > A Summary of lecture "Analyzing Police Activity with pandas", via datacamp
#
# - toc: true
# - badges: true
# - comments: true
# - author: <NAME>
# - categories: [Python, Datacamp, Data_Science, Visualization]
# - image: images/k-zones-plot.png
# +
# Import the pandas library as pd
import pandas as pd

# Read 'police.csv' into a DataFrame named ri (Rhode Island traffic-stop records)
ri = pd.read_csv('./dataset/police.csv')
# -
# ## Does time of day affect arrest rate?
#
#
# ### Calculating the hourly arrest rate
#
# When a police officer stops a driver, a small percentage of those stops ends in an arrest. This is known as the arrest rate. In this exercise, you'll find out whether the arrest rate varies by time of day.
#
# First, you'll calculate the arrest rate across all stops in the ri DataFrame. Then, you'll calculate the hourly arrest rate by using the hour attribute of the index. The hour ranges from 0 to 23, in which:
#
# - 0 = midnight
# - 12 = noon
# - 23 = 11 PM
# ### Preprocess
# +
# Build a single datetime column from the separate date and time strings
# and use it as the index so we can group by hour / resample by year.
combined = ri.stop_date.str.cat(ri.stop_time, sep=' ')
ri['stop_datetime'] = pd.to_datetime(combined)
ri['is_arrested'] = ri['is_arrested'].astype(bool)  # NOTE(review): astype(bool) maps NaN to True — confirm missing values were handled upstream
ri.set_index('stop_datetime', inplace=True)
# +
# Calculate the overall arrest rate (mean of a boolean column = fraction True)
print(ri.is_arrested.mean())

# Calculate the hourly arrest rate (group by hour of day, 0-23)
print(ri.groupby(ri.index.hour).is_arrested.mean())

# Save the hourly arrest rate
hourly_arrest_rate = ri.groupby(ri.index.hour).is_arrested.mean()
# -
# ### Plotting the hourly arrest rate
# In this exercise, you'll create a line plot from the hourly_arrest_rate object. A line plot is appropriate in this case because you're showing how a quantity changes over time.
#
# This plot should help you to spot some trends that may not have been obvious when examining the raw numbers!
# +
import matplotlib.pyplot as plt

# Create a line plot of 'hourly_arrest_rate' (x-axis = hour of day)
hourly_arrest_rate.plot()

# Add the xlabel, ylabel, and title
plt.xlabel('Hour')
plt.ylabel('Arrest Rate')
plt.title('Arrest Rate by Time of Day')
# -
# ## Are drug-related stops on the rise?
#
# ### Plotting drug-related stops
#
# In a small portion of traffic stops, drugs are found in the vehicle during a search. In this exercise, you'll assess whether these drug-related stops are becoming more common over time.
#
# The Boolean column drugs_related_stop indicates whether drugs were found during a given stop. You'll calculate the annual drug rate by resampling this column, and then you'll use a line plot to visualize how the rate has changed over time.
# +
# Calculate the annual rate of drug-related stops ('A' = calendar-year-end resample)
print(ri.drugs_related_stop.resample('A').mean())

# Save the annual rate of drug-related stops
annual_drug_rate = ri.drugs_related_stop.resample('A').mean()

# Create a line plot of 'annual_drug_rate'
annual_drug_rate.plot()
# -
# ### Comparing drug and search rates
# As you saw in the last exercise, the rate of drug-related stops increased significantly between 2005 and 2015. You might hypothesize that the rate of vehicle searches was also increasing, which would have led to an increase in drug-related stops even if more drivers were not carrying drugs.
#
# You can test this hypothesis by calculating the annual search rate, and then plotting it against the annual drug rate. If the hypothesis is true, then you'll see both rates increasing over time.
# +
# Calculate and save the annual search rate
annual_search_rate = ri.search_conducted.resample('A').mean()

# Concatenate 'annual_drug_rate' and 'annual_search_rate' side by side (one column each)
annual = pd.concat([annual_drug_rate, annual_search_rate], axis='columns')

# Create subplots from 'annual' (one panel per column, shared x-axis of years)
annual.plot(subplots=True)
# -
# ## What violations are caught in each district?
#
# ### Tallying violations by district
# The state of Rhode Island is broken into six police districts, also known as zones. How do the zones compare in terms of what violations are caught by police?
#
# In this exercise, you'll create a frequency table to determine how many violations of each type took place in each of the six zones. Then, you'll filter the table to focus on the "K" zones, which you'll examine further in the next exercise.
#
#
# +
# Create a frequency table of districts and violations
print(pd.crosstab(ri.district, ri.violation))

# Save the frequency table as 'all_zones'
all_zones = pd.crosstab(ri.district, ri.violation)

# Select rows 'Zone K1' through 'Zone K3' (.loc label slicing is inclusive at both ends)
print(all_zones.loc['Zone K1':'Zone K3'])

# Save the smaller table as 'k_zones'
k_zones = all_zones.loc['Zone K1':'Zone K3']
# -
# ### Plotting violations by district
# Now that you've created a frequency table focused on the "K" zones, you'll visualize the data to help you compare what violations are being caught in each zone.
#
# First you'll create a bar plot, which is an appropriate plot type since you're comparing categorical data. Then you'll create a stacked bar plot in order to get a slightly different look at the data. Which plot do you find to be more insightful?
# Create a bar plot of 'k_zones' (grouped bars: one group per zone)
k_zones.plot(kind='bar')
plt.savefig('../images/k-zones-plot.png')  # saved for use as the post's preview image

# Create a stacked bar plot of 'k_zones' (one bar per zone, segments per violation)
k_zones.plot(kind='bar', stacked=True)
# ## How long might you be stopped for a violation?
#
# ### Converting stop durations to numbers
# In the traffic stops dataset, the stop_duration column tells you approximately how long the driver was detained by the officer. Unfortunately, the durations are stored as strings, such as '0-15 Min'. How can you make this data easier to analyze?
#
# In this exercise, you'll convert the stop durations to integers. Because the precise durations are not available, you'll have to estimate the numbers using reasonable values:
#
# - Convert '0-15 Min' to 8
# - Convert '16-30 Min' to 23
# - Convert '30+ Min' to 45
# +
# Print the unique values in 'stop_duration'
print(ri.stop_duration.unique())

# Create a dictionary that maps the duration strings to estimated minutes
mapping = {'0-15 Min': 8, '16-30 Min': 23, '30+ Min': 45}

# Convert the 'stop_duration' strings to integers using the 'mapping'
# (values not present in the mapping would become NaN)
ri['stop_minutes'] = ri.stop_duration.map(mapping)

# Print the unique values in 'stop_minutes'
print(ri.stop_minutes.unique())
# -
# ### Plotting stop length
# If you were stopped for a particular violation, how long might you expect to be detained?
#
# In this exercise, you'll visualize the average length of time drivers are stopped for each type of violation. Rather than using the violation column in this exercise, you'll use violation_raw since it contains more detailed descriptions of the violations.
# +
# Calculate the mean 'stop_minutes' for each value in 'violation_raw'
print(ri.groupby('violation_raw').stop_minutes.mean())

# Save the resulting Series as 'stop_length'
stop_length = ri.groupby('violation_raw').stop_minutes.mean()

# Sort 'stop_length' by its values and create a horizontal bar plot
stop_length.sort_values().plot(kind='barh')
| _notebooks/2020-05-25-03-Visual-exploratory-data-analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### BEGIN SOLUTION
# # AI@UCF Data Science Group Fall 2019 Titanic Workshop Solution
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from collections import Counter
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve
from scipy import stats
sns.set(style='white', context='notebook', palette='deep')

# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
train = pd.read_csv("../input/titanic/train.csv")
test = pd.read_csv("../input/titanic/test.csv")
IDtest = test["PassengerId"]  # kept aside for building the submission file at the end
# -

## In order to make changes to all data, we need to combine train and test for the time being
train_len = len(train)  # remember where train ends so we can split the combined frame again later
dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True)
# +
# When it comes to cleaning data, what is the very first thing you should do?
# Always manage your missing values.

# Fill empty and NaN values with NaN
dataset = dataset.fillna(np.nan)

# Check for Null values
dataset.isnull().sum()

# The reason why there is a significant number of null values in Survived is only because we combined the train and the test, and obviously we don't have the solutions for the test
# +
# If we want to check what just the train looks like, we still have it in memory
train.info()
train.isnull().sum()
# -
train.head()
train.describe()
# + active=""
#
# -
# Correlation matrix between numerical values (SibSp Parch Age and Fare values) and Survived
g = sns.heatmap(train[["Survived","SibSp","Parch","Age","Fare"]].corr(),annot=True, fmt = ".2f", cmap = "coolwarm")
# +
# Using a heatmap, we can check to see how our numerical values correlate with each other. As you can see, the only numerical value that seems to really correlate with
# survival is Fare. That's not to say that the others are useless, but intuitively you can imagine bigger fare = rich = survived
# +
# What definitely is possible is that the other features have subpopulations that have actual correlation. That is to say,
# a relationship may only show up within certain groups rather than across the whole dataset.
# -
# Explore SibSp feature vs Survived
g = sns.factorplot(x="SibSp",y="Survived",data=train,kind="bar", size = 6 ,
                   palette = "muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")
# +
# What does this tell us about number of siblings?
# It tells us that while 0-2 siblings have a fairly average chance of survival, 3-4 have a dramatically smaller chance.
# This means that while sibling count isn't good by itself, knowing whether they have 3-4 is important
# -
# Explore Parch feature vs Survived
g = sns.factorplot(x="Parch",y="Survived",data=train,kind="bar", size = 6 ,
                   palette = "muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")
# +
# What this tells us is that smaller families (1-2) have a better chance of survival than people without families, or people with large families
# Why am I not saying that families of 3 have the best chance of survival?
# While sure, the average seems marginally higher, the uncertainty is much greater around families of 3, meaning the values are more spread out.
# Families of 1-2 are much more certain to have more than 50% chance of survival.
# -
# Now, let's take a look at a Facet graph of Age
# Explore Age vs Survived (one histogram per survival outcome)
g = sns.FacetGrid(train, col='Survived')
g = g.map(sns.distplot, "Age")
# +
# What does this tell us?
# Before in our heatmap, we noted that Age does not seem correlated with survival, but this graph tells a different story.
# The distribution seems somewhat normal, except for the unusual jump in survivability of infants
# This means it might be valuable to classify passengers into age groups, rather than leave their raw age
# let's try superimposing these to get a clearer picture

# Explore Age distribution: survival-conditional KDEs of Age, overlaid on one axis
g = sns.kdeplot(train["Age"][(train["Survived"] == 0) & (train["Age"].notnull())], color="Red", shade = True)
g = sns.kdeplot(train["Age"][(train["Survived"] == 1) & (train["Age"].notnull())], ax =g, color="Blue", shade= True)
g.set_xlabel("Age")
g.set_ylabel("Frequency")
g = g.legend(["Not Survived","Survived"])
# -
# Let's move onto Fare
dataset["Fare"].isnull().sum()

# There's only a single missing value of fare, what should we do with it?
dataset["Fare"] = dataset["Fare"].fillna(dataset["Fare"].median())  # median: robust to the heavily skewed fare distribution

# Explore Fare distribution
g = sns.distplot(dataset["Fare"], color="m", label="Skewness : %.2f"%(dataset["Fare"].skew()))
g = g.legend(loc="best")

# Oof, this is extremely skewed. This means that our model is going to massively overweight values on the right end. Therefore, we should probably transform it with a log function
dataset["Fare"] = dataset["Fare"].map(lambda i: np.log(i) if i > 0 else 0)  # log-transform; guard against log(0)
g = sns.distplot(dataset["Fare"], color="b", label="Skewness : %.2f"%(dataset["Fare"].skew()))
g = g.legend(loc="best")
# That looks much, much better, right?
# Let's move onto sex
g = sns.barplot(x="Sex",y="Survived",data=train)
g = g.set_ylabel("Survival Probability")
# +
# Oh wow, men only have about a 20% chance of survival, while women have 70%
# You could actually stop everything right now and just predict survival based on gender and get ~75% accuracy with these numbers
# -
train[["Sex","Survived"]].groupby('Sex').mean()

# Explore Pclass vs Survived
g = sns.factorplot(x="Pclass",y="Survived",data=train,kind="bar", size = 6 ,
                   palette = "muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")

# As you would expect, chance of survival is pretty much directly correlated with class
# Explore Pclass vs Survived by Sex
g = sns.factorplot(x="Pclass", y="Survived", hue="Sex", data=train,
                   size=6, kind="bar", palette="muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")

# As we saw earlier with our exploration of Age, there is clearly some information hidden within
# However there are 300 values missing. This is way too many to simply replace with the mean (because 300 missing is enough to shift the mean to a false value)
# The solution: Let's see if there is some other variable that correlates with Age, and use that to predict what the missing age might be
# Explore Age vs Sex, Parch , Pclass and SibSp
g = sns.factorplot(y="Age",x="Sex",data=dataset,kind="box")
g = sns.factorplot(y="Age",x="Sex",hue="Pclass", data=dataset,kind="box")
g = sns.factorplot(y="Age",x="Parch", data=dataset,kind="box")
g = sns.factorplot(y="Age",x="SibSp", data=dataset,kind="box")
# +
# What can we conclude from these graphs?
# +
# The distribution of age seems to be the same across male and female; the higher the class of passenger, the older they are
# Parch (parents/children) seems to be positively correlated with age, while sibling count negatively correlated.
# That is to say, older passengers tend to have more children/parents, while younger passengers have more siblings
# -
# Let's go ahead and convert sex to a numerical value since we missed it
dataset["Sex"] = dataset["Sex"].map({"male": 0, "female":1})  # male -> 0, female -> 1

# Let's look directly at the correlation between numerical features
g = sns.heatmap(dataset[["Age","Sex","SibSp","Parch","Pclass"]].corr(),cmap="BrBG",annot=True)
# +
# This is a good example of our visual intuition being wrong. We were correct that Age and Sex have nothing to do with each other
# But parent/children is actually negatively correlated with age, as is class and sibling count
# Here's how we're going to do it. For every missing Age value, we are going to find rows with the same SibSp, Parch, and class values as that row with missing age, and average those ages
# Heres how we're going to do it. For every missing Age value, we are going to find rows with the same Sibling, parch, and class values as that row with missing age, and average those ages
# +
# Filling missing value of Age
## Fill Age with the median age of similar rows according to Pclass, Parch and SibSp

# Index of NaN age rows
index_NaN_age = list(dataset["Age"][dataset["Age"].isnull()].index)

age_med = dataset["Age"].median()  # global fallback median, computed once (loop-invariant)
for i in index_NaN_age :
    # Median age of passengers sharing this row's SibSp, Parch and Pclass.
    row = dataset.iloc[i]
    age_pred = dataset["Age"][((dataset['SibSp'] == row["SibSp"]) & (dataset['Parch'] == row["Parch"]) & (dataset['Pclass'] == row["Pclass"]))].median()
    # Write via .loc (dataset has a default RangeIndex after reset_index, so the
    # label equals the position). The original chained assignment
    # "dataset['Age'].iloc[i] = ..." writes into a temporary Series and can
    # silently leave the frame unchanged under pandas copy-on-write.
    if not np.isnan(age_pred) :
        dataset.loc[i, 'Age'] = age_pred
    else :
        dataset.loc[i, 'Age'] = age_med
# -
# Let's see how things changed
g = sns.factorplot(x="Survived", y = "Age",data = train, kind="box")
g = sns.factorplot(x="Survived", y = "Age",data = train, kind="violin")
# +
# So there's still no correlation between ages and survival, except for that little blip at the bottom of the survived violin
# +
# Now it's time for the best part: Feature engineering
# Question: Should we keep the name of passengers? How could the name be useful?
# -
# Get Title from Name
dataset_title = [i.split(",")[1].split(".")[0].strip() for i in dataset["Name"]]  # e.g. "Braund, Mr. Owen Harris" -> "Mr"
dataset["Title"] = pd.Series(dataset_title)
dataset["Title"].head()
g = sns.countplot(x="Title",data=dataset)
g = plt.setp(g.get_xticklabels(), rotation=45)

# So it would be a waste of time to worry about titles that only appear once or twice, so let's just change them to "rare"
# Then, let's map each title to an integer code

# Convert to categorical values for title Title
dataset["Title"] = dataset["Title"].replace(['Lady', '<NAME>','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset["Title"] = dataset["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3})
dataset["Title"] = dataset["Title"].astype(int)
g = sns.countplot(dataset["Title"])
g = g.set_xticklabels(["Master","Miss/Ms/Mme/Mlle/Mrs","Mr","Rare"])
g = sns.factorplot(x="Title",y="Survived",data=dataset,kind="bar")
g = g.set_xticklabels(["Master","Miss-Mrs","Mr","Rare"])
g = g.set_ylabels("survival probability")
# +
# Well, we already know how important sex was to survival, but now "master" is revealing the importance of children as well
# As we know "Women and children first"

# Drop Name variable (its useful information is now captured in Title)
dataset.drop(labels = ["Name"], axis = 1, inplace = True)

# If you recall, the only useful information from Age was that children had a slight advantage for
# survival. However, most Ages were actually missing. There were no missing titles, so we're instead
# going to rely on the existence of the "master" title to figure out who is a child.
# Did we waste a ton of time cleaning up the Ages? Not really, a lot of the time when working on data
# you find that previous approaches were not as good as you originally expected
dataset.drop(labels = ["Age"], axis = 1, inplace = True)
# -
dataset.head()

# Let's go over to family size again, since we talked about that a lot
# Let's examine size of family, including the passenger themselves
# Create a family size descriptor from SibSp and Parch
dataset["Fsize"] = dataset["SibSp"] + dataset["Parch"] + 1  # +1 counts the passenger themself
g = sns.factorplot(x="Fsize",y="Survived",data = dataset)
g = g.set_ylabels("Survival Probability")
# Let's break these into 4 categories
# Create new features of family size (one-hot flags: single / small / medium / large)
dataset['Single'] = dataset['Fsize'].map(lambda s: 1 if s == 1 else 0)
dataset['SmallF'] = dataset['Fsize'].map(lambda s: 1 if s == 2 else 0)
dataset['MedF'] = dataset['Fsize'].map(lambda s: 1 if 3 <= s <= 4 else 0)
dataset['LargeF'] = dataset['Fsize'].map(lambda s: 1 if s >= 5 else 0)
# We've essentially turned family size into binary values, since there's no clear smooth correlation
g = sns.factorplot(x="Single",y="Survived",data=dataset,kind="bar")
g = g.set_ylabels("Survival Probability")
g = sns.factorplot(x="SmallF",y="Survived",data=dataset,kind="bar")
g = g.set_ylabels("Survival Probability")
g = sns.factorplot(x="MedF",y="Survived",data=dataset,kind="bar")
g = g.set_ylabels("Survival Probability")
g = sns.factorplot(x="LargeF",y="Survived",data=dataset,kind="bar")
g = g.set_ylabels("Survival Probability")
# We're going to convert Title and Embarked to binary values (a different column for each possible title/embark point)
# dataset = pd.get_dummies(dataset, columns = ["Title"])
dataset = pd.get_dummies(dataset, columns = ["Embarked"], prefix="Em")
dataset.head()
# What about cabin?
dataset["Cabin"].isnull().sum()

# In an old version of this, we tried this with Cabin, and in fact using it drops our score by 2 percent. Try running it and see what happens
# Can you explain why this is actually suboptimal?
# NOTE: the triple-quoted block below is a bare string literal used to "comment out" code; it is never assigned or executed.
"""
# That is...a lot of missing values```````````````````````````````````````````````
# Let's just replace missing values with an X, indicating no cabin listed
dataset["Cabin"] = pd.Series([i[0] if not pd.isnull(i) else 'X' for i in dataset['Cabin'] ])
"""
#g = sns.countplot(dataset["Cabin"],order=['A','B','C','D','E','F','G','T','X'])
# +
#g = sns.factorplot(y="Survived",x="Cabin",data=dataset,kind="bar",order=['A','B','C','D','E','F','G','T','X'])
#g = g.set_ylabels("Survival Probability")
# -
#dataset = pd.get_dummies(dataset, columns = ["Cabin"],prefix="Cabin")
#
#dataset.head()
# Ticket may have some information, but it's very likely to be similar to fare, so let's just drop it. Let's drop PassengerId as well
dataset.drop(labels = ["PassengerId", "Ticket", "Cabin"], axis = 1, inplace = True)
# +
# Alright, time for some machine learning!

## Separate train dataset and test dataset
train = dataset[:train_len]  # the first train_len rows were the original training set
test = dataset[train_len:]
test.drop(labels=["Survived"],axis = 1,inplace=True)  # the test portion has no labels (they were NaN)
# +
## Separate train features and label
train["Survived"] = train["Survived"].astype(int)
Y_train = train["Survived"]
X_train = train.drop(labels = ["Survived"],axis = 1)
# -
# We need a cross-validator for our hyperparameter searcher
kfold = StratifiedKFold(n_splits=10)  # stratified 10-fold keeps the class balance in every fold
# +
# RFC Parameters tuning: grid-search a random forest over tree-shape and ensemble-size parameters
RFC = RandomForestClassifier()

## Search grid for optimal parameters
rf_param_grid = {"max_depth": [None],
                 "max_features": [1, 3, 10],
                 "min_samples_split": [2, 3, 10],
                 "min_samples_leaf": [1, 3, 10],
                 "bootstrap": [False],
                 "n_estimators" :[100,300],
                 "criterion": ["gini"]}

gsRFC = GridSearchCV(RFC,param_grid = rf_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1)
gsRFC.fit(X_train,Y_train)
RFC_best = gsRFC.best_estimator_

# Best cross-validated accuracy found by the grid search
gsRFC.best_score_
# +
### SVC classifier: grid-search over kernel width (gamma) and regularization strength (C)
SVMC = SVC(probability=True)  # probability=True enables predict_proba, needed for soft voting later
svc_param_grid = {'kernel': ['rbf'],
                  'gamma': [ 0.001, 0.01, 0.1, 1],
                  'C': [1, 10, 50, 100,200,300, 1000]}

gsSVMC = GridSearchCV(SVMC,param_grid = svc_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1)
gsSVMC.fit(X_train,Y_train)
SVMC_best = gsSVMC.best_estimator_

# Best cross-validated accuracy found by the grid search
gsSVMC.best_score_
# +
# Gradient boosting tuning
GBC = GradientBoostingClassifier()
gb_param_grid = {'loss' : ["deviance"],
                 'n_estimators' : [100,200,300],
                 'learning_rate': [0.1, 0.05, 0.01],
                 'max_depth': [4, 8],
                 'min_samples_leaf': [100,150],
                 'max_features': [0.3, 0.1]
                 }

gsGBC = GridSearchCV(GBC,param_grid = gb_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1)
gsGBC.fit(X_train,Y_train)
GBC_best = gsGBC.best_estimator_

# Best cross-validated accuracy found by the grid search
gsGBC.best_score_
# +
# AdaBoost on shallow decision trees, tuned over tree split criteria and learning rate
DTC = DecisionTreeClassifier()
adaDTC = AdaBoostClassifier(DTC, random_state=7)
ada_param_grid = {"base_estimator__criterion" : ["gini", "entropy"],
                  "base_estimator__splitter" : ["best", "random"],
                  "algorithm" : ["SAMME","SAMME.R"],
                  "n_estimators" :[1,2],
                  "learning_rate": [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3,1.5]}

gsadaDTC = GridSearchCV(adaDTC,param_grid = ada_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1)
gsadaDTC.fit(X_train,Y_train)
ada_best = gsadaDTC.best_estimator_
# +
# Let's see how they all do
# Note: I chose these 4 classifiers for you because I already know they do well for this problem
# As an exercise, look into more than 10 classifiers and look up how to rank their cross-validation score
test_Survived_RFC = pd.Series(RFC_best.predict(test), name="RFC")
test_Survived_SVMC = pd.Series(SVMC_best.predict(test), name="SVC")
test_Survived_AdaC = pd.Series(ada_best.predict(test), name="Ada")
test_Survived_GBC = pd.Series(GBC_best.predict(test), name="GBC")

# Concatenate all classifier results
ensemble_results = pd.concat([test_Survived_RFC,test_Survived_AdaC,test_Survived_GBC, test_Survived_SVMC],axis=1)
g= sns.heatmap(ensemble_results.corr(),annot=True)  # how correlated are the models' predictions? (less correlated = more to gain from voting)
# +
# The final step: We will have these 4 models vote on each possible prediction
# ('soft' voting averages the predicted class probabilities of the four tuned models)
votingC = VotingClassifier(estimators=[('rfc', RFC_best),
                                       ('svc', SVMC_best), ('adac',ada_best),('gbc',GBC_best)], voting='soft', n_jobs=4)
votingC = votingC.fit(X_train, Y_train)
# +
# Finally, let's send our submission to a CSV
test_Survived = pd.Series(votingC.predict(test), name="Survived")
results = pd.concat([IDtest,test_Survived],axis=1)
results.to_csv("TitanicSubmission.csv",index=False)
# -
# ### END SOLUTION
| sp20/02-20-preprocessing/preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from scipy import signal, ndimage, interpolate, stats
import sys, os, time, json
from pathlib import Path
import pickle as pkl
sys.path.append('../PreProcessing/')
sys.path.append('../Lib/')
sys.path.append('../Analyses/')
import sklearn.linear_model as lm
from sklearn.model_selection import cross_val_score
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.text import Text
import seaborn as sns
import TreeMazeFunctions as TMF
import spike_functions as SF
import spatial_tuning as ST
import plot_functions as PF
import TrialAnalyses as TA
import analyses_table as AT
from importlib import reload # Python 3.4+ only.
# -
# Resolve the lab's Oak storage roots once; downstream cells build paths from it.
oakPaths = AT.getOakPaths()
# Global seaborn theme: white grid with all axis spines hidden.
sns.set(style="whitegrid",font_scale=1,rc={
    'axes.spines.bottom': False,
    'axes.spines.left': False,
    'axes.spines.right': False,
    'axes.spines.top': False,
    'axes.edgecolor':'0.5'})
# +
#task = 'T3g'
# Build a per-session behavior summary table for each animal on T3* tasks.
animals = ['Li','Ne','Cl']
overwrite=False
doPlots = False
# Summary columns: counts of trial outcomes/conditions plus proportion correct
# overall (pCo) and on the trial following a cue switch (pCoST).
cols = ['se','an','task','nTrials','nDiscard','nCo','nInCo','nLD','nRD','nLC','nRC','nST','pCo','pCoST']
allBehDat = pd.DataFrame()
for animal in animals:
    # Session paths and the clustering summary live under the animal's folders.
    fn = oakPaths['Root'] / (animal+'_SessionPaths.pkl')
    with fn.open(mode="rb") as f:
        sePaths = pkl.load(f)
    CT_Path = oakPaths['Clustered'] / animal / (animal+'_ClusteringSummary.json')
    with CT_Path.open() as f:
        CT = json.load(f)
    for se,v in CT['Sessions'].items():
        if v:  # only sessions flagged as valid in the clustering summary
            try:
                temp = se.split('_')
                if temp[1][:2]=='T3':  # restrict to T3 task variants
                    TrialConds = pd.read_csv(sePaths[se]['TrialCondMat'] ,index_col=0)
                    X = TrialConds[TrialConds['Good']]
                    behDat = pd.DataFrame(np.zeros((1,len(cols))),columns=cols )
                    behDat['se'] = se
                    behDat['an'] = animal
                    behDat['task'] = temp[1]
                    behDat['nTrials'] = X.shape[0]
                    behDat['nDiscard'] = np.sum(~TrialConds['Good'])
                    behDat['nCo'] = np.sum(X['Co']=='Co')
                    behDat['nInCo'] = np.sum(X['Co']=='InCo')
                    behDat['nLD'] = np.sum(X['Desc']=='L')
                    behDat['nRD'] = np.sum(X['Desc']=='R')
                    behDat['nLC'] = np.sum(X['Cues']=='L')
                    behDat['nRC'] = np.sum(X['Cues']=='R')
                    # Cue-switch trials: cue differs from the previous trial.
                    # (renamed from `ST`, which shadowed the `spatial_tuning`
                    # module imported as ST at the top of the file)
                    switch_mask = ~(X['Cues'].values[0:-1]==X['Cues'].values[1:])
                    CoT = (X['Co']=='Co').values
                    behDat['nST'] = np.sum(switch_mask)
                    behDat['pCo'] = np.mean(CoT)
                    # Accuracy on the trial *after* each switch.
                    behDat['pCoST'] = CoT[np.where(switch_mask)[0]+1].mean()
                    allBehDat = pd.concat((allBehDat,behDat))
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are no longer swallowed; still best-effort per session.
                print("Error processing session: {}".format(se))
                print ("Error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
allBehDat.to_csv(oakPaths['Analyses'] / 'TaskBehavior.csv')
# -
allBehDat.to_csv(oakPaths['Analyses'] / 'TaskBehavior.csv')
# Round-trip through disk so later cells can be re-run without rebuilding the table.
allBehDat = pd.read_csv(oakPaths['Analyses'] / 'TaskBehavior.csv',index_col=0)
allBehDat
allBehDat.head()
# Per-animal / per-task mean and SD of overall and post-switch accuracy.
m = allBehDat.groupby(['an','task']).mean()[[ 'pCo','pCoST' ] ]
s = allBehDat.groupby(['an','task']).std()[[ 'pCo','pCoST' ] ]
m
m['pCo'],s['pCo']
# +
sns.set_context("poster")
sns.axes_style('whitegrid')
plotSP = Path('/mnt/c/Users/alexg8/Documents/SfN2019/Plots/')
# NOTE(review): m and s are overwritten with hard-coded arrays here; they look
# like the T3g group means/SDs printed above, rounded — confirm they match.
m = np.array([0.77,0.84,0.82])
s = np.array([0.08,0.06,0.07])
subset = allBehDat['task']=='T3g'
f,ax= plt.subplots()
ax.set_ylim([0.2,1.05])
#sns.barplot(x='an',y='pCo',data=allBehDat,alpha=0.5,ax=ax)
# One point per session; mean +/- SD overlaid as bars per animal.
ax=sns.swarmplot(x='an',y='pCo',data=allBehDat[subset],alpha=0.8,size=8,ax=ax)
for ii in np.arange(3):
    ax.plot( [ii,ii], [m[ii]-s[ii],m[ii]+s[ii]], linewidth=3,color='0.2',zorder=3,solid_capstyle='round',alpha=0.8)
    ax.plot( [ii-0.25,ii+0.25], [m[ii],m[ii]], linewidth=4,color='0.2',zorder=3,solid_capstyle='round',alpha=0.8)
ax.grid()
sns.despine(offset=10, trim=False,ax=ax)
for tick in ax.get_xticklabels():
    tick.set_rotation(45)
ax.grid(axis='both',color='grey',alpha=0.4)
#ax.set_ylim([0,1])
# Relabel the 0-1 proportion axis as percentages.
ax.set_yticks([0.25,0.5,0.75,1])
ax.set_yticklabels([25,50,75,100])
ax.set_xlabel('Animal')
ax.set_ylabel('P. Correct [%]')
f.savefig(plotSP / 'anBehavPCo.svg', bbox_inches='tight', pad_inches=0.2)
# -
# Subset used by the next figure cell (post-switch accuracy, T3i sessions).
subset = allBehDat['task']=='T3i'
m = allBehDat.groupby(['an','task']).mean()[[ 'pCo','pCoST' ] ]
m
m = allBehDat.groupby(['an','task']).mean()[[ 'pCoST' ] ]
s = allBehDat.groupby(['an','task']).std()[[ 'pCoST' ] ]
m['pCoST'],s['pCoST']
# +
# Swarm plot of post-switch accuracy (pCoST) per animal (T3i `subset` from the
# previous cell), with group mean +/- SD overlaid.
# NOTE(review): m and s are hard-coded; presumably the group values printed
# above, rounded — confirm they match the current data.
m = np.array([0.79,0.90,0.89])
s = np.array([0.08,0.06,0.06])
f,ax= plt.subplots()
ax.set_ylim([0.2,1.05])
#sns.barplot(x='an',y='pCo',data=allBehDat,alpha=0.5,ax=ax)
ax=sns.swarmplot(x='an',y='pCoST',data=allBehDat[subset],alpha=0.8,size=8,ax=ax)
for ii in np.arange(3):
    ax.plot( [ii,ii], [m[ii]-s[ii],m[ii]+s[ii]], linewidth=3,color='0.2',zorder=3,solid_capstyle='round',alpha=0.8)
    ax.plot( [ii-0.25,ii+0.25], [m[ii],m[ii]], linewidth=4,color='0.2',zorder=3,solid_capstyle='round',alpha=0.8)
ax.grid()
sns.despine(offset=10, trim=False,ax=ax)
for tick in ax.get_xticklabels():
    tick.set_rotation(45)
ax.grid(axis='both',color='grey',alpha=0.4)
#ax.set_ylim([0,1])
# Relabel the 0-1 proportion axis as percentages.
ax.set_yticks([0.25,0.5,0.75,1])
ax.set_yticklabels([25,50,75,100])
# (removed an earlier duplicate set_xlabel('Animal')/set_ylabel('P. Correct')
# pair that was immediately overwritten by the two calls below)
ax.set_xlabel('Animal')
ax.set_ylabel('P. Switch Correct [%]')
f.savefig(plotSP / 'anBehavPCoST.svg', bbox_inches='tight', pad_inches=0.2)
# +
# Session accuracy vs number of trials per session, one regression per animal
# (Li, Ne, Cl), restricted to the T3g task.
sns.set_context('poster')
f,ax= plt.subplots()
ax.set_ylim([0.2,1.05])
subset = (allBehDat['an']=='Li') & (allBehDat['task']=='T3g')
ax = sns.regplot(x='nTrials',y='pCo',data=allBehDat[subset],scatter_kws = {'alpha':0.5},line_kws = {'alpha':0.7},ax=ax)
subset = (allBehDat['an']=='Ne') & (allBehDat['task']=='T3g')
ax = sns.regplot(x='nTrials',y='pCo',data=allBehDat[subset],scatter_kws = {'alpha':0.5},line_kws = {'alpha':0.7},ax=ax)
subset = (allBehDat['an']=='Cl') & (allBehDat['task']=='T3g')
ax = sns.regplot(x='nTrials',y='pCo',data=allBehDat[subset],scatter_kws = {'alpha':0.5},line_kws = {'alpha':0.7} ,ax=ax)
ax.grid(axis='both',color='grey',alpha=0.4)
sns.despine(offset=10, trim=False,ax=ax)
ax.grid(axis='both',color='grey',alpha=0.4)
ax.set_xlabel('nTrials')
ax.set_ylabel('P. Correct [%]')
# Relabel the 0-1 proportion axis as percentages.
ax.set_yticks([0.25,0.5,0.75,1])
ax.set_yticklabels([25,50,75,100])
f.savefig(plotSP / 'anBehavPCoVnTrials.svg', bbox_inches='tight', pad_inches=0.2)
# +
# Post-switch accuracy vs number of switch trials per session, one regression
# per animal (all tasks here, not restricted to T3g).
f,ax= plt.subplots(1)
ax.set_ylim([0.2,1.05])
subset = allBehDat['an']=='Li'
ax = sns.regplot(x='nST',y='pCoST',data=allBehDat[subset],scatter_kws = {'alpha':0.5},line_kws = {'alpha':0.7},ax=ax)
subset = allBehDat['an']=='Ne'
ax = sns.regplot(x='nST',y='pCoST',data=allBehDat[subset],scatter_kws = {'alpha':0.5},line_kws = {'alpha':0.7},ax=ax)
subset = allBehDat['an']=='Cl'
ax = sns.regplot(x='nST',y='pCoST',data=allBehDat[subset],scatter_kws = {'alpha':0.5},line_kws = {'alpha':0.7} ,ax=ax)
ax.grid(axis='both',color='grey',alpha=0.4)
sns.despine(offset=10, trim=False,ax=ax)
ax.grid(axis='both',color='grey',alpha=0.4)
# NOTE(review): the x variable is nST but the label says 'nTrials' — confirm intended.
ax.set_xlabel('nTrials')
ax.set_ylabel('P. Correct')
ax.set_yticks([0.25,0.5,0.75,1])
ax.set_yticklabels([25,50,75,100])
# -
# Displays the last session's summary row (leftover debug cell).
behDat
| Notebooks/getSessionBehavior.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div class="alert alert-success">
# <b>Author</b>:
#
# <NAME>
# <EMAIL>
#
# </div>
#
# # [Click here to see class lecture](https://photos.app.goo.gl/5rUVh6fugdG3YhFv7)
# `Chapter 1-4 & 8 is important for mid`
#
# [UML Book ref](http://umlguide2.uw.hu/index.html)
#
# This UML chapter is very important for both mid and final.
# 
#
# Model is a replica of the project we are doing. It's a simplified version of the reality but not the reality. So basically it's a blueprint of the actual system that specify the structural and behaviour of the system.
#
# 
#
# Modeling leads to development of good software.
#
# 
#
# When the problem or project is simplified with maintaining the requirements and also when we can visualize the system it makes developing the system much easier. Visualizing a project before development can lead to a good quality product. Like when you don't know about spaceship but you are to design a spaceship management system you won't be able to do it unless you have a model of the system in front of you. If you have a model then you can get some idea about the system and think/visualize how you can develop it. That's why we use modeling before actually doing the project.
#
# 
#
# 
# A model can enhance the end software product quality. Cause you have an idea and some documentation(sample in model) of the system you are to make from that model. Every model may be expressed at different levels of precision cause a model made for building a truck won't have same precision as in a rocket management system. For the rocket management system we have to make a model suitable for that. A model made for design part won't be same as a model made for development part cause these two has different working procedure so these model are also expressed at different level of precision.
#
# 
#
# For a project no single model is sufficient. Think of construction. In this project base, floor, ceiling, water line each has their unique requirements you can't just use one model to visualize them all you need a model for each sector of the project. So in a centralized model of project we'll have sub model by which we can visualize different sectors of the project. Let's think this terms in software. Like a manager has 3 ui designer under him and each of them are contributing to the project so manager is the centralized model here and designers are sub model cause each designer has their own visualization part of the project and manager is visualizing the whole project.
#
# 
# Model helps us to develop a great quality end product. If we can develop the model close to reality then making the actual thing will be much easier.
#
# 
#
# 
# 
#
# UML is the most widely used modeling language. It is not a programming language; it is an OOP-based visual modeling language.
#
# 
#
# 
#
# 
# # That's all for this lecture!
| CSE_321_Software Engineering/Lecture_11_10.08.2020.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1. Vorbereitung
#
# Der letzte Schritt, der uns noch fehlt, ist das *Trainieren* eines Modells. Bislang sind wir stets von fertigen Modellen ausgegangen, die uns aus zum Teil unbekannter Quelle zugekommen sind. Wir sind davon ausgegangen, dass diese Modelle angemessen funktionieren, und haben diese evaluiert und zur Vorhersage genutzt.
#
# Wir beschäftigen uns wieder mit den Daten der vorherigen Lektionen, das heißt `house_location_data.csv` und `house_location_new_data_with_labels.csv`.
# +
# TODO: import the data with pandas
import pandas as pd
# Training data plus a second labeled set used later as the "new" data.
data = pd.read_csv("../data/house_location/house_location_data.csv")
data_new = pd.read_csv("../data/house_location/house_location_new_data_with_labels.csv")
# -
data
data_new
# ## 2. Modell trainieren
#
# Zuvor haben wir ein fertiges Modell benutzt, nun wollen wir dieses selbstständig *trainieren*, das heißt an die Daten anpassen, sodass dieses hoffentlich korrekte Vorhersagen trifft.
#
#
# ### 2.1. Dummy Modell
#
# Wir trainieren zunächst ein Modell auf Grundlage einer äußerst simplen Annahme: wir identifizieren einfach dasjenige Feature im Datensatz, das sich zwischen den Klassen (*New York* vs *San Francisco*) am stärksten unterscheidet. Für dieses Feature bestimmen wir einen Schwellenwert anhand dessen wir jeweils entscheiden wollen, ob ein Datenpunkt der Klasse *New York* oder *San Francisco* angehört.
# +
# TODO: train the dummy model
# -
# Split the training data by class label (in_sf == 1 -> San Francisco).
data_in_sf = data.loc[data.in_sf.values == 1, :]
data_in_ny = data.loc[data.in_sf.values == 0, :]
data_in_ny
data_in_sf
# +
# "elevation" separates the two cities most clearly, so it becomes the single
# decision feature of the dummy model.
feature = "elevation"
import matplotlib.pyplot as plt
ax = data_in_sf.hist(feature, figsize=(10, 8), bins=50, alpha=0.7, label="San Francisco")
data_in_ny.hist(feature, bins=50, ax=ax, alpha=0.7, label="New York")
plt.legend()
# +
# Class-conditional means of the chosen feature.
average_elevation_sf = data_in_sf[feature].mean()
average_elevation_ny = data_in_ny[feature].mean()
print(average_elevation_sf)
print(average_elevation_ny)
# -
# Decision threshold: midpoint between the two class means.
decision_value = (average_elevation_ny + average_elevation_sf) / 2
decision_value
# ### 2.2. Scikit-Learn
#
# Im vorherigen Teil haben wir ein sehr einfaches Modell sebst programmiert. Trotz dessen Schlichtheit erfüllt dieses alle Kriterien eines Machine Learning Modells.
#
# Wir können aber natürlich einen Schritt weiter gehen. In den allermeisten Fällen programmieren wir Modelle nicht selbst, sondern greifen auf eine Bibliothek zurück. Diese heißt **Scikit-Learn** und stellt bereits die allermeisten Arten von Modellen zur Verfügung, die uns interessieren (alle außer Neuronale Netzwerke).
#
# Jedes dieser möglichen Modelle funktioniert anders und basiert auf anderen theoretischen Annahmen. Wir werden viele dieser Modelle ausführlich kennenlernen und deren Vorteile, Nachteile und Anwendungsmöglichkeiten diskutieren. Für den Moment benutzen wir schlicht ein bereits bekanntes Modell - den `DecisionTreeClassifier`.
# +
# TODO: import DecisionTreeClassifier from scikit-learn
# TODO: train the DecisionTreeClassifier on the data
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
# Features: everything except the identifier and the label column.
X = data.drop(columns=["id", "in_sf"])
# Labels
y = data.in_sf
# Fit
# - X must be array-like with 2 axes (rows and columns)
# - y must be array-like with 1 axis, essentially a vector,
#   same length as the number of rows in X
model.fit(X, y)
# -
# ## 3. Vorhersagen treffen
#
# Wir treffen eine Vorhersage auf den Daten des Datensatzes `house_location_new_data_with_labels.csv`.
# ### 3.1. Dummy-Modell
# TODO: make predictions
# Dummy model: predict San Francisco (1) whenever the feature exceeds the threshold.
in_sf_predicted = (data_new[feature].values > decision_value).astype(int)
in_sf_predicted
# ### 3.2. Scikit-Learn
# +
# Scikit-learn model: same feature columns as during training (drop id + label).
X_new = data_new.drop(columns=["id", "in_sf"])
in_sf_predicted_sklearn = model.predict(X_new)
in_sf_predicted_sklearn
# -
# ## 4. Modell evaluieren
#
# Da wir wiederum für alle Datenpunkte des Datensatzes `house_location_new_data_with_labels.csv` die richtige Antwort kennen, können wir die Vorhersage des Modells mit diesen richtigen Antworten abgleichen und so eine Einschätzung gewinnen, ob das Modell funktioniert. Die dafür verwendete Metrik ist wieder die *Genauigkeit* oder *Accuracy* oder *Korrekte-Klassifikations-Rate* und soll den Prozentsatz erfassen, zu dem das Modell korrekte Vorhersagen liefert.
# TODO: compare the predictions against the true labels
in_sf_true = data_new.in_sf.values
in_sf_true
# Accuracy = fraction of predictions that match the true labels.
accuracy = (in_sf_predicted == in_sf_true).sum() / len(in_sf_true)
accuracy
accuracy_sklearn = (in_sf_predicted_sklearn == in_sf_true).sum() / len(in_sf_true)
accuracy_sklearn
| 1_7_modelle_trainieren/Tutorial - Modelle trainieren - completed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import pandas as pd
import numpy as np
import re
import plotly.graph_objects as go
# Incident-level hate-crime records and Census intercensal state population
# estimates (Excel sheet with header/footer rows trimmed below).
ml = pd.read_csv('merged.csv')
pop = pd.read_excel("st-est00int-01.xls")
# One frame per year, 2000-2009. (A groupby on YEAR would condense this, but
# the per-year names are referenced individually in the map cells below.)
ml_2000 = ml[ml['YEAR'] == 2000]
ml_2001 = ml[ml['YEAR'] == 2001]
ml_2002 = ml[ml['YEAR'] == 2002]
ml_2003 = ml[ml['YEAR'] == 2003]
ml_2004 = ml[ml['YEAR'] == 2004]
ml_2005 = ml[ml['YEAR'] == 2005]
ml_2006 = ml[ml['YEAR'] == 2006]
ml_2007 = ml[ml['YEAR'] == 2007]
ml_2008 = ml[ml['YEAR'] == 2008]
ml_2009 = ml[ml['YEAR'] == 2009]
# Incident counts per state code for each year.
df_00 = ml_2000["STATE"].value_counts()
df_01 = ml_2001["STATE"].value_counts()
df_02 = ml_2002["STATE"].value_counts()
df_03 = ml_2003["STATE"].value_counts()
df_04 = ml_2004["STATE"].value_counts()
df_05 = ml_2005["STATE"].value_counts()
df_06 = ml_2006["STATE"].value_counts()
df_07 = ml_2007["STATE"].value_counts()
df_08 = ml_2008["STATE"].value_counts()
df_09 = ml_2009["STATE"].value_counts()
# Keep only the 51 state rows (incl. DC) and the yearly estimate columns;
# state names in the sheet are prefixed with '.', so strip it.
pop = pop.drop(['Unnamed: 1', 'Unnamed: 12'], axis = 1)
pop = pop[8:59]
pop.columns = ['State', '2000', '2001', '2002', '2003', '2004','2005','2006','2007','2008','2009','2010']
pop.State = pop.State.str.strip('.')
# Full state name -> USPS code. Includes a few territories (MP, PW, PR, VI)
# that are absent from the Census table; their entries simply never match.
us_state_abbrev = {
    'Alabama': 'AL',
    'Alaska': 'AK',
    'Arizona': 'AZ',
    'Arkansas': 'AR',
    'California': 'CA',
    'Colorado': 'CO',
    'Connecticut': 'CT',
    'Delaware': 'DE',
    'District of Columbia': 'DC',
    'Florida': 'FL',
    'Georgia': 'GA',
    'Hawaii': 'HI',
    'Idaho': 'ID',
    'Illinois': 'IL',
    'Indiana': 'IN',
    'Iowa': 'IA',
    'Kansas': 'KS',
    'Kentucky': 'KY',
    'Louisiana': 'LA',
    'Maine': 'ME',
    'Maryland': 'MD',
    'Massachusetts': 'MA',
    'Michigan': 'MI',
    'Minnesota': 'MN',
    'Mississippi': 'MS',
    'Missouri': 'MO',
    'Montana': 'MT',
    'Nebraska': 'NE',
    'Nevada': 'NV',
    'New Hampshire': 'NH',
    'New Jersey': 'NJ',
    'New Mexico': 'NM',
    'New York': 'NY',
    'North Carolina': 'NC',
    'North Dakota': 'ND',
    'Northern Mariana Islands':'MP',
    'Ohio': 'OH',
    'Oklahoma': 'OK',
    'Oregon': 'OR',
    'Palau': 'PW',
    'Pennsylvania': 'PA',
    'Puerto Rico': 'PR',
    'Rhode Island': 'RI',
    'South Carolina': 'SC',
    'South Dakota': 'SD',
    'Tennessee': 'TN',
    'Texas': 'TX',
    'Utah': 'UT',
    'Vermont': 'VT',
    'Virgin Islands': 'VI',
    'Virginia': 'VA',
    'Washington': 'WA',
    'West Virginia': 'WV',
    'Wisconsin': 'WI',
    'Wyoming': 'WY',
}
# Re-key the population table by state code so it aligns with the
# value_counts indices (which are state codes).
pop['State Code'] = pop['State'].map(us_state_abbrev)
pop = pop.sort_values(by = ['State Code'])
pop = pop.set_index('State Code')
pop = pop.drop('State', axis = 1)
# Convert populations to millions so counts/pop yields incidents per million.
pop = pop/1_000_000
# +
# Hate-crime incidents per million residents, per state and year. Dividing the
# counts Series by a population Series aligns on the state-code index; states
# with no recorded incidents that year come out NaN and are set to 0.
# Bug fix: 2006-2009 previously divided by the *2005* population column
# (copy-paste error); each year now uses its own population estimate.
permil_00 = df_00/pop['2000']
permil_00 = permil_00.fillna(0)
permil_01 = df_01/pop['2001']
permil_01 = permil_01.fillna(0)
permil_02 = df_02/pop['2002']
permil_02 = permil_02.fillna(0)
permil_03 = df_03/pop['2003']
permil_03 = permil_03.fillna(0)
permil_04 = df_04/pop['2004']
permil_04 = permil_04.fillna(0)
permil_05 = df_05/pop['2005']
permil_05 = permil_05.fillna(0)
permil_06 = df_06/pop['2006']
permil_06 = permil_06.fillna(0)
permil_07 = df_07/pop['2007']
permil_07 = permil_07.fillna(0)
permil_08 = df_08/pop['2008']
permil_08 = permil_08.fillna(0)
permil_09 = df_09/pop['2009']
permil_09 = permil_09.fillna(0)
# +
def plot_hate_crime_map(rate_per_million, year):
    """Render a USA choropleth of hate-crime incidents per million for one year.

    rate_per_million : pandas Series of rates indexed by two-letter state code.
    year : int, used only in the figure title.
    """
    # Bug fix: the original cells passed ml_YYYY['STATE'].unique() as
    # `locations` and the per-million Series as `z`. Those are in different
    # orders (appearance order vs the Series' sorted index) and can even differ
    # in length (the Series includes zero-incident states after fillna), so
    # values could land on the wrong states. Using the Series' own index keeps
    # each z value paired with its state.
    fig = go.Figure(data=go.Choropleth(
        locations=rate_per_million.index,            # spatial coordinates
        z=rate_per_million.astype(float),            # data to be color-coded
        locationmode='USA-states',                   # match entries in `locations`
        colorscale='Reds',
        colorbar=dict(title='Number of Hate Crime per Million', dtick=20)))
    # Fixed color range so the maps are comparable across years.
    fig.data[0].update(zmin=0, zmax=120)
    fig.update_layout(
        title_text='Hate Crime Frequency by State in {}'.format(year),
        geo_scope='usa',  # limit map scope to USA
    )
    fig.show()


#330, 1005
vals = [0, 20, 40, 60, 80, 100]
len(vals)

# One map per year; replaces ten near-identical copy-pasted cells.
for _year, _rate in enumerate([permil_00, permil_01, permil_02, permil_03,
                               permil_04, permil_05, permil_06, permil_07,
                               permil_08, permil_09], start=2000):
    plot_hate_crime_map(_rate, _year)
# -
| Heat Maps for 2000 - 2009.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Installation Instructions
# Git pull the codes from github to your colab machine
# You will have this message:
# Warning: This notebook was not authored by Google.
# Click RUN ANYWAY
# !git clone https://github.com/xbresson/CS5242_2021.git
# +
# Mount Google Drive to the colab machine
# You will have this message:
# Go to this URL in a browser: ---
# Follow the URL, sign-in to Google login
# ALLOW google drive to access to your google account
# Copy-paste the code to the notebook
# Enter your authorization code: ---
# Mount Google Drive inside the Colab VM so the cloned repo can be persisted.
from google.colab import drive
drive.mount('/content/gdrive')
# Copy github folder from colab machine to your google drive
# !cp -R /content/CS5242_2021 /content/gdrive/My\ Drive/CS5242_2021_codes
# !rm -R /content/CS5242_2021
# +
# Installation is done.
# You can close this notebook.
| codes/installation/installation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="grBmytrShbUE"
# # High-performance Simulation with Kubernetes
#
# This tutorial will describe how to set up high-performance simulation using a
# TFF runtime running on Kubernetes. The model is the same as in the previous
# tutorial, **High-performance simulations with TFF**. The only difference is that
# here we use a worker pool instead of a local executor.
#
# This tutorial refers to Google Cloud's [GKE](https://cloud.google.com/kubernetes-engine/) to create the Kubernetes cluster,
# but all the steps after the cluster is created can be used with any Kubernetes
# installation.
# + [markdown] id="SyXVaj0dknQw"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/federated/tutorials/high_performance_simulation_with_kubernetes"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/federated/blob/master/docs/tutorials/high_performance_simulation_with_kubernetes.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/federated/blob/master/docs/tutorials/high_performance_simulation_with_kubernetes.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] id="yiq_MY4LopET"
# ## Launch the TFF Workers on GKE
#
# > **Note:** This tutorial assumes the user has an existing GCP project.
#
# ### Create a Kubernetes Cluster
#
# The following step only needs to be done once. The cluster can be re-used for future workloads.
#
# Follow the GKE instructions to [create a container cluster](https://cloud.google.com/kubernetes-engine/docs/tutorials/hello-app#step_4_create_a_container_cluster). The rest of this tutorial assumes that the cluster is named `tff-cluster`, but the actual name isn't important.
# Stop following the instructions when you get to "*Step 5: Deploy your application*".
#
# ### Deploy the TFF Worker Application
#
# The commands to interact with GCP can be run [locally](https://cloud.google.com/kubernetes-engine/docs/tutorials/hello-app#option_b_use_command-line_tools_locally) or in the [Google Cloud Shell](https://cloud.google.com/shell/). We recommend the Google Cloud Shell since it doesn't require additional setup.
#
# 1. Run the following command to launch the Kubernetes application.
#
# ```
# $ kubectl create deployment tff-workers --image=gcr.io/tensorflow-federated/remote-executor-service:latest
# ```
#
# 2. Add a load balancer for the application.
#
# ```
# $ kubectl expose deployment tff-workers --type=LoadBalancer --port 80 --target-port 8000
# ```
#
# > **Note:** This exposes your deployment to the internet and is for demo
# purposes only. For production use, a firewall and authentication are strongly
# recommended.
# + [markdown] id="WK4ohHUZvVVc"
# Look up the IP address of the loadbalancer on the Google Cloud Console. You'll need it later to connect the training loop to the worker app.
# + [markdown] id="8Lq8r5uaT2rB"
# ### (Alternately) Launch the Docker Container Locally
#
# ```
# $ docker run --rm -p 8000:8000 gcr.io/tensorflow-federated/remote-executor-service:latest
# ```
# + [markdown] id="_zFenI3IPpgI"
# ## Set Up TFF Environment
# + id="ke7EyuvG0Zyn"
#@test {"skip": true}
# !pip install --quiet --upgrade tensorflow-federated-nightly
# !pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
# + [markdown] id="dFkcJZAojZDm"
# ## Define the Model to Train
# + id="J0Qk0sCDZUQR"
import collections
import time
import tensorflow as tf
import tensorflow_federated as tff

# EMNIST, partitioned by writer: one client dataset per writer id.
source, _ = tff.simulation.datasets.emnist.load_data()


def map_fn(example):
    """Flatten each 28x28 image into a length-784 vector, keeping its label."""
    return collections.OrderedDict(
        x=tf.reshape(example['pixels'], [-1, 784]), y=example['label'])


def client_data(n):
    """Return the n-th client's dataset: 10 epochs, batches of 20."""
    ds = source.create_tf_dataset_for_client(source.client_ids[n])
    return ds.repeat(10).batch(20).map(map_fn)


# Simulate 10 participating clients.
train_data = [client_data(n) for n in range(10)]
input_spec = train_data[0].element_spec


def model_fn():
    """Build a fresh softmax-regression model wrapped for TFF (one per round)."""
    model = tf.keras.models.Sequential([
        tf.keras.layers.Input(shape=(784,)),
        tf.keras.layers.Dense(units=10, kernel_initializer='zeros'),
        tf.keras.layers.Softmax(),
    ])
    return tff.learning.from_keras_model(
        model,
        input_spec=input_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])


# Federated Averaging process; clients optimize locally with SGD(lr=0.02).
trainer = tff.learning.build_federated_averaging_process(
    model_fn, client_optimizer_fn=lambda: tf.keras.optimizers.SGD(0.02))
def evaluate(num_rounds=10):
    """Run `num_rounds` rounds of Federated Averaging over `train_data`,
    printing each round's training loss and wall-clock duration."""
    state = trainer.initialize()
    # `round_num` instead of `round`: the original shadowed the builtin round().
    for round_num in range(num_rounds):
        t1 = time.time()
        state, metrics = trainer.next(state, train_data)
        t2 = time.time()
        print('Round {}: loss {}, round time {}'.format(round_num, metrics.loss, t2 - t1))
# + [markdown] id="x5OhgAp7jrNI"
# ## Set Up the Remote Executors
#
# By default, TFF executes all computations locally. In this step we tell TFF to connect to the Kubernetes services we set up above. Be sure to copy the IP address of your service here.
# + id="sXSLXwcdciYm"
import grpc

# One insecure gRPC channel per simulated client; the remote execution context
# spreads client computations across these worker connections.
ip_address = '0.0.0.0' #@param {type:"string"}
port = 80 #@param {type:"integer"}
channels = [grpc.insecure_channel(f'{ip_address}:{port}') for _ in range(10)]
tff.backends.native.set_remote_execution_context(channels)
# + [markdown] id="bEgpmgSRktJY"
# ## Run Training
# + id="mw92IA6_Zrud"
evaluate()
| site/en-snapshot/federated/tutorials/high_performance_simulation_with_kubernetes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms
import helper
import fc_model
# +
# Normalize single-channel images to roughly [-1, 1].
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])
# Download and load the training data
trainset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
# -
# Sanity-check: display one training image.
image, label = next(iter(trainloader))
helper.imshow(image[0,:]);
# 784 -> 512 -> 256 -> 128 -> 10 fully connected classifier.
model = fc_model.Network(784, 10, [512, 256, 128])
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
fc_model.train(model, trainloader, testloader, criterion, optimizer, epochs=2)
# NOTE(review): Python 2 print statements here but print() calls below; the
# kernelspec says Python 2 — confirm which interpreter this notebook targets.
print "Our model: \n\n", model, '\n'
print "The state dict keys: \n\n", model.state_dict().keys()
# Persist only the learned parameters (state_dict), then round-trip them.
torch.save(model.state_dict(), 'checkpoint.pth')
state_dict = torch.load('checkpoint.pth')
print(state_dict.keys())
model.load_state_dict(state_dict)
# Try this
model = fc_model.Network(784, 10, [400, 200, 100])
# This will throw an error because the tensor sizes are wrong!
# model.load_state_dict(state_dict)
# +
# Save the architecture hyperparameters alongside the weights so the network
# can be rebuilt without knowing its layer sizes in advance.
checkpoint = {'input_size': 784,
              'output_size': 10,
              'hidden_layers': [each.out_features for each in model.hidden_layers],
              'state_dict': model.state_dict()}
torch.save(checkpoint, 'checkpoint.pth')
# -
# -
def load_checkpoint(filepath):
    """Rebuild a fc_model.Network from a checkpoint that stores both the
    architecture hyperparameters and the trained weights."""
    ckpt = torch.load(filepath)
    net = fc_model.Network(ckpt['input_size'],
                           ckpt['output_size'],
                           ckpt['hidden_layers'])
    net.load_state_dict(ckpt['state_dict'])
    return net
# Rebuild the network from the checkpoint and show its architecture.
model = load_checkpoint('checkpoint.pth')
print(model)
| Intro to Pytorch/Fashion MNIST 3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#export
from local.imports import *
from local.test import *
from local.core import *
from local.layers import *
from local.data.pipeline import *
from local.data.source import *
from local.data.core import *
from local.data.external import *
from local.notebook.showdoc import show_doc
from local.optimizer import *
from local.learner import *
# +
#default_exp callback.progress
# -
# # Progress and logging callbacks
#
# > Callback and helper function to track progress of training or log results
from local.utils.test import *
# ## ProgressCallback -
# +
# export
@docs
class ProgressCallback(Callback):
    "A `Callback` to handle the display of progress bars"
    # Must run after Recorder: begin_fit reads self.recorder.metric_names.
    run_after=Recorder
    def begin_fit(self):
        assert hasattr(self.learn, 'recorder')
        # Master bar spans the epochs; per-epoch progress bars are its children.
        self.mbar = master_bar(list(range(self.n_epoch)))
        self.mbar.on_iter_begin()
        # Redirect the learner's logger so stats are rendered into the bar's table.
        self.old_logger,self.learn.logger = self.logger,self._write_stats
        self._write_stats(self.recorder.metric_names)
    def begin_epoch(self): self.mbar.update(self.epoch)
    def begin_train(self): self._launch_pbar()
    def begin_validate(self): self._launch_pbar()
    def after_train(self): self.pbar.on_iter_end()
    def after_validate(self): self.pbar.on_iter_end()
    def after_batch(self): self.pbar.update(self.iter+1)
    def _launch_pbar(self):
        # New child progress bar over the current dataloader (train or valid).
        self.pbar = progress_bar(self.dl, parent=self.mbar)
        self.pbar.update(0)
    def after_fit(self):
        # Close the master bar and restore the original logger.
        self.mbar.on_iter_end()
        self.learn.logger = self.old_logger
    def _write_stats(self, log):
        # Floats are formatted to 6 decimals; everything else is str()'d.
        self.mbar.write([f'{l:.6f}' if isinstance(l, float) else str(l) for l in log], table=True)
    # Per-method docstrings are injected from this dict by the @docs decorator.
    _docs = dict(begin_fit="Setup the master bar over the epochs",
                 begin_epoch="Update the master bar",
                 begin_train="Launch a progress bar over the training dataloader",
                 begin_validate="Launch a progress bar over the validation dataloader",
                 after_train="Close the progress bar over the training dataloader",
                 after_validate="Close the progress bar over the validation dataloader",
                 after_batch="Update the current progress bar",
                 after_fit="Close the master bar")
# Register as a default callback so every Learner gets progress bars.
defaults.callbacks = [TrainEvalCallback, Recorder, ProgressCallback]
# -
# Train a tiny synthetic learner to exercise the callback end to end.
learn = synth_learner()
learn.fit(5)
#hide
# After fit() the child progress bar must be closed.
assert not learn.progress.mbar.child.is_active
lines = learn.progress.mbar.lines
# First written row is the header; following rows mirror the recorder's values
# (l[:-1] drops the last column — presumably the elapsed time; TODO confirm).
test_eq(learn.recorder.metric_names, lines[0])
for i,(l,v) in enumerate(zip(lines[1:],learn.recorder.values)):
    test_eq(l[:-1], [str(i)] + [f'{x:.6f}' for x in v])
# Render the documentation entry for each callback hook.
show_doc(ProgressCallback.begin_fit)
show_doc(ProgressCallback.begin_epoch)
show_doc(ProgressCallback.begin_train)
show_doc(ProgressCallback.begin_validate)
show_doc(ProgressCallback.after_batch)
show_doc(ProgressCallback.after_train)
show_doc(ProgressCallback.after_validate)
show_doc(ProgressCallback.after_fit)
# ## ShowGraphCallback -
# export
class ShowGraphCallback(Callback):
    "Update a graph of training and validation loss"
    # Must run after ProgressCallback: it draws on that callback's master bar.
    run_after=ProgressCallback
    def begin_fit(self):
        # One entry appended per finished training phase: cumulative iteration count.
        self.nb_batches = []
        assert hasattr(self.learn, 'progress')
    def after_train(self): self.nb_batches.append(self.train_iter)
    def after_epoch(self):
        "Plot validation loss in the pbar graph"
        rec = self.learn.recorder
        iters = range_of(rec.losses)
        # rec.values holds per-epoch stats; index 1 is taken as the validation loss.
        val_losses = [v[1] for v in rec.values]
        # Extrapolate the x-axis to the expected total iterations so the plot does
        # not rescale every epoch (assumes epochs of roughly equal length).
        x_bounds = (0, (self.n_epoch - len(self.nb_batches)) * self.nb_batches[0] + len(rec.losses))
        y_bounds = (0, max((max(Tensor(rec.losses)), max(Tensor(val_losses)))))
        self.progress.mbar.update_graph([(iters, rec.losses), (self.nb_batches, val_losses)], x_bounds, y_bounds)
#slow
# Smoke test: fit a synthetic learner with the graph callback attached.
learn = synth_learner(cbs=ShowGraphCallback())
learn.fit(10)
# ## CSVLogger -
# export
class CSVLogger(Callback):
    "Log the results displayed in `learn.path/fname`"
    # FIX: the docstring above must be the FIRST statement of the class body to
    # become __doc__; it previously sat below `order`, where it was a no-op string.
    order=30 #Need to run after the recorder
    def __init__(self, fname='history.csv', append=False):
        # fname is resolved relative to the learner's path; append reuses an existing log.
        self.fname,self.append = Path(fname),append
    def read_log(self):
        "Convenience method to quickly access the log."
        return pd.read_csv(self.path/self.fname)
    def begin_fit(self):
        "Prepare file with metric names."
        # FIX: create the directory the log file actually lives in. The previous
        # `self.path.parent.mkdir(...)` made the parent of the learner's path, so
        # opening `self.path/self.fname` could still fail if `self.path` was missing.
        (self.path/self.fname).parent.mkdir(parents=True, exist_ok=True)
        self.file = (self.path/self.fname).open('a' if self.append else 'w')
        self.file.write(','.join(self.recorder.metric_names) + '\n')
        # Swap in our logger so every stats line is also written to the CSV.
        self.old_logger,self.learn.logger = self.logger,self._write_line
    def _write_line(self, log):
        "Write a line with `log` and call the old logger."
        self.file.write(','.join([str(t) for t in log]) + '\n')
        self.old_logger(log)
    def after_fit(self):
        "Close the file and clean up."
        self.file.close()
        self.learn.logger = self.old_logger
# The results are appended to an existing file if `append`, or they overwrite it otherwise.
learn = synth_learner(cbs=CSVLogger())
learn.fit(5)
show_doc(CSVLogger.read_log)
# The CSV on disk must mirror what the recorder tracked during training.
df = learn.csv_logger.read_log()
test_eq(df.columns.values, learn.recorder.metric_names)
for i,v in enumerate(learn.recorder.values):
    test_close(df.iloc[i][:3], [i] + v)
# Clean up the log file written by the test run.
os.remove(learn.path/learn.csv_logger.fname)
show_doc(CSVLogger.begin_fit)
show_doc(CSVLogger.after_fit)
# ## Export -
#hide
# Regenerate the library modules from all notebooks.
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
| dev/15_callback_progress.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
import argparse
import numpy as np # type: ignore
from PIL import Image # type: ignore
def validate_image(image: np.ndarray) -> np.ndarray:
    """Snap every pixel to the nearest of the four NES grey levels (0, 85, 170, 255)."""
    levels = np.rint(np.array(image) / 85.0)
    return (levels * 85.0).astype(np.uint8)
def image_to_pattern(image: np.ndarray) -> bytes:
    """Encode a 128x128 grayscale image (levels 0/85/170/255) as a 4096-byte
    Famicom/NES pattern table.

    The image is scanned as 16x16 tiles of 8x8 pixels, row-major. Each tile is
    emitted as 8 low-plane bytes followed by 8 high-plane bytes; within a byte,
    bit 7 is the leftmost pixel.
    """
    out = bytearray()
    for tile_row in range(16):
        for tile_col in range(16):
            top, left = tile_row * 8, tile_col * 8
            # Reduce pixel values to the 2-bit palette index 0..3.
            tile = image[top:top + 8, left:left + 8] // 85
            low_plane, high_plane = [], []
            for r in range(8):
                lo = hi = 0
                for c in range(8):
                    v = int(tile[r, c])
                    shift = 7 - c
                    lo |= (v & 0x01) << shift
                    hi |= ((v >> 1) & 0x01) << shift
                low_plane.append(lo)
                high_plane.append(hi)
            out.extend(low_plane)
            out.extend(high_plane)
    return bytes(out)
def pattern_to_image(pattern: bytes) -> np.ndarray:
    """Decode a Famicom/NES pattern table back into a 128x128 grayscale image.

    Inverse of image_to_pattern: each 16-byte tile holds 8 low-plane bytes then
    8 high-plane bytes; the 2-bit palette index is scaled by 85 to a grey level.
    Tiles beyond the end of `pattern` are left black.
    """
    out = np.zeros((16 * 8, 16 * 8), dtype=np.uint8)
    for idx in range(len(pattern) // 16):
        base = idx * 16
        low_plane = pattern[base:base + 8]
        high_plane = pattern[base + 8:base + 16]
        top, left = (idx // 16) * 8, (idx % 16) * 8
        for r in range(8):
            lo, hi = low_plane[r], high_plane[r]
            for c in range(8):
                shift = 7 - c
                v = (((hi >> shift) & 0x01) << 1) | ((lo >> shift) & 0x01)
                out[top + r, left + c] = v * (255 // 3)
    return out
def main(argv: list) -> int:
    """Command-line entry point: convert between grayscale images and
    Famicom/NES pattern-table binaries.

    :param argv: full argument vector, ``sys.argv``-style (argv[0] is the program name)
    :return: process exit status (0 on success)
    """
    result = 0
    parser = argparse.ArgumentParser(description='Famicom pattern table tool')
    parser.add_argument('--input', help='the input filename')
    parser.add_argument('--output', help='the output filename')
    parser.add_argument('--i2p', action='store_true', help='image to pattern')
    parser.add_argument('--p2i', action='store_true', help='pattern to image')
    # FIX: parse the argv that was passed in (minus the program name) instead of
    # implicitly reading sys.argv — the `argv` parameter was previously ignored,
    # making main() untestable and surprising to callers.
    arguments = parser.parse_args(argv[1:])
    if arguments.i2p:
        # Image -> pattern: load, force 8-bit grayscale, quantize, encode, dump bytes.
        img = Image.open(arguments.input)
        if img.mode != 'L':
            img = img.convert(mode='L')
            print('Warning: input file format is not grayscale, automatic conversion...')
        image = np.array(img)
        image = validate_image(image)
        data = image_to_pattern(image)
        with open(arguments.output, 'wb') as f:
            f.write(data)
    elif arguments.p2i:
        # Pattern -> image: read raw CHR bytes, decode, save as a grayscale PNG.
        with open(arguments.input, 'rb') as f:
            data = f.read()
        image = pattern_to_image(data)
        img = Image.fromarray(image, mode='L')
        img.save(arguments.output, format='PNG')
    return result
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| tools/pattern.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Mobile robot - unicycle model
import numpy as np
import sympy as sy
sy.init_printing()
# Symbolic state (x, y, theta), operating point (x0, y0, theta0) and inputs (v, omega).
s = sy.symbols('s', real=False)
x, y, theta = sy.symbols('x, y, theta')
x0, y0, theta0 = sy.symbols('x0, y0, theta0')
omega, v = sy.symbols('omega, v')
omega0, v0 = sy.symbols('omega_0, v_0')
zx, zy, ztheta = sy.symbols('z_x, z_y, z_theta')
ztheta
# Unicycle kinematics: theta' = omega, x' = v cos(theta), y' = v sin(theta).
xi = sy.Matrix([theta, x, y])
dxidt = sy.Matrix([omega, v*sy.cos(theta), v*sy.sin(theta)])
# ## Linearization
# Jacobian column w.r.t. theta, evaluated at the operating point; the dynamics do
# not depend on x or y, so the other two columns of A are zero.
A1 = dxidt.diff(theta).subs([(theta, theta0), (v, v0)])
A1
A = sy.zeros(3,3)
A[:, 0] = A1
A
A*A
# ## Measurements
# Bearing and range to the origin.
# NOTE(review): sy.atan2 returns radians, but the literal 180 reads like degrees —
# this probably should be sy.pi; confirm the intended units before relying on phi.
phi = sy.atan2(y, x) - theta + 180
d = sy.sqrt(x*x + y*y)
d
# Measurement Jacobian rows, evaluated at the operating point (columns: theta, x, y).
C1 = sy.Matrix([[phi.diff(theta).subs([(x, x0), (y, y0)]),
                 phi.diff(x).subs([(x, x0), (y, y0)]),
                 phi.diff(y).subs([(x, x0), (y, y0)])]])
C1
C2 = sy.Matrix([[d.diff(theta).subs([(x, x0), (y, y0)]),
                 d.diff(x).subs([(x, x0), (y, y0)]),
                 d.diff(y).subs([(x, x0), (y, y0)])]])
C2
C = sy.Matrix([C1, C2])
C
# Observability matrix [C; CA; CA^2] and its rank for the 3-state system.
Obs = sy.Matrix([C, C*A, C*A*A])
Obs
Obs.rank()
| modern-control/notebooks/Unicycle-model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="I08sFJYCxR0Z"
# 
# + [markdown] id="FwJ-P56kq6FU"
# [](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/21_Gender_Classifier.ipynb)
# + [markdown] id="Niy3mZAjoayg"
# # 21. Gender Classifier
# + [markdown] id="rTVcwSuUjnrP"
# **Gender Classifier** detects the gender of the patient in the clinical document.
# It can classify the documents into `Female`, `Male` and `Unknown`.
#
#
# -'**Classifierdl_gender_sbert**' (works with licensed `sbiobert_base_cased_mli`)
#
# It has been trained on more than four thousand clinical documents (radiology reports, pathology reports, clinical visits, etc.) which were annotated internally.
#
# (Spark NLP 2.7.1)
# + [markdown] id="okhT7AcXxben"
# ## Colab Setup
# + colab={"base_uri": "https://localhost:8080/", "height": 109, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "Ly8gQ29weXJpZ2h0IDIwMTcgR29vZ2xlIExMQwovLwovLyBM<KEY>", "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} id="drxBhpZxI32S" outputId="2eb6865d-c7b1-454a-b041-5e2f1180d6f3"
import json
from google.colab import files
# Upload the Spark NLP for Healthcare license JSON and load its keys.
license_keys = files.upload()
with open(list(license_keys.keys())[0]) as f:
    license_keys = json.load(f)
license_keys.keys()
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="hIn5f_zDVS6g" outputId="<KEY>"
license_keys['JSL_VERSION']
# +
import os
# Install java
# ! apt-get update -qq
# ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
# Point the JVM-based Spark stack at the Java 8 install.
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
# ! java -version
# Export the licensed-library credentials the JSL packages read from the environment.
secret = license_keys['SECRET']
os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']
os.environ['AWS_ACCESS_KEY_ID']= license_keys['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']
version = license_keys['PUBLIC_VERSION']
jsl_version = license_keys['JSL_VERSION']
# ! pip install --ignore-installed -q pyspark==2.4.4
# ! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret
# ! pip install --ignore-installed -q spark-nlp==2.7.1
import sparknlp
print (sparknlp.version())
import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
# Start the licensed Spark session with generous driver memory/serializer buffers.
params = {"spark.driver.memory":"16G",
          "spark.kryoserializer.buffer.max":"2000M",
          "spark.driver.maxResultSize":"2000M"}
spark = sparknlp_jsl.start(secret, params=params)
# + colab={"base_uri": "https://localhost:8080/"} id="9SJeZ_-jCdp7" outputId="03fbdff6-156d-46ff-c2c7-89d8efcfe5bc"
print(sparknlp.version())
# + colab={"base_uri": "https://localhost:8080/", "height": 218} id="mzV1fdf-Vs1f" outputId="343a94ca-0aa7-4ae7-c6bd-9d5d23463159"
spark
# + id="sNs3kOKMVK2Z"
# if you want to start the session with custom params as in start function above
def start(secret):
    """Build (or fetch) a local SparkSession configured for licensed Spark NLP.

    Uses the module-level `version` / `jsl_version` read from the license file.
    """
    conf = {
        "spark.driver.memory": "16G",
        "spark.serializer": "org.apache.spark.serializer.KryoSerializer",
        "spark.kryoserializer.buffer.max": "2000M",
        "spark.jars.packages": "com.johnsnowlabs.nlp:spark-nlp_2.11:"+version,
        "spark.jars": "https://pypi.johnsnowlabs.com/"+secret+"/spark-nlp-jsl-"+jsl_version+".jar",
    }
    builder = SparkSession.builder.appName("Spark NLP Licensed").master("local[*]")
    for key, value in conf.items():
        builder = builder.config(key, value)
    return builder.getOrCreate()
#spark = start(secret)
# + colab={"base_uri": "https://localhost:8080/"} id="l6Vw5-_PzhY1" outputId="a1f38230-0eb9-4842-e3bc-52e65a4cda20"
import sparknlp
# Confirm the open-source and licensed library versions in use.
print (sparknlp.version())
print (sparknlp_jsl.version())
# + [markdown] id="AMU4sAJQ0Rhs"
#
#
# # Gender Classifier Pipeline with **sbert**
# + colab={"base_uri": "https://localhost:8080/"} id="DEa5SITBxmY0" outputId="c14e9394-85ab-46b8-ae81-86afcdf056f1"
# Stage 1: wrap the raw text column into Spark NLP's document annotation.
document = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")
# Stage 2: licensed sbiobert sentence embeddings, capped at 512 tokens per sentence.
sbert_embedder = BertSentenceEmbeddings().pretrained("sbiobert_base_cased_mli", 'en', 'clinical/models')\
    .setInputCols(["document"])\
    .setOutputCol("sentence_embeddings")\
    .setMaxSentenceLength(512)
# Stage 3: pretrained gender classifier on top of the sbert embeddings.
gender_classifier = ClassifierDLModel.pretrained( 'classifierdl_gender_sbert', 'en', 'clinical/models') \
    .setInputCols(["document", "sentence_embeddings"]) \
    .setOutputCol("class")
gender_pred_pipeline_sbert = Pipeline(stages = [ document,
                                                 sbert_embedder,
                                                 gender_classifier
                                                 ])
# All stages are pretrained, so fitting on an empty frame just materializes the PipelineModel.
empty_data = spark.createDataFrame([[""]]).toDF("text")
model_sbert = gender_pred_pipeline_sbert.fit(empty_data)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="XgxuAU8ZsXVJ" outputId="3f2e9ce5-2bc4-481a-e382-c4960272d73a"
text ="""social history: shows that does not smoke cigarettes or drink alcohol,lives in a nursing home.family history: shows a family history of breast cancer."""
# LightPipeline runs the fitted pipeline on plain Python strings, without Spark overhead.
gender_pipeline_sbert = LightPipeline(model_sbert)
result = gender_pipeline_sbert.annotate(text)
result['class'][0]
# + [markdown] id="IUThl3_J9dqn"
# ### Sample Clinical Notes
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="vJCI7fZf9-2g" outputId="cfef7959-c1b4-41f6-8f86-b5c59a4bea07"
# Each sample below runs one clinical note through the light pipeline and shows
# the predicted class (`Female`, `Male` or `Unknown`). The note texts are data
# and are left exactly as-is.
text1 = '''social history: shows that does not smoke cigarettes or drink alcohol,lives in a nursing home.
family history: shows a family history of breast cancer.'''
result = gender_pipeline_sbert.annotate(text1)
result['class'][0]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="hNbN8ZfPDwbm" outputId="28e49f84-63e9-46c4-fb20-4e4fe691d9b5"
text2 = '''The patient is a 48- year-old, with severe mitral stenosis diagnosed by echocardiography, moderate
aortic insufficiency and moderate to severe pulmonary hypertension who is being evaluated as a part of a preoperative
workup for mitral and possible aortic valve repair or replacement.'''
result = gender_pipeline_sbert.annotate(text2)
result['class'][0]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="Y7AOPrQHDy6D" outputId="f3c55561-704d-4563-a9a8-aad91d6a330c"
text3 = '''HISTORY: The patient is a 57-year-old XX, who I initially saw in the office on 12/27/07, as a referral from the Tomball Breast Center.
On 12/21/07, the patient underwent image-guided needle core biopsy of a 1.5 cm lesion at the 7 o'clock position of the left breast (inferomedial).
The biopsy returned showing infiltrating ductal carcinoma high histologic grade.
The patient stated that xx had recently felt and her physician had felt a palpable mass in that area prior to her breast imaging.'''
result = gender_pipeline_sbert.annotate(text3)
result['class'][0]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="woMCJpo2Dywt" outputId="6d2ec2ae-1777-49b8-870d-186cf9290846"
text4 = '''The patient states that xx has been overweight for approximately 35 years and has tried multiple weight loss modalities in
the past including Weight Watchers, NutriSystem, Jenny Craig, TOPS, cabbage diet, grape fruit diet, Slim-Fast, Richard Simmons,
as well as over-the-counter measures without any long-term sustainable weight loss.
At the time of presentation to the practice, xx is 5 feet 6 inches tall with a weight of 285.4 pounds and a body mass index of 46.
xx has obesity-related comorbidities, which includes hypertension and hypercholesterolemia.'''
result = gender_pipeline_sbert.annotate(text4)
result['class'][0]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="lPVx1V9gsW72" outputId="eecf0935-6351-4349-ad58-2dc69e360eed"
text5 = '''Prostate gland showing moderately differentiated infiltrating adenocarcinoma,
Gleason 3 + 2 extending to the apex involving both lobes of the prostate, mainly right.'''
result = gender_pipeline_sbert.annotate(text5)
result['class'][0]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="llLVssxnEpmI" outputId="e18bc57b-74eb-4e03-b86e-78029ff4c15a"
text6 = '''SKIN: The patient has significant subcutaneous emphysema of the upper chest and
anterior neck area although he states that the subcutaneous emphysema has improved significantly since yesterday.'''
result = gender_pipeline_sbert.annotate(text6)
result['class'][0]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="6yjmisN2Er-H" outputId="a70a6bc2-323c-4baf-f3e5-975dcb5e57c1"
text7 = '''INDICATION: The patient is a 42-year-old XX who is five days out from transanal excision of a benign anterior base lesion.
xx presents today with diarrhea and bleeding. Digital exam reveals bright red blood on the finger.
xx is for exam under anesthesia and control of hemorrhage at this time.
'''
result = gender_pipeline_sbert.annotate(text7)
result['class'][0]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="YkCM22qgEr1n" outputId="ae232c9d-41df-4cb1-93cf-12f0014f0944"
text8 = '''INDICATION: ___ year old patient with complicated medical history of paraplegia
and chronic indwelling foley, recurrent MDR UTIs, hx Gallbladder fossa
abscess,type 2 DM, HTN, CAD, DVT s/p left AKA complicated complicated by
respiratory failure requiring tracheostomy and PEG placement, right ischium
osteomyelitis due to chronic pressure ulcers with acute shortness of breath...'''
result = gender_pipeline_sbert.annotate(text8)
result['class'][0]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="Oct19dfPErtD" outputId="63986e62-2298-4bc6-cbaa-1ea10ebf202d"
text9 = '''NDICATION: ___ year old xx with hx of polysubstance abuse, Hep C who
presented to OSH with N/V, BRBPR, found to have elevated lactate, acute renal
failure and with CT concerning acute pancreatitis, active colonic bleed (s/p
IR and now stable-appearing), proctocolitis, and shock, on vanc/zosyn and CRRT'''
result = gender_pipeline_sbert.annotate(text9)
result['class'][0]
| tutorials/Certification_Trainings/Healthcare/21.Gender_Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# # 回顾自动化机器学习解释
#
# 随着机器学习越来越普遍,模型做出的预测对我们社会的许多方面产生了更大的影响。例如,机器学习模型在银行如何决定发放贷款或医生如何确定治疗优先顺序方面成为了一个愈发重要的因素。解释和说明模型的功能变得越来越重要,以便可以说明和证明机器学习模型做出预测的基本原理,并且可以确定模型中任何无意的偏差。
#
# 使用自动化机器学习来训练模型时,可以选择生成特征重要性的解释,以量化每个特征对标签预测的影响程度。在本实验室中,你将探索由自动化机器学习试验生成的解释。
# ## 连接到工作区
#
# 你首先需要使用 Azure ML SDK 连接到工作区。
#
# > **备注**:如果 Azure 订阅的身份验证会话在你完成上一练习后已过期,系统将提示你重新进行身份验证。
# +
import azureml.core
from azureml.core import Workspace
# Load the workspace from the saved configuration file
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
# -
# ## 准备计算目标
#
# 你将使用在前面的实验室中创建的 Azure 机器学习计算群集(如果不存在,则将创建它)。
#
# > **注意事项**:在运行以下代码之前,请先在代码中将 *“你的计算群集”* 更改为你的计算群集的名称!群集名称必须是长度在 2 到 16 个字符之间的全局唯一名称。有效字符是字母、数字和 - 字符。
# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
cluster_name = "your-compute-cluster"
try:
    # Check for an existing compute target
    training_cluster = ComputeTarget(workspace=ws, name=cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    # If it doesn't exist yet, create it
    try:
        compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS11_V2', max_nodes=2)
        training_cluster = ComputeTarget.create(ws, cluster_name, compute_config)
        training_cluster.wait_for_completion(show_output=True)
    except Exception as ex:
        print(ex)
# ## 运行自动化机器学习试验
#
# 在本实验室中,为节省时间,你将运行自动化机器学习试验,且只有三次迭代。
#
# 请注意,将 **model_explainability** 配置选项设为 **True**。
# +
import pandas as pd
from azureml.train.automl import AutoMLConfig
from azureml.core.experiment import Experiment
from azureml.widgets import RunDetails
from azureml.core import Dataset
try:
    # Prepare the training data
    default_ds = ws.get_default_datastore()
    if 'diabetes dataset' not in ws.datasets:
        default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files to /data
                                target_path='diabetes-data/', # Put them in a folder path on the datastore
                                overwrite=True, # Replace existing files of the same name
                                show_progress=True)
        # Create a tabular dataset from the path on the datastore (this may take a while)
        tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv'))
        # Register the tabular dataset
        try:
            tab_data_set = tab_data_set.register(workspace=ws,
                                name='diabetes dataset',
                                description='diabetes data',
                                tags = {'format':'CSV'},
                                create_new_version=True)
            print('Dataset registered.')
        except Exception as ex:
            print(ex)
    else:
        print('Dataset already registered.')
    train_data = ws.datasets.get("diabetes dataset")
    # Configure automated ML (only 3 iterations here to keep the lab quick)
    automl_config = AutoMLConfig(name='Automated ML Experiment',
                                 task='classification',
                                 compute_target=training_cluster,
                                 training_data = train_data,
                                 n_cross_validations = 2,
                                 label_column_name='Diabetic',
                                 iterations=3,
                                 primary_metric = 'AUC_weighted',
                                 max_concurrent_iterations=3,
                                 featurization='off',
                                 model_explainability=True # Generate feature importance!
                                 )
    # Run the automated ML experiment
    print('Submitting Auto ML experiment...')
    automl_experiment = Experiment(ws, 'diabetes_automl')
    automl_run = automl_experiment.submit(automl_config)
    automl_run.wait_for_completion(show_output=True)
    RunDetails(automl_run).show()
except Exception as ex:
    print(ex)
# -
# ## 查看特征重要性
#
# 完成试验后,在上面的小组件中,单击生成最佳结果的运行以查看其详细信息。然后滚动到可视化效果底部以查看相对特征重要性。
#
# ## 在 Azure 机器学习工作室中查看模型解释
#
# 完成试验运行后,单击小组件中的链接以在 Azure 机器学习工作室中查看运行,然后查看 **“解释”** 选项卡。然后:
#
# 1. 选择由自动化机器学习运行创建的解释器。
# 2. 查看 **“全局重要性”** 图表,其中显示整体全局特征重要性。
# 3. 查看 **“摘要重要性”** 图表,其中显示分簇散点图、*小提琴* *图* 或 *盒* 须图中测试数据的每个数据点。
# 4. 选择单个点以查看所选数据点的单个预测的 **“局部特征重要性”**。
# > **更多信息**:有关自动化机器学习的详细信息,请参阅 [Azure ML 文档](https://docs.microsoft.com/azure/machine-learning/how-to-machine-learning-interpretability-automl)。
| 09A - Reviewing Automated Machine Learning Explanations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from os.path import join as p_join
import seaborn as sns
from tqdm import tqdm
##################################
## GLOBAL SETTINGS ###############
##################################
# Default figure size for every plot in this notebook.
plt.rcParams["figure.figsize"] = (12,8)
def seed_all(seed=42):
    """Seed both the stdlib and NumPy RNGs so experiment runs are reproducible."""
    for seeder in (random.seed, np.random.seed):
        seeder(seed)
    print("[ Using Seed : ", seed, " ]")
####################################
#####   SEED ALL EXPERIMENTS   #####
####################################
seed_all()
# -
import sys
sys.path.append("../..") # Adds higher directory to python modules path.
from utils import utils
# Experiment data lives two directories up; keep only the L=6.6 runs.
data_path = p_join('..', '..', 'data_2')
FOLDERS = [p_join(os.path.abspath(data_path), item) for item in os.listdir(data_path) if 'L=6.6' in item]
# +
###############################
#####  IMPORT ML METHODS  #####
###############################
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
#################################
####### GLOBAL CONFIG ###########
#################################
# Shared cross-validation settings passed to every grid search below.
CONFIG = {'scoring': ['accuracy', 'f1_macro'],
          'cv': 5,
          'n_jobs': -1}
# -
# ### Create dataset
X, Y = utils.create_dataset(FOLDERS)
X = utils.normalize_data(X)
# ### Try `KNeighborsClassifier` Grid Search
params = {'n_neighbors': [3,5,7,9], 'weights':['uniform', 'distance']}
# NOTE: `greed_searc_cv` (sic) is the public name defined in the project's utils module.
knn_grid_search_res = utils.greed_searc_cv(KNeighborsClassifier(),
                                           params,
                                           CONFIG,
                                           X, Y)
# ### Try `LogisticRegression` Grid Search
# +
# Logistic regression: l1/l2 need only C; elasticnet additionally sweeps l1_ratio.
params = [{'penalty': ['l1', 'l2'],
           'C': [1/i for i in range(1, 20, 3)],
           'solver': ['saga'],
           'max_iter': [250]},
          {'penalty': ['elasticnet'],
           'C': [1/i for i in range(1, 20, 3)],
           'solver': ['saga'],
           'l1_ratio': np.linspace(0, 1, 5),
           'max_iter': [250]}
         ]
log_reg_grid_search_res = utils.greed_searc_cv(LogisticRegression(),
                                               params,
                                               CONFIG,
                                               X, Y)
# -
# ### Try `SVC` Grid Search
params = {'kernel': ('linear', 'rbf'), 'C':[1, 10]}
svc_grid_search_res = utils.greed_searc_cv(svm.SVC(),
                                           params,
                                           CONFIG,
                                           X, Y)
# ### Try `GNB` Grid Search
# Single default value: effectively just a cross-validated baseline.
params = {'var_smoothing': [1e-9]}
gnb_grid_search_res = utils.greed_searc_cv(GaussianNB(),
                                           params,
                                           CONFIG,
                                           X, Y)
# ### Try `Decision Tree` Grid Search
params = {'criterion': ['gini', 'entropy'],
          'max_depth': [4,5,6,7,8,9,10,11,12,15,20,30,40,50,70,90,120,150]}
dt_grid_search_res = utils.greed_searc_cv(DecisionTreeClassifier(),
                                          params,
                                          CONFIG,
                                          X, Y)
# ### Try `Random Forest` Grid Search
params = {'criterion': ['gini', 'entropy'],
          'n_estimators': [10, 50, 100],
          'max_depth': [3, 5, 10],
          'min_samples_split': [2, 5, 10]}
rf_grid_search_res = utils.greed_searc_cv(RandomForestClassifier(),
                                          params,
                                          CONFIG,
                                          X, Y)
### Just try a plain linear classifier (no grid search)
accuracies_lin_reg = utils.calc_ml_method(SGDClassifier(), CONFIG, X, Y)
accuracies_lin_reg
# ### Plot Pandas DataFrame with summary
# +
all_results = {'KNN': knn_grid_search_res, 'Logistic regression': log_reg_grid_search_res,
               'SVM': svc_grid_search_res, 'GaussianNB': gnb_grid_search_res,
               'Decision Tree': dt_grid_search_res, 'Ramdom Forest': rf_grid_search_res}
# Pull the mean test score of each metric at the best parameter index per method.
data = {}
for method in all_results:
    res = all_results[method]
    bi = res['best_index']
    val = []
    for kk in res['cv_results']:
        if 'mean_test' in kk:
            val.append(res['cv_results'][kk][bi])
    data[method] = val
# -
# Summary table: one row per method, one column per scoring metric.
df = pd.DataFrame.from_dict(data, orient='index', columns=CONFIG['scoring'])
df
# Dump the raw grid-search results for later inspection.
with open('logs/all_res_ml_l_6_6.txt', 'w') as f:
    f.write(str(all_results))
| notebooks/ml/ml_methods_L=6.6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pygtool
# Read the 2D grid-cell area file.
area=pygtool.read2D('GRID/harea128x64_2d')
# NOTE(review): bare attribute access below has no effect — presumably a leftover
# from interactive inspection of read3D; confirm whether it can be removed.
pygtool.read3D.head
# Read the 3D variant and dump its Fortran record header/footer.
area=pygtool.read3D('GRID/harea128x64_3d')
area.getFortranheader_footer()
| test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # network_analysis.py checker
# First, import relevant libraries:
# +
import os
import sys
sys.path.append('../../src/')
from utils.database import dbutils
from features.network_analysis import *
# -
# Pull the Firenze Card locations and entry logs from the project database.
conn = dbutils.connect()
cursor = conn.cursor()
nodes = pd.read_sql('select * from optourism.firenze_card_locations', con=conn)
nodes.head()
firenzedata = pd.read_sql('select * from optourism.firenze_card_logs', con=conn)
firenzedata.head()
# Project helper: join logs with location metadata and derive per-entry fields.
df = prepare_firenzedata(firenzedata, nodes)
df.head()
# firenzedata['date'] = pd.to_datetime(firenzedata['entry_time'],format='%Y-%m-%d %H:%M:%S').dt.date # Convert the entry_time string to a datetime object
df['total_people'] = df['total_adults'] + df['minors']
# Build the dynamic (per-card, time-ordered) edgelist, then collapse it to a static one.
edges = make_dynamic_firenze_card_edgelist(df)
static = make_static_firenze_card_edgelist(edges)
def make_firenze_card_static_graph_with_source(df,nodes,name='short_name',x='longitude',y='latitude'):
    """
    Build a directed, weighted igraph graph from a static edgelist, keeping the
    dummy "source" node, and attach plot coordinates taken from `nodes`.

    :param df: A static edgelist from above
    :param nodes: A data frame containing longitude and latitude
    :param name: the name on which to link the two data frames
    :param x: the longitude column name
    :param y: the latitude column name
    :return: an igraph graph object
    """
    g = ig.Graph.TupleList(df.itertuples(index=False), directed=True, weights=True)
    g.vs['indeg'] = g.strength(mode='in', weights='weight') # Save weighted indegree with dummy "source" node
    # g.delete_vertices([v.index for v in g.vs if v['name'] == u'source']) # Delete the dummy "source" node
    g.simplify(loops=False, combine_edges=sum) # Get rid of the few self-loops, which can plot strangely
    g.vs['label'] = g.vs["name"] # Names imported as 'name', but plot labels default to 'label'. Copy over.
    # Get coordinates, requires this lengthy query
    # NOTE(review): passing both `on=` and `left_index=True` to merge is rejected
    # by modern pandas ("can only pass on OR left_index/right_index") — this must
    # have relied on an older pandas; verify before reusing.
    xy= pd.DataFrame({name: g.vs['label']}).merge(nodes[[name, x, y]], left_index=True, how='left', on=name)
    g.vs['x'] = (xy[x]).values.tolist()
    g.vs['y'] = (-1 * xy[y]).values.tolist() # Latitude is flipped, need to multiply by -1 to get correct orientation
    return g
# Build the static graph and summarize it, then derive the origin-destination matrix.
g = make_firenze_card_static_graph_with_source(static,nodes)
ig.summary(g)
mat = make_origin_destination_matrix(g)
plot_origin_destination_matrix_heatmap(mat)
nodes.head()
temp = mat.sum(0).to_frame() # This will be "people leaving", used as an offset for a Poisson regression
temp.reset_index(inplace=True)
temp.columns = ['short_name','offset']
temp.head()
# Node-level frame: museum id, name and the Poisson offset.
dfn = nodes.merge(temp, on='short_name')[['museum_id','short_name','offset']]
dfn.head()
temp = mat.sum(1).to_frame() # This will be "intrinsic popularity", total number of people entering the museum
temp.reset_index(inplace=True)
temp.columns = ['short_name','popularity']
temp.head()
dfn = dfn.merge(temp, on='short_name')[['museum_id','short_name','offset','popularity']]
dfn.head()
# Long-format edge frame from the OD matrix: one row per (source, target, weight).
es = mat.stack().reset_index().rename(columns={'level_0':'source','level_1':'target', 0:'weight'})
es.head()
# NOTE(review): the call below has no `on=` key and its result is unassigned — it
# looks unfinished; with no shared columns between `es` and `dfn`, a default merge
# would raise. Confirm the intended join keys.
es.merge(dfn,)
| dev/notebooks/FirenzeCard_Poisson_model_MM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="537i51bCk1GT" outputId="12b1454c-4c2e-4b4e-9f1f-4ee23af4e9b8"
# !git clone https://github.com/ltcmichelli/LSTM-Music-Genre-Classification.git 'Colab Notebooks/assignment3b'
# + colab={"base_uri": "https://localhost:8080/"} id="lenpjOVgwryT" outputId="f13fa096-da1a-4e60-e9e8-4cb98d22a42e"
# cd 'drive/MyDrive/Colab Notebooks/LSTM-Music-Genre-Classification-master'
# + colab={"base_uri": "https://localhost:8080/"} id="O3ynCvEzuSji" outputId="f9e9df99-5cf4-408a-f364-c53b1ba15706"
# !python lstm_genre_classifier_keras.py
| Assignment3b.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Creating And Cleaning Features: Cap And Floor Data To Remove Outliers
# ### Read In Data
# +
# Read in data
import pandas as pd
import numpy as np
# Dataset produced by the previous notebook (missing values already handled).
titanic_df = pd.read_csv('../Data/titanic_no_missing.csv')
titanic_df.head()
# -
# ### Remove Outliers
# See where outliers might be an issue
titanic_df.describe().transpose()
def detect_outlier(feature):
    """Print candidate upper caps for a numeric column of the global ``titanic_df``.

    For the given column, reports three possible upper bounds — the 95th
    percentile, mean + 3 standard deviations, and the 99th percentile — and
    how many values exceed each, to help choose a capping threshold.

    Parameters
    ----------
    feature : str
        Name of a numeric column in ``titanic_df``.

    Returns
    -------
    None
        Results are printed, nothing is returned.
    """
    data = titanic_df[feature]
    mean = np.mean(data)
    std = np.std(data)
    # Hoist the quantiles out of the reporting expressions: the original
    # recomputed data.quantile(...) once per element inside a list
    # comprehension, making the counts O(n^2).
    p95 = data.quantile(.95)
    p99 = data.quantile(.99)
    # A value whose |z-score| is greater than 3 is considered an outlier.
    z_scores = (data - mean) / std
    n_outliers = int((np.abs(z_scores) > 3).sum())
    print('\nOutlier caps for {}: '.format(feature))
    print(' --95p: {:.1f} / {} values exceed that'.format(p95, int((data > p95).sum())))
    print(' --3std: {:.1f} / {} values exceed that'.format(mean + 3 *(std), n_outliers))
    print(' --99p: {:.1f} / {} values exceed that'.format(p99, int((data > p99).sum())))
# Determine what the upperbound should be for continuous features
for feat in ['Age_clean', 'SibSp', 'Parch', 'Fare']:
    detect_outlier(feat)
# - From the data above, we can see the 95th percentile, 3 standard deviations, and 99th percentile of each feature.
# - There is no right or wrong answer for which range to use for capping. In this case, we will use the 99th percentile.
# - For `SibSp` and `Parch`, the max values are 8 and 6, which is considered normal, so we will just keep those.
# - We will cap the `Age` and `Fare` features at the 99th percentile (upper bound) in the next step.
# Cap features. Assign the clipped result instead of using `inplace=True`:
# `Series.clip(..., inplace=True)` on a column selection relies on chained
# assignment, which is deprecated in modern pandas and may not update the frame.
titanic_df['Age_clean'] = titanic_df['Age_clean'].clip(upper=titanic_df['Age_clean'].quantile(.99))
titanic_df['Fare_clean'] = titanic_df['Fare'].clip(upper=titanic_df['Fare'].quantile(.99))
# Describe the dataframe again to make sure the capping was successful
titanic_df.describe().transpose()
# Now we can see that `Age_clean` and `Fare_clean` max values are capped correctly and as expected. Compare to those with original columns max values.
# Write out capped data
titanic_df.to_csv('../Data/titanic_capped.csv', index=False)
| ML - Applied Machine Learning - Feature Engineering/04.Create and Clean Features/02.Cap And Floor Data To Remove Outliers.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# <img src="https://raw.githubusercontent.com/brazil-data-cube/code-gallery/master/img/logo-bdc.png" align="right" width="64"/>
#
# # <span style="color: #336699">Land use and land cover classification in the Brazilian Cerrado biome using Brazil Data Cube</span>
# <hr style="border:2px solid #0077b9;">
#
# <br/>
#
# <div style="text-align: center;font-size: 90%;">
# <NAME> <sup><a href="mailto:<EMAIL>"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0003-0953-4132"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, <NAME> <sup><a href="mailto:<EMAIL>"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0001-7966-2880"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, <NAME> <sup><a href="mailto:<EMAIL>"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0002-3334-4315"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, <NAME> <sup><a href="mailto:<EMAIL>"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0002-3397-6232"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>,<br/>
# <NAME> <sup><a href="mailto:<EMAIL>"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0003-2656-5504"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, <NAME> <sup><a href="mailto:<EMAIL>"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0003-1104-3607"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, <NAME><sup>* <a href="mailto:<EMAIL>"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0001-7534-0219"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>
# <br/><br/>
# Earth Observation and Geoinformatics Division, National Institute for Space Research (INPE)
# <br/>
# Avenida dos Astronautas, 1758, <NAME>, São José dos Campos, SP 12227-010, Brazil
# <br/><br/>
# <sup>*</sup> Author to whom correspondence should be addressed.
# <br/><br/>
# February 24, 2021
# </div>
#
# <br/>
#
# <div style="text-align: justify; margin-left: 10%; margin-right: 10%;">
# <b>Abstract.</b> This Jupyter Notebook compendium contains useful information for the creation of land use and land cover (LULC) maps using Earth observations data cubes and machine learning (ML) techniques. The code is based on the research pipeline described in the paper <em>Earth Observation Data Cubes for Brazil: Requirements, Methodology and Products</em>. These notebooks access open data available in the Brazil Data Cube platform.
# </div>
#
# <br/>
# <div style="text-align: justify; margin-left: 15%; margin-right: 15%;font-size: 75%; border-style: solid; border-color: #0077b9; border-width: 1px; padding: 5px;">
# <b>This Jupyter Notebook is supplement to the <a href="https://www.mdpi.com/2072-4292/12/24/4033/htm#sec5-remotesensing-12-04033" target="_blank">Section 5</a> of the following paper:</b>
# <div style="margin-left: 10px; margin-right: 10px">
# <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>. 2020. Earth Observation Data Cubes for Brazil: Requirements, Methodology and Products. Remote Sens. 12, no. 24: 4033. DOI: <a href="https://doi.org/10.3390/rs12244033" target="_blank">10.3390/rs12244033</a>.
# </div>
# </div>
# # <span style="color: #336699">Land Use and Cover Mapping from CBERS-4/AWFI Data Cubes</span>
# <hr style="border:1px solid #0077b9;">
# This document will present the steps to create a LULC map based on CBERS-4/AWFI data cube, training samples and a MultiLayer Perceptron neural network. This code relies on the [SITS R package](https://github.com/e-sensing/sits).
# ## <span style="color: #336699">Study Area and samples</span>
# <hr style="border:0.5px solid #0077b9;">
# In this application we use the same region of interest and samples described in [Ferreira et al. (2020)](https://doi.org/10.3390/rs12244033). As depicted in Figure 1, the region is located in the Bahia state (Brazil), between the Cerrado and Caatinga biomes.
#
# <div align="center">
# <img src="https://raw.githubusercontent.com/brazil-data-cube/code-gallery/master/img/bdc-article/study-area.png" width="600px">
# </div>
# <br/>
# <center><b>Figure 1</b> - Study area in relation to Brazil and its biomes.</center>
# ## <span style="color: #336699">Set a pseudo-randomic seed</span>
# <hr style="border:0.5px solid #0077b9;">
# We will fix a pseudo-randomic seed in order to run the code:
set.seed(777)
# ## <span style="color: #336699">Loading the software packages</span>
# <hr style="border:0.5px solid #0077b9;">
library(sits)
# The user should also provides his access key to the Brazil Data Cube platform:
MY_ACCESS_KEY <- "change-me"
Sys.setenv(BDC_ACCESS_KEY = MY_ACCESS_KEY)
# ## <span style="color: #336699">Defining the Data Cube</span>
# <hr style="border:0.5px solid #0077b9;">
# Let's start by defining the region of interest (ROI) as a sub-space of the red rectangle shown in Figure 1.
#
# The ROI is available in a file name `roi.rds` under the directory `roi`:
roi <- readRDS(url("https://brazildatacube.dpi.inpe.br/geo-knowledge-hub/bdc-article/roi/roi.rds"))
# > The `roi` is a list with two components:
# > * `classification_roi`: contains the geometry boundary for the classification.
# > * `search_roi`: a smaller rectangle than the `classification_roi`, that intersects only the data cube tiles we are interested to use in the classification.
#
# Next we define a time interval based on the crop calendar to define the working period:
start_date <- "2018-09-01"
end_date <- "2019-08-31"
# In this Jupyter Notebook we focus the classification based on a CBERS-4/AWFI data cube named `CB4_64_16D_STK-1`:
collection <- "CB4_64_16D_STK-1"
# Finally, let's define the data cube.
#
# The `sits` package will access the CBERS-4/AWFI data cube available in the Brazil Data Cube platform through the STAC web service:
# Build a lazy data-cube object pointing at the BDC STAC service; no imagery
# is downloaded here, only tile/band metadata for the requested period and ROI.
cube <- sits_cube(
    type = "BDC",
    name = "cube_to_classify",
    url = "https://brazildatacube.dpi.inpe.br/stac/",
    collection = collection,
    start_date = start_date,
    end_date = end_date,
    roi = roi$search_roi
)
# > The definition above includes the spectral bands `Red`, `Green`, `Blue`, `Near-Infrared (NIR)` and the vegetation indices `EVI` and `NDVI` already available in the cube.
#
# > It also limits the temporal extension to `2018-09` to `2019-08`.
# ## <span style="color: #336699">Loading the Training Samples</span>
# <hr style="border:0.5px solid #0077b9;">
# Now, let's load the samples from a prepared file named `CB4_64_16D_STK_1.rds`:
samples <- readRDS(url("https://brazildatacube.dpi.inpe.br/geo-knowledge-hub/bdc-article/training-samples/rds/CB4_64_16D_STK_1.rds"))
# > The Jupyter Notebook entitled [Extracting time series from sample locations](./01_ExtractTimeSeries.ipynb) describes in detail how to prepare this file.
# ## <span style="color: #336699">MultiLayer Perceptron model definition</span>
# <hr style="border:0.5px solid #0077b9;">
# For the classification of data cubes, the article presents the use of an MLP network with five hidden layers with 512 neurons, trained with the backpropagation algorithm, using the Adam optimizer. The model uses the ReLu activation function.
#
# Below is the definition of this model using the [SITS package](https://github.com/e-sensing/sits).
# MLP classifier: five hidden layers of 512 units with ReLU activation,
# trained with the Adam optimizer (as described in the accompanying article).
# NOTE(review): `lr` is a legacy keras argument name; newer keras R releases
# use `learning_rate` — confirm against the installed keras version.
mlp_model <- sits_deeplearning(layers = c(512, 512, 512, 512, 512),
                               activation = "relu",
                               optimizer = keras::optimizer_adam(lr = 0.001),
                               epochs = 200)
# Below, the defined model is trained using the same samples used in the article.
dl_model <- sits_train(samples, mlp_model)
# ## <span style="color: #336699">Output Directory</span>
# <hr style="border:0.5px solid #0077b9;">
# All the results generated in this document will be saved in your user's home directory, inside `results/CB4_64_16D_STK_1` directory:
# +
output_dir <- "results/CB4_64_16D_STK_1"
dir.create(
path = output_dir,
showWarnings = FALSE,
recursive = TRUE
)
# -
# ## <span style="color: #336699">Classifying the tiles from the data cube</span>
# <hr style="border:0.5px solid #0077b9;">
# Before running the classification step, you should define the hardware resources that `sits` will be allowed to use during the classification:
classification_memsize <- 8 # GB
classification_multicores <- 3 # CPU logical cores
# The next cell uses the trainned MLP model (`dl_model`) to perform a classification based on the temporal data from the data cube:
#
#
# > This is a time-consuming process.
probs <- sits_classify(data = cube,
ml_model = dl_model,
memsize = classification_memsize,
multicores = classification_multicores,
roi = roi$classification_roi,
output_dir = output_dir)
# > The generated data will be stored under the directory indicated by `output_dir`.
#
# > Note that here we use a geometry boundary from `roi$classification_roi` that is smaller than the region defined by the samples.
#
# The classification output of the Multilayer Perceptron (MLP) model is a raster with three layers (one for each land use class) containing the probabilities of each pixel belonging to each of the classes.
#
# The raster file named `cube_to_classify_022024_probs_2018_8_2019_7_v1.tif` has 3 layers containing scaled probabilities (`x 10,000`) corresponding to the classes `Crop` (layer 1), `Natural Vegetation` (layer 2), and `Pasture` (layer 3).
# ## <span style="color: #336699">Generating the Thematic Map</span>
# <hr style="border:0.5px solid #0077b9;">
# We are going to apply a probability Bayesian smoother method over the output of the MLP. This procedure uses the information of a pixel’s neighborhood to update its probabilities by taking the maximum likelihood estimator. The smoothing procedure removes isolated pixel class values and produces more homogeneous spatial areas.
#
# The next cell perform this operation:
probs_smoothed <- sits_smooth(probs, type = "bayes", output_dir = output_dir)
# > The above cell will create a file name `cube_to_classify_022024_probs_2018_8_2019_7_bayes_v1.tif`.
#
# After that, to generate the thematic maps the most probable class is taken as the pixel class. Each class is represented by the codes 1 (Crop), 2 (Natural Vegetation), and 3 (Pasture). The next cell show how to perform this step:
labels <- sits_label_classification(probs_smoothed, output_dir = output_dir)
# > The final map is named `cube_to_classify_022024_probs_class_2018_8_2019_7_v1.tif`.
# ## <span style="color: #336699">Visualizing the Thematic Map</span>
# <hr style="border:0.5px solid #0077b9;">
# Finally, let's use the `rgdal` library to plot the resulted map:
library(rgdal)
rst <- raster::raster(
paste0(output_dir, "/cube_to_classify_022024_probs_class_2018_8_2019_7_v1.tif")
)
plot(rst)
| jupyter/R/bdc-article/02_CB4_64_16D_STK-1_Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python
# name: conda-env-python-py
# ---
# <a href="http://cocl.us/pytorch_link_top">
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/Pytochtop.png" width="750" alt="IBM Product " />
# </a>
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/cc-logo-square.png" width="200" alt="cognitiveclass.ai logo" />
# <h1>Linear Regression 1D: Training Two Parameter Mini-Batch Gradient Descent</h1>
# <h2>Table of Contents</h2>
# <p>In this Lab, you will practice training a model by using Mini-Batch Gradient Descent.</p>
#
# <ul>
# <li><a href="#Makeup_Data">Make Some Data</a></li>
# <li><a href="#Model_Cost">Create the Model and Cost Function (Total Loss)</a></li>
# <li><a href="#BGD">Train the Model: Batch Gradient Descent</a></li>
# <li><a href="#SGD">Train the Model: Stochastic Gradient Descent with Dataset DataLoader</a></li>
# <li><a href="#Mini5">Train the Model: Mini Batch Gradient Descent: Batch Size Equals 5</a></li>
# <li><a href="#Mini10">Train the Model: Mini Batch Gradient Descent: Batch Size Equals 10</a></li>
# </ul>
# <p>Estimated Time Needed: <strong>30 min</strong></p>
# </div>
#
# <hr>
# <h2>Preparation</h2>
# We'll need the following libraries:
# +
# Import the libraries we need for this lab
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# -
# The class <code>plot_error_surfaces</code> is just to help you visualize the data space and the parameter space during training and has nothing to do with PyTorch.
# +
# The class for plotting the diagrams
class plot_error_surfaces(object):
    """Visualize the MSE loss surface of the 1-D linear model y = w*x + b and
    the trajectory of (w, b) recorded during training.

    This is a teaching aid and has nothing to do with PyTorch itself.

    Parameters:
        w_range, b_range: half-widths of the parameter grid, i.e. w spans
            [-w_range, w_range] and likewise for b.
        X, Y: torch tensors of inputs/targets; converted to NumPy internally.
        n_samples: number of grid points per axis.
        go: if True, immediately draw the surface and contour plots.
    """

    # Constructor
    def __init__(self, w_range, b_range, X, Y, n_samples = 30, go = True):
        W = np.linspace(-w_range, w_range, n_samples)
        B = np.linspace(-b_range, b_range, n_samples)
        w, b = np.meshgrid(W, B)
        # Bug fix: size the surface by n_samples instead of hard-coding
        # (30, 30), so n_samples != 30 no longer yields a wrong-shaped grid.
        Z = np.zeros((n_samples, n_samples))
        count1 = 0
        self.y = Y.numpy()
        self.x = X.numpy()
        for w1, b1 in zip(w, b):
            count2 = 0
            for w2, b2 in zip(w1, b1):
                # Bug fix: the residual is y - (w*x + b). The original computed
                # "y - w*x + b", which adds the bias instead of subtracting it
                # and therefore draws a mirrored loss surface along b.
                Z[count1, count2] = np.mean((self.y - (w2 * self.x + b2)) ** 2)
                count2 += 1
            count1 += 1
        self.Z = Z
        self.w = w
        self.b = b
        # Trajectory of parameters and losses recorded via set_para_loss().
        self.W = []
        self.B = []
        self.LOSS = []
        self.n = 0
        if go == True:
            plt.figure()
            plt.figure(figsize = (7.5, 5))
            plt.axes(projection = '3d').plot_surface(self.w, self.b, self.Z, rstride = 1, cstride = 1, cmap = 'viridis', edgecolor = 'none')
            plt.title('Loss Surface')
            plt.xlabel('w')
            plt.ylabel('b')
            plt.show()
            plt.figure()
            plt.title('Loss Surface Contour')
            plt.xlabel('w')
            plt.ylabel('b')
            plt.contour(self.w, self.b, self.Z)
            plt.show()

    # Record one (w, b, loss) point of the training trajectory.
    def set_para_loss(self, W, B, loss):
        self.n = self.n + 1
        self.W.append(W)
        self.B.append(B)
        self.LOSS.append(loss)

    # Plot the full trajectory over the wireframe surface and its contour.
    def final_plot(self):
        ax = plt.axes(projection = '3d')
        ax.plot_wireframe(self.w, self.b, self.Z)
        ax.scatter(self.W, self.B, self.LOSS, c = 'r', marker = 'x', s = 200, alpha = 1)
        plt.figure()
        plt.contour(self.w, self.b, self.Z)
        plt.scatter(self.W, self.B, c = 'r', marker = 'x')
        plt.xlabel('w')
        plt.ylabel('b')
        plt.show()

    # Side-by-side view: data space (left) and parameter-space contour (right).
    def plot_ps(self):
        plt.subplot(121)
        plt.ylim()
        plt.plot(self.x, self.y, 'ro', label = "training points")
        plt.plot(self.x, self.W[-1] * self.x + self.B[-1], label = "estimated line")
        plt.xlabel('x')
        plt.ylabel('y')
        plt.title('Data Space Iteration: '+ str(self.n))
        plt.subplot(122)
        plt.contour(self.w, self.b, self.Z)
        plt.scatter(self.W, self.B, c = 'r', marker = 'x')
        plt.title('Loss Surface Contour')
        plt.xlabel('w')
        plt.ylabel('b')
        plt.show()
# -
# <!--Empty Space for separating topics-->
# <h2 id="Makeup_Data">Make Some Data </h2>
# Import PyTorch and set random seed:
# +
# Import PyTorch library
import torch
torch.manual_seed(1)
# -
# Generate values from -3 to 3 that create a line with a slope of 1 and a bias of -1. This is the line that you need to estimate. Add some noise to the data:
# +
# Generate the data with noise and the line
X = torch.arange(-3, 3, 0.1).view(-1, 1)
f = 1 * X - 1
Y = f + 0.1 * torch.randn(X.size())
# -
# Plot the results:
# +
# Plot the line and the data
plt.plot(X.numpy(), Y.numpy(), 'rx', label = 'y')
plt.plot(X.numpy(), f.numpy(), label = 'f')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
# -
# <!--Empty Space for separating topics-->
# <h2 id="Model_Cost">Create the Model and Cost Function (Total Loss) </h2>
# Define the <code>forward</code> function:
# +
# Define the prediction function
def forward(x):
    """Linear model prediction ``w * x + b``.

    Reads the module-level tensors ``w`` and ``b``, which are re-initialized
    before each training variant in this notebook.
    """
    return w * x + b
# -
# Define the cost or criterion function:
# +
# Define the cost function: mean squared error over all samples.
def criterion(yhat, y):
    """Return the MSE between predictions ``yhat`` and targets ``y``."""
    squared_error = (yhat - y) ** 2
    return squared_error.mean()
# -
# Create a <code> plot_error_surfaces</code> object to visualize the data space and the parameter space during training:
# +
# Create a plot_error_surfaces object.
get_surface = plot_error_surfaces(15, 13, X, Y, 30)
# -
# <!--Empty Space for separating topics-->
# <h2>Train the Model: Batch Gradient Descent (BGD)</h2>
# Define <code>train_model_BGD</code> function.
# +
# Define the function for training model
w = torch.tensor(-15.0, requires_grad = True)
b = torch.tensor(-10.0, requires_grad = True)
lr = 0.1
LOSS_BGD = []
def train_model_BGD(epochs):
    """Train (w, b) with full-batch gradient descent for ``epochs`` iterations.

    Appends the per-epoch loss to the module-level ``LOSS_BGD`` list and plots
    the data-space / parameter-space view via ``get_surface`` each epoch.
    """
    for epoch in range(epochs):
        Yhat = forward(X)
        loss = criterion(Yhat, Y)
        # Bug fix: store a plain float (.tolist()), not the graph-attached
        # tensor — this matches LOSS_SGD/LOSS_MINI* and lets the final
        # plt.plot(LOSS_BGD) work without detaching tensors.
        LOSS_BGD.append(loss.tolist())
        get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), loss.tolist())
        get_surface.plot_ps()
        loss.backward()
        # Manual gradient-descent step, then reset gradients for the next epoch.
        w.data = w.data - lr * w.grad.data
        b.data = b.data - lr * b.grad.data
        w.grad.data.zero_()
        b.grad.data.zero_()
# -
# Run 10 epochs of batch gradient descent: <b>bug</b> data space is 1 iteration ahead of parameter space.
# +
# Run train_model_BGD with 10 iterations
train_model_BGD(10)
# -
# <!--Empty Space for separating topics-->
# <h2 id="SGD"> Stochastic Gradient Descent (SGD) with Dataset DataLoader</h2>
# Create a <code>plot_error_surfaces</code> object to visualize the data space and the parameter space during training:
# +
# Create a plot_error_surfaces object.
get_surface = plot_error_surfaces(15, 13, X, Y, 30, go = False)
# -
# Import <code>Dataset</code> and <code>DataLoader</code> libraries
# +
# Import libraries
from torch.utils.data import Dataset, DataLoader
# -
# Create <code>Data</code> class
# +
# Create class Data
class Data(Dataset):
    """Toy dataset of 60 points on the line y = x - 1 for x in [-3, 3)."""

    # Constructor
    def __init__(self):
        self.x = torch.arange(-3, 3, 0.1).view(-1, 1)
        # Bug fix: derive the targets from self.x rather than the module-level
        # tensor X, so the dataset is self-contained (values are identical).
        self.y = 1 * self.x - 1
        self.len = self.x.shape[0]

    # Getter: return one (x, y) pair.
    def __getitem__(self, index):
        return self.x[index], self.y[index]

    # Get length
    def __len__(self):
        return self.len
# -
# Create a dataset object and a dataloader object:
# +
# Create Data object and DataLoader object
dataset = Data()
trainloader = DataLoader(dataset = dataset, batch_size = 1)
# -
# Define <code>train_model_SGD</code> function for training the model.
# +
# Define train_model_SGD function
w = torch.tensor(-15.0, requires_grad = True)  # re-initialize parameters for this run
b = torch.tensor(-10.0, requires_grad = True)
LOSS_SGD = []
lr = 0.1
def train_model_SGD(epochs):
    """Stochastic gradient descent: one parameter update per sample
    (trainloader batch_size is 1), logging the full-batch loss each epoch."""
    for epoch in range(epochs):
        Yhat = forward(X)
        # Record the whole-dataset loss/parameters before this epoch's updates.
        get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), criterion(Yhat, Y).tolist())
        get_surface.plot_ps()
        LOSS_SGD.append(criterion(forward(X), Y).tolist())
        for x, y in trainloader:
            yhat = forward(x)
            loss = criterion(yhat, y)
            get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), loss.tolist())
            loss.backward()
            # Manual SGD step, then reset gradients for the next sample.
            w.data = w.data - lr * w.grad.data
            b.data = b.data - lr * b.grad.data
            w.grad.data.zero_()
            b.grad.data.zero_()
        get_surface.plot_ps()
# -
# Run 10 epochs of stochastic gradient descent: <b>bug</b> data space is 1 iteration ahead of parameter space.
# +
# Run train_model_SGD(iter) with 10 iterations
train_model_SGD(10)
# -
# <!--Empty Space for separating topics-->
# <h2 id="Mini5">Mini Batch Gradient Descent: Batch Size Equals 5</h2>
# Create a <code> plot_error_surfaces</code> object to visualize the data space and the parameter space during training:
# +
# Create a plot_error_surfaces object.
get_surface = plot_error_surfaces(15, 13, X, Y, 30, go = False)
# -
# Create <code>Data</code> object and create a <code>Dataloader</code> object where the batch size equals 5:
# +
# Create DataLoader object and Data object
dataset = Data()
trainloader = DataLoader(dataset = dataset, batch_size = 5)
# -
# Define <code>train_model_Mini5</code> function to train the model.
# +
# Define train_model_Mini5 function
w = torch.tensor(-15.0, requires_grad = True)  # re-initialize parameters for this run
b = torch.tensor(-10.0, requires_grad = True)
LOSS_MINI5 = []
lr = 0.1
def train_model_Mini5(epochs):
    """Mini-batch gradient descent with batch size 5 (set on trainloader),
    logging the full-batch loss each epoch."""
    for epoch in range(epochs):
        Yhat = forward(X)
        # Record the whole-dataset loss/parameters before this epoch's updates.
        get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), criterion(Yhat, Y).tolist())
        get_surface.plot_ps()
        LOSS_MINI5.append(criterion(forward(X), Y).tolist())
        for x, y in trainloader:
            yhat = forward(x)
            loss = criterion(yhat, y)
            get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), loss.tolist())
            loss.backward()
            # Manual gradient-descent step, then reset gradients per batch.
            w.data = w.data - lr * w.grad.data
            b.data = b.data - lr * b.grad.data
            w.grad.data.zero_()
            b.grad.data.zero_()
# -
# Run 10 epochs of mini-batch gradient descent: <b>bug</b> data space is 1 iteration ahead of parameter space.
# +
# Run train_model_Mini5 with 10 iterations.
train_model_Mini5(10)
# -
# <!--Empty Space for separating topics-->
# <h2 id="Mini10">Mini Batch Gradient Descent: Batch Size Equals 10</h2>
# Create a <code> plot_error_surfaces</code> object to visualize the data space and the parameter space during training:
# +
# Create a plot_error_surfaces object.
get_surface = plot_error_surfaces(15, 13, X, Y, 30, go = False)
# -
# Create <code>Data</code> object and create a <code>Dataloader</code> object batch size equals 10
# +
# Create DataLoader object
dataset = Data()
trainloader = DataLoader(dataset = dataset, batch_size = 10)
# -
# Define <code>train_model_Mini10</code> function for training the model.
# +
# Define train_model_Mini10 function
w = torch.tensor(-15.0, requires_grad = True)  # re-initialize parameters for this run
b = torch.tensor(-10.0, requires_grad = True)
LOSS_MINI10 = []
lr = 0.1
def train_model_Mini10(epochs):
    """Mini-batch gradient descent with batch size 10 (set on trainloader),
    logging the full-batch loss each epoch."""
    for epoch in range(epochs):
        Yhat = forward(X)
        # Record the whole-dataset loss/parameters before this epoch's updates.
        get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), criterion(Yhat, Y).tolist())
        get_surface.plot_ps()
        LOSS_MINI10.append(criterion(forward(X),Y).tolist())
        for x, y in trainloader:
            yhat = forward(x)
            loss = criterion(yhat, y)
            get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), loss.tolist())
            loss.backward()
            # Manual gradient-descent step, then reset gradients per batch.
            w.data = w.data - lr * w.grad.data
            b.data = b.data - lr * b.grad.data
            w.grad.data.zero_()
            b.grad.data.zero_()
# -
# Run 10 epochs of mini-batch gradient descent: <b>bug</b> data space is 1 iteration ahead of parameter space.
# +
# Run train_model_Mini10 with 10 iterations.
train_model_Mini10(10)
# -
# Plot the loss for each epoch:
# +
# Plot out the LOSS for each method
plt.plot(LOSS_BGD,label = "Batch Gradient Descent")
plt.plot(LOSS_SGD,label = "Stochastic Gradient Descent")
plt.plot(LOSS_MINI5,label = "Mini-Batch Gradient Descent, Batch size: 5")
plt.plot(LOSS_MINI10,label = "Mini-Batch Gradient Descent, Batch size: 10")
plt.legend()
# -
# <!--Empty Space for separating topics-->
# <h3>Practice</h3>
# Perform mini batch gradient descent with a batch size of 20. Store the total loss for each epoch in the list LOSS20.
# +
# Practice: Perform mini batch gradient descent with a batch size of 20.
dataset = Data()
# -
# Double-click <b>here</b> for the solution.
#
# <!--
# trainloader = DataLoader(dataset = dataset, batch_size = 20)
# w = torch.tensor(-15.0, requires_grad = True)
# b = torch.tensor(-10.0, requires_grad = True)
#
# LOSS_MINI20 = []
# lr = 0.1
#
# def my_train_model(epochs):
# for epoch in range(epochs):
# Yhat = forward(X)
# get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), criterion(Yhat, Y).tolist())
# get_surface.plot_ps()
# LOSS_MINI20.append(criterion(forward(X), Y).tolist())
# for x, y in trainloader:
# yhat = forward(x)
# loss = criterion(yhat, y)
# get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), loss.tolist())
# loss.backward()
# w.data = w.data - lr * w.grad.data
# b.data = b.data - lr * b.grad.data
# w.grad.data.zero_()
# b.grad.data.zero_()
#
# my_train_model(10)
# -->
#
# Plot a graph that shows the LOSS results for all the methods.
# +
# Practice: Plot a graph to show all the LOSS functions
# Type your code here
# -
# Double-click <b>here</b> for the solution.
#
# <!--
# plt.plot(LOSS_BGD, label = "Batch Gradient Descent")
# plt.plot(LOSS_SGD, label = "Stochastic Gradient Descent")
# plt.plot(LOSS_MINI5, label = "Mini-Batch Gradient Descent,Batch size:5")
# plt.plot(LOSS_MINI10, label = "Mini-Batch Gradient Descent,Batch size:10")
# plt.plot(LOSS_MINI20, label = "Mini-Batch Gradient Descent,Batch size:20")
# plt.legend()
# -->
# <!--Empty Space for separating topics-->
# <a href="http://cocl.us/pytorch_link_bottom">
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/notebook_bottom%20.png" width="750" alt="PyTorch Bottom" />
# </a>
# <h2>About the Authors:</h2>
#
# <a href="https://www.linkedin.com/in/joseph-s-50398b136/"><NAME></a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
# Other contributors: <a href="https://www.linkedin.com/in/michelleccarey/"><NAME></a>, <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a"><NAME></a>
# <hr>
# Copyright © 2018 <a href="cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu">cognitiveclass.ai</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.
| Coursera/IBM Python 01/Course04/3.2_mini-batch_gradient_descent_v3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python38-azureml
# kernelspec:
# display_name: Python 3.8 - AzureML
# language: python
# name: python38-azureml
# ---
# # Microsoft Sentinel Notebooks and <i>MSTICPy</i>
# ## Examples of machine learning techniques in Jupyter notebooks
#
# Author: <NAME>
# <br>Co-Authors: <NAME>, <NAME>
#
# Released: 26 Oct 2020
# ## Notebook Setup
#
# Please ensure that MSTICPy is installed before continuing with this notebook.
#
# The nbinit module loads required libraries and optionally installs require packages.
# + gather={"logged": 1618337296472}
from pathlib import Path
from IPython.display import display, HTML
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
REQ_PYTHON_VER = "3.6"
REQ_MSTICPY_VER = "1.0.0"
REQ_MP_EXTRAS = ["ml"]
update_nbcheck = (
"<p style='color: orange; text-align=left'>"
"<b>Warning: we needed to update '<i>utils/nb_check.py</i>'</b><br>"
"Please restart the kernel and re-run this cell."
"</p>"
)
display(HTML("<h3>Starting Notebook setup...</h3>"))
if Path("./utils/nb_check.py").is_file():
    try:
        from utils.nb_check import check_versions
    except ImportError as err:
        # Old nb_check.py lacks check_versions: fetch the latest copy and ask
        # the user to restart, since the stale module is already imported.
        # %xmode Minimal
        # !curl https://raw.githubusercontent.com/Azure/Azure-Sentinel-Notebooks/master/utils/nb_check.py > ./utils/nb_check.py 2>/dev/null
        display(HTML(update_nbcheck))
    if "check_versions" not in globals():
        raise ImportError("Old version of nb_check.py detected - see instructions below.")
    # %xmode Verbose
    check_versions(REQ_PYTHON_VER, REQ_MSTICPY_VER, REQ_MP_EXTRAS)
# Initialize MSTICPy: imports common names (pd, nbwidgets, md, ...) into
# the notebook namespace.
from msticpy.nbtools import nbinit
nbinit.init_notebook(namespace=globals());
# -
# ## Retrieve sample data files
# + gather={"logged": 1618336504315}
from urllib.request import urlretrieve
from pathlib import Path
from tqdm.auto import tqdm
github_uri = "https://raw.githubusercontent.com/Azure/Azure-Sentinel-Notebooks/master/{file_name}"
# File name -> local target directory for each sample artifact.
github_files = {
    "exchange_admin.pkl": "data",
    "processes_on_host.pkl": "data",
    "timeseries.pkl": "data",
    "data_queries.yaml": "data",
}
Path("data").mkdir(exist_ok=True)
for file, path in tqdm(github_files.items(), desc="File download"):
    file_path = Path(path).joinpath(file)
    print(file_path, end=", ")
    url_path = f"{path}/{file}" if path else file
    urlretrieve(
        github_uri.format(file_name=url_path),
        file_path
    )
    # Robustness fix: `assert` is stripped under `python -O`; raise explicitly
    # so a failed download is always reported.
    if not Path(file_path).is_file():
        raise FileNotFoundError(f"Download failed for {file_path}")
# -
# <hr>
#
# # Time Series Analysis
#
# ## Query network data
# The starting point is ingesting data to analyze.
#
# MSTICpy contains a number of [query providers](https://msticpy.readthedocs.io/en/latest/data_acquisition/DataProviders.html)
# that let you query and return data from several different sources.
#
# Below we are using the LocalData query provider to return data from sample files.<br><br>
#
# Data is returned in a Pandas [DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) for easy manipulation and to provide a common interface for other features in MSTICpy.<br>
# Here we are getting a summary of our network traffic for the time period we are interested in.
#
# + gather={"logged": 1618336448011}
query_range = nbwidgets.QueryTime(
origin_time=pd.Timestamp("2020-07-13 00:00:00"),
before=1,
units="week"
)
query_range
# -
# <hr>
#
# This query fetches the total number of bytes send outbound on the network, grouped by hour.
#
# The input to the Timeseries analysis needs to be in the form of:
# - a datetime index (in a regular interval like an hour or day)
# - a scalar value used to determine anomalous values based on periodicity
#
# + gather={"logged": 1618336452828}
# Initialize the data provider and connect to our Splunk instance.
qry_prov = QueryProvider("LocalData", data_paths=["./data"], query_paths=["./data"])
qry_prov.connect()
ob_bytes_per_hour = qry_prov.Network.get_network_summary(query_range)
md("Sample data:", "large")
ob_bytes_per_hour.head(3)
# -
# ## Using Timeseries decomposition to detect anomalous network activity
#
# Below we use MSTICpy's [time series analysis](https://msticpy.readthedocs.io/en/latest/msticpy.analysis.html?highlight=timeseries#module-msticpy.analysis.timeseries)
# machine learning capabilities to identify anomalies in our network traffic for further investigation.<br>
# As well as computing anomalies we visualize the data so that we can more easily see where these anomalies present themselves.
#
# + gather={"logged": 1618336456021}
from msticpy.nbtools.timeseries import display_timeseries_anomolies
from msticpy.analysis.timeseries import timeseries_anomalies_stl
# Conduct our timeseries analysis (STL decomposition + anomaly scoring)
ts_analysis = timeseries_anomalies_stl(ob_bytes_per_hour)
# Visualize the timeseries and any anomalies
# (note: "anomolies" is the actual spelling of the msticpy function name)
display_timeseries_anomolies(data=ts_analysis, y= 'TotalBytesSent');
md("We can see two clearly anomalous data points representing unusual outbound traffic.<hr>", "bold")
# -
# ### View the summary events marked as anomalous
# + gather={"logged": 1618336459592}
# Min/max of the anomaly scores produced by the STL analysis.
# BUG FIX: min_score previously used frame-level ts_analysis.min() (which
# returns a Series of per-column minima) instead of the score column's
# scalar minimum.
max_score, min_score = ts_analysis.score.max(), ts_analysis.score.min()
# Show only the rows flagged as anomalous by the analysis.
ts_analysis[ts_analysis["anomalies"] == 1]
# -
# ### Extract the anomaly period
# We can extract the **start** and **end** times of anomalous events and
# use this more-focused time range to query for unusual activity in this period.
#
# > **Note**: if more than one anomalous period is indicated we can use<br>
# > `msticpy.analysis.timeseries.extract_anomaly_periods()` function to isolate
# > time blocks around the anomalous periods.
# + gather={"logged": 1618336462962}
# Identify when the anomalies occur so that we can use this timerange
# to scope the next stage of our investigation.
# Add a 1 hour buffer around the anomalies
# Bound the next investigation stage by the anomalous events, padded by one hour.
one_hour = pd.to_timedelta(1, unit='h')
anomaly_times = ts_analysis.loc[ts_analysis['anomalies'] == 1, 'TimeGenerated']
start = anomaly_times.min() - one_hour
end = anomaly_times.max() + one_hour
# md and md_warn are MSTICpy features to provide simple, and clean output in notebook cells
md("Anomalous network traffic detected between:", "large")
md(f"Start time: <b>{start}</b><br>End time: <b>{end}</b><hr>")
# -
# ## Time Series Conclusion
#
# We would take these start and end times to zero in on which machines
# were responsible for the anomalous traffic. Once we find them we can
# use other techniques to analyze what's going on on these hosts.
# ## Other Applications
#
# You can use the msticpy query function `MultiDataSource.get_timeseries_anomalies` on most Microsoft Sentinel tables to do this summarization directly.
#
# Three examples are shown below.
#
# ```python
# start = pd.Timestamp("2020-09-01T00:00:00")
# end = pd.Timestamp("2020-09-30T00:00:00")
#
# # Sent bytes by hour (default) from Palo Alto devices
# time_series_net_bytes = qry_prov.MultiDataSource.get_timeseries_decompose(
# start=start,
# end=end,
# table="CommonSecurityLog",
# timestampcolumn="TimeGenerated",
# aggregatecolumn="SentBytes",
# groupbycolumn="DeviceVendor",
# aggregatefunction="sum(SentBytes)",
# where_clause='|where DeviceVendor=="Palo Alto Networks"',
# )
#
# # Sign-in failure count in AAD
# time_series_signin_fail = qry_prov.MultiDataSource.get_timeseries_anomalies(
# table="SigninLogs",
# start=start,
# end=end,
# timestampcolumn="TimeGenerated",
# aggregatecolumn="AppDisplayName",
# groupbycolumn="ResultType",
# aggregatefunction="count(AppDisplayName)",
# where_clause='| where ResultType in (50126, 50053, 50074, 50076)',
# add_query_items='| project-rename Total=AppDisplayName',
# )
#
# # Number of distinct processes by hour
# time_series_procs = qry_prov.MultiDataSource.get_timeseries_anomalies(
# table="SecurityEvent",
# start=start,
# end=end,
# timestampcolumn="TimeGenerated",
# aggregatecolumn="DistinctProcesses",
# groupbycolumn="Account",
# aggregatefunction="dcount(NewProcessName)",
# where_clause="| where Computer='myhost.domain.con'",
# )
#
# # Then submit to ts anomalies decomposition
# ts_analysis = timeseries_anomalies_stl(time_series_procs)
# # Visualize the timeseries and any anomalies
# display_timeseries_anomolies(data=ts_analysis, y='Total');
# ```
# <hr>
#
# # Using Clustering
# ## - Example: aggregating similar process patterns to highlight unusual logon sessions
#
# Sifting through thousands of events from a host is tedious in the extreme.
# We want to find a better way of identifying suspicious clusters of activity.
#
# Query the data and do some initial analysis of the results
# + gather={"logged": 1620085574961}
print("Getting process events...", end="")
# Query process-creation events for the target host over the selected time range.
processes_on_host = qry_prov.WindowsSecurity.list_host_processes(
    query_range, host_name="MSTICAlertsWin1"
)
md("Initial analysis of data set", "large, bold")
md(f"Total processes in data set <b>{len(processes_on_host)}</b>")
# Cardinality of the key attributes - motivates the clustering approach below.
for column in ("Account", "NewProcessName", "CommandLine"):
    md(f"Total distinct {column} in data <b>{processes_on_host[column].nunique()}</b>")
md("<hr>")
md("Try grouping by distinct Account, Process, Commandline<br> - we still have 1000s of rows!", "large")
# Count events per (Account, Process, CommandLine) triple - still far too many
# rows to review manually, hence the clustering in the next section.
display(
    processes_on_host
    .groupby(["Account", "NewProcessName", "CommandLine"])
    [["TimeGenerated"]]
    .count()
    .rename(columns={"TimeGenerated": "Count"})
)
# -
# ## Clustering motivation
#
# We want to find atypical commands being run and see if they are associated with
# the same user or time period<br>
#
# It is tedious to do repeated queries grouping on different attributes of events.<br>
# Instead we can specify features that we are interested in grouping around and use<br>
# clustering, a form of unsupervised learning, to group the data.
#
# A challenge when using simple grouping is that commands (commandlines) may vary slightly
# but are essentially repetitions of the same thing (e.g. contain dynamically-generated
# GUIDs or other temporary data).
#
# We can extract features of the commandline rather than using it in its raw form.
#
# Using clustering we can add arbitrarily many features to group on. Here we are using the following features:
# - Account name
# - Process name
# - Command line structure
# - Whether the process is a system session or not
#
# > **Note**: A downside to clustering is that text features (usually) need to be transformed from a string<br>
# > into a numeric representation.
# + gather={"logged": 1618336998754}
from msticpy.sectools.eventcluster import dbcluster_events, add_process_features, char_ord_score
from collections import Counter
print(f"Input data: {len(processes_on_host)} events")
print("Extracting features...", end="")
# Derive numeric features (commandline token counts, path scores, session flags)
# from each process event - the clustering step requires numeric inputs.
feature_procs = add_process_features(input_frame=processes_on_host, path_separator="\\")
# Encode the account name as a numeric score so it can be used as a cluster feature.
feature_procs["accountNum"] = feature_procs.apply(
    lambda x: char_ord_score(x.Account), axis=1
)
print(".", end="")
# you might need to play around with the max_cluster_distance parameter.
# decreasing this gives more clusters.
cluster_columns = ["commandlineTokensFull", "pathScore", "accountNum", "isSystemSession"]
print("done")
print("Clustering...", end="")
# Cluster the events over the selected feature columns.
(clus_events, dbcluster, x_data) = dbcluster_events(
    data=feature_procs,
    cluster_columns=cluster_columns,
    max_cluster_distance=0.0001,
)
print("done")
print("Number of input events:", len(feature_procs))
print("Number of clustered events:", len(clus_events))
print("Merging with source data and computing rarity...", end="")
# Join the clustered results back to the original process frame
procs_with_cluster = feature_procs.merge(
    clus_events[[*cluster_columns, "ClusterSize"]],
    on=["commandlineTokensFull", "accountNum", "pathScore", "isSystemSession"],
)
# Compute Process pattern Rarity = inverse of cluster size
procs_with_cluster["Rarity"] = 1 / procs_with_cluster["ClusterSize"]
# count the number of processes for each logon ID
# (an event's logon ID may appear as either the subject or the target,
# so both groupings are concatenated and summed per logon ID)
lgn_proc_count = (
    pd.concat(
        [
            processes_on_host.groupby("TargetLogonId")["TargetLogonId"].count(),
            processes_on_host.groupby("SubjectLogonId")["SubjectLogonId"].count(),
        ]
    ).sum(level=0)  # NOTE(review): level-based sum is deprecated in newer pandas
).to_dict()
print("done")
# Display the results
md("<br><hr>Sessions ordered by process rarity", "large, bold")
md("Higher score indicates higher number of unusual processes")
# Mean rarity per logon session, plus the session's process count.
process_rarity = (procs_with_cluster.groupby(["SubjectUserName", "SubjectLogonId"])
    .agg({"Rarity": "mean", "TimeGenerated": "count"})
    .rename(columns={"TimeGenerated": "ProcessCount"})
    .reset_index())
display(
    process_rarity
    .sort_values("Rarity", ascending=False)
    .style.bar(subset=["Rarity"], color="#d65f5f")
)
# + gather={"logged": 1618337003076}
# Identify the logon session whose processes have the highest mean rarity.
top_rarity = process_rarity.Rarity.max()
rarest_logonid = process_rarity.loc[
    process_rarity["Rarity"] == top_rarity, "SubjectLogonId"
].iloc[0]
# Pull a 20-row slice of that session's commandlines for display.
sample_processes = processes_on_host.loc[
    processes_on_host["SubjectLogonId"] == rarest_logonid,
    ["TimeGenerated", "CommandLine"],
].sort_values("TimeGenerated")[5:25]
# How long did this burst of activity last?
duration = sample_processes.TimeGenerated.max() - sample_processes.TimeGenerated.min()
md(f"{len(sample_processes)} processes executed in {duration.total_seconds()} sec", "bold")
display(sample_processes)
# -
# ## Clustering conclusion
# We have narrowed down the task of sifting through > 20,000 processes
# to a few 10s and have them grouped into sessions ordered by the
# relative rarity of the process patterns
# ## Other Applications
#
# You can use this technique on other datasets where you want to group by multiple features of the data.
#
# The caveat is that you need to transform any non-numeric data field into a numeric form.
#
# msticpy has a few built-in functions to help with this:
# ```python
# from msticpy.sectools import eventcluster
#
# # This will group similar names together (e.g. "Administrator" and "administrator")
# my_df["account_num"] = eventcluster.char_ord_score_df(data=my_df, column="Account")
#
# # This will create a distinct hash for even minor differences in the input.
# # This might be useful to detect imperfectly faked UA strings.
# my_df["ua_hash"] = eventcluster.crc32_hash_df(data=my_df, column="UserAgent")
#
# # This will return the number of delimiter chars in the string - often a
# # a good proxy for the structure of an input while ignoring variable text values
# # e.g.
# # "https://my.dom.com/path1?u1=172.16.31.10" will produce the same score as
# # "https://www.contoso.com/azure?srcdom=moon.base.alpha.com"
# # but
# # "curl https://www.contoso.com/top/next?u=23 > ~/my_page"
# # "curl https://www.contoso.com/top_next?u=2.3 > ~/my_page.sh"
# # will produce different values despite the similarity of the strings.
# # Note - you can override the default delimiter list of " \-/.,"'|&:;%\$()]"
# my_df["request_struct"] = eventcluster.delim_count_df(my_df, column="Request")
# ```
#
# You can use a combination of these and other functions on the same fields
# to measure different aspects of the data. For example, the following takes
# a hash of the browser version of the UA (user agent) string and a structural count of
# the delimiters used.
#
# Use the `ua_pref_hash` and `ua_delims` to cluster on identical browser versions that
# have the same UA string
# ```python
# my_df["ua_prefix"] = my_df["UserAgent"].str.split(")").str[-1]
# my_df["ua_pref_hash"] = eventcluster.crc32_hash_df(data=my_df, column="ua_prefix")
# my_df["ua_delims"] = eventcluster.delim_count_df(data=my_df, column="UserAgent")
# ```
# <hr>
#
# # Detecting anomalous sequences using Markov Chain
#
# The **anomalous_sequence** MSTICPy package uses Markov Chain analysis to predict the probability<br>
# that a particular sequence of events will occur given what has happened in the past.
#
# Here we're applying it to Office activity.
#
# ## Query the data
# + gather={"logged": 1617840728875}
# KQL fragment appended to the base OfficeActivity query:
# - keeps ExchangeAdmin records from real user accounts (filters service principals)
# - sessionizes events per (UserId, ClientIP) using row_window_session
# - aggregates the commands and their parameters run within each session
query = """
| where TimeGenerated >= ago(60d)
| where RecordType_s == 'ExchangeAdmin'
| where UserId_s !startswith "NT AUTHORITY"
| where UserId_s !contains "prod.outlook.com"
| extend params = todynamic(strcat('{"', Operation_s, '" : ', tostring(Parameters_s), '}'))
| extend UserId = UserId_s, ClientIP = ClientIP_s, Operation = Operation_s
| project TimeGenerated= Start_Time_t, UserId, ClientIP, Operation, params
| sort by UserId asc, ClientIP asc, TimeGenerated asc
| extend begin = row_window_session(TimeGenerated, 20m, 2m, UserId != prev(UserId) or ClientIP != prev(ClientIP))
| summarize cmds=makelist(Operation), end=max(TimeGenerated), nCmds=count(), nDistinctCmds=dcount(Operation),
params=makelist(params) by UserId, ClientIP, begin
| project UserId, ClientIP, nCmds, nDistinctCmds, begin, end, duration=end-begin, cmds, params
"""
exchange_df = qry_prov.Azure.OfficeActivity(add_query_items=query)
print(f"Number of events {len(exchange_df)}")
# Drop the bulky params column for the preview.
exchange_df.drop(columns="params").head()
# -
# ## Perform Anomalous Sequence analysis on the data
#
# The analysis groups events into sessions (time-bounded and linked by a common account). It then<br>
# builds a probability model for the types of *command* (E.g. "SetMailboxProperty")<br>
# and the parameters and parameter values used for that command.
#
# I.e. how likely is it that a given user would be running this sequence of commands in a logon session?
#
# Using this probability model, we can highlight sequences that have an extremely low probability, based<br>
# on prior behaviour.
#
# + gather={"logged": 1617840741087}
from msticpy.analysis.anomalous_sequence.utils.data_structures import Cmd
from msticpy.analysis.anomalous_sequence import anomalous
def process_exchange_session(session_with_params, include_vals):
    """Convert raw per-command parameter records into msticpy ``Cmd`` objects.

    Each entry of ``session_with_params`` is a single-key dict mapping the
    operation name to its list of parameter records. When ``include_vals``
    is True the parameters are kept as a name->value dict; otherwise only
    the set of parameter names is retained.
    """
    commands = []
    for raw_cmd in session_with_params:
        op_name = list(raw_cmd.keys())[0]
        raw_params = list(raw_cmd.values())[0]
        if include_vals:
            params = {param['Name']: param['Value'] for param in raw_params}
        else:
            params = {param['Name'] for param in raw_params}
        commands.append(Cmd(name=op_name, params=params))
    return commands
sessions = exchange_df.cmds.values.tolist()
param_sessions = []
param_value_sessions = []
# Build two parallel representations of each session: parameter names only,
# and parameter name->value mappings.
for ses in exchange_df.params.values.tolist():
    new_ses_set = process_exchange_session(session_with_params=ses, include_vals=False)
    new_ses_dict = process_exchange_session(session_with_params=ses, include_vals=True)
    param_sessions.append(new_ses_set)
    param_value_sessions.append(new_ses_dict)
data = exchange_df
data['session'] = sessions
data['param_session'] = param_sessions
data['param_value_session'] = param_value_sessions
# Score each session's likelihood with a sliding window of 3 commands,
# using the command + parameter-name + parameter-value representation.
modelled_df = anomalous.score_sessions(
    data=data,
    session_column='param_value_session',
    window_length=3
)
anomalous.visualise_scored_sessions(
    data_with_scores=modelled_df,
    time_column='begin', # this will appear on the x-axis
    score_column='rarest_window3_likelihood', # this will appear on the y axis
    window_column='rarest_window3', # this will represent the session in the tool-tips
    source_columns=['UserId', 'ClientIP'], # specify any additional columns to appear in the tool-tips
)
# -
# The events are shown in descending order of likelihood (vertically), so the<br>
# events at the bottom of the chart are the ones most interesting to us.
#
# **Looking at these rare events, we can see potentially suspicious activity changing role memberships.**
# + gather={"logged": 1617841520179}
pd.set_option("display.html.table_schema", False)
# Range of the likelihood scores produced by the model.
likelihood_max = modelled_df["rarest_window3_likelihood"].max()
likelihood_min = modelled_df["rarest_window3_likelihood"].min()
# Divide the range into 20 slider steps; start one step above the minimum.
slider_step = (likelihood_max - likelihood_min) / 20
start_val = likelihood_min + slider_step
threshold = widgets.FloatSlider(
    description="Select likelihood threshold",
    max=likelihood_max,
    min=likelihood_min,
    value=start_val,
    step=slider_step,  # BUG FIX: was step=start_val; slider_step was computed but unused
    layout=widgets.Layout(width="60%"),
    style={"description_width": "200px"},
    readout_format=".7f"
)
def show_rows(change):
    """Refresh the displayed DataFrame when the slider value changes."""
    thresh = change["new"]
    pd_disp.update(modelled_df[modelled_df["rarest_window3_likelihood"] < thresh])
threshold.observe(show_rows, names="value")
md("Move the slider to see event sessions below the selected <i>likelihood</i> threshold", "bold")
display(HTML("<hr>"))
display(threshold)
display(HTML("<hr>"))
md(f"Range is {likelihood_min:.7f} (min likelihood) to {likelihood_max:.7f} (max likelihood)<br><br><hr>")
# Initial display: sessions below the starting threshold (updated by show_rows).
pd_disp = display(
    modelled_df[modelled_df["rarest_window3_likelihood"] < start_val],
    display_id=True
)
# -
# ### Print out content of the selected events/commands in more readable format
#
# > Note for many events the output will be long
# + gather={"logged": 1617841645541}
import pprint
# Sessions below the slider-selected likelihood threshold, rarest first.
rarest_events = (
    modelled_df[modelled_df["rarest_window3_likelihood"] < threshold.value]
    [[
        "UserId", "ClientIP", "begin", "end", "param_value_session", "rarest_window3_likelihood"
    ]]
    .rename(columns={"rarest_window3_likelihood": "likelihood"})
    .sort_values("likelihood")
)
# Print each selected session, followed by its commands and parameters.
for idx, (_, rarest_event) in enumerate(rarest_events.iterrows(), 1):
    md(f"Event {idx}", "large")
    display(pd.DataFrame(rarest_event[["UserId", "ClientIP", "begin", "end", "likelihood"]]))
    md("<hr>")
    md("Param session details:", "bold")
    for cmd in rarest_event.param_value_session:
        md(f"Command: {cmd.name}")
        md(pprint.pformat(cmd.params))
    md("<hr><br>")
# -
# <hr>
#
# # Resources
# ## MSTICpy:
# - msticpy Github https://github.com/Microsoft/msticpy
# - msticpy Docs https://msticpy.readthedocs.io/en/latest/
# - msticpy Release Blog https://medium.com/@msticmed
#
# ### MSTICpy maintainers:
# - <NAME> [@ianhellen](https://twitter.com/ianhellen)
# - <NAME> [@MSSPete](https://twitter.com/MSSPete)
# - <NAME> [@ashwinpatil](https://twitter.com/ashwinpatil)
#
# ## Microsoft Sentinel Notebooks:
# - Microsoft Sentinel Github Notebooks https://github.com/Azure/Azure-Sentinel-Notebooks/
# - (Samples with data in Sample-Notebooks folder)
# - Microsoft Sentinel Tech Community Blogs https://aka.ms/AzureSentinelBlog
| Machine Learning in Notebooks Examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Factor Risk Reports
#
# Factor risk reports run historical factor risk analyses for your portfolio or basket over a specified date range by leveraging a factor risk model of your choice.
#
# ### Prerequisite
#
# To execute all the code in this tutorial, you will need the following application scopes:
# - **read_product_data**
# - **read_financial_data**
# - **modify_financial_data** (must be requested)
# - **run_analytics** (must be requested)
#
# If you are not yet permissioned for these scopes, please request them on your [My Applications Page](https://developer.gs.com/go/apps/view).
# If you have any other questions please reach out to the [Marquee sales team](mailto:<EMAIL>).
#
# ## Step 1: Authenticate and Initialize Your Session
#
# First you will import the necessary modules and add your client id and client secret.
# + pycharm={"name": "#%%\n"}
import datetime as dt
from time import sleep
import pandas as pd
from gs_quant.api.gs.risk_models import GsRiskModelApi
from gs_quant.markets.report import FactorRiskReport
from gs_quant.models.risk_model import FactorRiskModel
from gs_quant.session import GsSession, Environment
# None => use the default (internal) authentication flow.
# External users must supply their own credentials below.
client = None
secret = None
scopes = None
## External users must fill in their client ID and secret below and comment out the line below
#client = 'ENTER CLIENT ID'
#secret = 'ENTER CLIENT SECRET'
#scopes = ('read_product_data read_financial_data modify_financial_data run_analytics',)
GsSession.use(
    Environment.PROD,
    client_id=client,
    client_secret=secret,
    scopes=scopes
)
print('GS Session initialized.')
# -
# ## Step 2: Create a New Factor Risk Report
#
# #### Already have a factor risk report?
#
# <i>If you want to skip creating a new report and continue this tutorial with an existing factor risk report, run the following and skip to Step 3:</i>
# + pycharm={"name": "#%%\n"}
# Replace with an existing report ID to skip report creation and jump to Step 3.
risk_report_id = 'ENTER FACTOR RISK REPORT ID'
risk_report = FactorRiskReport.get(risk_report_id)
# -
# When creating a factor risk report, you must specify the risk model you would like to use.
#
#
# If you would like to see all available risk model IDs to choose from, run the following:
# + pycharm={"name": "#%%\n"}
# Print the ID of every factor risk model available to this session.
risk_models = FactorRiskModel.get_many()
for available_model in risk_models:
    print(f'{available_model.id}\n')
# -
# In this tutorial, we'll create a factor risk report leveraging the Barra USSLOW Long model:
# + pycharm={"name": "#%%\n"}
entity_id = 'ENTER PORTFOLIO OR BASKET ID'
# Use the Barra USSLOW Long model (see the previous cell for all available IDs).
risk_model_id = 'BARRA_USSLOWL'
risk_report = FactorRiskReport(
    risk_model_id=risk_model_id,
    fx_hedged=True
)
# Attach the report to the portfolio/basket and persist it in Marquee.
risk_report.set_position_source(entity_id)
risk_report.save()
print(f'A new factor risk report for entity "{entity_id}" has been made with ID "{risk_report.id}".')
# -
# ## Step 3: Schedule the Report
#
# When scheduling reports, you have two options:
# - Backcast the report: Take the earliest date with positions in the portfolio / basket and run the report on the positions held then with a start date before the earliest position date and an end date
# of the earliest position date
# - Do not backcast the report: Set the start date as a date that has positions in the portfolio or basket and an end date after that (best practice is to set it to T-1). In this case the
# report will run on positions held as of each day in the date range
#
# In this case, let's try scheduling the report without backcasting:
# + pycharm={"name": "#%%\n"}
start_date = dt.date(2021, 1, 4)
end_date = dt.date(2021, 8, 4)
# Schedule without backcasting: the report runs on the positions held
# as of each day in the date range.
risk_report.schedule(
    start_date=start_date,
    end_date=end_date,
    backcast=False
)
print(f'Report "{risk_report.id}" has been scheduled.')
# -
# ## Alternative Step 3: Run the Report
#
# Depending on the size of your portfolio and the length of the schedule range, it usually takes anywhere from a couple seconds to a couple minutes for your report to finish executing.
# Only after that can you successfully pull the results from that report. If you would rather run the report and pull the results immediately after they are ready, you can leverage the `run`
# function.
#
# You can run a report synchronously or asynchronously.
# - Synchronous: the Python script will stall at the `run` function line and wait for the report to finish. The `run` function will then return a dataframe with the report results
# - Asynchronously: the Python script will not stall at the `run` function line. The `run` function will return a `ReportJobFuture` object that will contain the report results when they are ready.
#
# In this example, let's run the report asynchronously and wait for the results:
# + pycharm={"name": "#%%\n"}
start_date = dt.date(2021, 1, 4)
end_date = dt.date(2021, 8, 4)
# Kick off the report asynchronously; a ReportJobFuture is returned immediately.
report_result_future = risk_report.run(
    start_date=start_date,
    end_date=end_date,
    backcast=False,
    is_async=True,
)
# Poll every five seconds until the report job completes.
while not report_result_future.done():
    print('Waiting for report results...')
    sleep(5)
print('\nReport results done! Here they are...')
print(report_result_future.result())
# -
# ## Step 4: Pull Report Results
#
# Now that we have our completed factor risk report, we can leverage the unique functionalities of the `FactorRiskReport` class to pull attribution and risk data. In this example, let's pull historical data on factor, specific, and total PnL:
# + pycharm={"name": "#%%\n"}
# Historical factor, specific and total PnL over the scheduled date range.
pnl = risk_report.get_factor_pnl(
    factor_name=['Factor', 'Specific', 'Total'],
    start_date=start_date,
    end_date=end_date
)
pnl.set_index('date', inplace=True)
pnl.index = pd.to_datetime(pnl.index)
# Plot cumulative PnL attribution over time.
pnl.cumsum().plot(title='Risk Attribution Breakdown')
# -
# Now let's pull the breakdown of proportion of risk among the different factor types over time:
# + pycharm={"name": "#%%\n"}
# Proportion of risk attributed to each factor type over the date range.
prop_of_risk = risk_report.get_factor_proportion_of_risk(
    factor_names=['Market', 'Style', 'Industry', 'Country'],
    start_date=start_date,
    end_date=end_date
)
prop_of_risk.set_index('date', inplace=True)
prop_of_risk.index = pd.to_datetime(prop_of_risk.index)
prop_of_risk.plot(title='Factor Proportion of Risk Breakdown')
# -
# ### Quick Tip!
# If you would like to pull all factor risk data for a list of different factors, you can use the `get_results` function:
# + pycharm={"name": "#%%\n"}
# get_results pulls all factor risk data for the listed factors in one call.
factor_and_total_results = risk_report.get_results(factors=['Factor', 'Specific'], start_date=dt.date(2020, 1, 1), end_date=dt.date(2021, 1, 1))
print(factor_and_total_results)
# -
# ### You're all set; Congrats!
#
# *Other questions? Reach out to the [Portfolio Analytics team](mailto:<EMAIL>)!*
#
| gs_quant/documentation/10_one_delta/scripts/reports/Factor Risk Report.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Assignment 2
#
# ## Introduction
#
# In this assignment we will replicate a gene expression data analysis experiment. We will use both unsupervised clustering, and a supervised approach using the Support Vector Machine classifier.
#
# The data is highly dimensional, in other words there are many more features than samples/observations ($p \gg N$). This is typical of gene expression data and of some other medical data problems that you might encounter, such as proteomic data or other biomedical data. When the number of features/dimensions is __much bigger__ than the number of samples/observations, this is a high-dimensional problem.
#
# The dataset was described and analysed in the following publication:
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. __Multiclass cancer diagnosis using tumor gene expression signatures__. _PNAS, Proceedings of the National Academy of Sciences_. 2001 Dec 18; 98(26): 15149–15154.
#
# The full text is available via PubMed:
# <http://www.ncbi.nlm.nih.gov/pmc/articles/PMC64998/pdf/pq2601015149.pdf>
#
# ## Deliverable
#
# The deliverable of this assignment is to replicate the gene expression analysis performed by Ramaswamy et al. in the paper cited above.
#
# ## Get the Data
#
# Let's first get the data, which has been made available by the authors of the _Elements of Statistical Learning_ (<NAME> Friedman, 2nd ed., 2009, Springer Verlag).
#
# In section 18.3, pp. 654–661 of this book, the authors re-analysed the dataset used by Ramaswamy et al. above and have made the formatted gene expression data available via the book's companion website.
#
# The dataset comprises $p=16,063$ gene expressions for $N=144$ tumour samples in the training set and $N=54$ tumour samples in the test set. The data describe 14 different types of cancer. Regarding this dataset, we can safely say that $p \gg N$.
#
# We will now retrieve the data from the _Elements of Statistical Learning's_ website using `pandas` and `urllib2`:
# +
# NOTE: this notebook targets a Python 2 kernel (urllib2, print statements).
import urllib2
import csv
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
# %matplotlib inline
# Gene expression data hosted on the Elements of Statistical Learning website.
url_X_train = 'http://statweb.stanford.edu/~tibs/ElemStatLearn/datasets/14cancer.xtrain'
url_y_train = 'http://statweb.stanford.edu/~tibs/ElemStatLearn/datasets/14cancer.ytrain'
url_X_test = 'http://statweb.stanford.edu/~tibs/ElemStatLearn/datasets/14cancer.xtest'
url_y_test = 'http://statweb.stanford.edu/~tibs/ElemStatLearn/datasets/14cancer.ytest'
# We know there are 144 tumours in the training set and 54 in the test set, so let's make some column names:
column_names_train = ["Tumour_Sample_" + str(_) for _ in np.arange(144)+1]
column_names_test = ["Tumour_Sample_" + str(_) for _ in np.arange(54)+1]
# We will use Pandas to read and properly format the text-based data.
# The delimiter is a regular expression to look for zero or more repetitions of whitespace (\s).
X_train = pd.read_csv(url_X_train, delimiter='\s*', engine='python', names=column_names_train)
X_test = pd.read_csv(url_X_test, delimiter='\s*', engine='python', names=column_names_test)
# Get the labels and store as a list. There are 14 different cancers in the dataset.
y_train = urllib2.urlopen(url_y_train).read().strip().split()
y_test = urllib2.urlopen(url_y_test).read().strip().split()
# There are 14 different types of cancer, numbered 1 to 14, in the vectors y_test and y_train above.
# For visualising, you may find the names of the cancer types useful:
cancer_names_longform = ["Breast adenocarcinoma", "Prostate adenocarcinoma",
                         "Lung adenocarcinoma", "Collerectal adenocarcinoma",
                         "Lymphoma", "Bladder transitional cell carcinoma",
                         "Melanoma", "Uterine adenocarcinoma", "Leukemia",
                         "Renal cell carcinoma", "Pancreatic adenocarcinoma",
                         "Ovarian adenocarcinoma", "Pleural mesothelioma",
                         "Central nervous system"]
cancer_names_shortform = ["breast", "prostate", "lung", "collerectal",
                          "lymphoma", "bladder", "melanoma",
                          "uterus", "leukemia", "renal", "pancreas",
                          "ovary", "meso", "cns"]
# For testing you may want a merged training and test set.
# To save memory, these are commented out for now.
# X = pd.concat([X_train, X_test])
# y = y_train + y_test
# -
# ## Data Exploration
#
# Now that the data have been loaded in `X_train`, `X_test`, `y_train`, and `y_test`, we can take a look a closer look at our data. Note: It is convention to use large `X` for data matrices, and small `y` for target vectors.
#
# As can be seen, in our training set we have $p=16,063$ genes/features and $N=144$ tumours/samples:
# Training set dimensions: p=16,063 genes (rows) x N=144 tumour samples (columns).
X_train.shape
# To see a preview of the data, we can use the `head` and `tail` functions:
X_train.head()
X_test.tail()
# Let's see how the classes are distributed. First let's look at the number of unique values, which should equal 14, as we know we have 14 different cancer types:
len(np.unique(y_train))
# We can see how the cancer types are distributed using the `itemfreq` function of the SciPy `stats` package:
stats.itemfreq(y_train)
# Using the `cancer_names_longform` list we declared above, we can print tumour frequencies nicely:
# (itemfreq returns (label, count) pairs; labels are 1-based, hence the -1 index.)
for freq in stats.itemfreq(y_train):
    print "%s samples appear %s times (shortform: %s)." % (cancer_names_longform[int(freq[0])-1],
                                                           freq[1],
                                                           cancer_names_shortform[int(freq[0])-1])
# You can take a quick look at some statistics values for each gene using the useful `describe` function (we use `transpose` to perform the analysis on a gene-by-gene basis). For example you may want to look at mean expression levels for each gene to see if they are over-expressed or under-expressed:
# Note: The transpose() function here does not permanently transpose the data stored in X_train.
X_train.transpose().describe()
# ## Summary
#
# Now that we have read the data in a form which we can easily use, we move on to the deliverables that must be completed for Assignment 2.
# # Deliverables for Assignment 2
#
# ## Clustering
#
# ___Task___: Perform hierarchical clustering mimicking the approaches used by Ramaswamy et al. in their paper cited above. Plot a dendrogram of your results (SciPy provides dendrogram plotting functions, see <http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.cluster.hierarchy.dendrogram.html> for example) - or visualise your clustering in any other way you deem reasonable.
#
# Both SciKit Learn and SciPy offer hierarchical clustering algorithms, see <http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html> and <http://scikit-learn.org/stable/modules/clustering.html>.
#
# Notice that not all clustering techniques are useful for all purposes. In the case of this assignment, we know the number of clusters we are searching for - this is a requirement for certain clustering algorithms. Other algorithms may require parameters you might not immediately have available to you.
# +
# Your clustering code. Use as many cells as required, use Markdown cells to document where necessary.
# -
# ## Classification
#
# ___Task___: Use Support Vector Machines and a One Vs. All (OVA) approach to replicate the results from the Ramaswamy et al. paper.
#
# SciKit Learn provides an `SVM` package for Support Vector Machines (see <http://scikit-learn.org/stable/modules/svm.html>).
#
# Visualise your results appropriately using plots and tables to describe classification results on the test set.
# +
# Your classification code. Use as many cells as required, use Markdown cells to document where necessary.
# -
# # Important Notes
#
# ## Hints
#
# - You may find that scaling or normalising your data will yield better results. See the SciKit-Learn `scale` function: <http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.scale.html>.
# - The `preprocessing` module contains much other useful functionality, see: <http://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing>.
# - Cross validation train/test split indexes can be easily creating using SciKit Learn, see <http://scikit-learn.org/stable/modules/classes.html#module-sklearn.cross_validation>
# - Look up the dataset's analysis in _Elements of Statistical Learning_, specifically sections 18.3 (SVM One Vs. All, One Vs. One, etc.) and 13.3 (_k_-nearest neighbours).
#
# ## Grading
#
# Your grade will depend on a) quality/inventiveness of approach b) quality of plots or visualisations.
#
# ## Submission
#
# In Jupyter, click File -> Download As -> IPython Notebook (.ipynb) and send your completed notebook by email.
| Assignment2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# (A2:Python-packaging-cheat-sheet)=
# # Python packaging cheat sheet
# <hr style="height:1px;border:none;color:#666;background-color:#666;" />
# ```{figure} images/py_pkgs_cheatsheet.png
# ---
# width: 100%
# name: a2-cheat-sheet
# alt: The Python packaging cheatsheet. [Download the cheat sheet here.](https://github.com/UBC-MDS/py-pkgs/blob/master/py-pkgs/images/raw/py_pkgs_cheatsheet.pdf)
# ---
# The Python packaging cheatsheet. [Download the cheat sheet here.](https://github.com/UBC-MDS/py-pkgs/blob/master/py-pkgs/images/raw/py_pkgs_cheatsheet.pdf)
# ```
# Here we provide a cheat sheet guide for developing a Python package with the packaging tools discussed in this book, such as [poetry](https://python-poetry.org/), [cookiecutter](https://cookiecutter.readthedocs.io/), and [GitHub Actions](https://docs.github.com/en/actions). This cheat sheet should be used as a reference for those that know what they're doing and just need a quick look-up resource. If you're a beginner Python packager, it is recommended that you start from the beginning of this book.
#
# [**Download the cheat sheet from GitHub here.**](https://github.com/UBC-MDS/py-pkgs/blob/master/py-pkgs/images/raw/py_pkgs_cheatsheet.pdf)
#
# ```{note}
# While this cheat sheet specifically relies on [poetry](https://python-poetry.org/) as a Python package manager, builder, and publisher, the general packaging workflow shown is applicable to other packaging tools too.
# ```
| py-pkgs/A2-cheatsheet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
import pickle
from typing import List, Any, Dict, Tuple, Set, Iterable, Sequence
from operator import itemgetter
from itertools import combinations, starmap, groupby, product, chain, islice
import pandas as pd
import numpy as np
from scipy.spatial.distance import cosine
import networkx as nx
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.metrics import classification_report, accuracy_score
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
from conversant.conversation import Conversation
from conversant.conversation.parse import DataFrameConversationReader
from conversant.interactions import InteractionsGraph
from conversant.interactions.interactions_graph import PairInteractionsData
from conversant.interactions.reply_interactions_parser import get_reply_interactions_parser
from stance_classification.classifiers.base_stance_classifier import BaseStanceClassifier
from stance_classification.classifiers.greedy_stance_classifier import MSTStanceClassifier
from stance_classification.data.iac import FourForumInteractionsBuilder
from stance_classification.data.iac.fourforum_data import load_post_records, build_conversations
from stance_classification.data.iac.fourforum_labels import load_author_labels, AuthorLabel
from stance_classification.draw_utils import new_figure
# %matplotlib inline
from stance_classification.classifiers.maxcut_stance_classifier import MaxcutStanceClassifier
# + pycharm={"name": "#%%\n"}
# Root of the locally stored CreateDebate release of the IAC corpus.
base_dir = "/Users/ronpick/studies/stance/alternative/createdebate_released"
data_path = f"{base_dir}/post.txt"
# Column names for the tab-separated posts file; "\N" marks SQL-style NULLs.
header = ["discussion_id", "post_id", "author_id", "creation_date", "parent_post_id",
          "parent_missing", "text_id", "points", "discussion_stance_id", "is_rebuttal"]
df = pd.read_csv(data_path, sep='\t', names=header, na_values="\\N")
df
# + pycharm={"name": "#%%\n"}
# Discussion-level metadata: link, title, original poster (op) and the id of
# the opening description text.
discussions_info_path = f"{base_dir}/discussion.txt"
discussions_header = ["discussion_id", "link", "title", "op", "description_id"]
op_df = pd.read_csv(discussions_info_path, sep='\t', names=discussions_header, na_values="\\N")
# Lookup structures keyed by discussion id; the op map is kept as a list of
# pairs so it can be iterated when synthesizing root posts further below.
discussion_op_map = list(zip(op_df["discussion_id"], op_df["op"]))
discussion_title_map = dict(zip(op_df["discussion_id"], op_df["title"]))
discussion_inittext_map = dict(zip(op_df["discussion_id"], op_df["description_id"]))
len(discussion_op_map)
# + pycharm={"name": "#%%\n"}
# Topic assignment per discussion (discussion_id -> topic_id).
discussions_topic_path = f"{base_dir}/discussion_topic.txt"
discussions_topic_header = ["discussion_id", "topic_id"]
topic_df = pd.read_csv(discussions_topic_path, sep='\t', names=discussions_topic_header)
discussion_topic_map = dict(zip(topic_df["discussion_id"], topic_df["topic_id"]))
len(discussion_topic_map)
# + pycharm={"name": "#%%\n"}
# Keep a copy of the original discussion id before any later manipulation.
df["root_discussion_id"] = df["discussion_id"]
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Fill all missing parents as direct replies to the discussion title (i.e. set the missing parent post id to the discussion's own id)
# + pycharm={"name": "#%%\n"}
# Treat posts with a missing parent as direct replies to the discussion root
# (whose synthetic post id equals the discussion id).
# Vectorized fillna is equivalent to the original row-wise apply-with-lambda
# (fills NaN parent ids from the same row's discussion_id) but avoids a
# Python-level function call per row.
df["parent_post_id"] = df["parent_post_id"].fillna(df["discussion_id"])
df["parent_post_id"]
# + [markdown] pycharm={"name": "#%% md\n"}
# #### add the first post to the dataframe
# add the title of the discussion as posts in the discussion, so the conversation parser would add them as records.
#
# + pycharm={"name": "#%%\n"}
# Synthesize one root post per discussion (representing the discussion
# title/description) so the conversation parser materializes a node for it.
new_records = []
for discussion_id, op in discussion_op_map:
    init_text_id = discussion_inittext_map[discussion_id]
    init_text_id = int(init_text_id) if not pd.isna(init_text_id) else None
    title = discussion_title_map[discussion_id]
    title = int(title) if not pd.isna(title) else None
    record = {
        "discussion_id": discussion_id,
        "post_id": discussion_id,       # the root post reuses the discussion id
        "author_id": op,                # authored by the original poster
        "creation_date": "00:00",       # sorts before any real timestamp
        "parent_post_id": None,         # roots have no parent
        "parent_missing": 0,
        "text_id": init_text_id,
        "points": -1,
        "discussion_stance_id": 0.5,    # neutral stance placeholder for the root
        "is_rebuttal": None,
        "title": title
    }
    new_records.append(record)
# pd.concat replaces DataFrame.append (deprecated in pandas 1.4, removed in
# 2.0); behavior is the same: rows are added with a fresh RangeIndex and the
# new "title" key is aligned in as a column.
df = pd.concat([df, pd.DataFrame(new_records)], ignore_index=True)
# + pycharm={"name": "#%%\n"}
# add topic to the df
# Attach topic id and title to every post via the per-discussion lookup maps.
df["topic"] = df.apply(lambda row: discussion_topic_map[row["discussion_id"]], axis=1)
df["title"] = df.apply(lambda row: discussion_title_map[row["discussion_id"]], axis=1)
df
# + pycharm={"name": "#%%\n"}
# Persist the augmented dataframe for downstream experiments.
df.to_csv("/Users/ronpick/workspace/zero-shot-stance/data/createdebate/iac-createdebate-subconvs.csv", index=False)
# + pycharm={"name": "#%%\n"}
# Column-name mapping telling the conversation reader which dataframe columns
# hold the node id, author, timestamp and parent id.
# NOTE(review): "pasre_strategy" is a typo for "parse_strategy" — kept as-is.
pasre_strategy = {
    "node_id": "post_id",
    "author": "author_id",
    "timestamp": "creation_date",
    "parent_id": "parent_post_id"
}
parser = DataFrameConversationReader(pasre_strategy)
# One Conversation object per discussion.
gb = df.groupby("discussion_id")
convs: List[Conversation] = list(tqdm(map(parser.parse, map(itemgetter(1), gb))))
len(convs)
# + pycharm={"name": "#%%\n"}
# Treat each direct reply to the discussion root as its own sub-conversation.
sub_convs = [Conversation(child) for conv in convs for child in conv.root.children]
len(sub_convs)
# + [markdown] pycharm={"name": "#%% md\n"}
# # conversation stats
# + pycharm={"name": "#%%\n"}
# Size distribution of the sub-conversations.
sizes = [c.size for c in sub_convs]
print(len(sizes))
print(np.mean(sizes))
print(np.median(sizes))
pd.Series(sizes).describe()
# + pycharm={"name": "#%%\n"}
pd.Series(sizes).plot.hist()
# + pycharm={"name": "#%%\n"}
# Restrict to sub-conversations with at least 10 posts.
filtered_sizes = [s for s in sizes if s >= 10]
print(len(filtered_sizes))
print(np.mean(filtered_sizes))
print(np.median(filtered_sizes))
# + pycharm={"name": "#%%\n"}
def decide_stance(self, graph: nx.Graph, cut_nodes: Set[Any], labeled_nodes: Dict[Any, int]) -> int:
    """
    Decide which stance label the cut side of a partition carries: 1 if the
    pivot node falls inside ``cut_nodes``, else 0.

    NOTE(review): this looks like a method pasted out of a classifier class —
    it takes ``self`` and reads ``self.op`` / ``self.graph``, and it calls
    ``self.__get_ordered_candidates_for_pivot``, a name-mangled private
    attribute that will not resolve as written outside its defining class
    (and which, judging by the sibling function below, returns a ranked
    sequence rather than a single node). It also appears unused in the rest
    of this notebook — confirm before relying on it.

    :param labeled_nodes: gold node -> label mapping (not used here).
    :param graph: the interaction graph that was partitioned.
    :param cut_nodes: the set of nodes on one side of the cut.
    :param weight_field:
    :return: return the inferred stance label of the cut nodes.
    """
    # Prefer the original poster as pivot when present in the graph;
    # otherwise fall back to a centrality-based candidate.
    if self.op in self.graph.nodes:
        pivot_node = self.op
    else:
        pivot_node = self.__get_ordered_candidates_for_pivot(graph)
    # The cut side "supports" iff it contains the pivot.
    cut_nodes_support = pivot_node in cut_nodes
    return int(cut_nodes_support)
def get_ordered_candidates_for_pivot(graph: nx.Graph, weight_field: str = "weight") -> Sequence[Any]:
    """Rank nodes as pivot candidates by weighted closeness centrality,
    most central first.

    Closeness centrality treats the distance attribute as a cost, so each
    edge gets an inverse-weight attribute: strong interaction => short
    distance.
    """
    inv_weight_field = "inv_weight"
    for _, _, edge_data in graph.edges(data=True):
        # BUGFIX: ``edge_data`` is the edge-attribute mapping itself; the
        # original ``edge_data.data[weight_field]`` raises AttributeError on
        # the plain dict attributes used elsewhere in this notebook (edge
        # weights are read there as ``d["weight"]``). Subscripting works for
        # both plain dicts and mapping-like interaction objects.
        weight = edge_data[weight_field]
        edge_data[inv_weight_field] = 1 / weight
    node_centralities = nx.closeness_centrality(graph, distance=inv_weight_field)
    return [node for node, _ in sorted(node_centralities.items(), key=itemgetter(1), reverse=True)]
def get_pivot_node(graph: nx.Graph, labeled_authors: Set[Any], weight_field: str = "weight") -> Any:
    """Return the most central node (weighted closeness) that also carries a
    gold label, or None when no labeled node appears among the candidates."""
    for candidate in get_ordered_candidates_for_pivot(graph, weight_field=weight_field):
        if candidate in labeled_authors:
            return candidate
    return None
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Load Author labels
# + pycharm={"name": "#%%\n"}
def get_majority_vote(labels: List[int]) -> int:
    """Majority label of a list of binary labels; ties (mean == 0.5) go to 1."""
    return 1 if np.mean(labels) >= 0.5 else 0
def get_author_labels(c: Conversation) -> Dict[Any, int]:
    """Aggregate per-post stance labels into one label per author by majority
    vote over all of that author's posts in the conversation."""
    labels_by_author: Dict[Any, List[int]] = {}
    for _depth, node in c.iter_conversation():
        labels_by_author.setdefault(node.author, []).append(node.data["discussion_stance_id"])
    return {author: get_majority_vote(post_labels)
            for author, post_labels in labels_by_author.items()}
# Gold author labels per sub-conversation; drop conversations with none.
author_labels_per_conversation = {c.id: get_author_labels(c) for c in sub_convs}
author_labels_per_conversation = {k: v for k, v in author_labels_per_conversation.items() if len(v) > 0}
print(len(author_labels_per_conversation))
print(sum(len(v) for v in author_labels_per_conversation.values()))
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Load posts labels (Chang Li)
# + pycharm={"name": "#%%\n"}
def load_post_label_mapping(path: str) -> Dict[str, int]:
    """Read the pickled post-id -> stance-label dict stored at *path*.

    NOTE: pickle is only safe on trusted files; here it loads local research
    data.
    """
    with open(path, 'rb') as pickle_file:
        return pickle.load(pickle_file)
def decode_original_post_identification(post_id: str) -> Tuple[str, int, int]:
    """Split a combined post identifier of the form ``<topic>.<numeric_id>``.

    All but the last five digits of the numeric part encode the discussion
    index; the last three encode the post index (the two digits in between
    are ignored — presumably padding; TODO confirm).
    """
    topic, numeric_part = post_id.split('.')
    discussion_index = int(numeric_part[:-5])
    post_index = int(numeric_part[-3:])
    return topic, discussion_index, post_index
# Path to Chang Li's post-level stance labels (a pickled dict).
labels_path = "/home/dev/data/stance/chang-li/data/compressed-4forum/allPostLabelMap.pickle"
raw_post_labels = load_post_label_mapping(labels_path)
# Re-key by (discussion index, post index) and fold the stance ids onto {0, 1}.
post_labels = {itemgetter(1,2)(decode_original_post_identification(raw_post_id)): (stance % 2) for (raw_post_id, stance) in raw_post_labels.items()}
len(post_labels)
# + pycharm={"name": "#%%\n"}
def get_author_labels(conv: Conversation) -> Dict[Any, int]:
    """Look up the precomputed gold author labels for *conv*; None when the
    conversation has no labeled authors.

    NOTE(review): this redefinition shadows the earlier ``get_author_labels``
    that computed labels from the conversation itself.
    """
    return author_labels_per_conversation.get(conv.id)
def get_maxcut_results(graph: InteractionsGraph, op: Any) -> MaxcutStanceClassifier:
    """Fit the max-cut stance classifier on the interactions graph, anchored
    at the original poster *op*, and return the fitted classifier."""
    classifier = MaxcutStanceClassifier(weight_field=graph.WEIGHT_FIELD)
    classifier.set_input(graph.graph, op)
    classifier.classify_stance()
    return classifier
def get_greedy_results(graph: InteractionsGraph, op: Any) -> BaseStanceClassifier:
    """Fit the greedy MST stance classifier rooted at *op* and return it."""
    mst_classifier = MSTStanceClassifier()  # weight_field=graph.WEIGHT_FIELD
    mst_classifier.set_input(graph.graph)
    mst_classifier.classify_stance(op)
    return mst_classifier
def get_author_preds(clf: "BaseStanceClassifier", pivot: Any, labels: "Dict[Any, int]" = None) -> Dict[Any, int]:
    """Turn a classifier's supporter/opposer partition into per-author labels.

    The supporter side gets the pivot's gold label and the opposer side gets
    the complementary label (this assumes the classifier placed the pivot on
    the supporter side).

    :param clf: fitted classifier exposing get_supporters()/get_complement().
    :param pivot: the author whose known gold label anchors the two sides.
    :param labels: gold author -> label mapping used to look up the pivot's
        label. Defaults to the notebook-global ``authors_labels``, preserving
        the original behavior, which silently depended on that global.
    :return: author -> predicted label for every author in the partition.
    """
    if labels is None:
        labels = authors_labels  # backward-compatible fallback to the global
    support_label = labels[pivot]
    opposer_label = 1 - support_label
    supporters = clf.get_supporters()
    opposers = clf.get_complement()
    preds = {}
    for supporter in supporters:
        preds[supporter] = support_label
    for opposer in opposers:
        preds[opposer] = opposer_label
    return preds
def align_gs_with_predictions(authors_labels: Dict[Any, int], author_preds: Dict[Any, int]) -> Tuple[List[int], List[int]]:
    """Pair gold author labels with predictions, keeping only authors that
    received a (non-None) prediction. Returns position-aligned
    (gold, predicted) lists."""
    aligned = [
        (gold, author_preds.get(author))
        for author, gold in authors_labels.items()
        if author_preds.get(author) is not None
    ]
    y_true = [gold for gold, _ in aligned]
    y_pred = [pred for _, pred in aligned]
    return y_true, y_pred
def align_posts_gs_with_predictions(conv: Conversation, author_preds: Dict[Any, int]) -> Tuple[List[int], List[int]]:
    """Collect position-aligned (gold, predicted) post-level labels for a
    conversation. Posts with an undecided (0.5) or missing gold label, and
    posts whose author has no prediction, are skipped."""
    y_true: List[int] = []
    y_pred: List[int] = []
    for _, node in conv.iter_conversation():
        gold = node.data["discussion_stance_id"]
        if gold == 0.5 or pd.isna(gold):
            continue
        prediction = author_preds.get(node.author)
        if prediction is None:
            continue
        y_true.append(gold)
        y_pred.append(prediction)
    return y_true, y_pred
def predict_for_partition(true: List[int], preds: List[int]) -> Tuple[List[int], List[int]]:
    """Flip the predicted binary labels wholesale when they agree with the
    gold labels on fewer than half the items — i.e. score the partition under
    its best of the two possible label assignments."""
    if accuracy_score(true, preds) < 0.5:
        preds = [1 - p for p in preds]
    return true, preds
# + pycharm={"name": "#%%\n"}
interactions_parser = get_reply_interactions_parser()
# Accumulators for the evaluation loop below.
# Author-level gold/predicted labels, one list per kept conversation:
author_true, author_pred = [], []
# Same, after the per-conversation best-partition flip (predict_for_partition):
author_true_best, author_pred_best = [], []
# Post-level gold/predicted labels:
posts_true, posts_pred = [], []
post_true_best, post_pred_best = [], []
filtered_convs = []      # conversations that survived all filters
pivot_nodes = []         # chosen pivot author per core graph
full_graphs = []         # full interaction graph per kept conversation
core_graphs = []         # core subgraph per kept conversation
# Max-cut classifier per conversation, keyed by conversation id.
maxcut_results: Dict[Any, MaxcutStanceClassifier] = {}
classification_results: List[Tuple[List[int], List[int]]] = []
# Bookkeeping of the reasons conversations were skipped (indices into sub_convs).
empty_core = []
unlabeled_conversations = []
unlabeled_op = []
insufficient_author_labels = []
too_small_cut_value = []
op_not_in_core = []
large_graphs = []
def calc_weight(interactions: PairInteractionsData) -> float:
    """Interaction weight for a pair of authors: their direct reply count.
    (A quote-count alternative is kept below, commented out.)"""
    # n_quotes = interactions["quotes"]
    # return n_quotes
    return interactions["replies"]
"""abortion = 3
gay marriage = 8
marijuana = 10
obamacare = 15
"""
# convs[0].root.data["topic"]
# conv: Conversation
relevant_topics = {3,8,10,15}
count_conv = 0
for i, conv in tqdm(enumerate(sub_convs)):
# topic = conv.root.data["topic"]
# print(topic)
# if topic not in relevant_topics: continue
count_conv += 1
authors_labels = get_author_labels(conv)
if authors_labels is None:
unlabeled_conversations.append(i)
continue
op = conv.root.author
if op not in authors_labels:
unlabeled_op.append(i)
continue
if len(authors_labels) < 3:
insufficient_author_labels.append(i)
continue
interaction_graph = interactions_parser.parse(conv)
interaction_graph.set_interaction_weights(calc_weight)
zero_edges = [(v, u) for v, u, d in interaction_graph.graph.edges(data=True) if d["weight"] == 0]
interaction_graph.graph.remove_edges_from(zero_edges)
pivot_node = get_pivot_node(interaction_graph.graph, authors_labels, weight_field="weight")
clf = get_greedy_results(interaction_graph, pivot_node)
core_interactions = interaction_graph.get_core_interactions()
if core_interactions.graph.size() == 0:
empty_core.append(i)
if core_interactions.graph.size() > 0:
components = list(nx.connected_components(core_interactions.graph))
core_interactions = core_interactions.get_subgraph(components[0])
pivot_node = get_pivot_node(core_interactions.graph, authors_labels, weight_field="weight")
pivot_nodes.append(pivot_node)
maxcut = get_maxcut_results(core_interactions, pivot_node)
if maxcut.cut_value < 3:
too_small_cut_value.append(i)
else:
maxcut_results[conv.id] = maxcut
clf = maxcut
if core_interactions.graph.order() > 120:
large_graphs.append(conv)
continue
authors_preds = get_author_preds(clf, pivot_node)
true, preds = align_gs_with_predictions(authors_labels, authors_preds)
author_true.append(true)
author_pred.append(preds)
true_best, preds_best = predict_for_partition(true, preds)
author_true_best.append(true_best)
author_pred_best.append(preds_best)
true, preds = align_posts_gs_with_predictions(conv, authors_preds)
posts_true.append(true)
posts_pred.append(preds)
true, preds = predict_for_partition(true, preds)
post_true_best.append(true)
post_pred_best.append(preds)
filtered_convs.append(conv)
full_graphs.append(interaction_graph)
core_graphs.append(core_interactions)
classification_results.append((true, preds))
# + pycharm={"name": "#%%\n"}
# Summary of corpus coverage and of every filtering step above.
print(f"total number of conversations (in all topics): {len(sub_convs)}")
print(f"total number of conversations (in the relevant topics): {count_conv}")
print(f"total number of conversations with labeled authors (in all topics): {len(author_labels_per_conversation)}")
print(f"total number of conversations with labeled authors (in the relevant topics): {count_conv - len(unlabeled_conversations)}")
print(f"number of conversations in eval: {len(filtered_convs)}")
labeled_authors = sum(len(v) for v in author_labels_per_conversation.values())
print(f"total number of labeled authors: {labeled_authors}")
print(f"number of authors in eval: {sum(map(len, author_true))}")
print(f"number of posts in eval: {sum(map(len, posts_true))}")
print("=========")
print(f"number of conversations with empty core: {len(empty_core)}")
print(f"number of conversations with op not in core: {len(op_not_in_core)}")
print(f"number of conversations with too large core: {len(large_graphs)}")
print(f"number of conversations with too small cut value: {len(too_small_cut_value)}")
print(f"number of unlabeled conversations: {len(unlabeled_conversations)}")
print(f"number of conversations with unlabeled op: {len(unlabeled_op)}")
print(f"number of conversations with insufficient labeled authors: {len(insufficient_author_labels)}")
# + pycharm={"name": "#%%\n"}
# i, size = not_in_core_sorted[20]
# print("index", i)
# "size", size
# + pycharm={"name": "#%%\n"}
# c = sub_convs[i]
# print(c.op)
# ig = interactions_parser.parse(c)
# ig.set_interaction_weights(calc_weight)
# pos = nx.spring_layout(ig.graph, seed=19191)
# nx.draw_networkx(ig.graph, pos)
# + pycharm={"name": "#%%\n"}
# sorted(nx.closeness_centrality(ig.graph, distance="replies").items(), key=itemgetter(1), reverse=True)
# + pycharm={"name": "#%%\n"}
# core = ig.get_core_interactions()
# pos = nx.spring_layout(core.graph, seed=19191)
# nx.draw_networkx(core.graph, pos)
# + pycharm={"name": "#%%\n"}
# nx.closeness_centrality(core.graph, distance="weight")
# + pycharm={"name": "#%%\n"}
# labeled_authors = author_labels_per_conversation[c.id].keys()
# get_pivot_node(core.graph, labeled_authors, weight_field="weight")
# + pycharm={"name": "#%%\n"}
# list(core.graph.edges(data=True))
# + pycharm={"name": "#%%\n"}
# Author-level results, aggregated over all kept conversations.
y_true = list(chain(*author_true))
y_pred = list(chain(*author_pred))
print(classification_report(y_true, y_pred))
# + pycharm={"name": "#%%\n"}
# Author-level results with the per-conversation best-partition flip.
y_true = list(chain(*author_true_best))
y_pred = list(chain(*author_pred_best))
print(classification_report(y_true, y_pred))
# + pycharm={"name": "#%%\n"}
# Post-level results.
# NOTE(review): the variable names look swapped here relative to the
# author-level cells (y_pred is built from posts_true and vice versa), and
# the `% 2` folds the gold stance ids onto {0, 1}. Accuracy is unaffected by
# the swap but per-class precision/recall are exchanged — confirm which
# orientation was intended.
y_pred = [l%2 for l in list(chain(*posts_true))]
y_true = list(chain(*posts_pred))
print(classification_report(y_true, y_pred))
# + pycharm={"name": "#%%\n"}
# Post-level results with the best-partition flip (same caveat as above).
y_pred = [l%2 for l in list(chain(*post_true_best))]
y_true = list(chain(*post_pred_best))
print(classification_report(y_true, y_pred))
# + pycharm={"name": "#%%\n"}
def compute_pairs_average_distance(
        pairs: Iterable[Tuple[int, int]],
        embeddings: Sequence[np.ndarray]
) -> float:
    """Mean cosine distance over the embedding pairs addressed by *pairs*
    (each pair holds two indices into *embeddings*)."""
    distances = [cosine(embeddings[i], embeddings[j]) for i, j in pairs]
    return float(np.mean(distances))
def compute_average_angle_from_node(
        node_index: int,
        group_indices: Sequence[int],
        embeddings: Sequence[np.ndarray]
) -> float:
    """Mean cosine distance between one node's embedding and each embedding
    in the given group."""
    distances = [cosine(embeddings[node_index], embeddings[i]) for i in group_indices]
    return float(np.mean(distances))
def compute_group_average_angle(
        group_indices: Sequence[int],
        embeddings: Sequence[np.ndarray]
) -> float:
    """Mean cosine distance over all unordered pairs within the group."""
    distances = [cosine(embeddings[i], embeddings[j])
                 for i, j in combinations(group_indices, 2)]
    return float(np.mean(distances))
def compute_cross_groups_average_angle(
        group1: Sequence[int],
        group2: Sequence[int],
        embeddings: Sequence[np.ndarray]
) -> float:
    """Mean cosine distance over the cross product of the two groups."""
    distances = [cosine(embeddings[i], embeddings[j])
                 for i, j in product(group1, group2)]
    return float(np.mean(distances))
# + pycharm={"name": "#%%\n"}
# Per-conversation embedding-angle statistics for the max-cut partitions.
supporters_avg_angles = []
opposers_avg_angles = []
mean_cross_angle = []
op2supporters = []
op2opposers = []
# NOTE(review): maxcut_results is populated with conversation ids as keys
# (maxcut_results[conv.id] = maxcut in the main loop) but is indexed here
# with 0..len-1 — this only works if conversation ids happen to be 0-based
# consecutive integers; otherwise it raises KeyError. Confirm intended.
for i in range(len(maxcut_results)):
    maxcut = maxcut_results[i]
    op, all_embeddings, supporters, opposers =\
        maxcut.op, maxcut.embeddings, maxcut.get_supporters(), maxcut.get_complement()
    op2supporters.append(compute_average_angle_from_node(op, supporters, all_embeddings))
    op2opposers.append(compute_average_angle_from_node(op, opposers, all_embeddings))
    supporters_avg_angles.append(compute_group_average_angle(supporters, all_embeddings))
    opposers_avg_angles.append(compute_group_average_angle(opposers, all_embeddings))
    mean_cross_angle.append(compute_cross_groups_average_angle(supporters, opposers, all_embeddings))
print(f"total conversations {len(maxcut_results)}")
print(f"supporters avg. cosine {np.nanmean(supporters_avg_angles)}")
print(f"opposers avg. cosine {np.nanmean(opposers_avg_angles)}")
print(f"cross groups avg. cosine {np.mean(mean_cross_angle)}")
print(f"op to supporters avg. cosine {np.mean(op2supporters)}")
print(f"op to opposers avg. cosine {np.mean(op2opposers)}")
# + pycharm={"name": "#%%\n"}
# Keep conversations where the op is markedly closer (in cosine distance) to
# the supporter side than to the opposer side: ratio > 2 and absolute gap > 1.
# NOTE(review): op2supporters/op2opposers were collected over maxcut_results,
# which may be shorter than filtered_convs — confirm the indices align.
strong_convs_indices = []
for i in range(len(filtered_convs)):
    op2s = op2supporters[i]
    op2o = op2opposers[i]
    # Skip degenerate cases where either average distance is exactly zero.
    if op2supporters[i] * op2opposers[i] == 0:
        continue
    diff = op2o - op2s
    ratio = op2o / op2s
    if (ratio > 2) and (diff > 1):
        strong_convs_indices.append(i)
len(strong_convs_indices)
# + pycharm={"name": "#%%\n"}
# strong_true, strong_preds = zip(*[classification_results[i] for i in strong_convs_indices])
# strong_true = list(chain(*strong_true))
# strong_preds = list(chain(*strong_preds))
# Author-level report restricted to the "strong" conversations found above.
strong_true = list(chain(*[author_true_best[i] for i in strong_convs_indices]))
strong_preds = list(chain(*[author_pred_best[i] for i in strong_convs_indices]))
print(classification_report(strong_true, strong_preds))
# + pycharm={"name": "#%%\n"}
# Rank the kept conversations by the order (node count) of their core graph.
max_i = 0
max_shape = 0
# sizes = [(i, g.graph.order()) for i, g in enumerate(core_graphs)]
sizes = [(i, core_graphs[i].graph.order()) for i in range(len(filtered_convs))]
sorted_sized = sorted(sizes, key=itemgetter(1), reverse=True)
sorted_sized[:20]
# + pycharm={"name": "#%%\n"}
# Detailed angle statistics for one selected conversation.
# NOTE(review): as above, maxcut_results is keyed by conversation id, so
# positional indexing only works for 0-based integer ids — verify.
result_index = 0
maxcut = maxcut_results[result_index]
op, emb, supporters, opposers = maxcut.op, maxcut.embeddings, maxcut.get_supporters(), maxcut.get_complement()
s_cosine = compute_group_average_angle(supporters, emb)
o_cosine = compute_group_average_angle(opposers, emb)
cross_cosine = compute_cross_groups_average_angle(supporters, opposers, emb)
op2support = compute_average_angle_from_node(op, supporters, emb)
op2oppose = compute_average_angle_from_node(op, opposers, emb)
print(f"num supporters: {len(supporters)}")
print(f"num opposers: {len(opposers)}")
print(f"supporters avg. cosine: {s_cosine}")
print(f"opposers avg. cosine: {o_cosine}")
print(f"cross-groups avg. cosine: {cross_cosine}")
print(f"op <-> supporters avg. cosine: {op2support}")
print(f"op <-> opposers avg. cosine: {op2oppose}")
print(f"supporters - opposers diff cosine with op: {op2oppose - op2support}")
print(f"supporters - opposers ratio cosine with op: {op2oppose / op2support}")
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Author classification results
# For the current conversation
# + pycharm={"name": "#%%\n"}
# Author-level classification report for the selected conversation.
true = author_true[result_index]
preds = author_pred[result_index]
print(classification_report(true, preds))
# + pycharm={"name": "#%%\n"}
# Same, after the best-partition flip.
true = author_true_best[result_index]
preds = author_pred_best[result_index]
print(classification_report(true, preds))
# -
# #### Post classification results
# For the current conversation
# + pycharm={"name": "#%%\n"}
# Post-level classification report for the selected conversation.
true = posts_true[result_index]
preds = posts_pred[result_index]
print(classification_report(true, preds))
# -
# #### Post partition classification results
# For the current conversation
# + pycharm={"name": "#%%\n"}
# Post-level report after the best-partition flip.
true = post_true_best[result_index]
preds = post_pred_best[result_index]
print(classification_report(true, preds))
# + pycharm={"name": "#%%\n"}
# Compare the predicted partition with the gold labels for this conversation.
conv = filtered_convs[result_index]
author_labels = get_author_labels(conv)
true_supporters = [n for n, l in author_labels.items() if l == 1]
true_opposers = [n for n, l in author_labels.items() if l == 0]
# Authors with a gold label that ended up in neither predicted group.
unknown_labels = set(author_labels.keys()) - (set(supporters) | set(opposers))
len(author_labels), len(true_opposers), len(true_supporters), len(unknown_labels)
# + pycharm={"name": "#%%\n"}
# 2-D PCA projection of the node embeddings from the max-cut solution.
# Color = gold label (green supporter / red opposer / grey unlabeled),
# marker = predicted side ('+' supporter side, 'x' opposer side),
# blue dot = the original poster.
plt.figure(figsize=(8,6))
X = np.vstack([np.array(x) for x in emb.values()])
pca = PCA(n_components=2)
X_2d = pca.fit_transform(X)
# X_2d = TSNE(n_components=2).fit_transform(X)
print(pca.explained_variance_)
op = maxcut.op
nodes = emb.keys()
# Partition embedding indices by (gold label, predicted side).
tp_supporters_indices = [i for i, n in enumerate(nodes) if n in true_supporters and n in supporters]
fn_supporters_indices = [i for i, n in enumerate(nodes) if n in true_supporters and n in opposers]
tp_opposers_indices = [i for i, n in enumerate(nodes) if n in true_opposers and n in opposers]
fn_opposers_indices = [i for i, n in enumerate(nodes) if n in true_opposers and n in supporters]
unlabeled_supporters = [i for i, n in enumerate(nodes) if n not in author_labels and n in supporters]
unlabeled_opposers = [i for i, n in enumerate(nodes) if n not in author_labels and n in opposers]
op_index = [i for i, n in enumerate(nodes) if n == op]
plt.scatter(X_2d[tp_supporters_indices, 0], X_2d[tp_supporters_indices, 1], color='g', marker='+')
plt.scatter(X_2d[fn_supporters_indices, 0], X_2d[fn_supporters_indices, 1], color='g', marker='x')
plt.scatter(X_2d[tp_opposers_indices, 0], X_2d[tp_opposers_indices, 1], color='r', marker='+')
plt.scatter(X_2d[fn_opposers_indices, 0], X_2d[fn_opposers_indices, 1], color='r', marker='x')
plt.scatter(X_2d[unlabeled_supporters, 0], X_2d[unlabeled_supporters, 1], color='grey', marker='+')
plt.scatter(X_2d[unlabeled_opposers, 0], X_2d[unlabeled_opposers, 1], color='grey', marker='x')
plt.scatter([X_2d[op_index, 0]], [X_2d[op_index, 1]], color='b', marker='o')
# colors = ['b' if i == op else 'g' if i in supporters else 'r' for i in nodes]
# markers = ['o' if i ==op else 'x' if i in supporters else '+' for i in nodes]
# plt.scatter(X_2d[:, 0], X_2d[:, 1], color=colors)
# op_index = [i for i, n in enumerate(nodes) if n == op][0]
# + pycharm={"name": "#%%\n"}
# Draw the core interaction graph with the same semantics as the scatter
# plot above: squares = predicted supporters, triangles = predicted opposers;
# green/red/grey = gold supporter/opposer/unlabeled; blue circle = op.
new_figure()
graph = maxcut.graph
pos = nx.spring_layout(graph)
all_nodes = list(nodes)
tps = [all_nodes[i] for i in tp_supporters_indices]
fns = [all_nodes[i] for i in fn_supporters_indices]
fno = [all_nodes[i] for i in fn_opposers_indices]
tpo = [all_nodes[i] for i in tp_opposers_indices]
unks = [all_nodes[i] for i in unlabeled_supporters]
unko = [all_nodes[i] for i in unlabeled_opposers]
op = [all_nodes[i] for i in op_index]
nx.draw_networkx_nodes(graph, pos, nodelist=tps, node_color='g', node_shape='s', edgecolors="black")
nx.draw_networkx_nodes(graph, pos, nodelist=fns, node_color='g', node_shape='^', edgecolors="black")
nx.draw_networkx_nodes(graph, pos, nodelist=fno, node_color='r', node_shape='s', edgecolors="black")
nx.draw_networkx_nodes(graph, pos, nodelist=tpo, node_color='r', node_shape='^', edgecolors="black")
nx.draw_networkx_nodes(graph, pos, nodelist=unks, node_color='grey', node_shape="s", edgecolors="black")
nx.draw_networkx_nodes(graph, pos, nodelist=unko, node_color='grey', node_shape="^", edgecolors="black")
nx.draw_networkx_nodes(graph, pos, nodelist=op, node_color='b', node_shape='o', edgecolors="black")
node_labels = {n: str(n) for n in graph.nodes}
nx.draw_networkx_labels(graph, pos, labels=node_labels, font_color="tab:brown")
# Draw the edges that are in the cut (width scaled by log2 of the weight).
edge_weights = [np.log2(graph[e[0]][e[1]]['weight']) for e in maxcut.cut]
nx.draw_networkx_edges(graph, pos, edgelist=maxcut.cut, edge_color="black", width=edge_weights)
#
# # Draw the edges that are not in the cut
leave = [e for e in graph.edges if e not in maxcut.cut]
non_cut_weigths = [np.log2(graph[e[0]][e[1]]['weight']) for e in leave]
nx.draw_networkx_edges(graph, pos, edgelist=leave, edge_color="darkgray")
# + pycharm={"name": "#%%\n"}
# Draw the max-cut partition with the classifier's own drawing helper,
# annotated with the gold author labels.
conv_id = filtered_convs[result_index].id
author_labels = author_labels_per_conversation[conv_id]
print(author_labels)
maxcut.draw(true_labels=author_labels)
# + pycharm={"name": "#%%\n"}
# Full interaction graph of the selected conversation.
full_graph = full_graphs[result_index]
layout = nx.spring_layout(full_graph.graph)
nx.draw(full_graph.graph, layout)
# + pycharm={"name": "#%%\n"}
# Its core subgraph.
kcore = core_graphs[result_index]
layout = nx.spring_layout(kcore.graph)
nx.draw(kcore.graph, layout)
kcore.graph.order()
# + [markdown] pycharm={"name": "#%% md\n"}
#
# + pycharm={"name": "#%%\n"}
# + pycharm={"name": "#%%\n"}
## Predicting posts labels
# + pycharm={"name": "#%%\n"}
| experiments/notebooks/iac-createdebate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploring the CIA Factbook
#
# In this project we explore the Central Intelligence Agency (CIA) World Factbook, a compendium of statistics about all countries on Earth, plus some regions like the European Union or Macau.
# The database, `factbook.db`, was downloaded from [this GitHub repository](https://github.com/factbook) in January 2020.
# It was released on 1 November 2015 and has (as of 05 January 2020) never been updated.
# More information on the Factbook can be found on [“The World Factbook” Wikipedia page](https://en.wikipedia.org/wiki/The_World_Factbook).
#
# The main aim of this project is to learn to explore SQL databases with Pandas.
#
# ## Interfacing SQL with Pandas
#
# We will access information in the database using SQL queries, and analyse it using the Pandas library.
# To this end, we will define a connection `conn` to the database and a function `SQL_to_pandas` taking an SQL query and returning its result in a Pandas dataframe.
# We first import the relevant packages:
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# We then define the connection and `SQL_to_pandas` function:
conn = sqlite3.connect('../Data/factbook.db')
def SQL_to_pandas(query, connection=None):
    '''
    Takes an SQL query and returns its results as a Pandas dataframe.

    query: string
        The SQL query to execute.
    connection: sqlite3.Connection, optional
        Connection to run the query against. Defaults to the
        notebook-wide `conn`, so existing calls `SQL_to_pandas(query)`
        behave exactly as before.
    '''
    # Fall back to the module-level connection when none is supplied.
    con = conn if connection is None else connection
    df = pd.read_sql_query(query, con)
    return df
# ## Data exploration
#
# ### Structure of the database
#
# To see what is in the database, let us print information on the tables it contains:
SQL_to_pandas('SELECT * FROM sqlite_master WHERE type="table"')
# The database contains two tables: `facts` and `sqlite_sequence`.
# Let us print the 5 first rows of the former:
SQL_to_pandas('SELECT * FROM facts LIMIT 5')
# It contains 13 columns:
# * `Id`: an integer given to each country, which seem to be listed in alphabetical order,
# * `code`: an apparently two-letter code for each country,
# * `name`: the country name,
# * `area`: the total area in square kilometers,
# * `area_land`: the area covered by land in square kilometers,
# * `area_water`: the area covered by water in square kilometers,
# * `population`: the total population,
# * `population_growth`: annual population growth as a percentage,
# * `birth_rate`: the birth rate (number of births per year per 1000 people),
# * `death_rate`: the death rate (number of deaths per year per 1000 people),
# * `migration_rate`: the migration rate,
# * `created_at`: the date and time when the entry was added (in year-month-day hour-minute-second format),
# * `updated_at`: the date and time when the entry was last updated (in year-month-day hour-minute-second format).
#
# Let us check that the country code has always two characters:
country_codes = SQL_to_pandas('SELECT code FROM facts')
country_codes['code'].apply(len).value_counts()
# Each of the 261 entries of the `code` column indeed has length 2.
# Let us now print the other table:
SQL_to_pandas('SELECT * FROM sqlite_sequence')
# This table contains only the number of rows in the other one.
#
# ### Populations and their variations
#
# Let us focus on the populations and their variations.
# The following query prints the minima and maxima of the populations and their growths.
SQL_to_pandas('''
SELECT
MIN(population) AS "Lowest Population",
MAX(population) AS "Highest Population",
MIN(population_growth) AS "Lowest Population Growth",
MAX(population_growth) AS "Highest Population Growth"
FROM facts
''').style.hide_index() # hide the index for readability
# There seem to be two issues:
# * The lowest population is 0, which looks strange.
# * The highest population is larger than 7 billion, which looks like the total world population.
#
# To see what is going on, let us print the names of entries with lowest and highest populations:
SQL_to_pandas('''
SELECT * FROM facts WHERE population IN
((SELECT MIN(population) FROM facts),
(SELECT MAX(population) FROM facts))
''').style.hide_index()
# There is one country with no population at all: Antartica.
# The highest reported population is that of the entire world.
#
# Let us now show histograms for the population, population growth, birth rate, and death rate, excluding these two particular entries:
pop_birth_death = SQL_to_pandas('''
SELECT name, population, population_growth, birth_rate, death_rate
FROM facts
WHERE name NOT IN ("Antartica","World")
''')
pop_birth_death.hist(figsize=(10,7))
plt.show()
# We notice that a large majority of countries have less than 200 million inhabitants, while a few outliers have more than one billion.
# Let us see which countries have the highest population:
pop_birth_death[pop_birth_death['population'] > 200*10**6].sort_values('population', ascending = False).style.hide_index()
# The five most populous countries are China, India, the United States of America, Indonesia, and Brazil.
# We notice that the European Union is included in the dataset, although it includes several countries.
# If one is to do precise statistics on countries, such conglomerates, as well as other regions which are technically not countries, will need to be removed.
#
# Let us look at the population distribution without these outliers:
pop_no_outl = SQL_to_pandas('''
SELECT population
FROM facts
WHERE population < 1000000000
AND population > 0
''')
pop_no_outl.hist()
plt.show()
# Most countries seem to have less than 50,000,000 inhabitants.
# Let us focus on them:
pop_small = SQL_to_pandas('''
SELECT population
FROM facts
WHERE population < 50000000
AND population > 0
''')
pop_small.hist()
plt.show()
# Given that the difference between highest and lowest populations spans orders of magnitudes, it may be more instructive to look at the logarithm of the population instead:
# Exclude the aggregate "World" row and zero-population entries
# (the logarithm of 0 is undefined).
pop_log = SQL_to_pandas('''
SELECT population
FROM facts
WHERE name != "World"
AND population > 0
''')
# Logarithm of the population in base 10; np.log10 is the direct
# base-10 logarithm, equivalent to np.log(x) / np.log(10).
pop_log['log(population)'] = np.log10(pop_log['population'])
pop_log.drop('population', axis=1, inplace=True)
pop_log.hist()
plt.show()
# A plurality of countries have populations between 10⁶ and 10⁸.
#
# ### Population density
#
# Let us now focus on the population density, defined as the number of inhabitants per square kilometer of land.
pop_density = SQL_to_pandas('''
SELECT name, CAST(population AS FLOAT) / area_land AS density
FROM facts
WHERE name != "World"
AND population > 0
''')
pop_density.hist()
plt.title('Population density (inh. per sq. km of land)')
plt.show()
# Most countries seem to have a density smaller than 2000 inhabitants per square kilometers, while a few have more.
# Let us see which countries have such a high density:
pop_density[pop_density['density'] > 2000].sort_values('density', ascending = False).style.hide_index()
# Six regions have such a high population density.
# The region with highest density is Macau (which is not a country per se), followed by Monaco and Singapore.
#
# Let us now look at the regions with smallest densities:
pop_density.sort_values('density')[:5].style.hide_index()
# Greenland, the Falkland Islands, and Svalbard have a density inferior to 1 inhabitant per square kilometer.
#
# As we did for the population, let us plot a histogram of the logarithm of the density:
# Population density (inhabitants per square kilometer of land),
# excluding the "World" aggregate and zero-population entries.
pop_density_log = SQL_to_pandas('''
SELECT name, CAST(population AS FLOAT) / area_land AS density
FROM facts
WHERE name != "World"
AND population > 0
''')
# Base-10 logarithm via np.log10 (equivalent to np.log(x) / np.log(10)).
pop_density_log['density_log'] = np.log10(pop_density_log['density'])
pop_density_log.drop('density', axis=1, inplace=True)
pop_density_log.hist()
plt.title('log(Population density (inh. per sq. km))')
plt.show()
# ### Water and land
#
# Finally, let us focus on the ratio of water to land:
wat_to_lan = SQL_to_pandas('''
SELECT name, CAST(area_water AS FLOAT) / area_land AS water_to_land
FROM facts
WHERE name != "World"
''')
wat_to_lan.hist()
plt.title('Ratio of water to land')
plt.show()
# There is one outlier with more than 800 times more water than land.
# Let us see which country it is:
wat_to_lan.sort_values('water_to_land', ascending=False)[:1].style.hide_index()
# The British Indian Ocean Territory is made mostly of water.
#
# Let us redraw the histogram without this outlier:
wat_to_lan_2 = SQL_to_pandas('''
SELECT name, CAST(area_water AS FLOAT) / area_land AS water_to_land
FROM facts
WHERE name != "World"
AND water_to_land < 900
''')
wat_to_lan_2.hist()
plt.title('Ratio of water to land')
plt.show()
# There is another outlier.
wat_to_lan_2.sort_values('water_to_land', ascending=False)[:1].style.hide_index()
# This country is the Virgin Islands, with more than 4 times as much water as lands.
#
# Let us redraw the histogram without this second outlier:
wat_to_lan_3 = SQL_to_pandas('''
SELECT name, CAST(area_water AS FLOAT) / area_land AS water_to_land
FROM facts
WHERE name != "World"
AND water_to_land < 4
''')
wat_to_lan_3.hist()
plt.title('Ratio of water to land')
plt.show()
# Most countries have more than 10 times as much land as they have water.
# It seems the British Indian Ocean Territory and Virgin Islands are the only countries with more water than land.
# Let us check this:
SQL_to_pandas('''
SELECT name, CAST(area_water AS FLOAT) / area_land AS water_to_land
FROM facts
WHERE water_to_land >= 1
''')
# They are indeed the only two.
#
# Let us now see how many countries have no water area at all:
SQL_to_pandas('''
SELECT COUNT(*)
FROM facts
WHERE area_water = 0
''')
# There are 90 countries with no water area at all.
#
# ## Close the connection
#
# Finally, let us close the connection to the database:
conn.close()
# ## Conclusion
#
# In this project, we have seen that the Pandas and sqlite3 modules can be useful tools to explore a small SQL database.
# We have also noticed that the CIA Factbook contains information on regions of the world which are not countries per se.
# This should be taken into account if doing a more precise statistical analysis.
| Projects_Jupyter/CIA_Factbook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Predicting House Prices on Kaggle
# :label:`sec_kaggle_house`
#
# Now that we have introduced some basic tools
# for building and training deep networks
# and regularizing them with techniques including
# weight decay and dropout,
# we are ready to put all this knowledge into practice
# by participating in a Kaggle competition.
# The house price prediction competition
# is a great place to start.
# The data are fairly generic and do not exhibit exotic structure
# that might require specialized models (as audio or video might).
# This dataset, collected by <NAME> in 2011 :cite:`De-Cock.2011`,
# covers house prices in Ames, IA from the period of 2006--2010.
# It is considerably larger than the famous [Boston housing dataset](https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.names) of Harrison and Rubinfeld (1978),
# boasting both more examples and more features.
#
#
# In this section, we will walk you through details of
# data preprocessing, model design, and hyperparameter selection.
# We hope that through a hands-on approach,
# you will gain some intuitions that will guide you
# in your career as a data scientist.
#
#
# ## Downloading and Caching Datasets
#
# Throughout the book, we will train and test models
# on various downloaded datasets.
# Here, we (**implement several utility functions
# to facilitate data downloading**).
# First, we maintain a dictionary `DATA_HUB`
# that maps a string (the *name* of the dataset)
# to a tuple containing both the URL to locate the dataset
# and the SHA-1 key that verifies the integrity of the file.
# All such datasets are hosted at the site
# whose address is `DATA_URL`.
#
# + origin_pos=1 tab=["tensorflow"]
import hashlib
import os
import tarfile
import zipfile
import requests
#@save
DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
# + [markdown] origin_pos=2
# The following `download` function downloads a dataset,
# caches it in a local directory (`../data` by default),
# and returns the name of the downloaded file.
# If a file corresponding to this dataset
# already exists in the cache directory
# and its SHA-1 matches the one stored in `DATA_HUB`,
# our code will use the cached file to avoid
# clogging up your internet with redundant downloads.
#
# + origin_pos=3 tab=["tensorflow"]
def download(name, cache_dir=os.path.join('..', 'data')):  #@save
    """Download a file inserted into DATA_HUB, return the local filename.

    If a cached copy already exists and its SHA-1 digest matches the one
    registered in DATA_HUB, the cached file is returned without any
    network traffic.
    """
    assert name in DATA_HUB, f"{name} does not exist in {DATA_HUB}."
    url, sha1_hash = DATA_HUB[name]
    os.makedirs(cache_dir, exist_ok=True)
    fname = os.path.join(cache_dir, url.split('/')[-1])
    if os.path.exists(fname):
        # Hash the cached file in 1 MiB chunks to keep memory bounded.
        hasher = hashlib.sha1()
        with open(fname, 'rb') as f:
            for chunk in iter(lambda: f.read(1048576), b''):
                hasher.update(chunk)
        if hasher.hexdigest() == sha1_hash:
            return fname  # Hit cache
    print(f'Downloading {fname} from {url}...')
    r = requests.get(url, stream=True, verify=True)
    with open(fname, 'wb') as f:
        f.write(r.content)
    return fname
# + [markdown] origin_pos=4
# We also implement two additional utility functions:
# one is to download and extract a zip or tar file
# and the other to download all the datasets used in this book from `DATA_HUB` into the cache directory.
#
# + origin_pos=5 tab=["tensorflow"]
def download_extract(name, folder=None):  #@save
    """Download and extract a zip/tar file.

    Returns the directory the archive was extracted into: `folder`
    joined to the cache directory when given, otherwise the archive
    path without its extension.
    """
    fname = download(name)
    base_dir = os.path.dirname(fname)
    data_dir, ext = os.path.splitext(fname)
    if ext == '.zip':
        fp = zipfile.ZipFile(fname, 'r')
    elif ext in ('.tar', '.gz'):
        fp = tarfile.open(fname, 'r')
    else:
        assert False, 'Only zip/tar files can be extracted.'
    # Use a context manager so the archive handle is closed even if
    # extraction raises (the original leaked the open file object).
    with fp:
        fp.extractall(base_dir)
    return os.path.join(base_dir, folder) if folder else data_dir
def download_all():  #@save
    """Download all files in the DATA_HUB."""
    # Fetch (or validate the cache of) every registered dataset.
    for dataset_name in DATA_HUB:
        download(dataset_name)
# + [markdown] origin_pos=6
# ## Kaggle
#
# [Kaggle](https://www.kaggle.com) is a popular platform
# that hosts machine learning competitions.
# Each competition centers on a dataset and many
# are sponsored by stakeholders who offer prizes
# to the winning solutions.
# The platform helps users to interact
# via forums and shared code,
# fostering both collaboration and competition.
# While leaderboard chasing often spirals out of control,
# with researchers focusing myopically on preprocessing steps
# rather than asking fundamental questions,
# there is also tremendous value in the objectivity of a platform
# that facilitates direct quantitative comparisons
# among competing approaches as well as code sharing
# so that everyone can learn what did and did not work.
# If you want to participate in a Kaggle competition,
# you will first need to register for an account
# (see :numref:`fig_kaggle`).
#
# 
# :width:`400px`
# :label:`fig_kaggle`
#
# On the house price prediction competition page, as illustrated
# in :numref:`fig_house_pricing`,
# you can find the dataset (under the "Data" tab),
# submit predictions, and see your ranking,
# The URL is right here:
#
# > https://www.kaggle.com/c/house-prices-advanced-regression-techniques
#
# 
# :width:`400px`
# :label:`fig_house_pricing`
#
# ## Accessing and Reading the Dataset
#
# Note that the competition data is separated
# into training and test sets.
# Each record includes the property value of the house
# and attributes such as street type, year of construction,
# roof type, basement condition, etc.
# The features consist of various data types.
# For example, the year of construction
# is represented by an integer,
# the roof type by discrete categorical assignments,
# and other features by floating point numbers.
# And here is where reality complicates things:
# for some examples, some data are altogether missing
# with the missing value marked simply as "na".
# The price of each house is included
# for the training set only
# (it is a competition after all).
# We will want to partition the training set
# to create a validation set,
# but we only get to evaluate our models on the official test set
# after uploading predictions to Kaggle.
# The "Data" tab on the competition tab
# in :numref:`fig_house_pricing`
# has links to download the data.
#
#
# To get started, we will [**read in and process the data
# using `pandas`**], which we have introduced in :numref:`sec_pandas`.
# So, you will want to make sure that you have `pandas` installed
# before proceeding further.
# Fortunately, if you are reading in Jupyter,
# we can install pandas without even leaving the notebook.
#
# + origin_pos=9 tab=["tensorflow"]
# If pandas is not installed, please uncomment the following line:
# # !pip install pandas
# %matplotlib inline
import numpy as np
import pandas as pd
import tensorflow as tf
from d2l import tensorflow as d2l
# + [markdown] origin_pos=10
# For convenience, we can download and cache
# the Kaggle housing dataset
# using the script we defined above.
#
# + origin_pos=11 tab=["tensorflow"]
DATA_HUB['kaggle_house_train'] = ( #@save
DATA_URL + 'kaggle_house_pred_train.csv',
'585e9cc93e70b39160e7921475f9bcd7d31219ce')
DATA_HUB['kaggle_house_test'] = ( #@save
DATA_URL + 'kaggle_house_pred_test.csv',
'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
# + [markdown] origin_pos=12
# We use `pandas` to load the two csv files containing training and test data respectively.
#
# + origin_pos=13 tab=["tensorflow"]
train_data = pd.read_csv(download('kaggle_house_train'))
test_data = pd.read_csv(download('kaggle_house_test'))
# + [markdown] origin_pos=14
# The training dataset includes 1460 examples,
# 80 features, and 1 label, while the test data
# contains 1459 examples and 80 features.
#
# + origin_pos=15 tab=["tensorflow"]
print(train_data.shape)
print(test_data.shape)
# + [markdown] origin_pos=16
# Let us [**take a look at the first four and last two features
# as well as the label (SalePrice)**] from the first four examples.
#
# + origin_pos=17 tab=["tensorflow"]
print(train_data.iloc[0:4, [0, 1, 2, 3, -3, -2, -1]])
# + [markdown] origin_pos=18
# We can see that in each example, (**the first feature is the ID.**)
# This helps the model identify each training example.
# While this is convenient, it does not carry
# any information for prediction purposes.
# Hence, (**we remove it from the dataset**)
# before feeding the data into the model.
#
# + origin_pos=19 tab=["tensorflow"]
all_features = pd.concat((train_data.iloc[:, 1:-1], test_data.iloc[:, 1:]))
# + [markdown] origin_pos=20
# ## Data Preprocessing
#
# As stated above, we have a wide variety of data types.
# We will need to preprocess the data before we can start modeling.
# Let us start with the numerical features.
# First, we apply a heuristic,
# [**replacing all missing values
# by the corresponding feature's mean.**]
# Then, to put all features on a common scale,
# we (***standardize* the data by
# rescaling features to zero mean and unit variance**):
#
# $$x \leftarrow \frac{x - \mu}{\sigma},$$
#
# where $\mu$ and $\sigma$ denote mean and standard deviation, respectively.
# To verify that this indeed transforms
# our feature (variable) such that it has zero mean and unit variance,
# note that $E[\frac{x-\mu}{\sigma}] = \frac{\mu - \mu}{\sigma} = 0$
# and that $E[(x-\mu)^2] = (\sigma^2 + \mu^2) - 2\mu^2+\mu^2 = \sigma^2$.
# Intuitively, we standardize the data
# for two reasons.
# First, it proves convenient for optimization.
# Second, because we do not know *a priori*
# which features will be relevant,
# we do not want to penalize coefficients
# assigned to one feature more than on any other.
#
# + origin_pos=21 tab=["tensorflow"]
# If test data were inaccessible, mean and standard deviation could be
# calculated from training data
numeric_features = all_features.dtypes[all_features.dtypes != 'object'].index
all_features[numeric_features] = all_features[numeric_features].apply(
lambda x: (x - x.mean()) / (x.std()))
# After standardizing the data all means vanish, hence we can set missing
# values to 0
all_features[numeric_features] = all_features[numeric_features].fillna(0)
# + [markdown] origin_pos=22
# [**Next we deal with discrete values.**]
# This includes features such as "MSZoning".
# (**We replace them by a one-hot encoding**)
# in the same way that we previously transformed
# multiclass labels into vectors (see :numref:`subsec_classification-problem`).
# For instance, "MSZoning" assumes the values "RL" and "RM".
# Dropping the "MSZoning" feature,
# two new indicator features
# "MSZoning_RL" and "MSZoning_RM" are created with values being either 0 or 1.
# According to one-hot encoding,
# if the original value of "MSZoning" is "RL",
# then "MSZoning_RL" is 1 and "MSZoning_RM" is 0.
# The `pandas` package does this automatically for us.
#
# + origin_pos=23 tab=["tensorflow"]
# `Dummy_na=True` considers "na" (missing value) as a valid feature value, and
# creates an indicator feature for it
all_features = pd.get_dummies(all_features, dummy_na=True)
all_features.shape
# + [markdown] origin_pos=24
# You can see that this conversion increases
# the number of features from 79 to 331.
# Finally, via the `values` attribute,
# we can [**extract the NumPy format from the `pandas` format
# and convert it into the tensor**]
# representation for training.
#
# + origin_pos=25 tab=["tensorflow"]
n_train = train_data.shape[0]
train_features = tf.constant(all_features[:n_train].values, dtype=tf.float32)
test_features = tf.constant(all_features[n_train:].values, dtype=tf.float32)
train_labels = tf.constant(train_data.SalePrice.values.reshape(-1, 1),
dtype=tf.float32)
# + [markdown] origin_pos=26
# ## [**Training**]
#
# To get started we train a linear model with squared loss.
# Not surprisingly, our linear model will not lead
# to a competition-winning submission
# but it provides a sanity check to see whether
# there is meaningful information in the data.
# If we cannot do better than random guessing here,
# then there might be a good chance
# that we have a data processing bug.
# And if things work, the linear model will serve as a baseline
# giving us some intuition about how close the simple model
# gets to the best reported models, giving us a sense
# of how much gain we should expect from fancier models.
#
# + origin_pos=29 tab=["tensorflow"]
loss = tf.keras.losses.MeanSquaredError()
def get_net():
    """Build the linear baseline: one Dense unit with L2 (weight decay)
    regularization on its kernel.

    NOTE(review): reads the module-level `weight_decay` variable, which
    is only assigned further down in this notebook — calling this
    function before that cell runs raises NameError.
    """
    net = tf.keras.models.Sequential()
    net.add(
        tf.keras.layers.Dense(
            1, kernel_regularizer=tf.keras.regularizers.l2(weight_decay)))
    return net
# + [markdown] origin_pos=30
# With house prices, as with stock prices,
# we care about relative quantities
# more than absolute quantities.
# Thus [**we tend to care more about
# the relative error $\frac{y - \hat{y}}{y}$**]
# than about the absolute error $y - \hat{y}$.
# For instance, if our prediction is off by USD 100,000
# when estimating the price of a house in Rural Ohio,
# where the value of a typical house is 125,000 USD,
# then we are probably doing a horrible job.
# On the other hand, if we err by this amount
# in Los Altos Hills, California,
# this might represent a stunningly accurate prediction
# (there, the median house price exceeds 4 million USD).
#
# (**One way to address this problem is to
# measure the discrepancy in the logarithm of the price estimates.**)
# In fact, this is also the official error measure
# used by the competition to evaluate the quality of submissions.
# After all, a small value $\delta$ for $|\log y - \log \hat{y}| \leq \delta$
# translates into $e^{-\delta} \leq \frac{\hat{y}}{y} \leq e^\delta$.
# This leads to the following root-mean-squared-error between the logarithm of the predicted price and the logarithm of the label price:
#
# $$\sqrt{\frac{1}{n}\sum_{i=1}^n\left(\log y_i -\log \hat{y}_i\right)^2}.$$
#
# + origin_pos=33 tab=["tensorflow"]
def log_rmse(y_true, y_pred):
    """Root-mean-squared error between log labels and log predictions.

    Predictions below 1 are clipped to 1 before the logarithm is taken,
    which keeps the value finite and numerically stable.
    """
    preds = tf.clip_by_value(y_pred, 1, float('inf'))
    mse_of_logs = loss(tf.math.log(y_true), tf.math.log(preds))
    return tf.sqrt(tf.reduce_mean(mse_of_logs))
# + [markdown] origin_pos=34
# Unlike in previous sections, [**our training functions
# will rely on the Adam optimizer
# (we will describe it in greater detail later)**].
# The main appeal of this optimizer is that,
# despite doing no better (and sometimes worse)
# given unlimited resources for hyperparameter optimization,
# people tend to find that it is significantly less sensitive
# to the initial learning rate.
#
# + origin_pos=37 tab=["tensorflow"]
def train(net, train_features, train_labels, test_features, test_labels,
          num_epochs, learning_rate, weight_decay, batch_size):
    """Train `net` in place with Adam and record per-epoch log-RMSE.

    Parameters
    ----------
    net : the Keras model to train (compiled here).
    train_features, train_labels : training tensors.
    test_features, test_labels : evaluation tensors; pass None for
        `test_labels` to skip evaluation (used for the final fit).
    num_epochs : number of full passes over the training data.
    learning_rate : Adam step size.
    weight_decay : not used in this body — regularization is baked into
        the model (see `get_net`); kept so all train functions share
        one signature.
    batch_size : minibatch size for the data iterator.

    Returns
    -------
    (train_ls, test_ls) : lists of log-RMSE values, one entry per epoch
        (test_ls stays empty when test_labels is None).
    """
    train_ls, test_ls = [], []
    train_iter = d2l.load_array((train_features, train_labels), batch_size)
    # The Adam optimization algorithm is used here
    optimizer = tf.keras.optimizers.Adam(learning_rate)
    net.compile(loss=loss, optimizer=optimizer)
    for epoch in range(num_epochs):
        for X, y in train_iter:
            # Record gradients of the minibatch loss and apply one step.
            with tf.GradientTape() as tape:
                y_hat = net(X)
                l = loss(y, y_hat)
            params = net.trainable_variables
            grads = tape.gradient(l, params)
            optimizer.apply_gradients(zip(grads, params))
        # Evaluate on the full datasets once per epoch.
        train_ls.append(log_rmse(train_labels, net(train_features)))
        if test_labels is not None:
            test_ls.append(log_rmse(test_labels, net(test_features)))
    return train_ls, test_ls
# + [markdown] origin_pos=38
# ## $K$-Fold Cross-Validation
#
# You might recall that we introduced [**$K$-fold cross-validation**]
# in the section where we discussed how to deal
# with model selection (:numref:`sec_model_selection`).
# We will put this to good use to select the model design
# and to adjust the hyperparameters.
# We first need a function that returns
# the $i^\mathrm{th}$ fold of the data
# in a $K$-fold cross-validation procedure.
# It proceeds by slicing out the $i^\mathrm{th}$ segment
# as validation data and returning the rest as training data.
# Note that this is not the most efficient way of handling data
# and we would definitely do something much smarter
# if our dataset was considerably larger.
# But this added complexity might obfuscate our code unnecessarily
# so we can safely omit it here owing to the simplicity of our problem.
#
# + origin_pos=39 tab=["tensorflow"]
def get_k_fold_data(k, i, X, y):
    """Return (X_train, y_train, X_valid, y_valid) for fold `i` of `k`.

    Splits the rows of X (and y) into k contiguous folds; fold i is
    held out for validation and the remaining folds are concatenated
    into the training set.

    NOTE(review): fold_size is floor(n / k), so when the row count is
    not divisible by k the last n % k rows are silently dropped.
    """
    assert k > 1
    fold_size = X.shape[0] // k
    X_train, y_train = None, None
    for j in range(k):
        # Row range covered by fold j.
        idx = slice(j * fold_size, (j + 1) * fold_size)
        X_part, y_part = X[idx, :], y[idx]
        if j == i:
            # This fold is the validation split.
            X_valid, y_valid = X_part, y_part
        elif X_train is None:
            # First training fold seen: initialize the accumulators.
            X_train, y_train = X_part, y_part
        else:
            # Append subsequent training folds along the row axis.
            X_train = tf.concat([X_train, X_part], 0)
            y_train = tf.concat([y_train, y_part], 0)
    return X_train, y_train, X_valid, y_valid
# + [markdown] origin_pos=40
# [**The training and verification error averages are returned**]
# when we train $K$ times in the $K$-fold cross-validation.
#
# + origin_pos=41 tab=["tensorflow"]
def k_fold(k, X_train, y_train, num_epochs, learning_rate, weight_decay,
           batch_size):
    """Run k-fold cross-validation; return average (train, valid) log-RMSE.

    Trains a fresh network per fold, prints each fold's final errors,
    and plots the learning curves of the first fold only.
    """
    train_l_sum, valid_l_sum = 0, 0
    for i in range(k):
        data = get_k_fold_data(k, i, X_train, y_train)
        net = get_net()  # fresh, untrained model for every fold
        train_ls, valid_ls = train(net, *data, num_epochs, learning_rate,
                                   weight_decay, batch_size)
        # Accumulate the final-epoch errors; averaged on return.
        train_l_sum += train_ls[-1]
        valid_l_sum += valid_ls[-1]
        if i == 0:
            d2l.plot(list(range(1, num_epochs + 1)), [train_ls, valid_ls],
                     xlabel='epoch', ylabel='rmse', xlim=[1, num_epochs],
                     legend=['train', 'valid'], yscale='log')
        print(f'fold {i + 1}, train log rmse {float(train_ls[-1]):f}, '
              f'valid log rmse {float(valid_ls[-1]):f}')
    return train_l_sum / k, valid_l_sum / k
# + [markdown] origin_pos=42
# ## [**Model Selection**]
#
# In this example, we pick an untuned set of hyperparameters
# and leave it up to the reader to improve the model.
# Finding a good choice can take time,
# depending on how many variables one optimizes over.
# With a large enough dataset,
# and the normal sorts of hyperparameters,
# $K$-fold cross-validation tends to be
# reasonably resilient against multiple testing.
# However, if we try an unreasonably large number of options
# we might just get lucky and find that our validation
# performance is no longer representative of the true error.
#
# + origin_pos=43 tab=["tensorflow"]
k, num_epochs, lr, weight_decay, batch_size = 5, 100, 5, 0, 64
train_l, valid_l = k_fold(k, train_features, train_labels, num_epochs, lr,
weight_decay, batch_size)
print(f'{k}-fold validation: avg train log rmse: {float(train_l):f}, '
f'avg valid log rmse: {float(valid_l):f}')
# + [markdown] origin_pos=44
# Notice that sometimes the number of training errors
# for a set of hyperparameters can be very low,
# even as the number of errors on $K$-fold cross-validation
# is considerably higher.
# This indicates that we are overfitting.
# Throughout training you will want to monitor both numbers.
# Less overfitting might indicate that our data can support a more powerful model.
# Massive overfitting might suggest that we can gain
# by incorporating regularization techniques.
#
# ## [**Submitting Predictions on Kaggle**]
#
# Now that we know what a good choice of hyperparameters should be,
# we might as well use all the data to train on it
# (rather than just $1-1/K$ of the data
# that are used in the cross-validation slices).
# The model that we obtain in this way
# can then be applied to the test set.
# Saving the predictions in a csv file
# will simplify uploading the results to Kaggle.
#
# + origin_pos=45 tab=["tensorflow"]
def train_and_pred(train_features, test_feature, train_labels, test_data,
                   num_epochs, lr, weight_decay, batch_size):
    """Train on the full training set and write Kaggle predictions.

    Fits a fresh network on all training data (no validation split),
    plots the training curve, predicts on the test features, and saves
    the predictions to `submission.csv` in Kaggle's (Id, SalePrice)
    format. Mutates `test_data` by adding a 'SalePrice' column.
    """
    net = get_net()
    train_ls, _ = train(net, train_features, train_labels, None, None,
                        num_epochs, lr, weight_decay, batch_size)
    d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch',
             ylabel='log rmse', xlim=[1, num_epochs], yscale='log')
    print(f'train log rmse {float(train_ls[-1]):f}')
    # Apply the network to the test set.
    # Bug fix: use the `test_feature` parameter instead of the
    # module-level `test_features` global the original body read —
    # the old code only worked because the caller happened to pass
    # the same global object.
    preds = net(test_feature).numpy()
    # Reformat it to export to Kaggle
    test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)
# + [markdown] origin_pos=46
# One nice sanity check is to see
# whether the predictions on the test set
# resemble those of the $K$-fold cross-validation process.
# If they do, it is time to upload them to Kaggle.
# The following code will generate a file called `submission.csv`.
#
# + origin_pos=47 tab=["tensorflow"]
train_and_pred(train_features, test_features, train_labels, test_data,
num_epochs, lr, weight_decay, batch_size)
# + [markdown] origin_pos=48
# Next, as demonstrated in :numref:`fig_kaggle_submit2`,
# we can submit our predictions on Kaggle
# and see how they compare with the actual house prices (labels)
# on the test set.
# The steps are quite simple:
#
# * Log in to the Kaggle website and visit the house price prediction competition page.
# * Click the “Submit Predictions” or “Late Submission” button (as of this writing, the button is located on the right).
# * Click the “Upload Submission File” button in the dashed box at the bottom of the page and select the prediction file you wish to upload.
# * Click the “Make Submission” button at the bottom of the page to view your results.
#
# 
# :width:`400px`
# :label:`fig_kaggle_submit2`
#
# ## Summary
#
# * Real data often contain a mix of different data types and need to be preprocessed.
# * Rescaling real-valued data to zero mean and unit variance is a good default. So is replacing missing values with their mean.
# * Transforming categorical features into indicator features allows us to treat them like one-hot vectors.
# * We can use $K$-fold cross-validation to select the model and adjust the hyperparameters.
# * Logarithms are useful for relative errors.
#
#
# ## Exercises
#
# 1. Submit your predictions for this section to Kaggle. How good are your predictions?
# 1. Can you improve your model by minimizing the logarithm of prices directly? What happens if you try to predict the logarithm of the price rather than the price?
# 1. Is it always a good idea to replace missing values by their mean? Hint: can you construct a situation where the values are not missing at random?
# 1. Improve the score on Kaggle by tuning the hyperparameters through $K$-fold cross-validation.
# 1. Improve the score by improving the model (e.g., layers, weight decay, and dropout).
# 1. What happens if we do not standardize the continuous numerical features like what we have done in this section?
#
# + [markdown] origin_pos=51 tab=["tensorflow"]
# [Discussions](https://discuss.d2l.ai/t/237)
#
| scripts/d21-en/tensorflow/chapter_multilayer-perceptrons/kaggle-house-price.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Workbook for studying dark matter phase space in a specific snapshot from Elena's Trojans paper.
# %matplotlib inline
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import h5py
import pylab
import struct
import matplotlib.cm as cm
import conversions as co
import pandas as pd
import sqlite3
#import tables as tbl
# #### TODO:
# * Hook up hdf5 to PyTables: save df to hdf5, figure out interfacing
# * Absolute color scaling - requires the range of all plots in one draw
# * Surface density plots should have the same x,y scale and orientations
# * Want each 2D histogram to have accompanying 1D histograms along col and row
#
# #### Aspirations:
# * Wrap the hdf5 reader such that it can read in chunks at a time (solved by PyTables)
# * ~~Or hook up to a database and use SQL~~ Hdf5 -> SQL is hilariously slow
#
# #### Notes:
# * snap hdf5 structure: f has keys 'Header', 'PartType1,2,3'
# * PartTypes have no attrs, keys = "Coordinates", "ParticleIDs", "Velocities" with no attrs
# * Coords and Vels are ndarrays len == 3
# * PT1=DM (20mil x 3), PT2=disk (8mil x 3), PT3=bulge (2.8mil x 3)
# * header has attributes with value == 0:
# * BoxSize, OmegaLambda, Redshift
# * Flag_[Cooling, DoublePrecision, Feedback, IC_Info, Metals, Sfr, StellarAge]
# * nonzero attrs:
# * HubbleParam, Omega0 == 1, Time (in years / 10^9 (????))
# * NumPart_ThisFile == NumPart_Total (array len 6), MassTable (array len 6)
# +
def read_hdf5(path, p_type=1):
    """Read one particle species from an HDF5 snapshot.

    types: 1=DM, 2=disk, 3=bulge

    Returns a (header, dataframe) pair: ``header`` is a plain dict of the
    snapshot's Header attributes, and the dataframe holds positions and
    velocities (columns x,y,z,vx,vy,vz) indexed by particle ID.
    """
    group_names = ["Header", "PartType1", "PartType2", "PartType3"]
    f = h5py.File(path, "r")
    header_attrs = f["Header"].attrs
    head = {name: header_attrs[name] for name in header_attrs}
    parts = f[group_names[p_type]]
    # Positions and velocities are concatenated column-wise into one array.
    coords_and_vels = np.concatenate(
        (parts["Coordinates"], parts["Velocities"]), axis=1)
    df = pd.DataFrame(coords_and_vels,
                      columns=["x", "y", "z", "vx", "vy", "vz"],
                      index=parts["ParticleIDs"][:])
    df.index.name = "particleID"
    f.close()
    return head, df
def df_center(df):
    """Shift the 'x', 'y', 'z' columns in place so each has zero median."""
    for axis in ('x', 'y', 'z'):
        df[axis] = df[axis] - df[axis].median()
def df_polar(df):
    """Add in-place cylindrical coordinates 'r', 'phi' and their velocities.

    Phi is the physics spherical phi, i.e. the polar theta.
    'vphi' is the angular rate dphi/dt (note the division by r**2), not the
    tangential speed.
    """
    x, y = df['x'], df['y']
    vx, vy = df['vx'], df['vy']
    df['r'] = np.sqrt(x**2 + y**2)
    df['phi'] = np.arctan2(y, x)
    df['vr'] = (x * vx + y * vy) / df['r']
    df['vphi'] = (x * vy - y * vx) / df['r'] ** 2
# -
def df_filter(df, key, low=None, high=None, f=None):
    """Return the rows of ``df`` whose ``key`` column lies in (low, high).

    Both bounds are exclusive and each is optional; if both are omitted the
    frame is returned unchanged (a warning is printed, matching the old
    behaviour).  If ``f`` is given it is applied to the column before the
    comparison.

    Parameters
    ----------
    df : pandas.DataFrame to filter.
    key : column label to filter on.
    low, high : optional exclusive lower/upper bounds.
    f : optional callable applied to df[key] before comparing.

    Returns
    -------
    pandas.DataFrame with only the rows passing the bound check(s).
    """
    if low is None and high is None:
        print("No filtering done")
        return df
    # Apply the optional transform once, then build the mask from whichever
    # bounds were supplied (the old code enumerated all four combinations,
    # with an unreachable final branch).
    values = df[key] if f is None else f(df[key])
    mask = True
    if low is not None:
        mask = values > low
    if high is not None:
        mask = mask & (values < high)
    return df[mask]
def plot_cuts(df, ztup, rtup, phitup,
              bins=[100,100], vels=None, save=False, path='analysis/'):
    """Plot one (z, r, phi) slice of the snapshot as a 2D histogram.

    NOTE: phi values from the tuple will be in degrees.
    vels is a string of comma separated velocity keys (from the dataframe)
    to be plotted, x vs y, for example, 'vphi, vr' or 'vphi, vz'.
    With vels=None a surface-density (x vs y) histogram is drawn instead.
    If save is True the figure is written as a PNG under `path`.
    """
    zlow, zhigh = ztup
    rlow, rhigh = rtup
    philow, phihigh = phitup
    # Chain the three coordinate cuts; phi bounds are converted deg -> rad
    # because df['phi'] is stored in radians (np.arctan2 output).
    dff = df_filter(df, 'z', low=zlow, high=zhigh)
    dff = df_filter(dff, 'r', low=rlow, high=rhigh)
    dff = df_filter(dff, 'phi', low=philow*np.pi/180, high=phihigh*np.pi/180)
    ti_tup = (zlow, zhigh, rlow, rhigh, philow, phihigh)
    s = "Snap cut between Z:{}-{}kpc, R:{}-{}kpc, PHI={}-{} degrees"
    title = s.format(*ti_tup)
    fig = plt.figure()
    plt.title(title)
    if vels is None: #plot positions (surface density)
        plt.xlabel('X distance (kpc)')
        plt.ylabel('Y distance (kpc)')
        plt.hist2d(dff['x'], dff['y'], bins=bins, density=True)
        pathadd = 'surfacedensity_'
        if save:
            end = "Z{}-{}_R{}-{}_PH{}-{}".format(*ti_tup)
            plt.savefig(path + pathadd + end + ".png", dpi=300)
    elif vels is not None: #plot kinematics (velocity-velocity histogram)
        assert type(vels) is str
        keys = [v.strip() for v in vels.split(',')]
        plt.xlabel('{} (km/s)'.format(keys[0]))
        plt.ylabel('{} (km/s)'.format(keys[1]))
        plt.hist2d(dff[keys[0]], dff[keys[1]], bins=bins, density=True)
        pathadd = 'kinematics_'
        if save:
            end = "Z{}-{}_R{}-{}_PH{}-{}".format(*ti_tup)
            plt.savefig(path + pathadd + keys[0] + keys[1]
                        + '_' + end + ".png",
                        dpi=300)
def multiplot_cuts(df, ztup_l, rtup_l, phitup_l,
                   bins=[100,100], vels=None, save=False, path='analysis/',
                   dim1=False):
    """Similar to previous plot_cuts but with one subplots per z slice.

    For each z slice a grid of subplots is drawn: rows are r slices and
    columns are phi slices.
    NOTE: phi values from the tuple will be in degrees.
    vels is a string of comma separated velocity keys
    (from the dataframe) to be plotted, x vs y,
    for example, 'vphi, vr' or 'vphi, vz'.
    dim1=True is intended to draw 1D histograms but is currently a stub:
    the `continue` statements below skip the plotting call entirely.
    """
    for i in range(len(ztup_l)):
        zlow, zhigh = ztup_l[i]
        # figw, figh adjust plot size according to subplot dimensions:
        figw, figh = 6.4*len(phitup_l), 4.8*len(rtup_l)
        fig, ax = plt.subplots(len(rtup_l), len(phitup_l),
                               figsize=[figw,figh], squeeze=False)
        # save_tup is used for later save file name.
        save_tup = (ztup_l[i][0], ztup_l[i][1], rtup_l[0][0], rtup_l[-1][1],
                    phitup_l[0][0], phitup_l[-1][1])
        if vels is None:
            # TODO: have one X and Y axis title label
            #fig.xlabel('X distance (kpc)')
            #fig.ylabel('Y distance (kpc)')
            pathadd = 'surfacedensity_'
        elif vels is not None:
            assert type(vels) is str
            keys = [v.strip() for v in vels.split(',')]
            # TODO: have one X and Y axis title label
            #fig.xlabel('{} (km/s)'.format(keys[0]))
            #fig.ylabel('{} (km/s)'.format(keys[1]))
            pathadd = 'kinematics_'
        for j in range(len(rtup_l)):
            rlow, rhigh = rtup_l[j]
            for k in range(len(phitup_l)):
                philow, phihigh = phitup_l[k]
                # Apply the three coordinate cuts (phi converted deg -> rad).
                dff = df_filter(df, 'z', low=zlow, high=zhigh)
                dff = df_filter(dff, 'r', low=rlow, high=rhigh)
                dff = df_filter(dff, 'phi',
                                low=philow*np.pi/180, high=phihigh*np.pi/180)
                ti_tup = (zlow, zhigh, rlow, rhigh, philow, phihigh)
                title = "Z:{}-{}kpc, R:{}-{}kpc, PHI={}-{}deg".format(*ti_tup)
                ax[j,k].set_title(title)
                if vels is None: # Plot positions
                    ax[j,k].set_xlabel('X distance (kpc)')
                    ax[j,k].set_ylabel('Y distance (kpc)')
                    if dim1:
                        continue
                        #TODO: fix
                        #ax[j,k].hist(dff['x'], dff['y'],
                        #bins=bins)
                    elif not dim1:
                        ax[j,k].hist2d(dff['x'], dff['y'],
                                       bins=bins, density=True)
                elif vels is not None: # Plot kinematics instead
                    ax[j,k].set_xlabel('{} (km/s)'.format(keys[0]))
                    ax[j,k].set_ylabel('{} (km/s)'.format(keys[1]))
                    if dim1:
                        continue
                        #TODO: fix
                        #ax[j,k].hist(dff[keys[0]],
                        #bins=bins)
                    elif not dim1:
                        # NOTE(review): this passes bins=bins[0], unlike the
                        # positions branch and plot_cuts which pass the full
                        # [nx, ny] list — confirm the asymmetry is intended.
                        ax[j,k].hist2d(dff[keys[0]], dff[keys[1]],
                                       bins=bins[0], density=True)
        #Back to outer z loop.
        fig.subplots_adjust(hspace=0.25, wspace=0.25)
        if save and vels is None:
            end = "Z{}-{}_R{}-{}_PH{}-{}".format(*save_tup)
            name = path + pathadd + end + ".png"
            print('saving file: ' + name)
            fig.savefig(name, dpi=300)
        elif save and vels is not None:
            end = "Z{}-{}_R{}-{}_PH{}-{}".format(*save_tup)
            name = path + pathadd + keys[0] + keys[1] + '_' + end + ".png"
            print('saving file: ' + name)
            fig.savefig(name, dpi=300)
def gen_plots(df, *tups,
              vels=None, path="analysis/", multi=True, plot=True,
              save=True, bins=[100,100], dim1=False):
    """Drive plot_cuts/multiplot_cuts over a grid of coordinate slices.

    Each extra positional argument is a (start, stop, step, key) tuple
    describing how to slice one coordinate ('z', 'r' or 'phi').  Coordinates
    not mentioned fall back to a single all-inclusive slice.  With
    plot=False the computed slice dictionary is returned instead of
    plotting (useful for inspection).
    """
    assert df is not None
    # Default: one slice spanning the whole range of each coordinate.
    params = {'z': [(-10, 10)], 'r': [(0, 15)], 'phi': [(-180, 180)]}
    for start, stop, step, key in tups:
        slices = []
        lo = start
        while lo < stop:
            hi = lo + step
            if hi > stop:
                # Final partial slice absorbs the remainder.
                slices.append((lo, stop))
                break
            slices.append((lo, hi))
            lo = hi
        params[key] = slices
    if not plot:
        return params
    print('starting plotting')
    if multi:
        multiplot_cuts(df, params['z'], params['r'], params['phi'],
                       vels=vels, save=save, path=path, bins=bins,
                       dim1=dim1)
    else:
        for ztup in params['z']:
            for rtup in params['r']:
                for phitup in params['phi']:
                    print('.', end='')
                    plot_cuts(df, ztup, rtup, phitup,
                              vels=vels, save=save, path=path, bins=bins)
    print('done')
    return None
# ### Testing
# Runs data reading and cleaning.
# Load the DM particles (p_type=1) from the snapshot, then centre the
# positions on the median and add cylindrical coordinates/velocities.
path = "data/snap_582.hdf5"
head, df = read_hdf5(path, p_type=1)
df_center(df)
df_polar(df)
# #### PyTables
# +
# Connect to clean data
# store = pd.HDFStore('data/cleaned.h5')
# store.append('darkmatter', df, data_columns=True) #used to save the df to disk
# -
# #### SQL
# * for some reason, my pandas build doesn't like using the "multi" flag for .to_sql()
# * was able to create the database using a separate script, possibly a conflict with jupyter?
# +
# conn = sqlite3.connect("data/particles.db")
# df.to_sql("darkmatter", conn, chunksize=1000, if_exists='replace')
# conn.close()
# -
# #### Plotting
# Generates multiplots of surface density and kinematics.
#switch back to proper vels later
# Generate kinematic multiplots for three velocity pairs over the chosen
# z/r/phi grid.
# NOTE(review): with dim1=True, multiplot_cuts currently `continue`s past
# the histogram call, so these figures come out without data — confirm
# whether dim1 should still be set here.
for vels in ['vphi, vr', 'vr, vphi', 'vz, vr']:
    gen_plots(df, (0, 6, 2, "z"), (4.5, 7.5, 1, 'r'), (0, 90, 30, 'phi'),
              save=False, path='analysis/multis/', vels=vels, dim1=True)
#gen_plots(df, (0, 6, 2, "z"), (4.5, 7.5, 1, 'r'), (60, 90, 30, 'phi'), path='analysis/multis/', save=False)
None
# +
# An example single plot call.
# plot_cuts((0,2), (5.5,6.5), (0,360), df, vels="vphi,vr")
# +
# SQL too slow.
#conn = sqlite3.connect('snap_582.db')
#df.to_sql("dark_matter", conn, if_exists="replace", index=False)
#conn.close()
# -
# ## Elena's original plotting code:
# Disabled (if False) reference implementation, kept for comparison with
# the functions above.  It measures the disk scale length Rs from the
# azimuthally averaged surface density and renders a log-density image in
# units of Rs.
if False:
    path = "data/snap_582.hdf5"
    head, df = read_hdf5(path, p_type=3) #testing with fewer particles
    df["x"] = df["x"] - df["x"].median()
    df["y"] = df["y"] - df["y"].median()
    mass = head["MassTable"][3]
    BINS_r, BINS_theta = 360,360
    BINS=512
    lengthX=15.0
    lengthY=15.0
    vx0=-5.0
    vy0=170.0
    Zmin=-4.25
    Zmax=-0.27
    rmin, rmax = 0., 15.
    dtheta=2*np.pi/BINS_theta
    dr=(rmax-rmin)/BINS_r
    # Bin-centre coordinates of the polar grid.
    thetamid=(np.arange(BINS_theta)+0.5) * dtheta - np.pi
    rmid=(np.arange(BINS_r)+0.5) * dr + rmin
    px,py = df["x"], df["y"]
    r=np.sqrt(px**2. + py**2.)
    theta=np.arctan2(py,px)
    h, x, y = np.histogram2d(r,theta,bins=[BINS_r,BINS_theta],range=[[rmin,rmax],[-np.pi,np.pi]])
    #divide by area to get surface density
    for i in range(0,BINS_r):
        h[i,:]/=rmid[i]*dr*dtheta
    #fit the axisymmetric surface density
    meanh=np.zeros(BINS_r)
    for i in range(0,BINS_r):
        meanh[i]=h[i,:].mean()
    # Exponential-disk fit: log(Sigma) is linear in r, slope = -1/Rs.
    z=np.polyfit(rmid, np.log(meanh), 1)
    Rs=-1/z[0]
    p = np.poly1d(z)
    print( "Rs = ", Rs, mass )
    #calculate residuals (relative to the azimuthal mean at each radius)
    for i in range(0,BINS_r):
        #h[i,:]=(h[i,:] - np.exp(p(rmid[i]))) / np.exp(p(rmid[i]))
        h[i,:]=(h[i,:] - h[i,:].mean()) / (h[i,:].mean())
    # NOTE(review): `normed=` was removed from numpy.histogram2d in newer
    # NumPy releases; use `density=` if this code is ever re-enabled.
    Z,x,y=np.histogram2d(px/Rs,py/Rs, range=[[-lengthX/Rs,lengthX/Rs],[-lengthY/Rs,lengthY/Rs]], bins=BINS, normed=True)
    Z=np.log10(Z)
    # Clip the colour range to the finite extremes of the log-density.
    Zmin=Z[Z>-np.inf].min()
    Zmax=Z[Z<np.inf].max()
    if ((Zmax==0.0) & (Zmin==0.0)):
        Zmin=Z[Z>-np.inf].min()
        Zmax=Z.max()
    else:
        Z[Z<Zmin]=Zmin
        Z[Z>Zmax]=Zmax
    fig = plt.figure(1, figsize=(25.0,25.0))
    #left plot
    #ax = fig.add_subplot(1,2,1,title=tname+" t="+str(round(head.time*co.UnitTime_in_Gyr*1000.0,1))+"Myr")
    ax = fig.add_subplot(1,2,1) #,title=tname+" t="+str(round(myTime*co.UnitTime_in_Gyr*1000.0,1))+"Myr")
    im=ax.imshow(Z.T, vmin=Zmin, vmax=Zmax,
                 origin='lower',interpolation='nearest',
                 extent=[-lengthX/Rs,lengthX/Rs,-lengthY/Rs,lengthY/Rs],
                 cmap=cm.get_cmap('viridis'))
    ax.set_xlabel('x/Rs', fontsize=18, fontweight='bold')
    ax.set_ylabel('y/Rs',fontsize=18, fontweight='bold')
    plt.xticks(np.arange(-round(lengthX/Rs), round(lengthX/Rs), step=2), fontsize=15, fontweight='bold')
    plt.yticks(np.arange(-round(lengthY/Rs), round(lengthY/Rs), step=2), fontsize=15, fontweight='bold')
    plt.colorbar(im, shrink=0.35)
| workbook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# importing the necessary packages
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
# Load the raw track catalogue and a previously clustered version.
tracks = pd.read_csv("tracks.csv")
# + tags=[]
tracks_w_clusters = pd.read_csv("../../final_track_clusters.csv")
# Keep only tracks above the popularity threshold.
tracks_w_clusters_over_30 = tracks_w_clusters.loc[tracks_w_clusters['popularity']>30]
# -
tracks_w_clusters_over_30.to_csv("final_track_clusters2.csv")
# Release year parsed from the first four characters of the date string.
tracks['Year'] = tracks.release_date.map(lambda date: int(date[:4]))
# One-hot encode the musical key and join it onto the frame.
key_dummies = pd.get_dummies(tracks["key"], prefix= 'key')
dummy_columns = tracks.join(key_dummies)
# +
# X = dummy_columns[dummy_columns.columns[8:]].drop(columns = ['key', 'time_signature'])
# X
# -
# Drop the dummy columns again, leaving the original feature set.
X_new = dummy_columns.drop(columns=["key_0", "key_1", "key_2", "key_3", "key_4", "key_5", "key_6", "key_7", "key_8", "key_9", "key_10", "key_11"])
# unique values
print("\nUNIQUE VALUES:")
print(dummy_columns.nunique().sort_values())
X_new
# +
# creating a correlation matrix
corr = X_new.corr()
# plotting the heatmap
f, ax = plt.subplots(figsize=(12,9))
sns.heatmap(corr, square=True, linecolor='white', vmax=1.0, annot=False,);
plt.tight_layout()
# +
# focused heatmap - Popularity
# NOTE(review): despite the cell title, this ranks correlations against
# 'danceability', not popularity — confirm which was intended.
# number of variables to be selected
k = 10
# finding the most correlated variables
cols = corr.nlargest(k, 'danceability')['danceability'].index
cm = np.corrcoef(X_new[cols].values.T)
#plotting the heatmap
f, ax = plt.subplots(figsize=(12,9))
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values, cmap="Greens")
plt.tight_layout()
# -
from sklearn.preprocessing import StandardScaler
# NOTE(review): this StandardScaler is created but never used — `scaler`
# is reassigned to a MinMaxScaler further down, which does the scaling.
scaler = StandardScaler()
# `.iloc[:]` takes a slice of X_new, not an independent deep copy.
df_cluster = X_new.iloc[:]
df_cluster.iloc[:, [0,1,2,3,4,5,6,7,8,9,]]
# creating a dataframe without the categorical features: only the first
# ten (numeric) columns go into the clustering matrix.
cluster_data = df_cluster.iloc[:, [0,1,2,3,4,5,6,7,8,9,]].values
# finding out the proper number of clusters (elbow method)
# Fit K-means for k = 1..10 on the clustering feature matrix and record
# the within-cluster sum of squares (inertia) for each k.
wcss = []
for i in range(1,11):
    kmeans = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
    # BUG FIX: this previously fit on `X`, which is never defined in this
    # notebook (the cell defining it is commented out) and raised a
    # NameError; the elbow curve is meant for the clustering features.
    kmeans.fit(cluster_data)
    wcss.append(kmeans.inertia_)
plt.plot(range(1,11), wcss)
plt.title('Elbow Method')
plt.xlabel('Number of Clusters')
plt.ylabel('WCSS')
plt.savefig('Elbow_Method.png')
plt.show()
# data preprocessing
from sklearn.preprocessing import MinMaxScaler
# Rescale every clustering feature into [0, 1].
scaler = MinMaxScaler()
cluster_data[0]
scaled = scaler.fit_transform(cluster_data)
X_new.describe()
import joblib
# Persist the fitted scaler for later reuse.
joblib.dump(scaler, "scaler.pickle")
# Simulated form submission: values arrive as strings.
request_form = dict([('danceability', '50'), ('energy', '50'), ('loudness', '50'), ('mode', '1.0'),('speechiness', '50'), ('acousticness', '50'), ('instrumentalness', '50'), ('liveness', '50'), ('valence', '50'), ('tempo', '50'), ('date_min', '1992'), ('date_max', '2021')])
request_form
model_input = []
for key, value in request_form.items():
    if key not in ('date_min', 'date_max'):
        if key in ('loudness', 'tempo'):
            # Loudness/tempo are kept at face value.
            model_input.append((float(value)))
        else:
            # All other sliders are treated as percentages -> 0-1.
            model_input.append(( float(value)/100.0))
# NOTE(review): the bare `f` below is a leftover no-op expression (it just
# echoes the earlier heatmap figure object) and can be deleted.
f
model_input = np.array(model_input)
print(model_input)
tracks_characteristics = tracks.drop(columns= ["Year"])
tracks_characteristics
# Scale the full catalogue with the fitted MinMaxScaler.
tracks_characteristics_for_scale = X_new.drop(columns= "Year")
scaled_tracks = scaler.transform(tracks_characteristics_for_scale)
# instantiating model: 500 fine-grained clusters over the scaled features.
model = KMeans(n_clusters = 500, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
y_kmeans = model.fit_predict(scaled)
model.predict(scaled_tracks)
# Attach each track's cluster id and persist the labelled catalogue.
tracks_characteristics["Cluster Number"] = model.predict(scaled_tracks)
tracks_characteristics
tracks_characteristics.to_csv("final_track_clusters.csv")
scaler.transform([model_input])
date_range = (request_form['date_min'], request_form['date_max'])
date_range
# + tags=[]
from mpl_toolkits.mplot3d import Axes3D
# visualizing clusters
fig, ax = plt.subplots(figsize=(13,11))
ax = fig.add_subplot(111, projection='3d')
# NOTE(review): `y_kmeans[:300] == c` yields a length-300 boolean mask used
# to index the full `scaled` array — NumPy requires the mask length to
# match, so this likely errors; verify the intended sample slicing.
plt.scatter(scaled[y_kmeans[:300] == 0,0], scaled[y_kmeans[:300] == 0,1], s= 50, c= 'red',label= 'Cluster 1')
plt.scatter(scaled[y_kmeans[:300] == 1,0], scaled[y_kmeans[:300] == 1,1], s= 50, c= 'blue', label= 'Cluster 2')
plt.scatter(scaled[y_kmeans[:300] == 2,0], scaled[y_kmeans[:300] == 2,1], s= 50, c= 'green', label= 'Cluster 3')
plt.scatter(scaled[y_kmeans[:300] == 3,0], scaled[y_kmeans[:300] == 3,1], s= 50, c= 'cyan', label= 'Cluster 4')
plt.scatter(scaled[y_kmeans[:300] == 4,0], scaled[y_kmeans[:300] == 4,1], s= 50, c= 'magenta', label= 'Cluster 5')
# centroids[:300]
# NOTE(review): `kmeans` here is still the last model from the elbow loop,
# not the 500-cluster `model` plotted above — confirm which centroids are
# intended.
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:,1], s= 300, c= 'yellow', label= 'Centroids')
plt.title('Clusters')
plt.legend()
plt.savefig('clusters.png')
plt.show()
# -
# predictions
y_kmeans
# +
# converting predictions into a df
kmeans = pd.DataFrame(data=y_kmeans, dtype=int)
kmeans.columns = ['k_cluster']
# predictions as a df
print(kmeans.shape)
kmeans.head()
# +
# concatenating the cluster column to the dataframe
df_cluster = pd.concat([df_cluster, kmeans], axis=1)
# checking the dataframe
print(df_cluster.shape)
df_cluster.head()
# +
# statistical distribution of the data in each column, for each cluster
#df_cluster.groupby("k_cluster").describe()
# -
# Perturb one known track's features slightly and classify it.
new_sample = (X_new.loc[586668]+.04).drop("Year")
import joblib
model.predict([new_sample])
# Persist the 500-cluster model for the web app.
joblib.dump(model, "cluster_model2.pickle")
# %store new_sample
# +
# cluster_data = df_cluster.iloc[:, [0,1,2,3,4,5,6,7,8,9,]].values
# -
# Reload the scaler/model saved by an earlier run and sanity-check a
# hand-built sample.
scaler = joblib.load("../../scaler.pickle")
model = joblib.load("../../cluster_model.pickle")
scaler.clip = False
scaler
model
sample_test = [5.00e-01, 5.00e-01, 0.00e+00, 1.00e-02, 5.00e-01, 5.00e-01, 5.00e-01, 5.00e-01,
5.00e-01, 1.75e+02]
sample_test_scaled = scaler.transform([sample_test])
model.predict(sample_test_scaled)
# Example query: popular tracks from one cluster.
tracks_characteristics.loc[(tracks_characteristics["Cluster Number"]==230)
& (tracks_characteristics["popularity"]>40)]
# NOTE(review): `purchase_data` is never defined in this notebook (appears
# pasted from another project) — this cell raises a NameError.
purchase_data.loc[(purchase_data['SN'] == 'Lisosia93')|
(purchase_data['SN']=='Idastidru52')|
(purchase_data['SN']=='Chamjask73')|
(purchase_data['SN']=='Iral74')|
(purchase_data['SN']=='Iskadarya95')]
| Python Working Files/ML/ML_Bang_Cluster.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# ## Polynomial chaos Kriging
#
# We start by defining a problem. Here we borrow the formulation from [uqlab](https://www.uqlab.com/pc-kriging-introduction).
# %%
import numpy
import chaospy
# Model problem: f(x) = x*sin(x) on Uniform(0, 15), evaluated at 10 Sobol
# sample points.
distribution = chaospy.Uniform(0, 15)
samples = distribution.sample(10, rule="sobol")
evaluations = samples*numpy.sin(samples)
evaluations.round(4)
# %% [markdown]
# The goal is to create a so called "polynomial chaos kriging" model as defined in [the paper with the same name](https://arxiv.org/abs/1502.03939). We are going to do this using the following steps:
#
# * Create a pool of orthogonal polynomials using `chaospy`.
# * Use `scikit-learn`'s least angular regression model to reduce the pool.
# * Use `gstools` to create an universal Kriging model with the orthogonal polynomials as drift terms.
#
# The result is what the paper defines as "sequential polynomial chaos kriging".
# %% [markdown]
# We start by creating a pool of orthonormal polynomials to be selected from:
# %%
from matplotlib import pyplot
# Orthonormal polynomial basis up to order 9 for the uniform weight.
expansion = chaospy.generate_expansion(9, distribution, normed=True)
t = numpy.linspace(0, 15, 200)
pyplot.rc("figure", figsize=[15, 6])
pyplot.plot(t, expansion(t).T)
pyplot.axis([0, 15, -3, 3])
pyplot.show()
# %% [markdown]
# As `chaospy` does not support least angular regression, we use the [scikit-learn](https://scikit-learn.org/) implementation.
# But still pass the job to `chaospy` to perform the fitting, as it also gives a fitted expansion:
# %%
from sklearn.linear_model import LarsCV
# Least-angle regression selects a sparse subset of the basis; chaospy does
# the fit so we also get a fitted expansion back.
lars = LarsCV(fit_intercept=False, max_iter=5)
pce, coeffs = chaospy.fit_regression(
    expansion, samples, evaluations, model=lars, retall=True)
# Keep only the basis terms LARS assigned nonzero coefficients.
expansion_ = expansion[coeffs != 0]
pce.round(2)
# %% [markdown]
# Note that the same coefficients can be created from the `lars` model directly, but that does not yield a fitted expansion:
# %%
lars = LarsCV(fit_intercept=False, max_iter=5)
lars.fit(expansion(samples).T, evaluations)
expansion_ = expansion[lars.coef_ != 0]
lars.coef_.round(4)
# %% [markdown]
# This resulted in a reduction of the number of polynomials:
# %%
print("number of expansion terms total:", len(expansion))
print("number of expansion terms included:", len(expansion_))
# %% [markdown]
# With the number of polynomials reduced, we can create our kriging model.
# In this case we use the excellent [gstools](https://geostat-framework.readthedocs.io/) library:
# %%
import gstools
# Universal Kriging with the selected PCE terms as drift functions — the
# "polynomial chaos kriging" model.
model = gstools.Gaussian(dim=1, var=1)
pck = gstools.krige.Universal(model, samples, evaluations, list(expansion_))
pck(samples)
# Kriging interpolates exactly at the training samples.
assert numpy.allclose(pck.field, evaluations)
# %% [markdown]
# For reference, we also create a more traditional universal kriging model with linear drift.
# %%
uk = gstools.krige.Universal(model, samples, evaluations, "linear")
uk(samples)
assert numpy.allclose(uk.field, evaluations)
# %% [markdown]
# Lastly we visually compare the models by plotting the mean and standard deviations against each other:
# %%
# Mean +/- one standard deviation bands for both kriging models, plus the
# plain PCE surrogate and the training samples.
pck(t)
mu, sigma = pck.field, numpy.sqrt(pck.krige_var)
pyplot.plot(t, mu, label="pck")
pyplot.fill_between(t, mu-sigma, mu+sigma, alpha=0.4)
uk(t)
mu, sigma = uk.field, numpy.sqrt(uk.krige_var)
pyplot.plot(t, mu, label="uk")
pyplot.fill_between(t, mu-sigma, mu+sigma, alpha=0.4)
pyplot.plot(t, pce(t), label="pce")
pyplot.scatter(samples, evaluations, color="k", label="samples")
pyplot.axis([0, 15, -12, 15])
pyplot.legend(loc="upper left")
pyplot.show()
| docs/user_guide/advanced_topics/polynomial_chaos_kriging.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import sys
import tensorflow as tf
import pandas as pd
import numpy as np
import os
import matplotlib
import matplotlib.pyplot as plt
import random
# %matplotlib inline
import tensorflow as tf
import tensorflow.contrib.learn as tflearn
import tensorflow.contrib.layers as tflayers
from tensorflow.contrib.learn import learn_runner
import tensorflow.contrib.metrics as metrics
import tensorflow.contrib.rnn as rnn
# https://medium.com/mlreview/a-simple-deep-learning-model-for-stock-price-prediction-using-tensorflow-30505541d877
# -
# Import data
data = pd.read_csv('data/bydaym2.csv')
data.head()
# Keep only rows for one stock.
data = data.query('stock=="삼성전자"')
print(data.head())
# query: restrict to one year of trading dates.
data_s = data.query('date>=20170403 & date<=20180403')
print(data.head())
# Drop date variable
# data_s = data_s.drop(['stock','date'], 1)
data_s = data_s.drop(['stock','date','diff','vol','f','i'], 1)
print(data_s)
plt.plot(data_s['close'])
# Dimensions of dataset
n = data_s.shape[0]
p = data_s.shape[1]
# Make data a numpy array
data_s = data_s.values
print(n)
print(p)
# Train/test split (note: the original comment said 80%, but the code
# below uses a 70/30 split).
train_start = 0
train_end = int(np.floor(0.7*n))
test_start = train_end
test_end = n
data_train = data_s[np.arange(train_start, train_end), :]
data_test = data_s[np.arange(test_start, test_end), :]
# Scale data(RELu) common
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# Fit on the training split only (no test-set leakage), then transform both.
scaler.fit(data_train)
data_train = scaler.transform(data_train)
data_test = scaler.transform(data_test)
# Build X and y: column 0 is the target, the remaining columns are the
# input features.
X_train = data_train[:, 1:]
y_train = data_train[:, 0]
X_test = data_test[:, 1:]
y_test = data_test[:, 0]
print(X_train)
print(y_train)
print(X_test)
print(y_test)
# +
# Import TensorFlow
import tensorflow as tf
# Minimal TF1 graph demo: add two scalars through placeholders.
a = tf.placeholder(dtype=tf.int8)
b = tf.placeholder(dtype=tf.int8)
# Define the addition
c = tf.add(a, b)
# Initialize the graph
graph = tf.Session()
# Run the graph
graph.run(c, feed_dict={a: 5, b: 4})
# +
# Initializers
# Variance-scaling initializer for weights, zeros for biases — set up
# before building the network variables below.
sigma = 1
weight_initializer = tf.variance_scaling_initializer(mode="fan_avg", distribution="uniform", scale=sigma)
bias_initializer = tf.zeros_initializer()
# +
# Model architecture parameters
# Four shrinking fully connected hidden layers feeding one output neuron.
n_period = 3
n_neurons_1 = 1024
n_neurons_2 = 512
n_neurons_3 = 256
n_neurons_4 = 128
# BUG FIX: the network predicts a single scalar per sample, matching the
# rank-1 target placeholder Y defined below.  This was 1024, which made
# `out` broadcast against Y inside the MSE instead of matching it, and
# produced a [1024, batch] prediction that line2.set_ydata could not use.
n_target = 1
# Layer 1: Variables for hidden weights and biases
W_hidden_1 = tf.Variable(weight_initializer([n_period, n_neurons_1]))
bias_hidden_1 = tf.Variable(bias_initializer([n_neurons_1]))
# Layer 2: Variables for hidden weights and biases
W_hidden_2 = tf.Variable(weight_initializer([n_neurons_1, n_neurons_2]))
bias_hidden_2 = tf.Variable(bias_initializer([n_neurons_2]))
# Layer 3: Variables for hidden weights and biases
W_hidden_3 = tf.Variable(weight_initializer([n_neurons_2, n_neurons_3]))
bias_hidden_3 = tf.Variable(bias_initializer([n_neurons_3]))
# Layer 4: Variables for hidden weights and biases
W_hidden_4 = tf.Variable(weight_initializer([n_neurons_3, n_neurons_4]))
bias_hidden_4 = tf.Variable(bias_initializer([n_neurons_4]))
# Output layer: Variables for output weights and biases
W_out = tf.Variable(weight_initializer([n_neurons_4, n_target]))
bias_out = tf.Variable(bias_initializer([n_target]))
# +
# t t+1
# [None, n_period] with [None] meaning that the inputs are a
# 2-dimensional matrix and the outputs are a 1-dimensional vector
# Placeholder
X = tf.placeholder(dtype=tf.float32, shape=[None, n_period])
Y = tf.placeholder(dtype=tf.float32, shape=[None])
# The None argument indicates that at this point we do not yet know the
# number of observations that flow through the graph in each batch.
# +
# Hidden layers: four fully connected ReLU layers.
hidden_1 = tf.nn.relu(tf.add(tf.matmul(X, W_hidden_1), bias_hidden_1))
hidden_2 = tf.nn.relu(tf.add(tf.matmul(hidden_1, W_hidden_2), bias_hidden_2))
hidden_3 = tf.nn.relu(tf.add(tf.matmul(hidden_2, W_hidden_3), bias_hidden_3))
hidden_4 = tf.nn.relu(tf.add(tf.matmul(hidden_3, W_hidden_4), bias_hidden_4))
# Output layer (must be transposed so its shape lines up with Y)
out = tf.transpose(tf.add(tf.matmul(hidden_4, W_out), bias_out))
# -
# Deviation between the network's predictions and the actual observed
# training targets.
# Cost function
mse = tf.reduce_mean(tf.squared_difference(out, Y))
# +
# Optimizer
opt = tf.train.AdamOptimizer().minimize(mse)
# +
# Placeholders, variables, initializers, cost functions and optimizers of
# the network are defined; the model now needs to be trained.  A sampled
# data batch of X flows through the network until it reaches the output
# layer; there TensorFlow compares the model's predictions against the
# actual observed targets Y in the current batch.
# Make Session
net = tf.Session()
# Run initializer
net.run(tf.global_variables_initializer())
# Setup interactive plot: line1 shows the true test targets, line2 is
# updated with the latest predictions during training.
plt.ion()
fig = plt.figure()
ax1 = fig.add_subplot(111)
line1, = ax1.plot(y_test)
line2, = ax1.plot(y_test*0.5)
# plt.show()
# Number of epochs and batch size
epochs = 1000
batch_size = 512
for e in range(epochs):
    # Shuffle training data
    shuffle_indices = np.random.permutation(np.arange(len(y_train)))
    X_train = X_train[shuffle_indices]
    y_train = y_train[shuffle_indices]
    # Minibatch training
    for i in range(0, len(y_train) // batch_size):
        start = i * batch_size
        batch_x = X_train[start:start + batch_size]
        batch_y = y_train[start:start + batch_size]
        # Run optimizer with batch
        net.run(opt, feed_dict={X: batch_x, Y: batch_y})
        # Show progress: snapshot test-set predictions every 5 batches.
        if np.mod(i, 5) == 0:
            # Prediction
            pred = net.run(out, feed_dict={X: X_test})
            line2.set_ydata(pred)
            plt.title('Epoch ' + str(e) + ', Batch ' + str(i))
            file_name = 'img/epoch_' + str(e) + '_batch_' + str(i) + '.jpg'
            plt.savefig(file_name)
            plt.pause(0.01)
# Print final MSE after Training
mse_final = net.run(mse, feed_dict={X: X_test, Y: y_test})
print(mse_final)
| tensorflow3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Importing the libraries and settings
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0, "D:\work\codes\Ripositories\Data Science\My_Lib\EDA")
import numpy as np
import pandas as pd
import scipy.stats as stat
import matplotlib.pyplot as plt
import EDA as ed
from matplotlib import style
from sklearn.preprocessing import StandardScaler
import seaborn as sns
from sklearn.cluster import KMeans
style.use('ggplot')
sns.set()
# ## Loading the data
# +
# Column names for the UCI soybean-large dataset.
# NOTE(review): ' leaf_shread' has a leading space — confirm it is
# intentional before using that label elsewhere.
names=['class', 'date', 'plant-stand', 'precip', 'temp', 'hail', 'crop-hist', 'area-damaged', 'severity', 'seed-tmt', 'germination', 'plant_growth', 'leaves', 'leafspots_halo', 'leafspots_marg', 'leafspot_size', ' leaf_shread', 'leaf_malf', 'leaf_mild', 'stem', 'lodging', 'stem_cankers', 'canker_lesion', 'fruiting_bodies', 'external_decay', 'mycelium', 'int_discolor', 'sclerotia', 'fruit_pods', 'fruit_spots', 'seed', 'mold_growth', 'seed_discolor', 'seed_size', 'shriveling', 'roots']
## Loading Data
dat = pd.read_csv('data/soybean-large.csv',names=names)
data = dat.copy()
# Unsupervised run: drop the supervised class label.
data = data.drop(['class'],1)
print(data)
# -
# ## Formatting missing data and converting to an integer dataframe
# +
## Replacing missing value '?' with -1
data.replace('?',0,inplace=True)
'''
## String to Integer coversion of class label
class_label_str = data['class'].unique().tolist()
#### No label missing so started from 0 by range
class_label_int = [c for c in range(len(class_label_str))]
for c in class_label_str:
data[data['class'] == c] = class_label_int[ class_label_str.index(c) ]
'''
## Converting all column to integer datatype
data = data.astype('int')
print(data)
# -
# ## Data Base Shape and column DTypes
# Print basic shape and dtype diagnostics for the cleaned frame.
print("|-------- Dataset information --------|")
shape = data.shape
print("Shape "+str(shape))
print("Data type: \n",data.dtypes)
# ## String character check
# +
def string_column_count(x):
    """Count entries of ``x`` that are not integer-like.

    An entry counts as integer-like when ``str(entry)``, after stripping
    leading minus signs, consists solely of digits; everything else (text,
    floats, bare '-') is counted.
    """
    non_integer = [c for c in x if not str(c).lstrip("-").isdigit()]
    return len(non_integer)
# Report, per column, how many entries are non-integer strings.
print("String column count:\n", data.apply( lambda x: string_column_count(x) ,axis = 0))
# -
# ## Checking Corelations
# Sorted pairwise correlations via the custom EDA helper module.
ed.correlation_sorted(data)
# ## Correlation Matrix hit map
# +
correlations = data.corr()
# plot correlation matrix as a color-coded grid with one tick per column
fig = plt.figure('Correlation Hit map')
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0,len(data.columns),1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(data.columns)
ax.set_yticklabels(data.columns)
plt.show()
## ed.scatter_matrix_graph_fit(data)
# -
# ## Checking the columns with normal distribution
# +
## Fill not available value from the skewness probability distribution and mode ,median, mean and skewness and kurtosis and chi square test
## coefficent_of_skewness(data)
# Per-column central-tendency and shape statistics for the encoded data.
mode,mode_count = stat.mode(data,axis=0)
print("Mode: "+ str( mode[0] ) + "\n")
print("Mean: \n" + str( np.mean(data,axis=0) ) + "\n" )
print("Median: "+ str( np.median(data,axis=0) ) + "\n" )
print("For normally distributed data, the skewness should be about 0. For unimodal continuous distributions, a skewness value > 0 means that there is more weight in the right tail of the distribution. The function skewtest can be used to determine if the skewness value is close enough to 0, statistically speaking.")
print("Coefficient of skewness : \n" + str( stat.skew(data, axis=0, bias=False) ) + "\n")
print("Moment coefficient kurtosis = 3 , meso kurtic & normal distribution\n> 3 , lepto kurtic\n< 3 , platy kurtic")
print("Coefficient of kurtosis : \n" + str( stat.kurtosis(data,axis=0,fisher=False,bias=False) ) + "\n")
## If False, then the calculations are corrected for statistical bias.
# ?? Pearson Chi square test for data comparing to statistical distribution fit
# +
## Optimization: experimenting with different K values with their model costs
k_s = []
costs = []
nLabels = []
# BUG FIX: snapshot the feature matrix.  Previously `X = data` aliased the
# frame, and since each iteration appends a new `k*_label` column to
# `data`, later K-means fits silently included the earlier predicted
# labels as input features.
X = data.copy()
for k in range(1,60): ## experiment with n
    if True: ## Dont use Odd logic - if it is not continuous, we will not able to produce the real result
        ## Initializing model with a fixed random seed
        clusters = KMeans(n_clusters=k, random_state = 1)
        clusters.fit(X)
        ## Getting predicted Labels
        predictedLabelY = clusters.labels_
        ## Getting Model cost/inertia/sum of squared distance of data points from centroid
        cost = clusters.inertia_
        ## Genarating col name of K value for predicted labels
        col_name = 'k'+str(k)+'_label'
        ## Saving predicting labels
        data[col_name] = predictedLabelY
        ## Saving k value in every session
        k_s.append(k)
        ## Saving Number of labels for specific K value
        nLabels.append(data[col_name].nunique())
        ## Saving Cost or inertia for specific K value of clustering model
        costs.append(cost)
k_ticks = ["k"+str(k) for k in k_s]
#ind = np.arange(len(range(2,15)))
## All possibilities with value of K
## shifting indexes to 1 row down
## data.index += 1
## Saving the labeled Result
data.to_csv('unsupervised_label.csv')
## Plotting the k vs Number of labels to understand the cluster
plt.figure("k vs Number of labels")
plt.plot(k_s,nLabels, marker = 'x')
plt.title("k vs label numbers")
plt.xlabel('K')
plt.ylabel('Number of labels')
plt.savefig("k_vs_Number_of_labels.png")
## Plot of Optimization starts
plt.figure("k vs Model Cost and k vs Change rate in Model Cost")
## Plotting the k vs Model cost
#plt.figure("k vs Model Cost(sum of distance from centroid)")
plt.subplot(3,1,1)
plt.plot(k_s,costs, marker = 'x')
plt.title("Title:k vs Model Cost(sum of distance from centroid)")
plt.xlabel('k')
plt.ylabel('Model Cost')
## d/dk(costs) = slope of Costs with respect to K = rate of change of cost
## M = slope_list_curve(k_s,costs)
from numpy import diff
print(len(costs),len(k_s))
M = diff(costs)/diff(k_s)
# diff shortens the series by one, so drop the first k to stay aligned.
k_s=k_s[1:]
M1 = np.absolute(M - np.median(M))
## Visualizing optimized K value (first derivative of the cost curve)
plt.subplot(3,1,2)
#plt.figure("k vs d/dk(Cost)")
plt.plot(k_s,M, marker = 'x')
plt.title("Title:k vs Change_rate(Cost)")
plt.xlabel('k')
plt.ylabel('Change in Cost(2)')
# Second derivative of the cost curve.
M = diff(M)/diff(k_s)
k_s=k_s[1:]
M2 = np.absolute(M - np.median(M))
## Visualizing optimized K value
plt.subplot(3,1,3)
#plt.figure("k vs d/dk(Cost)")
plt.plot(k_s,M, marker = 'x')
plt.title("Title:k vs Change_rate(Cost)2")
plt.xlabel('k')
plt.ylabel('Change in Cost')
plt.tight_layout()
plt.savefig("kcost_ddk_costs.png")
plt.show()
## Plot of Optimization ends
M= M.tolist()
best_k_index = M.index(min(M))
best_k = k_s[best_k_index]
best_cluster_number = nLabels[best_k_index]
print(best_cluster_number)
M1 = M1.tolist()
M2 = M2.tolist()
print( nLabels[M2.index(min(M2))] - nLabels[M1.index(min(M1))])
'''
clf = KMeans(n_clusters=best_cluster_number)
clf.fit(X)
## For Kaggle
print(clf.score(X,data.ix[:,0]))
'''
# -
# 
# +
# As we can see:
## In the first graph, the number of labels always increases as K increases, and the two are always equal.
## In the second figure, we plotted K vs model cost, K vs d/dK(model cost), and K vs the second derivative of the model cost.
## Since there is no direct way to pick the best value of K, we have to pick it visually.
## We can see that when K = 20, the Y-axis values of all three graphs almost stop changing.
## We also estimated a natural K value of sqrt(sample_number) = sqrt(307), which is about 18.
## We can therefore estimate that the cluster count should lie between 18 and 20 - roughly 19.
## To confirm, we forecast the number of labels in MS Excel using the labels for K = 2 through K = 29, which also gives 19.
| .ipynb_checkpoints/soyabin_data_EDA-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.4
# language: julia
# name: julia-0.6
# ---
# +
# based on https://github.com/chkwon/TrafficAssignment.jl
include("Julia_files/load_network_uni_class.jl")
using Graphs
# Build a directed incidence-list graph (Graphs.jl) from parallel arrays of
# arc tail/head node ids. The node count is inferred from the largest id seen.
function create_graph(start_node, end_node)
    @assert Base.length(start_node) == Base.length(end_node)
    num_arcs = Base.length(start_node)
    num_nodes = max(maximum(start_node), maximum(end_node))
    g = simple_inclist(num_nodes)
    for arc = 1:num_arcs
        add_edge!(g, start_node[arc], end_node[arc])
    end
    return g
end
# Trace the shortest path from `origin` to `destination` out of a Dijkstra
# result and return a 0/1 arc-incidence vector over all links.
# `state` comes from dijkstra_shortest_paths (provides .parents);
# `link_dic` is a sparse (tail, head) -> link-index matrix, so
# maximum(link_dic) equals the total number of links.
function get_vector(state, origin, destination, link_dic)
    current = destination
    parent = -1  # sentinel so the backtracking loop runs at least once
    x = zeros(Int, maximum(link_dic))
    # Walk parent pointers from the destination back towards the origin,
    # marking each traversed link.
    # NOTE(review): if `destination` is unreachable from `origin`, the parent
    # chain never reaches `origin` and this loop will not terminate - confirm
    # that all demand pairs are connected in the network.
    while parent != origin
        parent = state.parents[current]
        link_idx = link_dic[parent,current]
        if link_idx != 0
            x[link_idx] = 1
        end
        current = parent
    end
    return x
end
# +
# Solve uni-class traffic assignment by MSA (Method of Successive Averages)
# for every (degree, c, lambda) combination of fitted cost-polynomial
# coefficients, and dump the resulting link-flow vectors to a JSON file.
# NOTE(review): relies on load_ta_network_, create_graph, get_vector and the
# JSON package being loaded earlier in the file; uses Julia 0.6 APIs
# (readstring, implicit sparse/norm from Base) - will not run on Julia >= 1.0.
function salo(out_dir, files_ID, month_w, instance, deg_grid, c_grid, lamb_grid)
    instance1 = instance
    ta_data = load_ta_network_(out_dir, files_ID, month_w, instance1)
    # unpacking data from ta_data
    network_name = ta_data.network_name
    number_of_zones = ta_data.number_of_zones
    number_of_nodes = ta_data.number_of_nodes
    first_thru_node = ta_data.first_thru_node
    number_of_links = ta_data.number_of_links
    start_node = ta_data.start_node
    end_node = ta_data.end_node
    capacity = ta_data.capacity
    link_length = ta_data.link_length
    free_flow_time = ta_data.free_flow_time
    B = ta_data.B
    power = ta_data.power
    speed_limit = ta_data.speed_limit
    toll = ta_data.toll
    link_type = ta_data.link_type
    number_of_zones = ta_data.number_of_zones
    total_od_flow = ta_data.total_od_flow
    travel_demand = ta_data.travel_demand
    od_pairs = ta_data.od_pairs
    toll_factor = ta_data.toll_factor
    distance_factor = ta_data.distance_factor
    best_objective = ta_data.best_objective
    # preparing a graph plus a (tail, head) -> link-index lookup
    graph = create_graph(start_node, end_node)
    link_dic = sparse(start_node, end_node, 1:number_of_links);
    # MSA fixed-point iteration for one vector of polynomial cost coefficients;
    # returns the equilibrium link-flow vector.
    function MSA(coeffs)
        # Evaluate the cost polynomial at pt (coeffs ordered low -> high degree).
        polyEval(coeffs, pt) = sum([coeffs[i] * pt^(i-1) for i = 1:length(coeffs)])
        # BPR-style link travel time: free-flow time scaled by the polynomial
        # of the volume/capacity ratio.
        function BPR(x)
            bpr = similar(x)
            for i=1:length(bpr)
                bpr[i] = free_flow_time[i] * polyEval( coeffs, (x[i]/capacity[i]) )
            end
            return bpr
        end
        # All-or-nothing assignment: route every OD demand entirely along its
        # current shortest path.
        function all_or_nothing(travel_time)
            state = []
            path = []
            x = zeros(size(start_node))
            for r=1:size(travel_demand)[1]
                # for each origin node r, find shortest paths to all destination nodes
                state = dijkstra_shortest_paths(graph, travel_time, r)
                for s=1:size(travel_demand)[2]
                    # for each destination node s, find the shortest-path vector
                    # and load the travel demand onto it
                    x = x + travel_demand[r,s] * get_vector(state, r, s, link_dic)
                end
            end
            return x
        end
        # Finding a starting feasible solution (all-or-nothing at free flow)
        travel_time = BPR(zeros(number_of_links))
        xl = all_or_nothing(travel_time)
        max_iter_no = 1e3
        l = 1
        #average_excess_cost = 1
        tol = 1e-5
        while l < max_iter_no
            l += 1
            xl_old = xl
            # Finding yl: auxiliary all-or-nothing flows at the current times
            travel_time = BPR(xl)
            yl = all_or_nothing(travel_time)
            # MSA step: move towards yl with diminishing step size 1/l
            xl = xl + (yl - xl)/l
            xl_new = xl
            # Stop when the relative L1 change between iterates is small enough
            relative_gap = norm(xl_new - xl_old, 1) / norm(xl_new, 1)
            if relative_gap < tol
                break
            end
        end
        return xl
    end
    # Flows_converge files (kept disabled from the original notebook)
    #outfile = open(out_dir * files_ID *"/flows_converge_" * month_w * "_" * instance1 *".txt", "w")
    #write(outfile, join(("From", "to", "Volume_Capacity"), " "), "\n")
    #for i = 1:length(ta_data.start_node)
    #    n1, n2, n3 = ta_data.start_node[i], ta_data.end_node[i], di[i]
    #    write(outfile, join((n1, n2, n3), " "), "\n")
    #end
    #close(outfile)
    coeffs_dict_Apr_AM = 0
    # getting the fitted cost-polynomial coefficients produced upstream
    coeffs_dict_Apr_AM = readstring(out_dir * "coeffs_dict_" * month_w * "_" * instance1 * ".json")
    coeffs_dict_Apr_AM = JSON.parse(coeffs_dict_Apr_AM)
    #xl = Dict(a::Any[] ,b::Array{Float64, 1})
    #xl = Dict{Any,Any}
    #xl = Dict{Any,Any}()
    # di maps (degree, c, lambda, replicate) -> equilibrium link flows
    di = Dict()
    #deg_grid = 4:7
    ##c_grid = .5:.5:3.
    #lamb_grid = 10. .^(-3:4)
    lenDeg = length(deg_grid)
    cnt = 0
    for deg in deg_grid
        for c in c_grid
            for lam in lamb_grid
                # keys in the JSON are stringified tuples "(deg, c, lam, rep)"
                # print("($(deg),$(c),$(lam),1)")
                coeffs_1 = coeffs_dict_Apr_AM["($(deg), $(c), $(lam), 1)"]
                coeffs_2 = coeffs_dict_Apr_AM["($(deg), $(c), $(lam), 2)"]
                coeffs_3 = coeffs_dict_Apr_AM["($(deg), $(c), $(lam), 3)"]
                ala = "($(deg), $(c), $(lam), $(1))"
                apa = coeffs_3
                #println(string(ala, apa))
                di[(deg, c, lam, 1)] = MSA(coeffs_1)
                di[(deg, c, lam, 2)] = MSA(coeffs_2)
                di[(deg, c, lam, 3)] = MSA(coeffs_3)
            end
        end
        cnt = cnt + 1
        println("processed $(cnt) out of $(lenDeg)")
    end
    # Persist all equilibrium flows for this instance as JSON
    outfile = 0
    outfile = open(out_dir * "uni-class_traffic_assignment_MSA_flows_" * month_w * "_" * instance1 * ".json", "w")
    JSON.print(outfile, di)
    close(outfile)
end
# +
# Import run parameters from the Python module parameters_julia via PyCall.
# NOTE(review): Julia 0.6-era APIs (unshift!, @pyimport) - this will not run
# on Julia >= 1.0 without porting.
using PyCall
# Prepend the working directory to Python's sys.path so the local module is found
unshift!(PyVector(pyimport("sys")["path"]), "");
@pyimport parameters_julia
out_dir = parameters_julia.out_dir
files_ID = parameters_julia.files_ID
month_w = parameters_julia.month_w
year = parameters_julia.year
instances_1 = parameters_julia.instances_ID
deg_grid = parameters_julia.deg_grid
c_grid = parameters_julia.c_grid
lamb_grid = parameters_julia.lamb_grid
# -
# Run the assignment for every configured instance
for ins in instances_1
    salo(out_dir, files_ID, month_w, ins, deg_grid, c_grid, lamb_grid) #idx in length(instances_1)
end
# ...and once more explicitly for "PM".
# NOTE(review): if instances_ID already contains "PM", this recomputes and
# overwrites the same output file - confirm this extra call is intended.
salo(out_dir, files_ID, month_w, "PM", deg_grid, c_grid, lamb_grid) #idx in length(instances_1)
| IPYNB/02. uni-class_traffic_assignment_MSA_function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to scikit-Learn (sklearn)
#
# This notebook demonstrates some of the most useful functions of the Scikit-Learn library
#
# Topics:
# 0. An end to end Scikit-Learn workflow
# 1. Getting the data ready
# 2. Choose the right estimator/algorithm for our problems
# 3. Fit the model/algorithm for our problems
# 4. Evaluating a model
# 5. Improve a model
# 6. Save and load a trained model
# 7. Putting it all together
# ## 0. An end-to-end Scikit-Learn Workflow
# +
# 1. Get the data ready
import pandas as pd
import numpy as np
heart_disease=pd.read_csv("heart-disease.csv")
heart_disease
# +
# Create x (feature matrix)
x= heart_disease.drop("target", axis=1)
# Create y (labels)
y=heart_disease["target"]
# +
# 2. Choose the right model and hyperparameters
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
# Using the default hyperparameters
clf.get_params()
# +
# 3. Fit the model to the training data
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.2)
# -
clf.fit(x_train, y_train)
# make a prediction
# NOTE(review): this call raises ValueError - predict() expects a 2-D
# (n_samples, n_features) array with as many features as the training data,
# not a 1-D 4-element array. Presumably kept to demonstrate the error.
y_label = clf.predict(np.array([0, 2, 3, 4]))
y_preds = clf.predict(x_test)
y_preds
y_test
# 4. Evaluate the model (.score() is mean accuracy for a classifier)
clf.score(x_train, y_train)
clf.score(x_test, y_test)
# +
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
print(classification_report(y_test, y_preds))
# -
confusion_matrix(y_test, y_preds)
accuracy_score(y_test, y_preds)
# 5. Improve a model
# Try different amounts of n_estimators
np.random.seed(42)
for i in range(10, 100, 10):
    print(f"Trying model with {i} estimators")
    clf=RandomForestClassifier(n_estimators=i).fit(x_train,y_train)
    print(f"Model accuracy on test set: {clf.score(x_test, y_test)*100:.2f}%")
    print("")
# +
# 6. Save a model and load it
# NOTE(review): the open() handles passed to pickle are never closed -
# a `with` block would be safer.
import pickle
pickle.dump(clf, open("random_forest_model_1.pkl","wb"))
# -
loaded_model = pickle.load(open("random_forest_model_1.pkl","rb"))
loaded_model.score(x_test, y_test)
# ## 1. Getting our data ready to be used with machine learning
#
# Three main things we do:
# 1. Split the data into features and labels (usually 'x' and 'y')
# 2. Filling (also called imputing) or disregarding missing values
# 3. Converting non-numerical values to numerical values (also called feature encoding)
heart_disease.head()
x = heart_disease.drop("target", axis=1)
x.head()
y = heart_disease["target"]
y.head()
# Split the data into training and test sets
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.2)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
# ### 1.1 Make sure all data is numerical
car_sales = pd.read_csv("car-sales-extended.csv")
car_sales
car_sales.dtypes
# +
# Split the data into x and y
x = car_sales.drop("Price", axis=1)
y=car_sales["Price"]
# Split the data into training and test sets
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.2)
# +
# NOTE(review): x still holds string columns (Make, Colour) here, so this fit
# raises ValueError - presumably kept to motivate the encoding step below.
from sklearn.ensemble import RandomForestRegressor
model=RandomForestRegressor()
model.fit(x_train, y_train)
model.score(x_test, y_test)
# +
# Earlier experiments for cleaning the Price column, kept for reference:
#car_sales["Price"]=car_sales["Price"].str.replace('\D', '', regex=True).astype(int)
#car_sales["Price"]=car_sales["Price"].astype(str).str[:-2].astype(np.int64)
# -
y=car_sales["Price"]
# +
# Turn categories into numbers
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
categorical_features = ["Make", "Colour", "Doors"]
one_hot = OneHotEncoder()
transformer = ColumnTransformer([("one_hot",
                                  one_hot,
                                  categorical_features)],
                                remainder="passthrough")
transformed_x= transformer.fit_transform(x)
transformed_x
# -
pd.DataFrame(transformed_x)
# pandas' own one-hot encoding, shown for comparison with the sklearn route
dummies = pd.get_dummies(car_sales[["Make", "Colour", "Doors"]])
dummies
np.random.seed(42)
x_train,x_test,y_train, y_test = train_test_split(transformed_x,
                                                  y,
                                                  test_size=0.2)
model.fit(x_train, y_train)
#x_train
#y_train
model.score(x_test, y_test)
y
# ### 1.3 What if there were missing values?
#
# 1. Fill them with some value (also known as imputation)
# 2. Remove the samples with missing data altogether
# Import car sales missing data
car_sales_missing=pd.read_csv("car-sales-extended-missing-data.csv")
car_sales_missing.head()
car_sales_missing.isna().sum()
# Create x and y
x = car_sales_missing.drop("Price", axis=1)
y = car_sales_missing["Price"]
# +
# Turn categories into numbers
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
categorical_features = ["Make", "Colour", "Doors"]
one_hot = OneHotEncoder()
transformer = ColumnTransformer([("one_hot",
                                  one_hot,
                                  categorical_features)],
                                remainder="passthrough")
# NOTE(review): x still contains NaNs at this point; OneHotEncoder rejects NaN
# in newer scikit-learn versions - presumably this cell demonstrates that.
transformed_x= transformer.fit_transform(x)
transformed_x
# -
# ### option 1: Fill missing data with Pandas
# +
# Filling the "Make" column
car_sales_missing["Make"].fillna("missing", inplace=True)
# Filling the "Colour" column
car_sales_missing["Colour"].fillna("missing", inplace=True)
# Fill the "Odometer (KM)" column with the column mean
car_sales_missing["Odometer (KM)"].fillna(car_sales_missing["Odometer (KM)"].mean(), inplace=True)
# Filling the "Doors" column with 4
car_sales_missing["Doors"].fillna(4, inplace=True)
# -
car_sales_missing.isna().sum()
# Remove rows with missing Price value (the label cannot be imputed)
car_sales_missing.dropna(inplace=True)
car_sales_missing.isna().sum()
# Create x and y
x = car_sales_missing.drop("Price", axis=1)
y = car_sales_missing["Price"]
# +
# Turn categories into numbers
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
categorical_features = ["Make", "Colour", "Doors"]
one_hot = OneHotEncoder()
transformer = ColumnTransformer([("one_hot",
                                  one_hot,
                                  categorical_features)],
                                remainder="passthrough")
# BUG FIX: the original encoded the whole `car_sales_missing` frame, which
# passes the Price target through into the feature matrix. Every sibling cell
# encodes the feature frame `x`, so do the same here.
transformed_x= transformer.fit_transform(x)
transformed_x
# -
# ### Option 2: Fill Missing values with scikit-learn
# Reload the raw data so the sklearn imputation starts from scratch
car_sales_missing = pd.read_csv("car-sales-extended-missing-data.csv")
car_sales_missing.head()
car_sales_missing.isna().sum()
# Drop the rows with no labels (Price cannot be imputed)
car_sales_missing.dropna(subset=["Price"],inplace=True)
car_sales_missing.isna().sum()
# Split into x and y
x = car_sales_missing.drop("Price", axis=1)
y = car_sales_missing["Price"]
x.isna().sum()
# +
# Fill missing values with scikit-learn
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
# Fill categorical values with 'missing' and numerical values with the mean
cat_imputer = SimpleImputer(strategy="constant", fill_value="missing")
door_imputer = SimpleImputer(strategy="constant", fill_value=4)
num_imputer = SimpleImputer(strategy="mean")
# Define columns
cat_features = ["Make","Colour"]
door_features = ["Doors"]
num_features = ["Odometer (KM)"]
# Create an imputer (something that fills missing data)
imputer = ColumnTransformer([
    ("cat_imputer", cat_imputer, cat_features),
    ("door_imputer", door_imputer, door_features),
    ("num_imputer", num_imputer, num_features)
])
# Transform the data
filled_x = imputer.fit_transform(x)
filled_x
# -
# ColumnTransformer output column order follows the transformer order above
car_sales_filled = pd.DataFrame(filled_x, columns=["Make","Colour","Doors","Odometer (KM)"])
car_sales_filled.head()
car_sales_filled.isna().sum()
# +
# Turn categories into numbers
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
categorical_features = ["Make", "Colour", "Doors"]
one_hot = OneHotEncoder()
transformer = ColumnTransformer([("one_hot",
                                  one_hot,
                                  categorical_features)],
                                remainder="passthrough")
# car_sales_filled holds only features (Price was dropped into y above)
transformed_x= transformer.fit_transform(car_sales_filled)
transformed_x
# +
# Now we have got our data as numbers and filled (no missing data)
# Let's fit a model
np.random.seed(42)
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(transformed_x,y,test_size=0.2)
model=RandomForestRegressor()
model.fit(x_train,y_train)
model.score(x_test,y_test)
# -
# ## 2. Choosing the right algorithm/estimator for our problem
#
# Scikit-learn uses estimator as another term for machine learning model or algorithm
#
# * Classification - predicting whether a sample is one thing or another
# * Regression - predicting a number
# ### 2.1 Picking machine learning model for a regression problem
# Import Boston housing dataset
# NOTE(review): load_boston is deprecated and removed in scikit-learn >= 1.2;
# this cell only runs on older sklearn versions.
from sklearn.datasets import load_boston
boston = load_boston()
boston
boston_df = pd.DataFrame(boston["data"], columns=boston["feature_names"])
boston_df["target"] = pd.Series(boston["target"])
boston_df.head()
# +
# Let's try ridge regression model
from sklearn.linear_model import Ridge
np.random.seed(42)
# Create the data
x = boston_df.drop("target", axis=1)
y = boston_df["target"]
# Split into train and test sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# Instantiate and fit the Ridge model
# BUG FIX: the original instantiated and fitted the model twice in a row;
# the duplicated instantiate/fit block did identical work and was removed.
model = Ridge()
model.fit(x_train, y_train)
# Check the score of the Ridge model on test data (R^2 for a regressor)
model.score(x_test, y_test)
# -
# ### To improve the score, let's look at other estimators
# +
# Let's try Random forest regressor
from sklearn.ensemble import RandomForestRegressor
# Setup random seed
np.random.seed(42)
# Create the data
x = boston_df.drop("target", axis=1)
y = boston_df["target"]
# Split the data
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# Instantiate Random Forest Regressor
rf = RandomForestRegressor()
rf.fit(x_train, y_train)
# Evaluate the Random Forest Regressor (R^2 on the test set)
rf.score(x_test, y_test)
# -
# ### 2.2 Choosing estimator for a classification problem
heart_disease = pd.read_csv("heart-disease.csv")
heart_disease.head()
# Consulting the map and it says to try `LinearSVC`
# +
# Import the LinearSVC estimator
from sklearn.svm import LinearSVC
np.random.seed(42)
# Make the data
x = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
# Split the data
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.2)
# Instantiate LinearSVC
# NOTE(review): max_iter=10000000 suppresses LinearSVC convergence warnings on
# unscaled data - feature scaling would be the usual fix.
clf = LinearSVC(max_iter=10000000)
clf.fit(x_train, y_train)
clf.score(x_test, y_test)
# -
# Class-balance check for the target labels
heart_disease["target"].value_counts()
# +
# Let's try Random forest classifier
from sklearn.ensemble import RandomForestClassifier
# Setup random seed
np.random.seed(42)
# Create the data
x = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
# Split the data
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# Instantiate Random Forest Classifier
rf = RandomForestClassifier()
rf.fit(x_train, y_train)
# Evaluate the Random Forest Classifier (mean accuracy on the test set)
rf.score(x_test, y_test)
# -
# Tips:
# 1. If you have structured data, use ensemble methods
# 2. If you have unstructured data, use deep learning or transfer learning
# ## 3. Fit the model/algorithm on our data and use it to make predictions
#
# ### 3.1 Fitting the model to the data
#
# Different names for:
# * `x` = features, feature variables, data
# * `y` = labels, targets, target variables
# +
# Let's try Random forest classifier
from sklearn.ensemble import RandomForestClassifier
# Setup random seed
np.random.seed(42)
# Create the data
x = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
# Split the data
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# Instantiate Random Forest Classifier
rf = RandomForestClassifier()
# Fit the model to the data (learn patterns mapping features x to labels y)
rf.fit(x_train, y_train)
# Evaluate the Random Forest Classifier (mean accuracy on held-out data)
rf.score(x_test, y_test)
# -
x.head()
y.tail()
# ## 3.2 Make predictions using a machine learning model
#
# two ways to make predictions:
# 1. `predict()`
# 2. `predict_proba()`
# Use a trained model to make predictions
# BUG FIX: the original called rf.predict(np.array([1, 7, 8, 3, 4])), which
# raises ValueError - predict() needs a 2-D (n_samples, n_features) array with
# the same number of features the model was trained on, not a 1-D 5-element
# array. Kept below only as a commented example of what NOT to do.
# rf.predict(np.array([1, 7, 8, 3, 4]))
rf.predict(x_test)
np.array(y_test)
# Compare predictions to truth labels to evaluate the model
# BUG FIX: the original used clf.predict(x_test) here, evaluating the stale
# `clf` from an earlier section instead of the freshly trained `rf`.
y_preds = rf.predict(x_test)
np.mean(y_preds == y_test)
rf.score(x_test, y_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_preds)
# Make predictions with `predict_proba()`
# predict_proba() returns per-class probabilities for each sample
rf.predict_proba(x_test[:5])
rf.predict(x_test[:5])
heart_disease["target"].value_counts()
# +
from sklearn.ensemble import RandomForestRegressor
np.random.seed(42)
# Create the data
x = boston_df.drop("target", axis=1)
y = boston_df["target"]
# Split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# Instantiate and fit model
model = RandomForestRegressor().fit(x_train, y_train)
# Make predictions
y_preds = model.predict(x_test)
# -
y_preds[:10]
np.array(y_test[:10])
# Compare the predictions to the truth (mean absolute error)
from sklearn.metrics import mean_absolute_error
mean_absolute_error(y_test, y_preds)
# ## 4.Evaluating a machine learning model
#
# There are three ways to evaluate scikit-learn model/estimators:
# 1. Estimator `score` method
# 2. The `scoring` parameter
# 3. Problem-specific metric functions
#
# ### 4.1 Evaluating a model with the `score` method
# +
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42)
x = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
clf=RandomForestClassifier()
clf.fit(x_train,y_train)
# -
# For a classifier, .score() returns mean accuracy on the given data
clf.score(x_test, y_test)
# ### 4.2 Evaluating a model using the `scoring` parameter
# +
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42)
x = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
clf=RandomForestClassifier()
clf.fit(x_train,y_train);
# -
clf.score(x_test, y_test)
# cross_val_score defaults to 5-fold cross-validation: the model is trained
# and scored on 5 different train/test partitions of the full data
cross_val_score(clf, x, y)
# ### 4.2.1 Classification model evaluation metrics
#
# 1. Accuracy
# 2. Area under ROC curve
# 3. Confusion matrix
# 4. Classification report
#
# #### 1. Accuracy
# +
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42)
x = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
clf = RandomForestClassifier()
# BUG FIX: the original did `cross_val_score = cross_val_score(...)`,
# rebinding the imported function name to its ndarray result and making the
# function uncallable afterwards. Store the scores under a distinct name.
cv_scores = cross_val_score(clf, x, y, cv=5)
# -
np.mean(cv_scores)
print(f"Heart Disease Classifier cross-validated accuracy: {np.mean(cv_scores)*100:.2f}%")
# **Area under the receiver operating characteristic curve (AUC/ROC)**
# * Area under curve (AUC)
# * ROC curve
#
# ROC curves are a comparison of a model's true positive rate (TPR) versus a model's false positive rate (FPR).
#
# * True positive = model predicts 1 when truth is 1
# * False positive = model predicts 1 when truth is 0
# * True negative = model predicts 0 when truth is 0
# * False negative = model predicts 0 when truth is 1
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# +
from sklearn.metrics import roc_curve
# Fit the classifier
clf.fit(x_train, y_train)
# Make predictions with probabilities
y_probs = clf.predict_proba(x_test)
y_probs[:10]
# -
# Keep only the probability of the positive class (second column)
y_probs_positive = y_probs[:,1]
y_probs_positive[:10]
# +
# Calculate fpr, tpr and thresholds
fpr, tpr, thresholds = roc_curve(y_test, y_probs_positive)
# Check false positive rates
fpr
# +
# Create a function for plotting ROC curves
import matplotlib.pyplot as plt
def plot_roc_curve(fpr, tpr):
    """
    Plot a ROC curve from a model's false positive rates (fpr)
    and true positive rates (tpr), with a no-skill diagonal baseline.
    """
    # The model's curve and the diagonal of a random-guessing classifier
    plt.plot(fpr, tpr, color="orange", label="ROC")
    plt.plot([0, 1], [0, 1], color="darkblue", linestyle="--", label="Guessing")
    # Titles, axis labels and legend
    plt.title("Receiver Operating Characteristic (ROC) Curve")
    plt.xlabel("False positive rate (FPR)")
    plt.ylabel("True positive rate (TPR)")
    plt.legend()
    plt.show()
plot_roc_curve(fpr, tpr)
# +
from sklearn.metrics import roc_auc_score
# Area under the ROC curve (1.0 = perfect ranking, 0.5 = random guessing)
roc_auc_score(y_test, y_probs_positive)
# -
# **Confusion Matrix**
#
# A confusion matrix is a quick way to compare the labels a model predicts and the actual labels it was supposed to predict.
#
# In essence, giving you an idea of where the model is getting confused.
# +
from sklearn.metrics import confusion_matrix
y_preds = clf.predict(x_test)
confusion_matrix(y_test, y_preds)
# -
# Visualize confusion matrix with pd.crosstab()
pd.crosstab(y_test, y_preds, rownames=["Actual Label"], colnames=["Predicted Labels"])
# +
# Installing a conda module from inside a jupyter notebook (kept for reference)
#import sys
# #!conda install --yes --prefix {sys.prefix} seaborn
# +
# Make our confusion matrix more visual with seaborn's heatmap()
import seaborn as sns
# Set the font scale
sns.set(font_scale=1.5)
# Create a confusion matrix
conf_mat=confusion_matrix(y_test, y_preds)
# Plot it using Seaborn
sns.heatmap(conf_mat)
# +
def plot_conf_mat(conf_mat):
    """
    Plots a confusion matrix using Seaborn's heatmap.

    conf_mat comes from sklearn's confusion_matrix(y_true, y_pred), whose rows
    are true labels and columns are predicted labels, so the heatmap's x-axis
    shows predictions and its y-axis shows the truth.
    """
    fig, ax = plt.subplots(figsize=(3,3))
    ax = sns.heatmap(conf_mat,
                     annot=True,   # write the count inside each cell
                     cbar=False)
    # BUG FIX: the original axis labels were swapped (x was labelled "True
    # label" and y "Predicted Label"); heatmap columns are the predictions.
    plt.xlabel("Predicted label")
    plt.ylabel("True label")
plot_conf_mat(conf_mat)
# +
# NOTE(review): plot_confusion_matrix was deprecated in scikit-learn 1.0 and
# removed in 1.2 (ConfusionMatrixDisplay.from_estimator replaces it) - confirm
# the environment's sklearn version before relying on this cell.
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(clf, x, y)
# -
# **Classification Report**
# +
from sklearn.metrics import classification_report
print(classification_report(y_test, y_preds))
# +
# Case where precision and recall become valuable: extreme class imbalance
disease_true = np.zeros(10000)
disease_true[0] = 1 # only one positive
disease_preds = np.zeros(10000) # Model predicts every case as 0
# Accuracy is ~100% here yet the positive class is never found -
# that is the point of this example.
pd.DataFrame(classification_report(disease_true,
                                   disease_preds,
                                   output_dict=True))
# -
# -
# To summarize classification metrics:
#
# * **Accuracy** is a good measure to start with if all classes are balanced (i.e. same amount of samples which are labelled with 0 or 1)
# * **Precision** and **recall** become more important when classes are imbalanced
# * If false positives are worse than false negatives, aim for higher precision
# * **F1-score** is a combination of precision and recall
# ### 4.2.2 Regression model evaluation metrics
#
# 1. R^2 (r-square) or co-efficient of determination
# 2. Mean absolute error (MAE)
# 3. Mean squared error (MSE)
#
# **R^2**
#
# What R-squared does: Compares your model predictions to the mean of the targets. Values can range from negative infinity (a very poor model) to 1. For example, if all your model does is predict the mean of targets, it's R^2 value would be 0 and if your model perfectly predicts a range of numbers then it's R^2 value would be 1.
# +
from sklearn.ensemble import RandomForestRegressor
np.random.seed(42)
x = boston_df.drop("target", axis=1)
y=boston_df["target"]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
model=RandomForestRegressor()
model.fit(x_train, y_train)
# -
model.score(x_test, y_test)
# +
from sklearn.metrics import r2_score
#Fill an array with y_test mean
y_test_mean = np.full(len(y_test), y_test.mean())
y_test.mean()
# -
y_test_mean
r2_score(y_test, y_test_mean)
r2_score(y_test, y_test)
# **Mean Absolute Error**
#
# MAE is the average of the absolute differences between predictions and actual values. It gives you an idea of how wrong your models predictions are
# +
#Make absolute error
from sklearn.metrics import mean_absolute_error
y_preds = model.predict(x_test)
mae=mean_absolute_error(y_test, y_preds)
mae
# -
df = pd.DataFrame(data={"Actual values": y_test,
"Predicted values": y_preds})
df["differences"] = df["Predicted values"] - df["Actual values"]
df
# **Mean Squared Error**
# +
# Mean square error
from sklearn.metrics import mean_squared_error
y_preds = model.predict(x_test)
mse = mean_squared_error(y_test, y_preds)
mse
# -
# Calculate MSE by hand
squared = np.square(df["differences"])
squared.mean()
# ### 4.2.3 Using the `scoring` parameter
# +
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42)
x = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
clf=RandomForestClassifier()
# -
# scoring=None uses the estimator's default metric (accuracy for classifiers)
np.random.seed(42)
cv_acc = cross_val_score(clf, x, y, cv=5, scoring=None)
cv_acc
# Cross-validated accuracy
print(f'The cross-validated accuracy is: {np.mean(cv_acc)*100:.2f}%')
np.random.seed(42)
cv_acc = cross_val_score(clf, x, y, cv=5, scoring="accuracy")
print(f'The cross-validated accuracy is: {np.mean(cv_acc)*100:.2f}%')
# Precision
np.random.seed(42)
cv_precision = cross_val_score(clf, x, y, cv=5, scoring="precision")
np.mean(cv_precision)
# Recall
cv_recall = cross_val_score(clf, x, y, cv=5, scoring="recall")
np.mean(cv_recall)
# F1 (harmonic mean of precision and recall)
cv_f1 = cross_val_score(clf, x, y, cv=5, scoring="f1")
np.mean(cv_f1)
# Using `scoring` parameter on regression model
# +
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor
np.random.seed(42)
x = boston_df.drop("target", axis=1)
y = boston_df["target"]
model = RandomForestRegressor()
# -
# scoring=None uses the regressor's default metric (R^2)
np.random.seed(42)
cv_r2 = cross_val_score(model, x, y, cv=5, scoring=None)
cv_r2
np.mean(cv_r2)
np.random.seed(42)
cv_r2 = cross_val_score(model, x, y, cv=5, scoring="r2")
cv_r2
# Mean absolute error ("neg_" because sklearn scorers follow a
# greater-is-better convention, so errors are negated)
cv_mae = cross_val_score(model, x, y, cv=5, scoring="neg_mean_absolute_error")
cv_mae
# Mean squared error
cv_mse = cross_val_score(model, x, y, cv=5, scoring="neg_mean_squared_error")
np.mean(cv_mse)
# ### 4.3 Using different evaluation metrics as Scikit-Learn functions
# +
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
np.random.seed(42)
x = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
clf = RandomForestClassifier()
clf.fit(x_train, y_train)
# Make predictions
y_preds = clf.predict(x_test)
# Evaluate the classifier
# BUG FIX: the original printed the label "Accuracy:" in front of all four
# metrics; label each metric with its own name.
print("Classifier metrics on the test set")
print(f"Accuracy: {accuracy_score(y_test, y_preds)*100:.2f}%")
print(f"Precision: {precision_score(y_test, y_preds)}")
print(f"Recall: {recall_score(y_test, y_preds)}")
print(f"F1: {f1_score(y_test, y_preds)}")
# -
# ### Regression evaluation functions
# +
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
np.random.seed(42)
x = boston_df.drop("target", axis=1)
y = boston_df["target"]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
model = RandomForestRegressor()
model.fit(x_train, y_train)
# Make predictions
y_preds = model.predict(x_test)
# Evaluate the regression model with the metric functions directly
print("Regression model metrics on the test set")
print(f"R^2: {r2_score(y_test, y_preds)}")
print(f"MAE: {mean_absolute_error(y_test, y_preds)}")
print(f"MSE: {mean_squared_error(y_test, y_preds)}")
# -
# -
# ### 5. Improving a model
#
# First predictions = baseline predictions
# First model = baseline model
#
# From a data perspective
# * Could we collect more data? (generally, the more data, the better)
# * Could we improve our data?
#
# From a model perspective:
# * Is there a better model we could use?
# * Could we improve the current model?
#
# Hyperparameters vs parameters:
# * Parameters = model finds these patterns in data
# * Hyperparameters = settings on a model you can adjust to improve it's ability to find patterns
#
# Three ways to adjust hyperparameters:
# 1. By hand
# 2. Randomly, using RandomSearchCV
# 3. Exhaustively, with GridSearchCV
# Inspect the tunable hyperparameters (and their defaults) of a fresh classifier
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.get_params()
# ### 5.1 Tuning Hyperparameters by hand
#
# Make 3 sets - training, validation and test
# We are going to try and adjust:
#
# * `max_depth`
# * `max_features`
# * `min_samples_leaf`
# * `min_samples_split`
# * `n_estimators`
def evaluate_preds(y_true, y_preds):
    """
    Compare true vs. predicted labels for a binary classification task.

    Prints accuracy, precision, recall and F1, then returns the four
    metrics (rounded to 2 decimal places) in a dict keyed by metric name.
    """
    scores = {
        "accuracy": accuracy_score(y_true, y_preds),
        "precision": precision_score(y_true, y_preds),
        "recall": recall_score(y_true, y_preds),
        "f1": f1_score(y_true, y_preds),
    }
    print(f"Accuracy: {scores['accuracy'] * 100:.2f}%")
    print(f"Precision: {scores['precision']:.2f}")
    print(f"Recall: {scores['recall']:.2f}")
    print(f"F1 score: {scores['f1']:.2f}")
    return {name: round(value, 2) for name, value in scores.items()}
# +
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42)
# Shuffle the data so the manual train/valid/test split is random
heart_disease_shuffled = heart_disease.sample(frac=1)
# Split into x (features) and y (label)
x = heart_disease_shuffled.drop("target", axis=1)
y = heart_disease_shuffled["target"]
# Split the data into train (70%), validation (15%) and test (15%) sets
train_split = round(0.7 * len(heart_disease_shuffled))  # 70% of data
valid_split = round(train_split + 0.15 * len(heart_disease_shuffled))  # 15% of data
x_train, y_train = x[:train_split], y[:train_split]
x_valid, y_valid = x[train_split:valid_split], y[train_split:valid_split]
# Bug fix: y_test previously sliced y[:valid_split] (the FIRST 85% of rows),
# so the test labels did not line up with x_test. Both must take rows from
# valid_split onwards.
x_test, y_test = x[valid_split:], y[valid_split:]
clf=RandomForestClassifier()
clf.fit(x_train, y_train)
#Make Predictions
y_preds = clf.predict(x_valid)
#Evaluate the classifier on validation set
baseline_metrics = evaluate_preds(y_valid, y_preds)
baseline_metrics
# +
np.random.seed(42)
#Create a second classifier with different hyperparameters
clf_2 = RandomForestClassifier(n_estimators=100)
clf_2.fit(x_train, y_train)
#Make predictions with different hyperparameters
y_preds_2 = clf_2.predict(x_valid)
#Evaluate the second classifier on the validation set
clf_2_metrics = evaluate_preds(y_valid, y_preds_2)
# +
# Third classifier: limit tree depth instead of changing the number of trees
clf_3 = RandomForestClassifier(max_depth=10)
clf_3.fit(x_train, y_train)
#Make predictions with different hyperparameters
y_preds_3 = clf_3.predict(x_valid)
#Evaluate the third classifier on the validation set
clf_3_metrics = evaluate_preds(y_valid, y_preds_3)
# -
# ### 5.2 Hyperparameter tuning with RandomizedSearchCV
# +
from sklearn.model_selection import RandomizedSearchCV
# Hyperparameter search space for the random forest.
# NOTE(review): max_features="auto" was removed in newer scikit-learn
# releases — confirm the installed version accepts it.
grid = {"n_estimators": [10, 100, 200, 500, 1000, 1200],
        "max_depth": [None, 5, 10, 20, 30],
        "max_features": ["auto", "sqrt"],
        'min_samples_split': [6],
        "min_samples_leaf": [1, 2, 4]
        }
np.random.seed(42)
#Split into x and y
x = heart_disease_shuffled.drop("target", axis=1)
y = heart_disease_shuffled["target"]
#Split into train and test sets (cross-validation supplies the validation folds)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
#Instantiate RandomForestClassifier
clf = RandomForestClassifier(n_jobs=1)
#Setup RandomizedSearchCV: sample 10 random combinations, 5-fold CV each
rs_clf = RandomizedSearchCV(estimator=clf,
                            param_distributions=grid,
                            n_iter=10,  # number of random combinations to try
                            cv=5,
                            verbose=2)
#Fit the RandomizedSearchCV version of clf (refits the best model at the end)
rs_clf.fit(x_train, y_train);
# -
# Best hyperparameter combination found by the random search
rs_clf.best_params_
# +
# Make predictions with the best hyperparameters
rs_y_preds = rs_clf.predict(x_test)
#Evaluate the predictions
rs_metrics = evaluate_preds(y_test, rs_y_preds)
# -
# ### 5.3 Hyperparameters tuning using GridSearchCV
grid
# Reduced grid (informed by the random-search results) so the exhaustive
# search stays tractable: 3 * 1 * 2 * 1 * 3 = 18 combinations, 5 folds each.
grid_2 = {'n_estimators': [100, 200, 500],
          'max_depth': [None],
          'max_features': ['auto', 'sqrt'],
          'min_samples_split': [6],
          'min_samples_leaf': [1, 2, 4]}
# +
from sklearn.model_selection import GridSearchCV, train_test_split
np.random.seed(42)
#Split into x and y
x = heart_disease_shuffled.drop("target", axis=1)
y = heart_disease_shuffled["target"]
#Split into train and test sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
#Instantiate RandomForestClassifier
clf = RandomForestClassifier(n_jobs=1)
#Setup GridSearchCV: tries every combination in grid_2 with 5-fold CV
gs_clf = GridSearchCV(estimator=clf,
                      param_grid=grid_2,
                      cv=5,
                      verbose=2)
#Fit the GridSearchCV version of clf (refits the best model at the end)
gs_clf.fit(x_train, y_train);
# -
# Best hyperparameter combination found by the exhaustive search
gs_clf.best_params_
# +
gs_y_preds = gs_clf.predict(x_test)
#Evaluate the predictions
gs_metrics = evaluate_preds(y_test, gs_y_preds)
# -
# Let's compare our different models metrics
# +
# Collect the metric dicts from each tuning strategy into one DataFrame
# (rows = metrics, columns = models) and compare them side by side.
compare_metrics = pd.DataFrame({
    "baseline": baseline_metrics,
    "clf_2": clf_2_metrics,
    "random search": rs_metrics,
    "grid search": gs_metrics
})
compare_metrics.plot.bar(figsize=(10, 8));
# -
# ## 6. Saving and loading trained machine learning model
#
# There are two ways to save and load machine learning models:
#
# 1. With Python's `pickle` module
# 2. With the `joblib` module
#
# **Pickle**
# +
import pickle

# Save the fitted grid-search model to disk. Use context managers so the
# file handles are closed deterministically — the original passed bare
# open(...) calls and left the handles for the garbage collector.
with open("gs_random_forest_model_1.pkl", "wb") as f:
    pickle.dump(gs_clf, f)
# -
# Load the saved model back from disk
with open("gs_random_forest_model_1.pkl", "rb") as f:
    loaded_pickle_model = pickle.load(f)
# Make some predictions with the reloaded model and check it still performs
pickle_y_preds = loaded_pickle_model.predict(x_test)
evaluate_preds(y_test, pickle_y_preds)
# **Joblib**
# +
from joblib import dump, load
#Save model to file (joblib is usually preferred for large numpy-backed models)
dump(gs_clf, filename="gs_random_forest_model_1.joblib")
# -
#Load the joblib model back from disk
loaded_joblib_model=load(filename="gs_random_forest_model_1.joblib")
#Make and evaluate joblib predictions to confirm the reloaded model works
joblib_y_preds = loaded_joblib_model.predict(x_test)
evaluate_preds(y_test, joblib_y_preds)
# ## 7. Putting it all together
data = pd.read_csv("car-sales-extended-missing-data.csv")
data
data.dtypes
data.isna().sum()
# Steps that we have to perform:
# 1. Fill missing data
# 2. Convert data to numbers
# 3. Build a model on the data
# +
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
#Modelling
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
#Setup random seed
import numpy as np
np.random.seed(42)
#Import data and drop rows with missing labels (can't train on unlabeled rows)
data = pd.read_csv("car-sales-extended-missing-data.csv")
data.dropna(subset=["Price"], inplace=True)
#Define different features and transformer pipelines:
# categorical columns: fill missing with the sentinel "missing", then one-hot encode
categorical_features = ["Make", "Colour"]
categorical_transformer = Pipeline(steps=[
    ("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
    ("onehot", OneHotEncoder(handle_unknown="ignore"))
])
# Doors: fill missing with the constant 4 (presumably the most common value — verify)
door_feature = ["Doors"]
door_transformer = Pipeline(steps=[
    ("imputer", SimpleImputer(strategy="constant", fill_value=4))
])
# numeric columns: fill missing with the column mean
numeric_features = ["Odometer (KM)"]
numeric_transformer = Pipeline(steps=[
    ("imputer", SimpleImputer(strategy="mean"))
])
#Setup preprocessing steps (fill missing values, then convert to numbers)
preprocessor = ColumnTransformer(
    transformers=[
        ("cat", categorical_transformer, categorical_features),
        ("door", door_transformer, door_feature),
        ("num", numeric_transformer, numeric_features)
    ])
#Create a preprocessing and modelling pipeline; fitting it imputes/encodes
#on the training data only, avoiding test-set leakage
model = Pipeline(steps=[
    ("preprocessor", preprocessor),
    ("model", RandomForestRegressor())
])
#Split the data
x = data.drop("Price", axis=1)
y = data["Price"]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
#Fit and score the model (score is R^2 for a regressor)
model.fit(x_train, y_train)
model.score(x_test, y_test)
# -
# It's also possible to use `GridSearchCV` or `RandomizedSearchCV` with our `Pipeline`
# +
#Use GridSearchCV with our regression Pipeline
from sklearn.model_selection import GridSearchCV
# Hyperparameter grid for the whole pipeline; the double-underscore path
# ("step__substep__param") addresses parameters of nested pipeline steps.
# NOTE(review): max_features="auto" was removed in newer scikit-learn — confirm.
pipe_grid = {
    "preprocessor__num__imputer__strategy": ["mean", "median"],
    "model__n_estimators": [100, 1000],
    "model__max_depth": [None, 5],
    "model__max_features": ["auto"],
    "model__min_samples_split": [2, 4]
}
gs_model = GridSearchCV(model, pipe_grid, cv=5, verbose=2)
gs_model.fit(x_train, y_train)
# -
# R^2 of the best pipeline found by the grid search
gs_model.score(x_test, y_test)
| Introduction-to-scikit-learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import sklearn.metrics
# %matplotlib inline
# !head notas_andes.dat
# Load the grades dataset; column 4 holds the final grade ("nota")
data = np.loadtxt("notas_andes.dat")
_ = plt.hist(data[:,4])
plt.xlabel("notas")
plt.ylabel("histograma")
plt.title("Mediana: {}".format(np.median(data[:,4])))
# Binary target: 0 if the final grade is below 4.0, 1 otherwise.
# (target starts as all ones, so the `target[~ii] = 1` line is redundant
# but harmless.)
target = np.ones(len(data))
ii = data[:,4]<4.0
target[ii] = 0
target[~ii] = 1
# Features: the first four columns
X = data[:,:4]
print(np.shape(X))
import sklearn.tree
# Depth-1 tree (a single split / decision stump)
clf = sklearn.tree.DecisionTreeClassifier(max_depth=1)
clf.fit(X, target)
plt.figure(figsize=(10,10))
_= sklearn.tree.plot_tree(clf)
clf.predict(X)
# F1 on the training data itself (no held-out set here)
print(sklearn.metrics.f1_score(target, clf.predict(X)))
# Depth-2 tree for comparison
clf = sklearn.tree.DecisionTreeClassifier(max_depth=2)
clf.fit(X, target)
print(sklearn.metrics.f1_score(target, clf.predict(X)))
plt.figure(figsize=(10,10))
_= sklearn.tree.plot_tree(clf)
print(clf.feature_importances_)
| IntroDataScience/ejercicios/16/arbol_notas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from typing import List
# +
Vector = List[float]
height_weight_age = [70, # inches,
170, # pounds,
40 ] # years
grades = [95, # exam1
80, # exam2
75, # exam3
62 ] # exam4
# +
def add(v: Vector, w: Vector) -> Vector:
    """Return the element-wise sum of two equal-length vectors."""
    assert len(v) == len(w), "vectors must be the same length"
    total = []
    for left, right in zip(v, w):
        total.append(left + right)
    return total
assert add([1, 2, 3], [4, 5, 6]) == [5, 7, 9]
# +
def subtract(v: Vector, w: Vector) -> Vector:
    """Return the element-wise difference v - w of two equal-length vectors."""
    assert len(v) == len(w), "vectors must be the same length"
    diff = []
    for left, right in zip(v, w):
        diff.append(left - right)
    return diff
assert subtract([5, 7, 9], [4, 5, 6]) == [1, 2, 3]
# +
def vector_sum(vectors: List[Vector]) -> Vector:
    """Sum a non-empty list of same-length vectors element-wise."""
    # Reject an empty list — there is no well-defined result length.
    assert vectors, "no vectors provided!"
    # All vectors must match the length of the first one.
    num_elements = len(vectors[0])
    assert all(len(v) == num_elements for v in vectors), "different sizes!"
    # Accumulate component-by-component into a running total.
    totals = [0] * num_elements
    for vector in vectors:
        for position, component in enumerate(vector):
            totals[position] += component
    return totals
assert vector_sum([[1, 2], [3, 4], [5, 6], [7, 8]]) == [16, 20]
# +
def scalar_multiply(c: float, v: Vector) -> Vector:
    """Scale every component of v by the constant c."""
    return [c * component for component in v]
assert scalar_multiply(2, [1, 2, 3]) == [2, 4, 6]
# +
def vector_mean(vectors: List[Vector]) -> Vector:
    """Return the element-wise average of a list of same-length vectors."""
    count = len(vectors)
    return scalar_multiply(1 / count, vector_sum(vectors))
assert vector_mean([[1, 2], [3, 4], [5, 6]]) == [3, 4]
# +
def dot(v: Vector, w: Vector) -> float:
    """Return the dot product v_1*w_1 + ... + v_n*w_n."""
    assert len(v) == len(w), "vectors must be same length"
    total = 0
    for left, right in zip(v, w):
        total += left * right
    return total
assert dot([1, 2, 3], [4, 5, 6]) == 32 # 1 * 4 + 2 * 5 + 3 * 6
# +
def sum_of_squares(v: Vector) -> float:
    """Return the sum of squared components of v, i.e. the dot product of v with itself."""
    return dot(v, v)
assert sum_of_squares([1, 2, 3]) == 14 # 1 * 1 + 2 * 2 + 3 * 3
# +
import math
def magnitude(v: Vector) -> float:
    """Return the Euclidean length of v: sqrt(v . v)."""
    return math.sqrt(sum_of_squares(v))
assert magnitude([3, 4]) == 5
# -
def squared_distance(v: Vector, w: Vector) -> float:
    """Computes (v_1 - w_1) ** 2 + ... + (v_n - w_n) ** 2"""
    return sum_of_squares(subtract(v, w))

def distance(v: Vector, w: Vector) -> float:
    """Computes the distance between v and w"""
    return math.sqrt(squared_distance(v, w))

# NOTE: this second definition deliberately shadows the one above; the two
# are mathematically equivalent (magnitude(subtract(v, w)) ==
# sqrt(squared_distance(v, w))), and the redefinition is what callers see.
def distance(v: Vector, w: Vector) -> float:  # type: ignore
    return magnitude(subtract(v, w))
# +
# Another type alias
Matrix = List[List[float]]
A = [[1, 2, 3], # A has 2 rows and 3 columns
[4, 5, 6]]
B = [[1, 2], # B has 3 rows and 2 columns
[3, 4],
[5, 6]]
# -
from typing import Tuple
# +
def shape(A: Matrix) -> Tuple[int, int]:
    """Return the (rows, columns) dimensions of matrix A."""
    if A:
        # Column count is taken from the first row.
        return len(A), len(A[0])
    # An empty matrix has zero rows and zero columns.
    return 0, 0
assert shape([[1, 2, 3], [4, 5, 6]]) == (2, 3) # 2 rows, 3 columns
# -
def get_row(A: Matrix, i: int) -> Vector:
    """Return row i of A; each row is already stored as a list."""
    return A[i]
def get_column(A: Matrix, j: int) -> Vector:
    """Return column j of A by collecting the j-th entry of every row."""
    column = []
    for row in A:
        column.append(row[j])
    return column
from typing import Callable
def make_matrix(num_rows: int,
                num_cols: int,
                entry_fn: Callable[[int, int], float]) -> Matrix:
    """
    Build a num_rows x num_cols matrix whose (i, j)-th entry is entry_fn(i, j).
    """
    matrix = []
    for i in range(num_rows):
        # One row at a time: evaluate entry_fn across every column index.
        row = [entry_fn(i, j) for j in range(num_cols)]
        matrix.append(row)
    return matrix
# +
def identity_matrix(n: int) -> Matrix:
    """Return the n x n identity matrix (ones on the diagonal, zeros elsewhere)."""
    def diagonal(i: int, j: int) -> float:
        # 1 on the diagonal, 0 everywhere else.
        return 1 if i == j else 0
    return make_matrix(n, n, diagonal)
assert identity_matrix(5) == [[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]]
# +
data = [[70, 170, 40],
[65, 120, 26],
[77, 250, 19],
# ....
]
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# user 0 1 2 3 4 5 6 7 8 9
#
friend_matrix = [[0, 1, 1, 0, 0, 0, 0, 0, 0, 0], # user 0
[1, 0, 1, 1, 0, 0, 0, 0, 0, 0], # user 1
[1, 1, 0, 1, 0, 0, 0, 0, 0, 0], # user 2
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0], # user 3
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0], # user 4
[0, 0, 0, 0, 1, 0, 1, 1, 0, 0], # user 5
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 6
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 7
[0, 0, 0, 0, 0, 0, 1, 1, 0, 1], # user 8
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]] # user 9
assert friend_matrix[0][2] == 1, "0 and 2 are friends"
assert friend_matrix[0][8] == 0, "0 and 8 are not friends"
# +
# only need to look at one row
friends_of_five = [i
for i, is_friend in enumerate(friend_matrix[5])
if is_friend]
assert friends_of_five == [4, 6, 7]
# -
| notebooks/04_linear_algebra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load the telecom churn dataset and inspect its size / missing values
data = pd.read_csv("./telecom.csv")
data.shape
data.isnull().sum()
# Features are columns 6..19; the label ("churn", presumably — verify
# against the CSV schema) is column 20
X = data.iloc[:,6:20].values
Y=data.iloc[:,20].values
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=0.20,random_state=101)
# +
from sklearn.preprocessing import StandardScaler
# Standardize features; the scaler is fit on the TRAINING set only and then
# applied to both splits, which avoids test-set leakage
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# -
from sklearn.metrics import accuracy_score
import xgboost as xgb
# Gradient-boosted tree classifier with a small learning rate
model=xgb.XGBClassifier(random_state=1,learning_rate=0.01)
model.fit(X_train, Y_train)
model.score(X_test,Y_test)
# Accuracy computed explicitly (same quantity as .score for a classifier)
predicted_test = model.predict(X_test)
accuracy_test = accuracy_score(Y_test,predicted_test)
accuracy_test
from sklearn.metrics import classification_report, confusion_matrix
# Confusion matrix and per-class precision/recall/F1 on the test set
print(confusion_matrix(Y_test, predicted_test))
print(classification_report(Y_test, predicted_test))
| XGBOOST_chrun_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 变分量子电路编译
# <em> Copyright (c) 2021 Institute for Quantum Computing, Baidu Inc. All Rights Reserved. </em>
# ## 概览
# 变分量子电路编译是一个通过优化参数化量子电路来模拟未知酉算子的过程,本教程我们将讨论两种未知酉算子的情况:一是给定酉算子 $U$ 的矩阵形式;二是给定一个实现 $U$ 的黑箱,可以将 $U$ 接入电路使用但不允许访问其内部构造。针对不同形式的 $U$,我们利用量桨构建量子线路训练损失函数,分别得到 $U$ 的近似酉算子 $V(\vec{\theta})$(这里我们用 $V(\vec{\theta})$ 表示参数化量子门序列所表示的酉算子,为简单起见下文我们用 $V$ 来表示)和 $V^{\dagger}$ 的量子线路,并根据经过 $U$ 和 $V$ 演化后的量子态的迹距离对结果进行评估。
# ## 背景
#
# 经典计算机早期的编译过程是将二进制数字转变为高低电平驱动计算机的电子器件进行运算,随后逐渐发展为便于处理书写的汇编语言;与经典计算机类似,对于量子计算机而言,量子编译就是将量子算法中的酉变换转化为一系列量子门序列从而实现量子算法的过程。目前含噪的中等规模量子 (Noisy Intermediate-Scale Quantum, NISQ) 设备存在诸如在量子比特数量、电路深度等方面的限制,这些限制给量子编译算法带来了巨大挑战。文献 [1] 提出了一种量子编译算法——量子辅助量子编译算法 (Quantum-Assisted Quantum Compiling, QAQC),能够有效地在 NISQ 设备上实现。QAQC 的目的是将未知的目标酉算子 $U$ 编译成可训练的参数化量子门序列,利用门保真度定义损失函数,通过设计变分量子电路不断优化损失函数,得到近似目标酉算子 $U$ 的 $V$,但如何衡量两个酉算子的近似程度呢?这里我们考虑 $V$ 的酉演化能够模拟 $U$ 酉演化的概率,即对输入态 $|\psi\rangle$,$U|\psi\rangle$ 和 $V|\psi\rangle$ 的重叠程度,也就是哈尔( Haar )分布上的保真度平均值:
#
# $$
# F(U,V)=\int_{\psi}|\langle\psi|V^{\dagger}U|\psi\rangle|^2d\psi,
# \tag{1}
# $$
#
# 当 $F(U,V)=1$ 时,存在$\phi$,$V=e^{i\phi}U$,即两个酉算子相差一个全局相位因子,此时我们称 $V$ 为 $U$ 的精确编译;当 $F(U,V)\geq 1-\epsilon$ 时,我们称 $V$ 为 $U$ 的近似编译,其中 $\epsilon\in[0,1]$ 为误差。基于此,我们可以构造以下的损失函数:
#
# $$
# \begin{aligned} C(U,V)&=\frac{d+1}{d}(1-F(U,V))\\
# &=1-\frac{1}{d^2}|\langle V,U\rangle|^2\\
# &=1-\frac{1}{d^2}|\text{tr}(V^{\dagger} U)|^2,
# \end{aligned}
# \tag{2}
# $$
#
# 其中 $n$ 为量子比特数,$d=2^n$,$\frac{1}{d^2}|\text{tr}(V^{\dagger} U)|^2$ 也被称为门保真度。
#
# 由 (2) 式可得当且仅当 $F(U,V)=1$ 时,$C(V,U)=0$ ,因此我们通过训练一系列门序列来最小化损失函数,从而得到近似目标酉算子 $U$ 的 $V$。
# ## 第一种情况 —— 矩阵形式的 $U$
#
# 下面我们先分析已知 $U$ 矩阵形式的情况,以 Toffoli 门为例,已知其矩阵表示为 $U_0$,搭建量子神经网络(即参数化量子电路)通过训练优化得到 $U_0$ 的近似电路分解。
#
# 我们在量桨中实现上述过程,首先引入需要的包:
import numpy as np
import paddle
from paddle_quantum.circuit import UAnsatz
from paddle_quantum.utils import dagger, trace_distance
from paddle_quantum.state import density_op_random
# 接下来将 Toffoli 门的矩阵形式 $U_0$ 输入电路中:
# +
n = 3 # 设定量子比特数
# 输入 Toffoli 门的矩阵形式
U_0 = paddle.to_tensor(np.matrix([[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0]],
dtype="float64"))
# -
# ### 搭建量子电路
#
# 不同的量子神经网络(Quantum Neural Network, QNN)表达能力不同,此处我们选择的是量桨中内置的 `complex_entangled_layer(theta, D)` 模板构造表达能力较强的电路模板搭建 QNN:
# 构建量子电路
# Build the quantum circuit (the QNN ansatz)
def Circuit(theta, n, D):
    """Build a D-layer parameterized circuit on n qubits from angles theta."""
    # Initialize a quantum circuit on n qubits
    cir = UAnsatz(n)
    # Built-in strongly entangling template made of U3 and CNOT gates
    cir.complex_entangled_layer(theta[:D], D)
    return cir
# ### 配置训练模型 —— 损失函数
#
# 接下来进一步定义损失函数 $C(U,V) = 1-\frac{1}{d^2}|\text{tr}(V^{\dagger} U)|^2$ 和训练参数。
# 训练模型cost-function
# Training model: cost function of Eq. (2)
class Net(paddle.nn.Layer):
    """Holds the trainable circuit angles and computes the compilation loss."""

    def __init__(self, shape, dtype="float64", ):
        super(Net, self).__init__()
        # Trainable rotation angles, initialized uniformly in [0, 2*pi)
        self.theta = self.create_parameter(shape=shape,
                                           default_initializer=paddle.nn.initializer.Uniform(low=0.0, high=2 * np.pi),
                                           dtype=dtype, is_bias=False)

    def forward(self, n, D):
        """Return (loss, circuit); loss = 1 - |tr(V^dagger U_0)|^2 / d^2."""
        # Matrix representation of the parameterized circuit
        cir = Circuit(self.theta, n, D)
        V = cir.U
        # Construct the loss of Eq. (2) directly (1 - gate fidelity with U_0);
        # V.shape[0] is the dimension d = 2^n
        loss =1 - (dagger(V).matmul(U_0).trace().abs() / V.shape[0]) ** 2
        return loss, cir
# ### 配置训练模型 —— 模型参数
#
# 对 QNN 进行训练前,我们还需要进行一些训练的超参数设置,主要是 QNN 计算模块的层数 $D$、学习速率 LR 以及训练的总迭代次数 ITR。此处我们设置学习速率为 0.1,迭代次数为 150 次。读者可以自行调整来直观感受下超参数调整对训练效果的影响。
D = 5 # 量子电路的层数
LR = 0.1 # 基于梯度下降的优化方法的学习率
ITR = 150 # 训练的总迭代次数
# ### 进行训练
# +
# Fix the dimensions of the network parameters
net = Net(shape=[D + 1, n, 3])
# Use the Adam optimizer
opt = paddle.optimizer.Adam(learning_rate=LR, parameters=net.parameters())
# Optimization loop: minimize the compilation loss for ITR iterations
for itr in range(1, ITR + 1):
    loss, cir = net.forward(n, D)
    loss.backward()
    opt.minimize(loss)
    opt.clear_grad()
    # Report progress every 30 iterations
    if itr % 30 == 0:
        print("iter:", itr, "loss:", "%.4f" % loss.numpy())
    # After the final iteration, print the trained circuit
    # (output strings below are user-facing Chinese text — left unchanged)
    if itr == ITR:
        print("\n训练后的电路:")
        print(cir)
# Save the optimized angles for later reuse
theta_opt = net.theta.numpy()
print("优化后的参数 theta:\n", np.around(theta_opt, decimals=3))
# -
# 当已知目标酉算子的矩阵形式时,根据迭代过程及测试结果我们可以看到以 Toffoli 门的矩阵形式为例,搭建五层量子神经网络进行训练,迭代 150 次左右时,损失函数达到 0。
# ### 结果验证
#
# 下面我们随机选取 10 个密度矩阵,分别经过目标酉算子 $U$ 和近似酉算子 $V$ 的演化,计算真实的输出 `real_output` 和近似的输出 `simulated_output` 之间的迹距离 $ d(\rho, \sigma) = \frac{1}{2}\text{tr}\sqrt{(\rho-\sigma)^{\dagger}(\rho-\sigma)}$,迹距离越小,说明近似效果越好。
# +
s = 10 # 定义随机生成密度矩阵的数量
for i in range(s):
sampled = paddle.to_tensor(density_op_random(3).astype('complex128')) # 随机生成 3 量子比特的密度矩阵 sampled
simulated_output = paddle.matmul(paddle.matmul(cir.U, sampled), dagger(cir.U)) # sampled 经过近似酉算子演化后的结果
real_output = paddle.matmul(paddle.matmul(paddle.to_tensor(U_0), sampled), dagger(paddle.to_tensor(U_0))) # sampled 经过目标酉算子演化后的结果
print('sample:', i + 1, ':')
d = trace_distance(real_output.numpy(), simulated_output.numpy())
print(' trace distance is', np.around(d, decimals=5)) # 输出两种结果间的迹距离
# -
# 可以看到各个样本分别经过 $U$ 和 $V$ 的演化后迹距离都接近 0, 说明 $V$ 近似 $U$ 的效果很好。
# ## 第二种情况 —— 线路形式的 $U$
#
# 第二种情况下,我们假设 $U$ 以黑盒的形式给出,其保真度不能再直接计算,因此它需要通过一个电路来计算。接下来我们将演示如何用量子电路计算保真度。
#
# ### 利用量子电路图计算保真度
# 在实现 QAQC 的过程中,我们需要设计量子电路图来训练损失函数。QAQC 的 QNN 是嵌套在一个更大的量子电路中,整个量子电路如下图所示,其中 $U$ 表示需要近似的酉算子,$V^{\dagger}$ 是我们要训练的 QNN。这里我们利用 Toffoli 门作为黑箱。
#
# 
# <center> 图1: QAQC 量子电路图 [1]。 </center>
#
# 电路总共需要 $2n$ 量子比特,我们称前 $n$ 个量子比特为系统 $A$,后 $n$ 个为系统 $B$,整个电路涉及以下三步:
#
# - 首先通过通过 Hadamard 门和 CNOT 门操作生成 $A、B$ 的最大纠缠态;
# - 然后对 $A$ 系统进行 $U$ 操作,$B$ 系统执行 $V^{\dagger}$(即 $V$ 的复共轭);
# - 最后恢复第一步中的操作并在标准基下测量(也可以理解为在贝尔基下测量)。
#
# 经过上述操作,测量得到的全零态的概率即为 $\frac{1}{d^2}|\text{tr}(V^{\dagger} U)|^2$,关于图 1 的详细解释请参考文献 [1]。
# 这里的 QNN 我们依旧采用第一种情况中的电路,黑箱为 Toffoli 门。
#
# 下面我们在量桨上实现近似编译酉算子的过程:
# +
n = 3 # number of qubits per subsystem

# Build the full 2n-qubit compilation circuit of Fig. 1
def Circuit(theta, n, D):
    """Circuit that estimates the gate fidelity between a black-box U and the QNN V."""
    # Initialize a quantum circuit on 2n qubits (systems A and B)
    cir = UAnsatz(2 * n)
    # Step 1: prepare a maximally entangled state between A (qubits 0..n-1)
    # and B (qubits n..2n-1)
    for i in range(n):
        cir.h(i)
        cir.cnot([i, n + i])
    # Step 2a: apply the target U (here a Toffoli gate as the black box) on A
    cir.ccx([0, 1, 2])
    # Step 2b: apply the trainable QNN on B
    cir.complex_entangled_layer(theta, D, [3, 4, 5])
    # Step 3: undo the entangling operations and measure in the standard basis
    for l in range(n):
        cir.cnot([n - 1 - l, 2 * n - 1 - l])
    for m in range(n):
        cir.h(m)
    return cir
# -
# ### 配置训练模型 —— 损失函数
#
# 接下来进一步定义损失函数 $C(U,V) = 1-\frac{1}{d^2}|\text{tr}(V^{\dagger} U)|^2$ 和训练参数:
class Net(paddle.nn.Layer):
    """Trainable wrapper for the Fig. 1 circuit; loss = 1 - P(all-zero outcome)."""

    def __init__(self, shape, dtype="float64", ):
        super(Net, self).__init__()
        # Store the layer count and initialize the angles uniformly in [0, 2*pi)
        self.D = D
        self.theta = self.create_parameter(shape=[D, n, 3],
                                           default_initializer=paddle.nn.initializer.Uniform(low=0.0, high=2 * np.pi),
                                           dtype=dtype, is_bias=False)

    # Define the loss function and the forward pass
    def forward(self):
        # Build the full 2n-qubit circuit from the current angles
        cir = Circuit(self.theta, n, self.D)
        # Density matrix rho of the output state of the circuit
        rho = cir.run_density_matrix()
        # The (0, 0) entry of rho is the probability of the all-zero outcome,
        # which equals the gate fidelity |tr(V^dagger U)|^2 / d^2
        loss = 1 - paddle.real(rho[0][0])
        return loss, cir
# ### 配置训练模型 —— 模型参数
#
# 我们设置学习速率为 0.1,迭代次数为 120 次。同样读者可以自行调整来直观感受下超参数调整对训练效果的影响。
D = 5 # 量子电路的层数
LR = 0.1 # 基于梯度下降的优化方法的学习率
ITR = 120 #训练的总迭代次数
# ### 进行训练
#
# 设置完训练模型的各项参数后,我们使用 Adam 优化器进行 QNN 的训练。
# +
# 确定网络的参数维度
net = Net(D)
# 使用 Adam 优化器
opt = paddle.optimizer.Adam(learning_rate=LR, parameters=net.parameters())
# 优化循环
for itr in range(1, ITR + 1):
# 前向传播计算损失函数
loss, cir= net.forward()
# 反向传播极小化损失函数
loss.backward()
opt.minimize(loss)
opt.clear_grad()
# 打印训练结果
if itr % 20 == 0:
print("iter:",itr,"loss:","%.4f" % loss.numpy())
if itr == ITR:
print("\n训练后的电路:")
print('电路形式输入的 U 的近似电路:\n', cir)
# -
# 存储优化后的参数
theta_opt = net.theta.numpy()
# 当能够将 $U$ 的电路接入图 1 时,根据迭代过程及测试结果我们可以看到以 Toffoli 门为例,搭建一层量子神经网络进行训练,迭代 100 次左右时,损失函数趋近 0。
# ### 结果验证
#
# 与之前类似,我们同样随机选取 10 个密度矩阵,分别经过目标酉算子 $U$ 和近似酉算子 $V$ 的演化,计算真实的输出 `real_output` 和近似的输出 `simulated_output` 之间的迹距离,迹距离越小,说明近似效果越好。
s = 10 # number of random density matrices to sample
for i in range(s):
    # Random 3-qubit density matrix (the original comment said 4 qubits,
    # but density_op_random(3) generates a 3-qubit state)
    sampled = paddle.to_tensor(density_op_random(3).astype('complex128'))
    # Circuit implementing the target unitary (a Toffoli gate)
    cir_1 = UAnsatz(3)
    cir_1.ccx([0, 1, 2])
    # Result of evolving `sampled` under the target unitary
    real_output = paddle.matmul(paddle.matmul(cir_1.U, sampled), dagger(cir_1.U))
    # Circuit implementing the trained approximate unitary
    cir_2 = UAnsatz(3)
    cir_2.complex_entangled_layer(paddle.to_tensor(theta_opt), D, [0, 1, 2])
    # Result of evolving `sampled` under the approximate unitary
    simulated_output = paddle.matmul(paddle.matmul(cir_2.U, sampled), dagger(cir_2.U))
    d = trace_distance(real_output.numpy(), simulated_output.numpy())
    print('sample', i + 1, ':')
    # Trace distance between the two outputs; values near 0 mean a good approximation
    print(' trace distance is', np.around(d, decimals=5))
# 可以看到各个样本分别经过 $U$ 和 $V$ 的演化后迹距离都接近 0, 说明 $V$ 近似 $U$ 的效果很好。
# ## 总结
#
# 本教程从目标酉算子输入形式为矩阵和电路形式两种情况对其进行量子编译,通过两个简单的例子利用量桨展示了量子编译的效果,并随机生成量子态密度矩阵,对分别经过目标酉算子与近似酉算子演化后的结果求迹距离检验近似效果,结果表明量子编译效果较好。
# _______
#
# ## 参考文献
# [1] <NAME>, et al. "Quantum-assisted quantum compiling." [Quantum 3 (2019): 140](https://quantum-journal.org/papers/q-2019-05-13-140/).
| tutorial/qnn_research/VQCC_CN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Exercise 2:Continue with Class 6 Exercise 4. Open data.csv, add a new column (categories for
# calories: few, normal or high). Apply label encoding / ordinal encoding / one-hot encoding to this
# new feature. Study correlation between duration and encoded calories features.
#Importing needed libraries
import pandas as pd
import numpy as np
from scipy import stats
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='white', context='notebook', palette='deep')
# # label encoding
#import data
df = pd.read_csv("data.csv")
#Fetch the last column (the categorical calories column)
stringCol = df.iloc[:,-1]
# Apply label encoding to the last column: each distinct category string is
# mapped to an integer code (alphabetical order of the categories)
encoder = LabelEncoder()
encoder.fit(stringCol)
encoder.transform(stringCol)
#Replace cat_calories values with the encoded integer labels
df["cat_calories"].replace(to_replace=df["cat_calories"].tolist(),
                           value=encoder.transform(stringCol),
                           inplace = True)
df.head()
# +
# Study correlation between duration and encoded calories features
# +
#Visualizing data
df.plot()
plt.show()
# scatter plot for two attributes
df.plot(kind = 'scatter', x = 'Duration', y = 'cat_calories')
plt.scatter(x = df['Duration'], y = df['cat_calories'])
plt.show()
df["Duration"].plot(kind = 'hist')
plt.show()
df["cat_calories"].plot(kind = 'hist')
plt.show()
# +
#Correlation matrix
corrMatrix = df.corr()
print (corrMatrix)
#Visualizing correlation matrix
sns.heatmap(corrMatrix, annot=True)
plt.show()
# +
#Analysing correlation between Duration and cat_calories
sns.jointplot(x="Duration", y="cat_calories", data=df)
plt.show()
plt.scatter(x="Duration", y="cat_calories", data=df)
plt.show()
#Correlation coefficient
corr= np.corrcoef(df["Duration"], df["cat_calories"])[0,1]
print("Correlation between Duration and cat_calories:",round(corr,2))
#Significance of correlation coefficient
ttest, pval =stats.ttest_ind(df["Duration"], df["cat_calories"])
print("Independent t-test:", ttest, pval)
# -
# # ordinal encoding
# +
#import data
df = pd.read_csv("data.csv")
# Dictionary mapping each category to an ordinal rank (high < normal < few
# by rank number here — the chosen ordering is this mapping's convention)
Cat_cal_dict = {'high':1, 'normal':2, 'few':3}
#Replace values in the "cat_calories" column with the dict values 1, 2 or 3;
#categories not present in the dict would become NaN
df['cat_calories'] = df.cat_calories.map(Cat_cal_dict)
df
# +
# Study correlation between duration and encoded calories features
#Correlation matrix
corrMatrix = df.corr()
print (corrMatrix)
#Visualizing correlation matrix
sns.heatmap(corrMatrix, annot=True)
plt.show()
# +
#Analysing correlation between Duration and cat_calories
sns.jointplot(x="Duration", y="cat_calories", data=df)
plt.show()
plt.scatter(x="Duration", y="cat_calories", data=df)
plt.show()
#Correlation coefficient
corr= np.corrcoef(df["Duration"], df["cat_calories"])[0,1]
print("Correlation between Duration and cat_calories:",round(corr,2))
#Significance of correlation coefficient
ttest, pval =stats.ttest_ind(df["Duration"], df["cat_calories"])
print("Independent t-test:", ttest, pval)
# -
# # one-hot encoding
# +
#import data
df = pd.read_csv("data.csv")
#Instantiate the OneHotEncoder object
#The parameter drop='first' drops one dummy column to avoid the dummy
#variable trap (perfect collinearity between the dummies).
#NOTE(review): `sparse=` was renamed to `sparse_output=` in scikit-learn 1.2+
#— confirm against the installed version.
onehotencoder = OneHotEncoder(sparse=False, handle_unknown='error',
                              drop='first')
#Perform one-hot encoding on the single categorical column
onehotencoder_df = pd.DataFrame(onehotencoder.fit_transform(df[["cat_calories"]]))
onehotencoder_df
#Merge the one-hot columns back into the dataframe (join is on the row index)
df = df.join(onehotencoder_df)
df.drop(columns=['cat_calories'], inplace=True)
df
# +
#One-hot encoding using scikit-learn
from sklearn.preprocessing import OneHotEncoder
df = pd.read_csv("data.csv")
#Instantiate the OneHotEncoder object
#The parameter drop = ‘first’ will handle dummy variable traps
onehotencoder = OneHotEncoder(sparse=False, handle_unknown='error',
drop='first')
#Perform one-hot encoding
onehotencoder_df = pd.DataFrame(onehotencoder.fit_transform(df[["cat_calories"]]))
onehotencoder_df
# #Merge one-hot encoding columns with dataframe
df = df.join(onehotencoder_df)
#drop 'cat_calories'
df.drop(columns=['cat_calories'], inplace=True)
df
# +
# Study correlation between duration and encoded calories features
#Correlation matrix
corrMatrix = df.corr()
print (corrMatrix)
#Visualisation of correlation matrix
corr = df.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
plt.show()
# -
| Week_08/Week_8_Task.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # UART Demo
#
# This demo highlights the usefulness of using a more complex MMIO driver wrapper by implementing a wrapper to interact with UART hardware. This wrapper is included in the notebook as an example of how to create a more complicated MMIO driver, including how to interact with interrupts.
# +
import asyncio, time
from pynq import Interrupt
class UART() :
    """MMIO driver wrapper for a UART core in a partially reconfigurable region.

    Register offsets and bit masks below follow the register map of the
    underlying UART hardware (RX/TX FIFOs plus status and control registers).
    NOTE(review): the layout matches the Xilinx AXI UART Lite convention —
    confirm against the actual core placed in the PR region.
    """

    # Register byte offsets relative to the core's MMIO base.
    RX_OFFSET = 0x00       # read: next received byte
    TX_OFFSET = 0x04       # write: byte to transmit
    STATUS_OFFSET = 0x08   # read-only status flags
    CONTROL_OFFSET = 0x0C  # control flags (FIFO reset, interrupt enable)

    # Status register bits.
    RX_AVAIL_BIT = 0x01    # RX FIFO holds at least one byte
    RX_FULL_BIT = 0x02     # RX FIFO full
    TX_EMPTY_BIT = 0x04    # TX FIFO empty
    TX_FULL_BIT = 0x08     # TX FIFO full

    # Control register bits.
    RST_FIFO_BIT = 0x02      # write to reset the FIFOs
    CTRL_BIT_EN_INT = 0x10   # OR-mask to enable interrupts
    CTRL_BIT_DIS_INT = 0XEF  # AND-mask to clear the interrupt-enable bit

    def __init__(self, pr_region, interrupt = None, name=None):
        """Bind to a PR region's S_AXI MMIO; optionally attach an Interrupt by name."""
        self._mmio = pr_region.S_AXI.mmio
        # Default name is derived from the PR region's full path in the overlay.
        if name is None:
            self.name = "UART_" + str(pr_region.description.get('fullpath'))
        else:
            self.name = name
        # Only create the Interrupt object when a line was given; otherwise
        # self.interrupt is left unset and isr_recv must not be used.
        if interrupt is not None:
            self.interrupt = Interrupt(interrupt)

    def txReady(self):
        """True when the TX FIFO can accept another byte (not full)."""
        cur_val = self._mmio.read(self.STATUS_OFFSET)
        return not (cur_val & self.TX_FULL_BIT)

    def rxAvail(self):
        """True when at least one received byte is waiting in the RX FIFO."""
        cur_val = self._mmio.read(self.STATUS_OFFSET)
        return (cur_val & self.RX_AVAIL_BIT) == self.RX_AVAIL_BIT

    def enableInterrupts(self, enable):
        """Set or clear the interrupt-enable bit via read-modify-write of CONTROL."""
        ctrl = self._mmio.read(self.CONTROL_OFFSET)
        if enable:
            ctrl |= self.CTRL_BIT_EN_INT
        else:
            ctrl &= self.CTRL_BIT_DIS_INT
        self._mmio.write(self.CONTROL_OFFSET, ctrl)

    def write(self, msg):
        """Send every byte of *msg*, busy-waiting while the TX FIFO is full."""
        for b in msg:
            # Wait for ready to send
            while not self.txReady():
                pass
            # Send data
            self._mmio.write(self.TX_OFFSET, b)

    def readRxByte(self):
        """Pop and return one byte from the RX FIFO (caller must check rxAvail first)."""
        byte = self._mmio.read(self.RX_OFFSET)
        return byte

    def WriteTxByte(self, byte):
        """Send a single byte, busy-waiting until the TX FIFO has room."""
        # Wait for ready to send
        while not self.txReady():
            pass
        self._mmio.write(self.TX_OFFSET, byte)

    #timeout_secs can be initialized to None to disable timeout
    def read(self, size=1, timeout_secs=1):
        """Read up to *size* bytes; returns early (possibly short) on timeout."""
        recvd = []
        timeout = _Timeout(timeout_secs)
        while len(recvd) < size:
            #waits for data to be available
            while not self.rxAvail() and not timeout.expired():
                pass
            #exits if time has expired.
            if timeout.expired():
                break
            recvd.append(self._mmio.read(self.RX_OFFSET))
        return recvd

    def printStatus(self):
        """Pretty-print the decoded status register bits for debugging."""
        status = self._mmio.read(self.STATUS_OFFSET)
        print(self.name + " status:")
        print("\tRX Available: " + str((status & self.RX_AVAIL_BIT) == self.RX_AVAIL_BIT))
        print("\tRX Full: " + str((status & self.RX_FULL_BIT) == self.RX_FULL_BIT))
        print("\tTX Empty: " + str((status & self.TX_EMPTY_BIT) == self.TX_EMPTY_BIT))
        print("\tTX Full: " + str((status & self.TX_FULL_BIT) == self.TX_FULL_BIT))
        # NOTE(review): this tests bit 0x10 of the STATUS register; on AXI UART
        # Lite that bit reports "interrupts enabled" — confirm for this core.
        print("\tInterrupts Enabled: " + str((status & self.CTRL_BIT_EN_INT) == self.CTRL_BIT_EN_INT))

    def resetFIFOs(self):
        """Clear both FIFOs by writing the reset bit to the control register."""
        self._mmio.write(self.CONTROL_OFFSET, self.RST_FIFO_BIT)

    # Run this interrupt handler until all messages have been received
    # msg_size - Number of bytes to wait for (if 0, run forever)
    async def isr_recv(self, msg_size = 0):
        """Async ISR: await the interrupt, drain one byte per wakeup; returns
        the collected bytes once msg_size have arrived (never, if msg_size is 0)."""
        recvd_msg = []
        while True:
            await self.interrupt.wait()
            if self.rxAvail():
                recvd = self.readRxByte()
                recvd_msg.append(recvd)
                if msg_size > 0:
                    print(self.name + " isr received byte #" + str(len(recvd_msg)) + \
                          " of " + str(msg_size) + ": " + hex(recvd))
                    if (len(recvd_msg) == msg_size):
                        return recvd_msg
                else:
                    print(self.name + " isr received byte #" + str(len(recvd_msg)) + ": " + hex(recvd))
# This class is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2001-2016 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: BSD-3-Clause
class _Timeout(object):
"""\
Abstraction for timeout operations. Using time.monotonic() if available
or time.time() in all other cases.
The class can also be initialized with 0 or None, in order to support
non-blocking and fully blocking I/O operations. The attributes
is_non_blocking and is_infinite are set accordingly.
"""
if hasattr(time, 'monotonic'):
# Timeout implementation with time.monotonic(). This function is only
# supported by Python 3.3 and above. It returns a time in seconds
# (float) just as time.time(), but is not affected by system clock
# adjustments.
TIME = time.monotonic
else:
# Timeout implementation with time.time(). This is compatible with all
# Python versions but has issues if the clock is adjusted while the
# timeout is running.
TIME = time.time
def __init__(self, duration):
"""Initialize a timeout with given duration"""
self.is_infinite = (duration is None)
self.is_non_blocking = (duration == 0)
self.duration = duration
if duration is not None:
self.target_time = self.TIME() + duration
else:
self.target_time = None
def expired(self):
"""Return a boolean, telling if the timeout has expired"""
return self.target_time is not None and self.time_left() <= 0
def time_left(self):
"""Return how many seconds are left until the timeout expires"""
if self.is_non_blocking:
return 0
elif self.is_infinite:
return None
else:
delta = self.target_time - self.TIME()
if delta > self.duration:
# clock jumped, recalculate
self.target_time = self.TIME() + self.duration
return self.duration
else:
return max(0, delta)
def restart(self, duration):
"""\
Restart a timeout, only supported if a timeout was already set up
before.
"""
self.duration = duration
self.target_time = self.TIME() + duration
# -
# ## Download the static bitstream
#
# We first need to download the static or full bitstream before any partial bitstreams can be downloaded. Note that if the bitstream is not in the same directory as the notebook then the full path needs to be provided.
#
# +
from pynq import Overlay

# Locations of the static (full) bitstream and the partial bitstreams.
FULL_BITSTREAM_PATH = "/usr/local/lib/python3.6/dist-packages/prio/"
PARTIAL_BITSTREAM_PATH = "/usr/local/lib/python3.6/dist-packages/prio/partial/"

# Constructing the Overlay downloads the static design to the FPGA.
overlay = Overlay(FULL_BITSTREAM_PATH + "prio.bit")
# -
# ## Set up the reconfigurable region
# Notice that as with the full bitstream, the full path to the partial bitstream must be provided when it is located outside of the current notebook's directory.
#
# We will download partial bitstream and initialize each uart driver.
#
#
# +
# Program each reconfigurable region with a UART partial bitstream and
# wrap it in a UART driver instance.
overlay.pr_download("pr_1", PARTIAL_BITSTREAM_PATH + "pr_1_uart.bit")
uart1 = UART(overlay.pr_1)
overlay.pr_download("pr_3", PARTIAL_BITSTREAM_PATH + "pr_3_uart.bit")
uart3 = UART(overlay.pr_3)
# -
# ## Demo: Print UART Status
# Prints the status of both of the UART modules
# Clear any stale FIFO contents before querying status.
uart1.resetFIFOs()
uart3.resetFIFOs()
uart1.printStatus()
uart3.printStatus()
# ## Demo: Bidirectional UART Messages
#
# This cell will transmit a message back and forth between partial region 1 and partial region 3. After running the cell you will see output showing the message that was sent and the message that was received, going both directions.
#
# ** Hardware setup:** For this demo you should connect a wire between Arduino pin 8 (uart1 RX) and Arduino pin 35 (uart3 TX), and a wire between Arduino Pin 9 (uart1 TX) and Arduino pin 34 (uart3 RX). (The two wires should criss-cross.)
# +
import time

msg = [0xde, 0xad, 0xbe, 0xef]

# uart1 -> uart3
print("***** Sending message: " + str(msg) + "*****")
uart1.write(msg)
# Give the hardware time to shift the bytes out over the wire before
# polling the receiver.
time.sleep(1.0)
recvd = uart3.read(4)
if recvd == msg:
    print("Success: correct message received")
else:
    print("Failure: message received: (" + str(recvd) + ")")

# uart3 -> uart1
msg = [0xaa, 0xbb, 0x55, 0x33]
print("\n***** Sending message: " + str(msg) + "*****")
uart3.write(msg)
time.sleep(2.0)
recvd = uart1.read(4)
if recvd == msg:
    print("Success: correct message received")
else:
    print("Failure: message received: (" + str(recvd) + ")")
# -
# ## Demo: Bidirectional UART Messages with Interrupts
# This demo will repeat the demonstration from above, but this time it will utilize the interrupt functionality present in the PR regions. We will first redownload the partial bitstreams and reinitialize uart object, this time with interrupts enabled.
#
# ** Hardware setup:** _(Same as previous demo)_ For this demo you should connect a wire between Arduino pin 8 (uart1 RX) and Arduino pin 35 (uart3 TX), and a wire between Arduino Pin 9 (uart1 TX) and Arduino pin 34 (uart3 RX). (The two wires should criss-cross.)
# +
# Re-download the PR bitstreams and rebuild the drivers, this time passing
# each region's interrupt pin so the UART objects can await interrupts.
overlay.pr_download("pr_1", PARTIAL_BITSTREAM_PATH + "pr_1_uart.bit")
interrupt = overlay.interrupt_pins['pr_1/axi_uartlite_0/interrupt']['fullpath']
uart1 = UART(overlay.pr_1, interrupt)
overlay.pr_download("pr_3", PARTIAL_BITSTREAM_PATH + "pr_3_uart.bit")
interrupt = overlay.interrupt_pins['pr_3/axi_uartlite_0/interrupt']['fullpath']
uart3 = UART(overlay.pr_3, interrupt)
# +
import time
import asyncio

msg = [0xde, 0xad, 0xbe, 0xef]

uart1.resetFIFOs()
uart3.resetFIFOs()

# Send message from uart 1 to uart 3
print("***** Sending message: " + str(msg) + "*****")
uart3.enableInterrupts(True)
uart1.write(msg)
# Run the receiver coroutine until all len(msg) bytes have arrived.
recvd = asyncio.get_event_loop().run_until_complete(uart3.isr_recv(len(msg)))
if recvd == msg:
    print("Success: correct message received")
else:
    print("Failure: message received: (" + str(recvd) + ")")

# Send message from uart 3 to uart 1
print("\n***** Sending message: " + str(msg) + "*****")
uart1.enableInterrupts(True)
uart3.write(msg)
recvd = asyncio.get_event_loop().run_until_complete(uart1.isr_recv(len(msg)))
if recvd == msg:
    print("Success: correct message received")
else:
    print("Failure: message received: (" + str(recvd) + ")")
# -
| boards/Pynq-Z1/notebooks/prio/uart.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
nltk.download('vader_lexicon')
# +
# Sample hotel reviews to score with VADER, one sentence per entry.
hotel_rev=["Great Place to be when you are in bangalore.",
           "The place was being renovated when i visited so the seating was limited.",
           "Loved the ambience,Loved the food.",
           "The food is delicious but ot over the top.",
           "Service -Little Slow,Probably too many people.",
           "The place is not easy to locate",
           "Mushroom Friedrice was tasty"]

sid=SentimentIntensityAnalyzer()
for sentence in hotel_rev:
    print(sentence)
    # polarity_scores returns a dict with 'neg'/'neu'/'pos'/'compound'.
    ss=sid.polarity_scores(sentence)
    # Print all four scores on one line.
    # Bug fix: `end=''` was previously passed to str.format (where extra
    # keyword arguments are silently ignored) instead of to print(), so
    # each score ended up on its own line.
    for k in ss:
        print('{0}: {1},'.format(k,ss[k]), end='')
    print()
# -
| Sentiment Analysis using NLTK.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/hc07180011/testing-cv/blob/AdaptivePooling/feature_engineering.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="7UPaK4EYi8mc" outputId="73c82351-8144-4488-e223-b66d04cab230"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="kRRJWp-LzZ9j"
# ## For loading .ipynb files to colab
# + id="Wa_45yNpjSsi"
# !cp ROC-Keras.ipynb /content/drive/My\ Drive/
# + colab={"base_uri": "https://localhost:8080/"} id="XQPbhHRKjyMB" outputId="ddd9fe88-21ea-4d67-beb9-3eeb4efc89cb"
# !apt-get install -qq xattr -y
# + colab={"base_uri": "https://localhost:8080/"} id="d0rksfswjgGe" outputId="39714128-2aff-4eb1-da60-da359267aee0"
# !xattr -p 'user.drive.id' /content/drive/My\ Drive/ROC-Keras.ipynb
# + [markdown] id="9s4or1n6zgW9"
# ## Asymmetric chunks padding
# + colab={"base_uri": "https://localhost:8080/"} id="tgSV9AZXxuWv" outputId="bc3dd588-8681-4378-9c36-af85b094df57"
import numpy as np
chunks = 30
x = np.zeros((680,1,6,6,256))
def _get_chunk_array(input_arr: np.array, chunk_size: int) -> list:
i_pad = np.pad(input_arr,(0,chunk_size-len(input_arr)%chunk_size),'constant')
asymmetric_chunks = np.split(
i_pad,
list(range(
chunk_size,
input_arr.shape[0] + 1,
chunk_size
))
)
print(i_pad.shape)
print(len(i_pad)/chunk_size)
# TODO: should we take the last chunk?
return np.array(asymmetric_chunks).tolist()
batch = _get_chunk_array(x,chunks)
# + [markdown] id="eZYXqohONFrr"
# # LabelBox test
# + colab={"base_uri": "https://localhost:8080/"} id="LcKzx81D_XEb" outputId="bae41b37-62c5-41b9-817c-7d9098ff731d"
# !pip install -q --upgrade tensorflow-hub \
# scikit-learn \
# seaborn \
# "labelbox[data]"
# + id="hPlOd4l8_W2_"
import labelbox
import random
import numpy as np
from labelbox import Client
from labelbox.schema.data_row_metadata import (
DataRowMetadata,
DataRowMetadataField,
DeleteDataRowMetadata,
)
from sklearn.random_projection import GaussianRandomProjection
import tensorflow as tf
import seaborn as sns
import tensorflow_hub as hub
from datetime import datetime
from tqdm.notebook import tqdm
import requests
from pprint import pprint
# + id="liA8ZUPDNSmB"
# Add your api key
API_KEY = '<KEY>'
client = Client(api_key=API_KEY)
# + colab={"base_uri": "https://localhost:8080/"} id="pcKiAhekNqro" outputId="0b21eb7a-4b9a-43d9-f957-27c5a35a97ea"
import labelbox
# Enter your Labelbox API key here
LB_API_KEY = API_KEY
# Create Labelbox client
lb = labelbox.Client(api_key=LB_API_KEY)
# Create a new dataset
dataset = lb.create_dataset(name="embeddings")
# Create data payload
# External ID is recommended to identify your data_row via unique reference throughout Labelbox workflow.
my_data_rows = [
{
"row_data": "https://picsum.photos/200/300",
"external_id": "uid_01"},
{
"row_data": "https://picsum.photos/200/400",
"external_id": "uid_02"
}
]
# Bulk add data rows to the dataset
task = dataset.create_data_rows(my_data_rows)
task.wait_till_done()
print(task.status)
# + [markdown] id="2d1lP7on1aoo"
# ## TFRecord encoding
# + id="OBmuxc0zNqg0"
import tqdm
import imageio
import numpy as np
import tensorflow as tf
# TFRecord helpers
def _int64_feature(value):
    """Wrap a single int in a tf.train.Feature holding an Int64List."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def _bytes_feature(value):
    """Wrap a single bytes object in a tf.train.Feature holding a BytesList."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def mp4_2_numpy(filename):
    """Reads a video and returns its contents in matrix form.

    Args:
        filename (str): a path to a video

    Returns:
        np.array(): matrix contents of the video, shape (T, H, W, C)
    """
    reader = imageio.get_reader(filename, 'ffmpeg')
    # Collect every frame from the reader, then stack them into a single
    # (T, H, W, C) matrix.
    frames = [frame for frame in reader.iter_data()]
    return np.stack(frames)
def line2example(line):
    """Reads a line from the datafile and returns an
    associated TFRecords example containing the encoded data.

    Args:
        line (str): a line from the datafile
            (formatted as {filepath} {label})

    Returns:
        tf.train.SequenceExample: resulting TFRecords example
    """
    # extract information on the data example
    fn, label = line.split(' ')
    label = int(label)

    # read matrix data and save its shape
    # Bug fix: the helper defined above is `mp4_2_numpy`; the previous
    # call to `mp4file_2_numpy` referenced a name that does not exist and
    # raised NameError at runtime.
    video_data = mp4_2_numpy(fn)
    t, h, w, c = video_data.shape

    # save video as list of encoded frames using tensorflow's operation
    img_bytes = [tf.image.encode_jpeg(frame, format='rgb') for frame in video_data]
    with tf.Session() as sess:
        img_bytes = sess.run(img_bytes)

    sequence_dict = {}
    # create a feature for each encoded frame
    img_feats = [tf.train.Feature(bytes_list=tf.train.BytesList(value=[imgb])) for imgb in img_bytes]
    # save video frames as a FeatureList
    sequence_dict['video_frames'] = tf.train.FeatureList(feature=img_feats)

    # also store associated metadata (needed to reconstruct the tensor
    # shape at decode time)
    context_dict = {}
    context_dict['filename'] = _bytes_feature(fn.encode('utf-8'))
    context_dict['label'] = _int64_feature(label)
    context_dict['temporal'] = _int64_feature(t)
    context_dict['height'] = _int64_feature(h)
    context_dict['width'] = _int64_feature(w)
    context_dict['depth'] = _int64_feature(c)

    # combine list + context to create TFRecords example
    sequence_context = tf.train.Features(feature=context_dict)
    sequence_list = tf.train.FeatureLists(feature_list=sequence_dict)
    example = tf.train.SequenceExample(context=sequence_context, feature_lists=sequence_list)
    return example
def create_tfrecords(datafile_path, save_path):
    """Creates a TFRecords dataset from video files.

    Args:
        datafile_path (str): a path to the formatted datafiles (includes train.txt, etc.)
        save_path (str): where to save the .tfrecord files
    """
    # Local imports so this cell is self-contained:
    #  - pathlib is only imported by a *later* cell of this notebook, so
    #    relying on it here would NameError if cells run top-to-bottom;
    #  - the cell above ran `import tqdm`, which binds the *module*;
    #    calling tqdm(lines) on the module raises TypeError, so bind the
    #    tqdm callable explicitly here.
    import pathlib
    from tqdm import tqdm

    save_path = pathlib.Path(save_path)
    save_path.mkdir(exist_ok=True, parents=True)

    # create a TFRecord for each datasplit
    for dset_name in ['train.txt', 'test.txt', 'val.txt']:
        # read the lines of the datafile
        with open(datafile_path + dset_name, 'r') as f:
            lines = f.readlines()

        # write each example to a {split}.tfrecord (train.tfrecord, etc.)
        record_file = str(save_path/'{}.tfrecord'.format(dset_name[:-4]))
        with tf.python_io.TFRecordWriter(record_file) as writer:
            for line in tqdm(lines):
                example = line2example(line)
                writer.write(example.SerializeToString())
# + colab={"base_uri": "https://localhost:8080/", "height": 406} id="NqsKsmm01nIY" outputId="609065ce-f920-4451-cf9f-ad4c2c6fe2dd"
# Fixed spatial size every decoded frame is resized to.
IMAGE_SIZE_H,IMAGE_SIZE_W = 200,200

# define the features to decode
# (this schema must mirror what line2example wrote: per-frame JPEG bytes
# in a FeatureList plus scalar shape/label metadata in the context)
sequence_features = {
    'video_frames': tf.io.FixedLenSequenceFeature([], dtype=tf.string)
}
context_features = {
    'filename': tf.io.FixedLenFeature([], tf.string),
    'height': tf.io.FixedLenFeature([], tf.int64),
    'width': tf.io.FixedLenFeature([], tf.int64),
    'depth': tf.io.FixedLenFeature([], tf.int64),
    'temporal': tf.io.FixedLenFeature([], tf.int64),
    'label': tf.io.FixedLenFeature([], tf.int64),
}
@tf.function
def resize(img):
    # Resize one frame to the fixed (IMAGE_SIZE_H, IMAGE_SIZE_W) model input.
    return tf.image.resize(img, [IMAGE_SIZE_H, IMAGE_SIZE_W])
def _parse_example(example_proto):
    """Decode one serialized SequenceExample into (video_tensor, label).

    Decodes each JPEG frame inside a tf.while_loop (graph-mode), reshapes
    to the (T, H, W, C) recorded in the context features, casts to float32
    and resizes every frame to (IMAGE_SIZE_H, IMAGE_SIZE_W).
    """
    # Parse the input tf.train.Example using the dictionary above.
    context, sequence = tf.parse_single_sequence_example(example_proto,
                                context_features=context_features,
                                sequence_features=sequence_features)
    # extract the expected shape
    shape = (context['temporal'], context['height'], context['width'], context['depth'])
    ## the golden while loop ##
    # loop through the feature lists and decode each image separately
    # decoding the first video frame (seeds the accumulator tensor)
    video_data = tf.image.decode_image(tf.gather(sequence['video_frames'], [0])[0])
    video_data = tf.expand_dims(video_data, 0)
    i = tf.constant(1, dtype=tf.int32)
    # condition of when to stop / loop through every frame
    cond = lambda i, _: tf.less(i, tf.cast(context['temporal'], tf.int32))
    # reading + decoding the i-th image frame
    def body(i, video_data):
        # get the i-th index
        encoded_img = tf.gather(sequence['video_frames'], [i])
        # decode the image
        img_data = tf.image.decode_image(encoded_img[0])
        # append to list using tf operations
        video_data = tf.concat([video_data, [img_data]], 0)
        # update counter & new video_data
        return (tf.add(i, 1), video_data)
    # run the loop (use shape invariants since video_data changes size)
    _, video_data = tf.while_loop(cond, body, [i, video_data],
                        shape_invariants=[i.get_shape(), tf.TensorShape([None])])
    # use this to set the shape + dtype
    video_data = tf.reshape(video_data, shape)
    video_data = tf.cast(video_data, tf.float32)
    # resize each frame in video -- can apply different augmentations etc. like this
    video_data = tf.map_fn(resize, video_data, back_prop=False, parallel_iterations=10)
    label = context['label']
    # return the data example and its corresponding label
    return video_data, label
# create the dataset
# Bug fix: Dataset.map must be given the parsing *function*; previously
# the `sequence_features` dict was passed, which is not callable and
# fails as soon as the map transformation is traced.
dataset = tf.data.TFRecordDataset('train.tfrecord')\
            .map(_parse_example)\
            .batch(2)

iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()

# use standard tf training setup
with tf.Session() as sess:
    batch_vid, batch_label = sess.run(next_element)
    print(batch_vid.shape, batch_label)
# + id="RnG6ZZTL4KNJ"
import pathlib
import psutil
import imageio
import numpy as np
import tensorflow.compat.v1 as tf
import matplotlib.pyplot as plt
import multiprocessing as mp
from tqdm import tqdm
class TFRecords4Video():
    """Builds sharded TFRecord datasets from video files, encoding frames
    as JPEG bytes and writing shards in parallel worker processes."""

    def __init__(self, tfrecords_save_path, datafile_path, datafile_prefix,\
        fn2video):
        """Writes video TFRecords for a given dataset.

        Args:
            tfrecords_save_path (str): Path to save TFRecords to.
            datafile_path (str): Path to find train.txt, val.txt and test.txt
                {train, test, val}.txt file lines are formatted as:
                    file label
            datafile_prefix (str): Prefix path for files in train.txt. Paths
                will be given to fn2video function as 'datafile_prefix/file'
            fn2video (str or function): function which takes path and
                returns the video matrix with size (T, H, W, C).
                Already implemented use cases can use a string:
                - 'video' for paths which point to a video file (calls vid2numpy)
                - 'images' for paths which point to a folder of images
                  (calls images2numpy)
        """
        self.tfrecords_save_path = pathlib.Path(tfrecords_save_path)
        self.datafile_path = pathlib.Path(datafile_path)
        self.datafile_prefix = pathlib.Path(datafile_prefix)

        # create folders for TFRecord shards
        self.tfrecords_save_path.mkdir(parents=True, exist_ok=True)
        for split in ['train', 'val', 'test']:
            (self.tfrecords_save_path/split).mkdir(exist_ok=True)

        # function for fn -> video data (T, H, W, C)
        if fn2video == 'video':
            self.fn2video = vid2numpy
        elif fn2video == 'images':
            self.fn2video = images2numpy
        else:
            # allow custom parsing of video matrix
            self.fn2video = fn2video

    def extract_pathlabels(self, split):
        """Extracts absolute paths and labels from datafiles
        ({train, val, test}.txt) using
        self.datafile_path and self.datafile_prefix

        Args:
            split (str): split to get paths from
                must be a value in {'train', 'test', 'val'}

        Returns:
            tuple(list[pathlib.Path], list[int]): paths and labels from split's
                datafile
        """
        assert split in ['train', 'val', 'test'], "Invalid Split"
        splitfile_path = self.datafile_path/'{}.txt'.format(split)
        assert splitfile_path.exists(), "{} should exist.".format(splitfile_path)
        with open(splitfile_path, 'r') as f:
            lines = f.readlines()
        skip_counter = 0
        example_paths, example_labels = [], []
        for line in tqdm(lines):
            fn, label = line.split(' ')
            fn, label = self.datafile_prefix/fn, int(label)
            # missing files are skipped (and counted) rather than failing
            if pathlib.Path(fn).exists():
                example_paths.append(fn)
                example_labels.append(label)
            else:
                skip_counter += 1
        print('\nNumber of files not found: {} / {}'.format(skip_counter, len(lines)))
        if skip_counter > 0:
            print('Warning: Some frames were not found, here is an example path \
to debug: {}'.format(fn))
        return example_paths, example_labels

    def get_example(self, filename, label):
        """Returns a TFRecords example for the given video located at filename
        with the label label.

        Args:
            filename (pathlib.Path): path to create example from
            label (int): class label for video

        Returns:
            tf.train.SequenceExample: encoded tfrecord example
        """
        # read matrix data and save its shape
        data = self.fn2video(filename)
        t, h, w, c = data.shape
        # save video as list of encoded frames using tensorflow's operation
        img_bytes = [tf.image.encode_jpeg(frame, format='rgb') for frame in data]
        # NOTE(review): a tf.Session is opened per example, which is
        # expensive -- consider hoisting it out of the per-example path.
        with tf.Session() as sess:
            img_bytes = sess.run(img_bytes)
        sequence_dict = {}
        # create a feature for each encoded frame
        img_feats = [tf.train.Feature(bytes_list=\
            tf.train.BytesList(value=[imgb])) for imgb in img_bytes]
        # save video frames as a FeatureList
        sequence_dict['video_frames'] = tf.train.FeatureList(feature=img_feats)
        # also store associated meta-data
        context_dict = {}
        context_dict['filename'] = _bytes_feature(str(filename).encode('utf-8'))
        context_dict['label'] = _int64_feature(label)
        context_dict['temporal'] = _int64_feature(t)
        context_dict['height'] = _int64_feature(h)
        context_dict['width'] = _int64_feature(w)
        context_dict['depth'] = _int64_feature(c)
        # combine list + context to create TFRecords example
        sequence_context = tf.train.Features(feature=context_dict)
        sequence_list = tf.train.FeatureLists(feature_list=sequence_dict)
        example = tf.train.SequenceExample(context=sequence_context, \
            feature_lists=sequence_list)
        return example

    def pathlabels2records(self, paths, labels, split, max_bytes=1e9):
        """Creates TFRecord files in shards from the given path and labels

        Args:
            paths (list[pathlib.Path]): paths of videos to write to TFRecords
            labels (list[int]): labels associated videos
            split (str): datasplit to write to, one of: ('train', 'test', 'val')
            max_bytes (int, optional): approx max size of each shard in bytes.
                Defaults to 1e9.
        """
        assert split in ['train', 'val', 'test'], "Invalid Split"
        n_examples = len(paths)
        print('Splitting {} examples into {:.2f} GB shards'.format(\
            n_examples, max_bytes / 1e9))
        # number of shutdowns + restarts to maintain ~1sec/iteration of encoding
        # if factor = 1 it can go up to ~11sec/iteration (really slow)
        # larger value = faster single processes but more shutdown/startup time
        # smaller value = slower single process but less shutdown/startup time
        factor = 90
        n_processes = psutil.cpu_count()
        print('Using {} processes...'.format(n_processes))
        paths_split = np.array_split(paths, factor)
        labels_split = np.array_split(labels, factor)
        process_id = 0
        pbar = tqdm(total=factor)
        for (m_paths, m_labels) in zip(paths_split, labels_split):
            # split data into equal sized chunks for each process
            paths_further_split = np.array_split(m_paths, n_processes)
            labels_further_split = np.array_split(m_labels, n_processes)
            # multiprocess the writing
            pool = mp.Pool(n_processes)
            returns = []
            # NOTE(review): this loop rebinds the method parameters
            # `paths`/`labels`; harmless here because n_examples was
            # captured above, but worth renaming for clarity.
            for paths, labels in zip(paths_further_split, labels_further_split):
                r = pool.apply_async(process_write, args=(paths, labels, split, \
                    max_bytes, process_id, self))
                returns.append(r)
                process_id += 1
            pool.close()
            # use this to view errors in children (if any)
            for r in returns: r.get()
            pool.join()
            pbar.update(1)
        pbar.close()

    def split2records(self, split, max_bytes=1e9):
        """Creates TFRecords for a given data split

        Args:
            split (str): split to create for, in ['train', 'test', 'val']
            max_bytes (int, optional): approx max size of each shard in bytes.
                Defaults to 1e9.
        """
        print('Starting processing split {}.'.format(split))
        print('Extracting paths and labels...')
        paths, labels = self.extract_pathlabels(split)
        print('Writing to TFRecords...')
        self.pathlabels2records(paths, labels, split, max_bytes)
        print('Finished processing split {}.'.format(split))

    def create_tfrecords(self, max_bytes=1e9):
        """Creates TFRecords for all splits ('train', 'test', 'val')

        Args:
            max_bytes (int, optional): approx max size of each shard in bytes.
                Defaults to 1e9.
        """
        for split in ['train', 'test', 'val']:
            self.split2records(split, max_bytes)
# multiprocessing function
def process_write(paths, labels, split, max_bytes, process_id, tf4v):
    """Writes a list of video examples as a TFRecord.

    Args:
        paths (list[pathlib.Path]): paths to videos
        labels (list[int]): associative labels for the videos
        split (str): one of ['train', 'test', 'val']
        max_bytes (int): Number of bytes per shard
        process_id (int): id of processes
        tf4v (TFRecords4Video): video processing class

    Returns:
        int: 1 for success
    """
    shard_count, i = 0, 0
    n_examples = len(paths)
    while i != n_examples:
        # tf record file to write to
        tf_record_name = ('{}/{}-shard{}.tfrecord').format(split, \
            process_id, shard_count)
        record_file = tf4v.tfrecords_save_path/tf_record_name
        with tf.python_io.TFRecordWriter(str(record_file)) as writer:
            # split into approx. equal sized shards
            # NOTE(review): st_size is read while the writer may still be
            # buffering, so shard sizes are approximate -- confirm this is
            # acceptable for the max_bytes contract.
            while record_file.stat().st_size < max_bytes and i != n_examples:
                # write each example to tfrecord
                example_i = tf4v.get_example(paths[i], labels[i])
                writer.write(example_i.SerializeToString())
                # process next example
                i += 1
        # process a new shard
        shard_count += 1
    return 1
# TFRecords helpers
def _int64_feature(value):
    """Wrap a single int in a tf.train.Feature holding an Int64List."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def _bytes_feature(value):
    """Wrap a single bytes object in a tf.train.Feature holding a BytesList."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# file -> video data functions
def vid2numpy(filename):
    """Reads a video and returns its contents in matrix form.

    Args:
        filename (pathlib.Path): a path to a video

    Returns:
        np.array(): matrix contents of the video
    """
    # NOTE(review): near-duplicate of mp4_2_numpy defined earlier, except
    # this one accepts a pathlib.Path; consider consolidating.
    vid = imageio.get_reader(str(filename), 'ffmpeg')
    # read all of video frames resulting in a (T, H, W, C) matrix
    data = np.stack(list(vid.iter_data()))
    return data
def images2numpy(filename):
    """Reads a folder of images and returns its contents in matrix form.

    Args:
        filename (pathlib.Path): a path to a folder of frames
            which make up a video.

    Returns:
        np.array(): matrix contents of the video, shape (T, H, W, C)
    """
    # Bug fix: Path.iterdir() yields entries in arbitrary filesystem order,
    # which could scramble the temporal order of the frames.  Sort the
    # paths so ordering is deterministic (assumes frame files are named so
    # that lexicographic order == temporal order -- TODO confirm the
    # frame-naming scheme).
    data = np.stack([plt.imread(frame_path)
                     for frame_path in sorted(filename.iterdir())])
    return data
# + id="SvizUSne4hKA"
# NOTE(review): the third argument is datafile_prefix, so video paths will
# resolve as "_encoded/<file>" relative to the working directory --
# confirm this is the intended prefix directory.
encoded = TFRecords4Video(
    "/content/drive/MyDrive/google_cv/sample_augmentations",
    "/content/drive/MyDrive/google_cv/flicker-detection",
    "_encoded",
    "video")
# + colab={"base_uri": "https://localhost:8080/"} id="d1MF_yBc5l45" outputId="677026c3-b4c3-425f-d464-e384ca7a883a"
| flicker_detection/flicker_detection/colab/feature_engineering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reproducible Data Analysis with Jupyter
# <NAME>
# When done, linearize by using "restart and run all"
# - this helps check your work
# %matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
from jflow.data import get_data
df = get_data()
# #unix command
# !head fremont.csv
df.shape
df.head()
df.info()
df.describe()
df.info()
df.head()
df.plot()
# A little dense. These are hourly counts over 4 years.
#
# Resample, weekly...
#
# `resample`: Convenience method for __frequency conversion and resampling of time series__. Object must have a datetime-like index (DatetimeIndex, PeriodIndex, or TimedeltaIndex), or pass datetime-like values to the on or level keyword.
df.resample('W').sum().plot()
plt.style.available
df.resample('W').sum().plot()
# look for anual growth or trend. using daily sampling and rolling windows.
#
# each of the points is a rolling sum of the previous 365 days.
df.head()
df.resample('D').sum().rolling(365).sum().plot()
# Axes are a little suspect because they don't go to zero.
#
# Note how this is accomplished by setting 'ax'.
ax = df.resample('D').sum().rolling(365).sum().plot()
ax.set_ylim(0, None)
# Now the change does not appear as dramatic.
#
# Add a 'total' column
ax = df.resample('D').sum().rolling(365).sum().plot()
ax.set_ylim(0, None)
# Total is pretty consistent. About a million per year.
#
# Look at a trend within a group. Look at the time of day...
df.groupby(df.index.time).mean().plot()
# Eastbound peaks opposite of West.
df.groupby(df.index.month).mean().plot()
# Now, to see the 'whole' data
# +
pivoted = df.pivot_table('Total', index=df.index.time, columns=df.index.date)
pivoted.iloc[:5, :5] #look at the first 5x5 block
#each column is a day, each row is an hour
# -
#this gives you a line for each day in all of the years
#OFF during function and package debugging because LONG execution time
#pivoted.plot(legend=False)
#pull back the opacity to see density better...
#OFF during function and package debugging because LONG execution time
#pivoted.plot(legend=False, alpha=0.01)
# Hypothesis: peaks are weekdays. Broad usage (lower) arc is probably weekend.
# +
#see the docstring
# get_data?
# +
#see the source code
# get_data??
| EDA workflow 01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["remove_cell"]
# # Measurement Error Mitigation
# -
from qiskit import QuantumCircuit, QuantumRegister, Aer, transpile, assemble
from qiskit_textbook.tools import array_to_latex
# ### Introduction
#
# The effect of noise is to give us outputs that are not quite correct. The effect of noise that occurs throughout a computation will be quite complex in general, as one would have to consider how each gate transforms the effect of each error.
#
# A simpler form of noise is that occurring during final measurement. At this point, the only job remaining in the circuit is to extract a bit string as an output. For an $n$ qubit final measurement, this means extracting one of the $2^n$ possible $n$ bit strings. As a simple model of the noise in this process, we can imagine that the measurement first selects one of these outputs in a perfect and noiseless manner, and then noise subsequently causes this perfect output to be randomly perturbed before it is returned to the user.
#
# Given this model, it is very easy to determine exactly what the effects of measurement errors are. We can simply prepare each of the $2^n$ possible basis states, immediately measure them, and see what probability exists for each outcome.
#
# As an example, we will first create a simple noise model, which randomly flips each bit in an output with probability $p$.
# +
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors import pauli_error, depolarizing_error
def get_noise(p):
    """Return a NoiseModel that flips each measured bit with probability p."""
    # Bit-flip (X) with probability p, identity with probability 1 - p.
    bit_flip = pauli_error([('X',p), ('I', 1 - p)])
    model = NoiseModel()
    # measurement error is applied to measurements
    model.add_all_qubit_quantum_error(bit_flip, "measure")
    return model
# -
# Let's start with an instance of this in which each bit is flipped $1\%$ of the time.
noise_model = get_noise(0.01)
# Now we can test out its effects. Specifically, let's define a two qubit circuit and prepare the states $\left|00\right\rangle$, $\left|01\right\rangle$, $\left|10\right\rangle$ and $\left|11\right\rangle$. Without noise, these would lead to the definite outputs `'00'`, `'01'`, `'10'` and `'11'`, respectively. Let's see what happens with noise. Here, and in the rest of this section, the number of samples taken for each circuit will be `shots=10000`.
qasm_sim = Aer.get_backend('qasm_simulator')

# Prepare each two-qubit basis state and measure it under the noise model.
for state in ['00','01','10','11']:
    qc = QuantumCircuit(2,2)
    if state[0]=='1':
        qc.x(1)
    if state[1]=='1':
        qc.x(0)
    # Bug fix: measure *both* qubits.  Previously only qubit 0 was
    # measured, so classical bit 1 stayed 0 and the '10'/'11' preparations
    # could never produce the two-bit counts described in the text.
    qc.measure([0, 1], [0, 1])
    t_qc = transpile(qc, qasm_sim)
    qobj = assemble(t_qc)
    counts = qasm_sim.run(qobj, noise_model=noise_model, shots=10000).result().get_counts()
    print(state+' becomes', counts)
# Here we find that the correct output is certainly the most dominant. Ones that differ on only a single bit (such as `'01'`, `'10'` in the case that the correct output is `'00'` or `'11'`), occur around $1\%$ of the time. Those that differ on two bits occur only a handful of times in 10000 samples, if at all.
#
# So what about if we ran a circuit with this same noise model, and got a result like the following?
#
# ```
# {'10': 98, '11': 4884, '01': 111, '00': 4907}
# ```
#
# Here `'01'` and `'10'` occur for around $1\%$ of all samples. We know from our analysis of the basis states that such a result can be expected when these outcomes should in fact never occur, but instead the result should be something that differs from them by only one bit: `'00'` or `'11'`. When we look at the results for those two outcomes, we can see that they occur with roughly equal probability. We can therefore conclude that the initial state was not simply $\left|00\right\rangle$, or $\left|11\right\rangle$, but an equal superposition of the two. If true, this means that the result should have been something along the lines of:
#
# ```
# {'11': 4977, '00': 5023}
# ```
#
# Here is a circuit that produces results like this (up to statistical fluctuations).
# Prepare the Bell state (|00> + |11>)/sqrt(2) and sample it with noise.
qc = QuantumCircuit(2,2)
qc.h(0)
qc.cx(0,1)
# Bug fix: measure both qubits; with only qubit 0 measured, classical
# bit 1 is always 0 and the expected ~50/50 '00'/'11' split cannot appear.
qc.measure([0, 1], [0, 1])
t_qc = transpile(qc, qasm_sim)
qobj = assemble(t_qc)
counts = qasm_sim.run(qobj, noise_model=noise_model, shots=10000).result().get_counts()
print(counts)
# In this example we first looked at results for each of the definite basis states, and used these results to mitigate the effects of errors for a more general form of state. This is the basic principle behind measurement error mitigation.
#
# ### Error mitigation with linear algebra
#
# Now we just need to find a way to perform the mitigation algorithmically rather than manually. We will do this by describing the random process using matrices. For this we need to rewrite our counts dictionaries as column vectors. For example, the dictionary `{'10': 96, '11': 1, '01': 95, '00': 9808}` would be rewritten as
#
# $$
# C =
# \begin{pmatrix}
# 9808 \\
# 95 \\
# 96 \\
# 1
# \end{pmatrix}.
# $$
#
# Here the first element is that for `'00'`, the next is that for `'01'`, and so on.
#
# The information gathered from the basis states $\left|00\right\rangle$, $\left|01\right\rangle$, $\left|10\right\rangle$ and $\left|11\right\rangle$ can then be used to define a matrix, which rotates from an ideal set of counts to one affected by measurement noise. This is done by simply taking the counts dictionary for $\left|00\right\rangle$, normalizing it so that all elements sum to one, and then using it as the first column of the matrix. The next column is similarly defined by the counts dictionary obtained for $\left|01\right\rangle$, and so on.
#
# There will be statistical variations each time the circuit for each basis state is run. In the following, we will use the data obtained when this section was written, which was as follows.
#
# ```
# 00 becomes {'10': 96, '11': 1, '01': 95, '00': 9808}
# 01 becomes {'10': 2, '11': 103, '01': 9788, '00': 107}
# 10 becomes {'10': 9814, '11': 90, '01': 1, '00': 95}
# 11 becomes {'10': 87, '11': 9805, '01': 107, '00': 1}
# ```
#
# This gives us the following matrix.
#
# $$
# M =
# \begin{pmatrix}
# 0.9808&0.0107&0.0095&0.0001 \\
# 0.0095&0.9788&0.0001&0.0107 \\
# 0.0096&0.0002&0.9814&0.0087 \\
# 0.0001&0.0103&0.0090&0.9805
# \end{pmatrix}
# $$
#
# If we now take the vector describing the perfect results for a given state, applying this matrix gives us a good approximation of the results when measurement noise is present.
#
#
#
# $$ C_{noisy} = M ~ C_{ideal}$$
#
# .
#
# As an example, let's apply this process for the state $(\left|00\right\rangle+\left|11\right\rangle)/\sqrt{2}$,
#
# $$
# \begin{pmatrix}
# 0.9808&0.0107&0.0095&0.0001 \\
# 0.0095&0.9788&0.0001&0.0107 \\
# 0.0096&0.0002&0.9814&0.0087 \\
# 0.0001&0.0103&0.0090&0.9805
# \end{pmatrix}
# \begin{pmatrix}
# 5000 \\
# 0 \\
# 0 \\
# 5000
# \end{pmatrix}
# =
# \begin{pmatrix}
# 4904.5 \\
# 101 \\
# 91.5 \\
# 4903
# \end{pmatrix}.
# $$
#
# In code, we can express this as follows.
# +
import numpy as np

# Measurement-noise calibration matrix: column j holds the normalized
# counts observed when preparing basis state j (00, 01, 10, 11).
M = np.array([[0.9808, 0.0107, 0.0095, 0.0001],
              [0.0095, 0.9788, 0.0001, 0.0107],
              [0.0096, 0.0002, 0.9814, 0.0087],
              [0.0001, 0.0103, 0.0090, 0.9805]])

# Ideal counts (10000 shots) for the Bell state (|00> + |11>)/sqrt(2).
Cideal = np.array([[5000],
                   [0],
                   [0],
                   [5000]])

# Predicted noisy counts: C_noisy = M @ C_ideal.
Cnoisy = M @ Cideal
array_to_latex(Cnoisy, pretext="\\text{C}_\\text{noisy} = ")
# -
# Either way, the resulting counts found in $C_{noisy}$, for measuring the $(\left|00\right\rangle+\left|11\right\rangle)/\sqrt{2}$ with measurement noise, come out quite close to the actual data we found earlier. So this matrix method is indeed a good way of predicting noisy results given a knowledge of what the results should be.
#
# Unfortunately, this is the exact opposite of what we need. Instead of a way to transform ideal counts data into noisy data, we need a way to transform noisy data into ideal data. In linear algebra, we do this for a matrix $M$ by finding the inverse matrix $M^{-1}$,
#
#
#
# $$C_{ideal} = M^{-1} C_{noisy}.$$
#
#
#
# +
import scipy.linalg as la
# Same calibration matrix as above: column j = normalized counts for basis state j.
M = [[0.9808,0.0107,0.0095,0.0001],
     [0.0095,0.9788,0.0001,0.0107],
     [0.0096,0.0002,0.9814,0.0087],
     [0.0001,0.0103,0.0090,0.9805]]
# Invert M so noisy counts can be mapped back: C_ideal = M^-1 C_noisy.
Minv = la.inv(M)
array_to_latex(Minv)
# -
# Applying this inverse to $C_{noisy}$, we can obtain an approximation of the true counts.
# Recover an estimate of the ideal counts from the noisy ones: C_ideal ≈ M^-1 C_noisy.
Cmitigated = np.dot(Minv, Cnoisy)
array_to_latex(Cmitigated, pretext="\\text{C}_\\text{mitigated}=")
# Of course, counts should be integers, and so these values need to be rounded. This gives us a very nice result.
# $$
# C_{mitigated} =
# \begin{pmatrix}
# 5000 \\
# 0 \\
# 0 \\
# 5000
# \end{pmatrix}
# $$
#
# This is exactly the true result we desire. Our mitigation worked extremely well!
# ### Error mitigation in Qiskit
from qiskit.ignis.mitigation.measurement import complete_meas_cal, CompleteMeasFitter
# The process of measurement error mitigation can also be done using tools from Qiskit. This handles the collection of data for the basis states, the construction of the matrices and the calculation of the inverse. The latter can be done using the pseudo inverse, as we saw above. However, the default is an even more sophisticated method using least squares fitting.
#
# As an example, let's stick with doing error mitigation for a pair of qubits. For this we define a two qubit quantum register, and feed it into the function `complete_meas_cal`.
# Build measurement-calibration circuits for a 2-qubit register:
# one circuit per basis state |00>, |01>, |10>, |11>.
qr = QuantumRegister(2)
meas_calibs, state_labels = complete_meas_cal(qr=qr, circlabel='mcal')
# This creates a set of circuits to take measurements for each of the four basis states for two qubits: $\left|00\right\rangle$, $\left|01\right\rangle$, $\left|10\right\rangle$ and $\left|11\right\rangle$.
# Show each of the four calibration circuits (one per basis state).
for circuit in meas_calibs:
    print('Circuit',circuit.name)
    print(circuit)
    print()
# Let's now run these circuits without any noise present.
# Execute the calibration circuits without noise
# (10000 shots each on the ideal simulator).
t_qc = transpile(meas_calibs, qasm_sim)
qobj = assemble(t_qc, shots=10000)
cal_results = qasm_sim.run(qobj, shots=10000).result()
# With the results we can construct the calibration matrix, which we have been calling $M$.
# Fit the calibration matrix M from the calibration results and display it.
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')
array_to_latex(meas_fitter.cal_matrix)
# With no noise present, this is simply the identity matrix.
#
# Now let's create a noise model. And to make things interesting, let's have the errors be ten times more likely than before.
noise_model = get_noise(0.1)  # presumably 10% error probability; get_noise is defined earlier in the notebook
# Again we can run the circuits, and look at the calibration matrix, $M$.
# +
# Re-run the calibration circuits, now with the noise model attached,
# and refit the calibration matrix M.
t_qc = transpile(meas_calibs, qasm_sim)
qobj = assemble(t_qc, shots=10000)
cal_results = qasm_sim.run(qobj, noise_model=noise_model, shots=10000).result()
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')
array_to_latex(meas_fitter.cal_matrix)
# -
# This time we find a more interesting matrix, and one that we cannot use in the approach that we described earlier. Let's see how well we can mitigate for this noise. Again, let's use the Bell state $(\left|00\right\rangle+\left|11\right\rangle)/\sqrt{2}$ for our test.
# +
# Prepare the Bell state (|00> + |11>)/sqrt(2) and measure it under noise.
qc = QuantumCircuit(2,2)
qc.h(0)
qc.cx(0,1)
# Measure BOTH qubits: the 2-qubit calibration matrix expects two-bit
# outcomes ('00'..'11').  The original code only measured qubit 0, which
# would yield single-bit counts that the mitigation filter cannot use.
qc.measure([0, 1], [0, 1])

t_qc = transpile(qc, qasm_sim)
qobj = assemble(t_qc, shots=10000)
results = qasm_sim.run(qobj, noise_model=noise_model, shots=10000).result()
noisy_counts = results.get_counts()
print(noisy_counts)
# -
# In Qiskit we mitigate for the noise by creating a measurement filter object. Then, taking the results from above, we use this to calculate a mitigated set of counts. Qiskit returns this as a dictionary, so that the user doesn't need to use vectors themselves to get the result.
# +
# Get the filter object
# (it applies the least-squares / pseudo-inverse mitigation derived from M)
meas_filter = meas_fitter.filter

# Results with mitigation
mitigated_results = meas_filter.apply(results)
mitigated_counts = mitigated_results.get_counts()
# -
# To see the results most clearly, let's plot both the noisy and mitigated results.
from qiskit.visualization import plot_histogram
noisy_counts = results.get_counts()  # re-fetch the raw noisy counts for comparison
plot_histogram([noisy_counts, mitigated_counts], legend=['noisy', 'mitigated'])
# Here we have taken results for which almost $20\%$ of samples are in the wrong state, and turned it into an exact representation of what the true results should be. However, this example does have just two qubits with a simple noise model. For more qubits, and more complex noise models or data from real devices, the mitigation will have more of a challenge. Perhaps you might find methods that are better than those Qiskit uses!
# Record the Qiskit component versions this notebook was run with.
import qiskit
qiskit.__qiskit_version__
| content/ch-quantum-hardware/measurement-error-mitigation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # E-CEO Challenge #3 Evaluation
# ### Weights
#
# Define the weight of each wavelength
# Per-wavelength weights (band center in nm) used in the final weighted score.
w_412 = 0.56
w_443 = 0.73
w_490 = 0.71
w_510 = 0.36
w_560 = 0.01
# ### Run
#
# Provide the run information:
# * run id
# * run metalink containing the 3 by 3 kernel extractions
# * participant
run_id = '0000000-150625115710650-oozie-oozi-W'  # Oozie workflow id of the evaluated run
run_meta = 'http://sb-10-16-10-55.dev.terradue.int:50075/streamFile/ciop/run/participant-a/0000000-150625115710650-oozie-oozi-W/results.metalink?'  # metalink listing the 3x3 kernel extraction files
participant = 'participant-a'  # also used as the local download directory name
# ### Define all imports in a single cell
import glob
import pandas as pd
from scipy.stats.stats import pearsonr
import numpy
import math
# ### Manage run results
#
# Download the results and aggregate them in a single Pandas dataframe
# !curl $run_meta | aria2c -d $participant -M -
# +
path = participant  # directory the run results were downloaded into
allFiles = glob.glob(path + "/*.txt")

# Read every extraction file and stack all of them into one dataframe.
frame = pd.concat([pd.read_csv(file_, index_col=None, header=0)
                   for file_ in allFiles])
# -

# Total number of points extracted from the MERIS level 2 products.
len(frame.index)
# Number of points extracted from MERIS level 2 products
# ### Calculate Pearson
#
# For all three sites, AAOT, BOUSSOLE and MOBY, calculate the Pearson factor for each band.
#
# > Note AAOT does not have measurements for band @510
# #### AAOT site
insitu_path = './insitu/AAOT.csv'
insitu = pd.read_csv(insitu_path)

# Join satellite extractions for the AAOT site with the in-situ measurements.
frame_full = pd.DataFrame.merge(frame.query('Name == "AAOT"'), insitu, how='inner', on = ['Date', 'ORBIT'])
# +
# Pearson correlation per band.  `.iloc` replaces the deprecated-and-removed
# `.ix` indexer (positional integer indexing, identical behavior here).
frame_xxx = frame_full[['reflec_1_mean', 'rho_wn_IS_412']].dropna()
r_aaot_412 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @412")
# +
frame_xxx = frame_full[['reflec_2_mean', 'rho_wn_IS_443']].dropna()
r_aaot_443 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @443")
# +
frame_xxx = frame_full[['reflec_3_mean', 'rho_wn_IS_490']].dropna()
r_aaot_490 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @490")
# -

# AAOT has no in-situ measurements for band @510.
r_aaot_510 = 0
print("0 observations for band @510")
# +
frame_xxx = frame_full[['reflec_5_mean', 'rho_wn_IS_560']].dropna()
r_aaot_560 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @560")
# -
insitu_path = './insitu/BOUSS.csv'
insitu = pd.read_csv(insitu_path)

# Join satellite extractions for the BOUSSOLE site with the in-situ measurements.
frame_full = pd.DataFrame.merge(frame.query('Name == "BOUS"'), insitu, how='inner', on = ['Date', 'ORBIT'])
# +
# Pearson correlation per band.  `.iloc` replaces the deprecated-and-removed
# `.ix` indexer (positional integer indexing, identical behavior here).
frame_xxx = frame_full[['reflec_1_mean', 'rho_wn_IS_412']].dropna()
r_bous_412 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @412")
# +
frame_xxx = frame_full[['reflec_2_mean', 'rho_wn_IS_443']].dropna()
r_bous_443 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @443")
# +
frame_xxx = frame_full[['reflec_3_mean', 'rho_wn_IS_490']].dropna()
r_bous_490 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @490")
# +
frame_xxx = frame_full[['reflec_4_mean', 'rho_wn_IS_510']].dropna()
r_bous_510 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @510")
# +
frame_xxx = frame_full[['reflec_5_mean', 'rho_wn_IS_560']].dropna()
r_bous_560 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @560")
# -
insitu_path = './insitu/MOBY.csv'
insitu = pd.read_csv(insitu_path)

# Join satellite extractions for the MOBY site with the in-situ measurements.
frame_full = pd.DataFrame.merge(frame.query('Name == "MOBY"'), insitu, how='inner', on = ['Date', 'ORBIT'])
# +
# Pearson correlation per band.  `.iloc` replaces the deprecated-and-removed
# `.ix` indexer; the "@12" label typo is corrected to "@412".
frame_xxx = frame_full[['reflec_1_mean', 'rho_wn_IS_412']].dropna()
r_moby_412 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @412")
# +
frame_xxx = frame_full[['reflec_2_mean', 'rho_wn_IS_443']].dropna()
r_moby_443 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @443")
# +
frame_xxx = frame_full[['reflec_3_mean', 'rho_wn_IS_490']].dropna()
r_moby_490 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @490")
# +
frame_xxx = frame_full[['reflec_4_mean', 'rho_wn_IS_510']].dropna()
r_moby_510 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @510")
# +
frame_xxx = frame_full[['reflec_5_mean', 'rho_wn_IS_560']].dropna()
r_moby_560 = pearsonr(frame_xxx.iloc[:, 0], frame_xxx.iloc[:, 1])[0]
print(str(len(frame_xxx.index)) + " observations for band @560")
# -
# Per-band Pearson correlations for each site (AAOT, BOUSSOLE, MOBY).
[r_aaot_412, r_aaot_443, r_aaot_490, r_aaot_510, r_aaot_560]
[r_bous_412, r_bous_443, r_bous_490, r_bous_510, r_bous_560]
[r_moby_412, r_moby_443, r_moby_490, r_moby_510, r_moby_560]
# +
# Final score: each band's Pearson value is averaged over the three sites,
# then the band averages are combined as a weighted mean.
band_means = [
    numpy.mean([r_bous_412, r_moby_412, r_aaot_412]),
    numpy.mean([r_bous_443, r_moby_443, r_aaot_443]),
    numpy.mean([r_bous_490, r_moby_490, r_aaot_490]),
    numpy.mean([r_bous_510, r_moby_510, r_aaot_510]),
    numpy.mean([r_bous_560, r_moby_560, r_aaot_560]),
]
band_weights = [w_412, w_443, w_490, w_510, w_560]
r_final = sum(m * w for m, w in zip(band_means, band_weights)) / sum(band_weights)
r_final
# -
| evaluation-participant-a.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
def sample_spherical(npoints, ndim=3):
    """Return an (ndim, npoints) array of points uniform on the unit sphere.

    Draws standard-normal vectors and projects each column onto the unit
    sphere by dividing it by its Euclidean norm.
    """
    gaussian = np.random.randn(ndim, npoints)
    lengths = np.linalg.norm(gaussian, axis=0)
    return gaussian / lengths
INCIDENCE_ANGLE = np.pi / 6 # 30 deg half-angle used for the sampling/visibility cones below
# +
# Random points on a sphere surface
# Wireframe sphere: parametrize with angles phi/theta and take outer products.
phi = np.linspace(0, np.pi, 20)
theta = np.linspace(0, 2 * np.pi, 40)
x = np.outer(np.sin(theta), np.cos(phi))
y = np.outer(np.sin(theta), np.sin(phi))
z = np.outer(np.cos(theta), np.ones_like(phi))

# 100 random unit vectors from sample_spherical, drawn as red dots on the wireframe.
xi, yi, zi = sample_spherical(100)

fig, ax = plt.subplots(1, 1, subplot_kw={'projection':'3d', 'aspect': 'auto'})
ax.plot_wireframe(x, y, z, color='k', rstride=1, cstride=1)
ax.scatter(xi, yi, zi, s=100, c='r', zorder=10);
# +
# Random points within a sphere
rng = np.random.default_rng()
n = 50   # number of sampled points
R = 2    # ball radius
phi = rng.uniform(0, 2 * np.pi, n)
# NOTE(review): costheta is limited to [-cos(INCIDENCE_ANGLE), cos(INCIDENCE_ANGLE)],
# so the points come from a band of the ball rather than the whole interior —
# confirm this restriction is intended.
costheta = rng.uniform(-np.cos(INCIDENCE_ANGLE), np.cos(INCIDENCE_ANGLE), n)
u = rng.uniform(0, 1, n)
theta = np.arccos(costheta)
r = R * np.cbrt(u)  # cube root makes radii uniform in volume
# Spherical -> Cartesian conversion.
x = r * np.sin(theta) * np.cos(phi)
y = r * np.sin(theta) * np.sin(phi)
z = r * np.cos(theta)
an = np.linspace(0, 2 * np.pi, 100)  # unused here; presumably left over from a circle plot
fig, ax = plt.subplots(1, 1, subplot_kw={'projection':'3d', 'aspect': 'auto'})
ax.scatter(x, y, z, s=100, c='r', zorder=10);
# -
import numpy as np
from numpy.linalg import norm
from Viewpoints import load_mesh, TANK
from scipy.spatial.transform import Rotation as R
# Load Object model
mesh_model, facets, incidence_normals, mesh_centers, n = load_mesh(TANK)  # project helper; loads the tank mesh and per-facet data
incidence_angle=np.pi/6; dmin=.1; dmax=2  # cone half-angle and viewpoint distance bounds — units presumably meters, TODO confirm
unit_norm = mesh_model.normals / norm(mesh_model.normals, axis=1)[:, None]  # facet normals normalized to unit length
# +
# Generate N = 500 candidate viewpoints uniformly inside a cone opening
# along +z with half-angle `incidence_angle`, at distances in [dmin, dmax].
# BUG FIX: the original set `mu = 500` but then used an undefined name `N`
# everywhere (including in later cells); define N directly.
N = 500  # number of candidate viewpoints
cone_points = np.zeros((N, 3))
rng = np.random.default_rng()
width = np.tan(incidence_angle) * dmax  # half-width of the bounding box at dmax
center = np.array((0, 0, 1))  # cone axis (+z)
i = 0
while i < N:
    z = rng.uniform(dmin, dmax)
    x, y = rng.uniform(-width, width, 2)
    point = np.array((x, y, z))
    # Rejection sampling: keep only points whose angle from +z is inside the cone.
    theta = np.arccos(np.dot(center, point) / np.linalg.norm(point))
    if theta < incidence_angle:
        cone_points[i] = point
        i = i + 1
# + tags=[]
# For all points in the mesh calculate a rectangular region to sample points from
# Placeholder: per-point sampling region computation is not implemented yet.
for points, normal in zip(mesh_model.points, unit_norm):
    pass # TODO
# -

# Work through the first mesh point / normal pair as a concrete example.
points = mesh_model.points.reshape(-1, 3)
p = points[0]
normal = unit_norm[0]
print(p, normal)
# +
# Cross product of z = [0,0,1] with unit normal for this point will give axis of rotation
z = np.array((0, 0, 1))
dir = np.dot(z, normal)  # cos(angle) between +z and the facet normal
theta = np.arccos(dir)
rot_vec = np.cross(z, normal)  # rotation axis; zero vector when normal is parallel to ±z
if norm(rot_vec) == 0: # case for 0 cross product, set rotation axis as x-axis
    rot_vec = np.array((1, 0, 0))
rot_vec = rot_vec / norm(rot_vec) * theta  # axis-angle (rotvec) of magnitude theta
rotation = R.from_rotvec(rot_vec)
# Rotate the +z-aligned cone of candidate viewpoints onto the facet normal.
rotated_cone_points = rotation.apply(cone_points)
# -
# Assert all rotated points are within 30deg of facet normal
# Sanity check: every rotated viewpoint direction lies within 30 deg of the facet normal.
r_unit_dir = rotated_cone_points / norm(rotated_cone_points, axis=1)[:, np.newaxis]
dot_r = np.dot(r_unit_dir, normal)
assert(np.all(np.arccos(dot_r) * 180 / np.pi < 30))
# +
# Now we have a set of viewpoints that are all in the visible space for this facet point.
# From each viewpoint, calculate the number of points this viewpoint can see
# TODO: for each viewpoint in rotated_cone_points
viewpoint = rotated_cone_points[0]
viewpoint_dir = p - viewpoint
viewpoint_dir = viewpoint_dir / norm(viewpoint_dir)  # unit vector: viewpoint -> facet point

# Filter mesh points within the viewpoint's field of view.
fov_angle = np.pi / 4  # 45 deg half-angle, i.e. a 90 deg full field of view
view_vectors = mesh_model.points.reshape(-1, 3) - viewpoint
view_vectors = view_vectors / norm(view_vectors, axis=1)[:, np.newaxis]
fov_theta = np.arccos(np.dot(view_vectors, viewpoint_dir))
fov_visible = fov_theta < fov_angle

# Filter facets whose normals face the viewpoint within the incidence cone.
incidence_theta = np.arccos(np.dot(unit_norm, viewpoint_dir))
incidence_visible = incidence_theta < incidence_angle
incidence_visible = np.array([val for val in incidence_visible for _ in range(3)]) # expand per-facet mask to match per-point fov_visible shape
# A point is visible only when it passes BOTH filters.
# BUG FIX: the original `fov_visible.shape and incidence_visible` ignored the
# FOV mask entirely — a non-empty shape tuple is always truthy, so the
# expression just evaluated to incidence_visible.  Use elementwise AND.
visible_points = fov_visible & incidence_visible
visible_point_indices = np.argwhere(visible_points).squeeze()
# TODO: ray-tracing to determine if there's a facet in front of this line of sight
# TODO: add other constraints on the viewpoint location like height
# +
# For this viewpoint, store the number of visible points and their indices
# TODO: initialize this somewhere logical, iterate through all randomly sampled viewpoints
viewpoint_visible_point_indices = [None] * N  # adjacency list, indexed like rotated_cone_points
viewpoint_visible_point_count = np.zeros(N)
viewpoint_visible_point_indices[0] = visible_point_indices
viewpoint_visible_point_count[0] = visible_point_indices.shape[0]

# Track which mesh points have been seen by at least one viewpoint so far.
# BUG FIX: the original `np.zeros()` raises TypeError (a shape is required).
# A boolean flag per mesh point is presumably what was intended — TODO confirm.
points_viewed = np.zeros(len(points), dtype=bool)
# -
# Scratch cell: quick numpy concatenation check.
a = np.arange(3)
b = np.arange(3)
np.concatenate((a, b))
mesh_model  # display the loaded mesh object's summary
| output/MyNotes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="78d22595-32bc-4e8c-ae48-9315bdc4165f"
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D, Conv2DTranspose
from keras.models import Sequential, Model
from keras.layers.advanced_activations import LeakyReLU
from tensorflow.keras.optimizers import Adam, RMSprop
import tensorflow.keras.activations as activations
import tensorflow as tf
from keras.preprocessing import image
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
from google.colab import drive
import os
import time
# + id="bOqEj_jklWFL"
drive.mount('/gdrive', force_remount=True)
# + id="8IKIhphxta18"
# %cd /gdrive/My\ Drive/IU_MSDS/2021Fall_E533_DL/DLS\ Final\ Project\ /CUB_200_Numpy/
os.listdir()
# + id="96bc0500-ef81-4697-b59b-1f0e4b335da6"
desired_image_shape = (64, 64)  # NOTE(review): defined but never used below
# Load the pre-packed CUB-200 image array and scale pixel values to [0, 1].
X_train = np.load('CUB200_img_64_c.npy')
X_train = X_train.astype('float32') / 255.0
np.random.shuffle(X_train)
print(f'\n{X_train.shape[0]} Images of the size {X_train.shape[1]}x{X_train.shape[2]} with {X_train.shape[3]} channels.')
# + id="ea823047-d5f1-4532-88a7-afc079b2b200"
# Preview a 5x5 grid of training images.
plt.figure(1, figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i+1)
    plt.imshow(X_train[i])
    plt.axis('off')
plt.show()
# + id="129e7860-1bcb-4c9c-a073-274487832ce7"
class GAN():
    """Deep-convolutional GAN for 64x64 RGB images (CUB-200 birds).

    Wraps three Keras models:
      * ``discriminator`` -- image -> real/fake probability,
      * ``generator``     -- latent noise -> image,
      * ``combined``      -- noise -> generator -> (frozen) discriminator,
        used to train the generator.
    Reads the module-level ``X_train`` array for the image geometry.
    """

    def __init__(self):
        # Image geometry from the training data; latent_dim is the noise size.
        self.img_rows, self.img_cols, self.channels, self.latent_dim = X_train.shape[1], X_train.shape[2], X_train.shape[3], 100
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        # Separate optimizers: the discriminator trains with a larger step.
        optimizer1 = RMSprop(learning_rate=0.0008, clipvalue=1.0, decay=1e-8)
        optimizer2 = RMSprop(learning_rate=0.0001, clipvalue=1.0, decay=1e-8)
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy', optimizer=optimizer1, metrics=['accuracy'])
        self.generator = self.build_generator()
        # Combined model: noise -> generated image -> discriminator verdict.
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)
        validity = self.discriminator(img)
        self.combined = Model(z, validity)
        # Freeze the discriminator before compiling the combined model, so
        # combined.train_on_batch updates only the generator's weights.
        self.discriminator.trainable = False
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer2, metrics=['accuracy'])

    def build_generator(self):
        """Return the generator: (latent_dim,) noise -> (64, 64, 3) image.

        Upsamples 4x4x512 to 64x64x3 through four strided transposed
        convolutions; the final tanh puts outputs in [-1, 1].
        NOTE(review): X_train is scaled to [0, 1] while tanh outputs
        [-1, 1] -- confirm the intended pixel scaling.
        """
        model = Sequential()
        model.add(Input(shape=(self.latent_dim,)))
        model.add(Dense(512*4*4, use_bias=False))
        model.add(Reshape((4,4,512)))
        model.add(BatchNormalization(momentum=0.5))
        # BUG FIX: the original called ReLU(), which is never imported in this
        # notebook and raises NameError; Activation('relu') (imported above)
        # applies the same function.
        model.add(Activation('relu'))
        model.add(Conv2DTranspose(256, 5, strides=2, padding='same', use_bias=False))
        model.add(BatchNormalization(momentum=0.5))
        model.add(Activation('relu'))
        model.add(Conv2DTranspose(128, 5, strides=2, padding='same', use_bias=False))
        model.add(BatchNormalization(momentum=0.5))
        model.add(Activation('relu'))
        model.add(Conv2DTranspose(64, 5, strides=2, padding='same', use_bias=False))
        model.add(BatchNormalization(momentum=0.5))
        model.add(Activation('relu'))
        model.add(Conv2DTranspose(3, 5, strides=2, activation='tanh', padding='same', use_bias=False))
        return model

    def build_discriminator(self):
        """Return the discriminator: (64, 64, 3) image -> sigmoid probability."""
        model=Sequential()
        model.add(Input(shape=self.img_shape))
        model.add(Conv2D(64, 5, 2))
        model.add(BatchNormalization(momentum=0.5))
        model.add(LeakyReLU(0.2))
        model.add(Conv2D(128, 5, 2))
        model.add(BatchNormalization(momentum=0.5))
        model.add(LeakyReLU(0.2))
        model.add(Conv2D(256, 5, 2))
        model.add(BatchNormalization(momentum=0.5))
        model.add(LeakyReLU(0.2))
        model.add(Conv2D(512, 5, 2))
        model.add(BatchNormalization(momentum=0.5))
        model.add(LeakyReLU(0.2))
        model.add(Flatten())
        model.add(Dropout(0.3))
        model.add(Dense(1, activation='sigmoid'))
        return model

    def train(self, X_train, epochs=30, batch_size=64, sample_interval=5):
        """Alternate discriminator / generator updates over mini-batches.

        Returns ``(d_loss, g_loss)``: per-epoch mean losses of each network.
        Every ``sample_interval`` epochs, prints progress and shows samples.
        """
        real_labels = np.ones((batch_size, 1))
        fake_labels = np.zeros((batch_size, 1))
        d_loss = []
        g_loss = []
        for epoch in range(epochs):
            tic = time.time()
            d_loss_epoch = []
            g_loss_epoch = []
            for ite in range(0, X_train.shape[0], batch_size):
                # NOTE(review): the final slice may be shorter than batch_size
                # while real_labels/fake_labels are fixed-size -- confirm the
                # dataset length is a multiple of batch_size.
                real_images = X_train[ite:ite+batch_size]
                noise1 = np.random.normal(0, 1, (batch_size, self.latent_dim))
                fake_images = self.generator.predict(noise1)
                # Shuffle real and fake samples together for the discriminator step.
                all_images = np.vstack([real_images, fake_images])
                all_labels = np.vstack([real_labels, fake_labels])
                shuffler = np.random.permutation(all_images.shape[0])
                all_images = all_images[shuffler]
                all_labels = all_labels[shuffler]
                d_loss_epoch.append(self.discriminator.train_on_batch(all_images, all_labels)[0])
                # Generator step: push the discriminator toward labeling fakes as real.
                noise2 = np.random.normal(0, 1, (batch_size, self.latent_dim))
                g_loss_epoch.append(self.combined.train_on_batch(noise2, real_labels)[0])
            g_loss.append(sum(g_loss_epoch)/len(g_loss_epoch))
            d_loss.append(sum(d_loss_epoch)/len(d_loss_epoch))
            toc = time.time()
            if epoch % sample_interval == 0:
                print(f'Epoch {epoch} - D loss: {d_loss[-1]:.5f}, G_loss: {g_loss[-1]:.5f}, Epoch Time: {round((toc - tic) / 60.0, 3)} minutes')
                self.sample_images(epoch)
                print()
        return d_loss, g_loss

    def sample_images(self, epoch):
        """Display a 3x3 grid of freshly generated images."""
        r, c = 3, 3
        fig, axs = plt.subplots(r, c, figsize=(6, 6))
        for i in range(r):
            for j in range(c):
                noise = np.random.normal(0, 1, (1, self.latent_dim))
                gen_img = self.generator.predict(noise)[0]
                gen_img = image.array_to_img(gen_img*255., scale=False)
                axs[i,j].imshow(gen_img)
                axs[i,j].axis('off')
        plt.show()
# + id="c0db4705-cbec-44c7-a139-c339f961353d"
# Build the GAN and train for 200 epochs (batch size 32, sample every epoch).
g = GAN()
# + id="8423a6c2-1dcd-4199-87d1-0e527d398360"
d_loss, g_loss = g.train(X_train, 200, 32, 1)
# + id="ZiTczHBt6KSt"
# %cd /gdrive/My\ Drive/IU_MSDS/2021Fall_E533_DL/DLS\ Final\ Project\ /Project_Notebooks/Project_Models/
# + id="ea0227ba-f0a1-4895-a7ac-f24fce46086f"
# Persist the trained weights (the architectures are rebuilt by GAN()).
g.generator.save_weights('generator_dconv_64.h5')
g.discriminator.save_weights('discriminator_dconv_64.h5')
g.combined.save_weights('combined_dconv_64.h5')
# + id="e605ceb6-4261-4469-805e-469e63a6e2a3"
# Rebuild the model, load the saved generator weights and sample new images.
g_test = GAN()
gen = g_test.generator
gen.load_weights('generator_dconv_64.h5')

# 3x3 grid of generated samples.
r, c = 3, 3
fig, axs = plt.subplots(r, c, figsize=(6, 6))
for i in range(r):
    for j in range(c):
        noise = np.random.normal(0, 1, (1, 100))
        gen_img = gen.predict(noise)[0]
        gen_img = image.array_to_img(gen_img*255., scale=False)
        axs[i,j].imshow(gen_img)
        axs[i,j].axis('off')
plt.show()

# One additional standalone sample.
noise = np.random.normal(0, 1, (1, 100))
gen_img = gen.predict(noise)[0]
gen_img = image.array_to_img(gen_img*255., scale=False)
plt.imshow(gen_img)
plt.show()
# + id="2154ebb6-a60f-4e55-9bbc-808a6d0fe4c4"
| dconv_64.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/penguinian/tutorials/blob/master/app_jupyter.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="NdR7tRKf0BZr"
#hide
# !pip install -Uqq fastbook
import fastbook
fastbook.setup_book()
# + id="BwQooyx60BZz"
from fastbook import *
# + id="BHJw3sD-0BZ0" active=""
# [appendix]
# [role="Jupyter notebook 101"]
# + [markdown] id="bILEpJn70BZ0"
# # Appendix: Jupyter Notebook 101
# + [markdown] id="KGWuUN9D0BZ0"
# You can read this tutorial in the book, but we strongly suggest reading it in a (yes, you guessed it) Jupyter Notebook. This way, you will be able to actually *try* the different commands we will introduce here. If you followed one of our tutorials in the previous section, you should have been left in the course folder. Just click on `nbs` then `dl1` and you should find the tutorial named `00_notebook_tutorial`. Click on it to open a new tab and you'll be ready to go.
#
# If you are on your personal machine, clone the course repository and navigate inside before following the same steps.
#
# + [markdown] id="uHR_JLQj0BZ1"
# ## Introduction
# + [markdown] id="95CXOzXw0BZ1"
# Let's build up from the basics: what is a Jupyter Notebook? Well, we wrote this book using Jupyter Notebooks. A notebook is a document made of cells. You can write in some of them (markdown cells) or you can perform calculations in Python (code cells) and run them like this:
# + id="um-g6IOB0BZ1" outputId="14a3df1b-4f44-4cb0-c430-e2b1f080774b"
1+1  # a code cell displays the value of its last expression
# + [markdown] id="XF4iPORQ0BZ3"
# Cool, huh? This combination of prose and code makes Jupyter Notebook ideal for experimentation: we can see the rationale for each experiment, the code, and the results in one comprehensive document.
#
# Other renowned institutions in academia and industry use Jupyter Notebook, including Google, Microsoft, IBM, Bloomberg, Berkeley and NASA among others. Even Nobel-winning economists [use Jupyter Notebooks](https://paulromer.net/jupyter-mathematica-and-the-future-of-the-research-paper/) for their experiments and some suggest that Jupyter Notebooks will be the [new format for research papers](https://www.theatlantic.com/science/archive/2018/04/the-scientific-paper-is-obsolete/556676/).
#
# + [markdown] id="52vgMrPN0BZ3"
# ## Writing
# + [markdown] id="SH5oMK_H0BZ3"
# A type of cell in which you can write text is called a _Markdown cell_. [_Markdown_](https://en.wikipedia.org/wiki/Markdown) is a very popular markup language. To specify that a cell is Markdown you need to click in the drop-down menu in the toolbar and select Markdown.
#
# Click on the '+' button on the left and select Markdown from the toolbar. Now you can type your first Markdown cell. Write 'My first markdown cell' and press run.
#
# 
#
# You should see something like this:
# + [markdown] id="RgE5ofvb0BZ4"
# My first markdown cell
# + [markdown] id="lMC0WFht0BZ4"
# Now try making your first _Code_ cell: follow the same steps as before but don't change the cell type (when you add a cell its default type is _Code_). Type something like 3/2. You should see '1.5' as output.
# + id="7o-T-D9a0BZ4" outputId="9e89d0fc-b338-42ac-f365-74fe8c3111d3"
3/2  # true division: evaluates to the float 1.5
# + [markdown] id="gmwFaaLa0BZ5"
# ## Modes
# + [markdown] id="CDHJX0t00BZ5"
# If you made a mistake in your *Markdown* cell and you have already run it, you will notice that you cannot edit it just by clicking on it. This is because you are in **Command Mode**. Jupyter Notebooks have two distinct modes:
#
# - Edit Mode:: Allows you to edit a cell's content.
#
# - Command Mode:: Allows you to edit the notebook as a whole and use keyboard shortcuts but not edit a cell's content.
#
# You can toggle between these two by either pressing <kbd>ESC</kbd> and <kbd>Enter</kbd> or clicking outside a cell or inside it (you need to double click if it's a Markdown cell). You can always tell which mode you're on: the current cell will have a green border in **Edit Mode** and a blue border in **Command Mode**. Try it!
#
# + [markdown] id="thS8ImPZ0BZ5"
# ## Other Important Considerations
# + [markdown] id="TyXNTr8x0BZ5"
# Your notebook is autosaved every 120 seconds. If you want to manually save it you can just press the save button on the upper left corner or press <kbd>s</kbd> in **Command Mode**.
#
# 
#
# To know if your *kernel* (the Python engine executing your instructions behind the scenes) is computing or not, you can check the dot in your upper right corner. If the dot is full, it means that the kernel is working. If not, it is idle. You can place the mouse on it and the state of the kernel will be displayed.
#
# 
#
# There are a couple of shortcuts you must know about which we use **all** the time (always in **Command Mode**). These are:
#
# - Shift+Enter:: Run the code or markdown on a cell
#
# - Up Arrow+Down Arrow:: Toggle across cells
#
# - b:: Create new cell
#
# - 0+0:: Reset Kernel
#
# You can find more shortcuts by typing <kbd>h</kbd> (for help).
#
# You may need to use a terminal in a Jupyter Notebook environment (for example to git pull on a repository). That is very easy to do: just press 'New' in your Home directory and 'Terminal'. Don't know how to use the Terminal? We made a tutorial for that as well. You can find it [here](https://course.fast.ai/terminal_tutorial.html).
#
# 
# + [markdown] id="KkPHvdV30BZ6"
# That's it. This is all you need to know to use Jupyter Notebooks. That said, we have more tips and tricks below, so don't jump to the next section just yet.
#
# + [markdown] id="FuD7PU750BZ7"
# ## Markdown Formatting
#
# + [markdown] id="_3DD-PM30BZ7"
# ### Italics, Bold, Strikethrough, Inline, Blockquotes and Links
# + [markdown] id="QOrLrNhH0BZ7"
# The five most important concepts to format your code appropriately when using Markdown are:
#
# - *Italics*:: Surround your text with \_ or \*.
#
# - **Bold**:: Surround your text with \__ or \**.
#
# - `inline`:: Surround your text with \`.
#
# - blockquote:: Place \> before your text.
#
# - [Links](http://course-v3.fast.ai/):: Surround the text you want to link with \[\] and place the link adjacent to the text, surrounded with ().
#
#
# + [markdown] id="z-OruhBb0BZ7"
# ### Headings
# + [markdown] id="QXguuWHG0BZ7"
# Notice that including a hashtag before the text in a markdown cell makes the text a heading. The number of hashtags you include will determine the priority of the header (# is level one, ## is level two, ### is level three and #### is level four). We will add three new cells with the + button on the left to see how every level of heading looks.
#
# In the notebook, double click on some headings and find out what level they are!
#
# + [markdown] id="o-NBNJZ30BZ8"
# ### Lists
# + [markdown] id="vXAcUMyj0BZ8"
# There are three types of lists in markdown.
#
# Ordered list:
#
# 1. Step 1
# 2. Step 1B
# 3. Step 3
#
# Unordered list
#
# * learning rate
# * cycle length
# * weight decay
#
# Task list
#
# - [x] Learn Jupyter Notebooks
# - [x] Writing
# - [x] Modes
# - [x] Other Considerations
# - [ ] Change the world
#
# In the notebook, double click on them to see how they are built!
#
# + [markdown] id="DEcvmjsF0BZ8"
# ## Code Capabilities
# + [markdown] id="9uH62x9B0BZ9"
# **Code** cells are different than **Markdown** cells in that they have an output cell. This means that we can _keep_ the results of our code within the notebook and share them. Let's say we want to show a graph that explains the result of an experiment. We can just run the necessary cells and save the notebook. The output will be there when we open it again! Try it out by running the next four cells.
# + id="OpVgGOH_0BZ9"
# Import necessary libraries
from fastai.vision.all import *
import matplotlib.pyplot as plt
# + id="yJVn2lSd0BZ9"
from PIL import Image
# + id="QbjjMBqg0BZ9" outputId="cc24582d-bc36-4a08-cad2-82efd6687b00"
# Tiny arithmetic demo: a cell's last bare expression is echoed as its output.
a = 1
b = a + 1
c = b + a + 1
d = c + b + a + 1
a, b, c ,d
# + id="qqcGGsPE0BZ-" outputId="8109a09a-c59b-45f0-d7ef-e40ffd552432"
# Plot the four values; the figure is kept in the notebook's output cell when saved.
plt.plot([a,b,c,d])
plt.show()
# + [markdown] id="Qr5NiEEF0BZ-"
# We can also print images while experimenting.
# + id="UyxPsOW80BZ-" outputId="36b52bfb-7c03-463a-ad46-358f3b64013a"
# image_cat() comes from fastai's star import above — presumably returns a sample-image path; TODO confirm.
Image.open(image_cat())
# + [markdown] id="E97uzvjB0BZ-"
# ## Running the App Locally
# + [markdown] id="0w88n-8V0BZ_"
# You may be running Jupyter Notebook from an interactive coding environment like Gradient, Sagemaker or Salamander. You can also run a Jupyter Notebook server from your local computer. What's more, if you have installed Anaconda you don't even need to install Jupyter (if not, just `pip install jupyter`).
#
# You just need to run `jupyter notebook` in your terminal. Remember to run it from a folder that contains all the folders/files you will want to access. You will be able to open, view, and edit files located within the directory in which you run this command but not files in parent directories.
#
# If a browser tab does not open automatically once you run the command, you should CTRL+CLICK the link starting with 'http://localhost:' and this will open a new tab in your default browser.
#
# + [markdown] id="qdwt9j9G0BZ_"
# ## Creating a Notebook
# + [markdown] id="MPtICwy70BZ_"
# Now that you have your own Jupyter Notebook server running, you will probably want to write your own notebook. Click on 'New' in the upper left corner and 'Python 3' in the drop-down list (we are going to use a [Python kernel](https://github.com/ipython/ipython) for all our experiments).
#
# 
#
# + [markdown] id="-ANBvGzy0BZ_"
# ## Shortcuts and Tricks
# + [markdown] id="be4x27Ln0BZ_"
# Here is a list of useful tricks when in a Jupyter Notebook. Make sure you learn them early and use them as often as you can!
#
# + [markdown] id="3712_fEx0BaA"
# ### Command Mode Shortcuts
# + [markdown] id="wDPKOv-n0BaA"
# There are a couple of useful keyboard shortcuts in `Command Mode` that you can leverage to make Jupyter Notebook faster to use. Remember that you can switch back and forth between `Command Mode` and `Edit Mode` with <kbd>Esc</kbd> and <kbd>Enter</kbd>.
#
# - m:: Convert cell to Markdown
# - y:: Convert cell to Code
# - d+d:: Delete cell
# - o:: Toggle between hide or show output
# - Shift+Arrow up/Arrow down:: Select multiple cells. Once you have selected them you can operate on them like a batch (run, copy, paste etc).
# - Shift+M:: Merge selected cells
#
# + [markdown] id="6DFcAOGf0BaA"
# ### Cell Tricks
# + [markdown] id="7Gf8Rwkt0BaA"
# There are also some tricks that you can code into a cell:
#
# - `?function-name`:: Shows the definition and docstring for that function
# - `??function-name`:: Shows the source code for that function
# - `doc(function-name)`:: Shows the definition, docstring **and links to the documentation** of the function
# (only works with fastai library imported)
# - Shift+Tab (press once):: See which parameters to pass to a function
# - Shift+Tab (press three times):: Get additional information on the method
# + [markdown] id="LG8677bL0BaB"
# ### Line Magics
# + [markdown] id="Qjc5stUz0BaB"
# Line magics are functions that you can run on cells. They should be at the beginning of a line and take as an argument the rest of the line from where they are called. You call them by placing a '%' sign before the command. The most useful ones are:
#
# - `%matplotlib inline`:: Ensures that all matplotlib plots will be plotted in the output cell within the notebook and will be kept in the notebook when saved.
#
# This command is always called together at the beginning of every notebook of the fast.ai course.
#
# ``` python
# # # %matplotlib inline
# ```
#
# - `%timeit`:: Runs a line ten thousand times and displays the average time it took to run.
# + id="4oavutTQ0BaB" outputId="50892f74-7e32-415d-a755-e26300d60061"
# %timeit [i+1 for i in range(1000)]
# + [markdown] id="M_xe7_880BaB"
# `%debug`: Inspects a function which is showing an error using the [Python debugger](https://docs.python.org/3/library/pdb.html). If you type this in a cell just after an error, you will be directed to a console where you can inspect the values of all the variables.
#
| app_jupyter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/souravgopal25/DeepLearnigNanoDegree/blob/master/FindingEdges.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="0DaynkQo_0Bm" colab_type="text"
#
# + id="NNrYnlJAAWzi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="dbc08815-c4a3-4556-a5b5-ec86108b64d1"
from google.colab import drive
drive.mount('/content/drive')  # mount Google Drive so the sample images can be read
# + id="OObPMIEO_oXm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 257} outputId="9c2d8c63-0223-4575-fa82-392dfd0ed9ce"
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import numpy as np
# %matplotlib inline
# Path to the sample road image stored on the mounted Drive.
file="drive/My Drive/curved_lane.jpg"
# Read in the image
image = mpimg.imread(file)
plt.imshow(image)
# + id="qwWLIYCU_0QZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 257} outputId="db0c463d-9a06-492d-8373-31c54457e038"
# Convert to grayscale for filtering
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
plt.imshow(gray, cmap='gray')
# + [markdown] id="yAkAsnjWBTDp" colab_type="text"
# ### TODO: Create a custom kernel
#
# Below, you've been given one common type of edge detection filter: a Sobel operator.
#
# The Sobel filter is very commonly used in edge detection and in finding patterns in intensity in an image. Applying a Sobel filter to an image is a way of **taking (an approximation) of the derivative of the image** in the x or y direction, separately. The operators look as follows.
#
#
#
# + id="NkcPBPpaBkfL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="b3a71580-90dc-4bc5-b102-15b029415945"
file1="drive/My Drive/sobel_ops.png"
image = mpimg.imread(file1)
plt.imshow(image)
# + [markdown] id="zZ6tEkCSBn-d" colab_type="text"
# **It's up to you to create a Sobel x operator and apply it to the given image.**
#
# For a challenge, see if you can put the image through a series of filters: first one that blurs the image (takes an average of pixels), and then one that detects the edges.
# + id="mG3CCVloBAv2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} outputId="c38e0cff-898f-4bf9-8610-261a085fa400"
# Create a custom kernel
# 3x3 array for edge detection
# Sobel y operator: responds to vertical intensity changes (horizontal edges).
sobel_y = np.array([[ -1, -2, -1],
                   [ 0, 0, 0],
                   [ 1, 2, 1]])
## TODO: Create and apply a Sobel x operator
# Sobel x operator: transposed orientation of sobel_y; highlights vertical edges.
sobel_x=np.array([[-1,0,1],
                  [-2,0,2],
                  [-1,0,1]])
# Filter the image using filter2D, which has inputs: (grayscale image, bit-depth, kernel)
# bit-depth -1 keeps the output dtype equal to the input image's dtype.
filtered_image = cv2.filter2D(gray, -1, sobel_x)
plt.title("Sobel-X")
plt.imshow(filtered_image, cmap='gray')
# + id="FQOQti-tBIO6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} outputId="05f9105d-9532-4844-94cd-0a220906dc8d"
# Filter the image using filter2D, which has inputs: (grayscale image, bit-depth, kernel)
filtered_image = cv2.filter2D(gray, -1, sobel_y)
plt.title("Sobel-Y")
plt.imshow(filtered_image, cmap='gray')
# + id="aQlSqRILCb_i" colab_type="code" colab={}
| FindingEdges.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="L3RD4ixNFGQm"
# # **Testando o modelo do Detector de Emoções**
# + [markdown] id="Dn7-GZsYGPau"
#
# + [markdown] id="zz8aXNC5tOyC"
# # **Etapa 1 - Importando as bibliotecas**
# + id="Bx7mqEk_snHV" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="2c151328-07c6-4a17-d965-6267c29cd625"
import cv2
import numpy as np
import pandas as pd
from google.colab.patches import cv2_imshow
import zipfile
# %tensorflow_version 2.x
import tensorflow
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
tensorflow.__version__
# + [markdown] id="xKwVf6vUGsbk"
# # **Etapa 2 - Conectando com o Drive e acessando os arquivos**
# + [markdown] id="AdOuSmncx3GN"
# Faça o download dos arquivos necessários aqui: https://drive.google.com/file/d/1JGhlcmLLnVO44uF8DdNvCMYt-gOr3iI2/view?usp=sharing
#
# Importe para o seu google drive. No meu caso eu importei para /Colab/Material.zip no meu Gdrive, caso faça upload para pasta diferente, lembre-se de alterar o caminho da descompactação no código abaixo.
#
# É recomendável importar para o Gdrive, pois dessa forma reduz o tempo de upload para o Colab
# + id="9pXwxlUBG45d" colab={"base_uri": "https://localhost:8080/"} outputId="223bddb4-5ba0-40ac-b84e-cce1761ccc41"
from google.colab import drive
drive.mount('/content/gdrive')
# + id="I3jDsBpcUUPm" colab={"base_uri": "https://localhost:8080/"} outputId="37b4da47-795c-4700-9610-17273359b32f"
# Extract the course material archive from the mounted Drive into the working dir.
path = "/content/gdrive/My Drive/Colab/Material.zip"
# Fix: the original ended with `zip_object.close` (attribute access, no call),
# so the archive handle was never closed. A context manager closes it reliably.
with zipfile.ZipFile(file = path, mode = "r") as zip_object:
    zip_object.extractall('./')
# + [markdown] id="o7nc4mAIAY4Q"
# # **Testar com foto capturada da webcam**
# + [markdown] id="bf0TTqWJAoCf"
# **Carregar biblioteca e função**
# + id="UtPQ510tAw5f"
from IPython.display import HTML, Audio
from google.colab.output import eval_js
from base64 import b64decode
import numpy as np
import io
from PIL import Image
VIDEO_HTML = """
<style>
.animate
{
transition: all 0.1s;
-webkit-transition: all 0.1s;
}
.action-button
{
position: relative;
padding: 10px 40px;
margin: 0px 10px 10px 0px;
float: right;
border-radius: 3px;
font-family: 'Lato', sans-serif;
font-size: 18px;
color: #FFF;
text-decoration: none;
}
.blue
{
background-color: #3498db;
border-bottom: 5px solid #2980B9;
text-shadow: 0px -2px #2980B9;
}
.action-button:active
{
transform: translate(0px,5px);
-webkit-transform: translate(0px,5px);
border-bottom: 1px solid;
}
.bloco{
width:400px;
background-color: black;
}
.txt{color:white;}
#espelho
{
transform: rotateY(180deg);
-webkit-transform:rotateY(180deg); /* Safari and Chrome */
-moz-transform:rotateY(180deg); /* Firefox */
}
</style>
<div class="bloco">
<h1 class="txt">Detector de Emoções</h1>
<video id="espelho" autoplay
width=%d height=%d style='cursor: pointer;'></video>
<button type="button" class="action-button shadow animate blue">Tirar Foto</button>
</div>
<script>
var video = document.querySelector('video')
var botao = document.querySelector('button')
navigator.mediaDevices.getUserMedia({ video: true })
.then(stream=> video.srcObject = stream)
var data = new Promise(resolve=>{
botao.onclick = ()=>{
var canvas = document.createElement('canvas')
var [w,h] = [video.offsetWidth, video.offsetHeight]
canvas.width = w
canvas.height = h
canvas.getContext('2d').scale(-1, 1);
canvas.getContext('2d')
.drawImage(video, 0, 0, w*-1, h)
video.srcObject.getVideoTracks()[0].stop()
video.replaceWith(canvas)
resolve(canvas.toDataURL('image/jpeg', %f))
}
})
</script>
"""
def tirar_foto(filename='photo.jpg', quality=100, size=(400,300)):
  """Render the webcam widget (VIDEO_HTML) and return the captured frame.

  Blocks until the user clicks the capture button in the rendered HTML, then
  decodes the base64 JPEG data URL resolved by the JS promise and returns it
  as a numpy array (H, W, 3).
  NOTE(review): `filename` is accepted but never used — the frame is only
  returned, not written to disk.
  """
  display(HTML(VIDEO_HTML % (size[0],size[1],quality)))
  data = eval_js("data")  # resolves to a 'data:image/jpeg;base64,...' URL
  binary = b64decode(data.split(',')[1])  # drop the data-URL header, decode the payload
  f = io.BytesIO(binary)
  return np.asarray(Image.open(f))
# + [markdown] id="xJT4LsL9A0NG"
# **Capturando a foto**
# + id="RBkBec-xA6Sm" colab={"base_uri": "https://localhost:8080/", "height": 751} outputId="9105d7dd-ebe4-40a6-8ab9-5489c2772ef2"
# Click on the webcam image to take a photo
imagem = tirar_foto()
# Swap the channel order (use when the captured image comes out with inverted colors)
imagem = cv2.cvtColor(imagem, cv2.COLOR_BGR2RGB)
#imagem = cv2.flip(imagem,1) ~ mirrors the image. The JS side already mirrors it; uncomment to see the real (unmirrored) capture
cv2_imshow(imagem)
cv2.imwrite("testecaptura.jpg",imagem)
# + id="z86R-DzWA8uW"
# Paths to the Haar-cascade face detector and the trained emotion classifier.
cascade_faces = 'Material/haarcascade_frontalface_default.xml'
caminho_modelo = 'Material/modelo_01_expressoes.h5'
face_detection = cv2.CascadeClassifier(cascade_faces)
classificador_emocoes = load_model(caminho_modelo, compile=False)
# + id="fZRrcDHrFgjO" colab={"base_uri": "https://localhost:8080/", "height": 636} outputId="a39e4bb8-7540-45d5-b6c8-e53303938106"
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
# Load the model
# NOTE(review): these two lines re-create the detector/classifier already built
# in the previous cell — harmless but redundant.
face_detection = cv2.CascadeClassifier(cascade_faces)
classificador_emocoes = load_model(caminho_modelo, compile=False)
# Class labels, in the order of the network's output units (Portuguese names).
expressoes = ["Raiva", "Nojo", "Medo", "Feliz", "Triste", "Surpreso", "Neutro"]
original = imagem.copy()
faces = face_detection.detectMultiScale(original,scaleFactor=1.1,minNeighbors=3,minSize=(20,20))
cinza = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
if len(faces) > 0:
    for (fX, fY, fW, fH) in faces:
        # Crop the face, resize to the classifier's 48x48 input, scale to [0,1].
        roi = cinza[fY:fY + fH, fX:fX + fW]
        roi = cv2.resize(roi, (48, 48))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)  # add the batch dimension
        preds = classificador_emocoes.predict(roi)[0]
        print(preds)
        emotion_probability = np.max(preds)
        label = expressoes[preds.argmax()]
        # Draw the predicted label and a bounding box around the face.
        cv2.putText(original, label, (fX, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.rectangle(original, (fX, fY), (fX + fW, fY + fH),(0, 0, 255), 2)
else:
    print('Nenhuma face detectada')
cv2_imshow(original)
# White canvas for the per-emotion probability bar chart.
probabilidades = np.ones((250, 300, 3), dtype="uint8") * 255
# Show the chart only when exactly one face was detected
if len(faces) == 1:
    for (i, (emotion, prob)) in enumerate(zip(expressoes, preds)):
        # Emotion names
        text = "{}: {:.2f}%".format(emotion, prob * 100)
        w = int(prob * 300)
        cv2.rectangle(probabilidades, (7, (i * 35) + 5),
        (w, (i * 35) + 35), (200, 250, 20), -1)
        cv2.putText(probabilidades, text, (10, (i * 35) + 23),
        cv2.FONT_HERSHEY_SIMPLEX, 0.45,
        (0, 0, 0), 1, cv2.LINE_AA)
    cv2_imshow(probabilidades)
cv2.imwrite("captura.jpg",original)
cv2.destroyAllWindows()
| webcamp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bonus: Temperature Analysis I
import pandas as pd
from datetime import datetime as dt
# "tobs" is "temperature observations"
df = pd.read_csv('hawaii_measurements.csv')
df.head()
# +
# Convert the date column format from string to datetime
# +
# Set the date column as the DataFrame index
# +
# Drop the date column
# -
# ### Compare June and December data across all years
from scipy import stats
# +
# Filter data for desired months
# +
# Identify the average temperature for June
# +
# Identify the average temperature for December
# +
# Create collections of temperature data
# +
# Run paired t-test
# -
# ### Analysis
| .ipynb_checkpoints/temp_analysis_bonus_1_starter-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # QCoDeS Example with DynaCool PPMS
#
# This notebook explains how to control the DynaCool PPMS from QCoDeS.
#
# For this setup to work, the proprietary `PPMS Dynacool` application (or, alternatively `Simulate PPMS Dynacool`) must be running on some PC. On that same PC, the `server.py` script (found in `qcodes/instrument_drivers/QuantumDesign/DynaCoolPPMS/private`) must be running. The script can be run from the command line with no arguments and will run under python 3.6+.
#
# The architecture is as follows:
#
# The QCoDeS driver sends strings via VISA to the server who passes those same strings on to the `CommandHandler` (found in `qcodes/instrument_drivers/QuantumDesign/DynaCoolPPMS/commandhandler`). The `CommandHandler` makes the calls into the proprietary API. The QCoDeS driver can thus be called on any machine that can communicate with the machine hosting the server.
#
# Apart from that, the driver is really simple. For this notebook, we used the `Simulate PPMS Dynacool` application running on the same machine as QCoDeS.
# %matplotlib notebook
from qcodes.instrument_drivers.QuantumDesign.DynaCoolPPMS.DynaCool import DynaCool
# To instantiate the driver, simply provide the address and port in the standard VISA format.
# The connect message is not too pretty, but there does not seem to be a way to query serial and firmware versions.
dynacool = DynaCool('dynacool', address="TCPIP0::127.0.0.1::5000::SOCKET")
# To get an overview over all available parameters, use `print_readable_snapshot`.
#
# A value of "Not available" means (for this driver) that the parameter has been deprecated.
dynacool.print_readable_snapshot(update=True)
# ## Temperature Control
#
# As soon as ANY of the temperature rate, the temperature setpoint, or the temperature settling mode parameters has been set, the system will start moving to the given temperature setpoint at the given rate using the given settling mode.
#
# The system can continuously be queried for its temperature.
# +
from time import sleep
import matplotlib.pyplot as plt
import numpy as np
# example 1
dynacool.temperature_rate(0.1)
dynacool.temperature_setpoint(dynacool.temperature() - 1.3)
temps = []
while dynacool.temperature_state() == 'tracking':
temp = dynacool.temperature()
temps.append(temp)
sleep(0.75)
print(f'Temperature is now {temp} K')
# -
# Plot the recorded temperatures against elapsed time.
plt.figure()
# Fix: samples are taken every 0.75 s (see the sleep(0.75) in the acquisition
# loop above), so the time axis must use 0.75 s per sample, not 0.2 s.
timeax = np.linspace(0, len(temps)*0.75, len(temps))
plt.plot(timeax, temps, '-o')
plt.xlabel('Time (s)')
plt.ylabel('Temperature (K)')
# ## Field Control
#
# The field has **five** related parameters:
#
# - `field_measured`: The (read-only) field strength right now.
# - `field_target`: The target field that the `ramp` method will ramp to when called. Setting this parameter does **not** trigger a ramp
# - `field_rate`: The field ramp rate with initial value of `0`.
# - `field_approach`: The approach that the system should use to ramp. By default it is set to `linear`.
# - `field_ramp`: This is a convenience parameter that sets the target field and then triggers a blocking ramp.
#
# The idea is that the user first sets the `field_target` and then ramps the field to that target using the `ramp` method. The ramp method takes a `mode` argument that controls whether the ramp is blocking or non-blocking.
#
# Using the simulation software, the field change is instanteneous irrespective of rate. We nevertheless include two examples of ramping here.
# ### A blocking ramp
# First, we set a field target:
field_now = dynacool.field_measured()
target = field_now + 1
dynacool.field_target(target)
# Note that the field has not changed yet:
assert dynacool.field_measured() == field_now
# And now we ramp:
dynacool.ramp(mode='blocking')
# The ramping will take some finite time on a real instrument. The field value is now at the target field:
print(f'Field value: {dynacool.field_measured()} T')
print(f'Field target: {dynacool.field_target()} T')
# ### A non-blocking ramp
#
# The non-blocking ramp is very similar to the blocking ramp.
# +
field_now = dynacool.field_measured()
target = field_now - 0.5
dynacool.field_target(target)
assert dynacool.field_measured() == field_now
dynacool.ramp(mode='non-blocking')
# Here you can do stuff while the magnet ramps
print(f'Field value: {dynacool.field_measured()} T')
print(f'Field target: {dynacool.field_target()} T')
# -
# ### Using the `field_ramp` parameter
#
# The `field_ramp` parameter sets the target field and ramp when being set.
print(f'Now the field is {dynacool.field_measured()} T...')
print(f'...and the field target is {dynacool.field_target()} T.')
dynacool.field_ramp(1)
print(f'Now the field is {dynacool.field_measured()} T...')
print(f'...and the field target is {dynacool.field_target()} T.')
| docs/examples/driver_examples/Qcodes example with DynaCool PPMS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pytorch
# language: python
# name: pytorch
# ---
# +
try:
import google.colab
IN_COLAB = True
except:
IN_COLAB = False
IN_COLAB
debug=False
DOWNLOAD_DATA = False
# -
# ### Data Preparation
# - Download of Dataset
# **P.S**: Randomly Sampled 10 instances from each target class as described in the paper.
# - Option 1: Download from Archive.org
# - [Archive Link](https://archive.org/details/Imagenet_NAG)
# - [train.zip](https://archive.org/download/Imagenet_NAG/train.zip)
# - [valid.zip](https://archive.org/download/Imagenet_NAG/valid.zip)
# - Option 2 : Mega Download Link for Train and Validation data of Imagenet 2012 (Obtained from Kaggle)
# - Validation Data: [Mega Link](https://mega.nz/#!yDoTDIyD!RjN6OBA92-KLpNqDeLS3OzwmAYesEbTsiQat9hT6p6s)
# - Trainning Data: [Mega Link](https://mega.nz/#!vKY0WSDa!4aibnBkiXUrO9MkhQlLGXac7wLF5HY7O4LzfdFEaeQU)
# <!-- - If link fails to work use the following Colab notebook to generate your own subset of trainning examples. [Link](https://colab.research.google.com/drive/1LbZBfgqntWb3HuC3UFyF_FvwnHtd1xTA) -->
# - Setting up of Folder Structure
# For Easier handling and reproducibility of results download from mega link
#
if IN_COLAB or DOWNLOAD_DATA:
# !mkdir ILSVRC
# !sudo apt install aria2 zip -y
# !aria2c -x 4 https://archive.org/download/Imagenet_NAG/train.zip -o train.zip
# !aria2c -x 4 https://archive.org/download/Imagenet_NAG/valid.zip -o valid.zip
# !wget https://archive.org/download/Imagenet_NAG/LOC_val_solution.csv -O ILSVRC/LOC_val_solution.csv
# !wget https://archive.org/download/Imagenet_NAG/LOC_synset_mapping.txt -O ILSVRC/LOC_synset_mapping.txt
# !unzip -qq train.zip -d ILSVRC/
# !unzip -qq valid.zip -d ILSVRC/
# TODO Download Data from Archive
# Extract and Do the Pre-Processing
# # !rm train.zip
# # !rm valid.zip
# +
from glob import glob

# Sanity-check the extracted ILSVRC subset: every training class must hold
# exactly 10 images and the validation split must contain all 50,000 files.
train_ok = True
val_ok = True

print("Training Data Verification")
class_dirs = glob("ILSVRC/train/*")
cls_count = len(class_dirs)
print("Total Number of Classes: {} in train directory".format(cls_count))

count = 0
for cls_ in class_dirs:
    img_count = len(glob(cls_ + "/*"))
    count += img_count
    if img_count != 10:
        # Report any class whose image count deviates from the expected 10.
        print(cls_.split("/")[-1], img_count)
        train_ok = False
print("Total {} number of files in {} classes. i.e 10 Images/Class".format(count, cls_count))

print("Validation Data Verification")
val_files = glob("ILSVRC/valid/*")
val_count = len(val_files)
if val_count == 50000:
    print("Validation Data has correct number of files i.e {}".format(val_count))
else:
    print("Validation Data has some issue. Has following number of file : {}. Kindly Check!!".format(val_count))
    val_ok = False

if train_ok and val_ok:
    print("Dataset is Setup Correctly")
# -
# ### Imports
# +
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from torch.utils.data import DataLoader,Dataset
import torchvision
import torchvision.models as tvm
from torchvision import transforms
from torchvision.datasets.folder import DatasetFolder,ImageFolder
import numpy as np
from glob import glob
from PIL import Image
import pandas as pd
import os,time,gc
from pathlib import Path
from tqdm import tqdm_notebook as tqdm
import datetime,random,string
# +
ngpu=torch.cuda.device_count()
device = torch.device("cuda" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
print("Using Pytorch Version : {} and Torchvision Version : {}. Using Device {}".format(torch.__version__,torchvision.__version__,device))
# -
# ### Dataset and Dataloaders Setup
dataset_path=r'ILSVRC/'
train_dataset_path=dataset_path+'train'
test_dataset_path=dataset_path+'valid'
print("Dataset root Folder:{}. Train Data Path: {}. Validation Data Path {}".format(dataset_path,train_dataset_path,test_dataset_path))
# +
# Preparation of Labels
# Build two lookups from the synset mapping file (one synset per line, ordered
# by ImageNet class index):
#   label_dict: WordNet id (e.g. 'n01440764') -> human-readable class name
#   label_idx : WordNet id -> integer class index (the line number)
label_dict={}
label_idx={}
with open('ILSVRC/LOC_synset_mapping.txt') as file:
    lines=file.readlines()
    for idx,line in enumerate(lines):
        # Line format: "<wnid> <comma-separated names>"; split once on the first space.
        label,actual =line.strip('\n').split(' ',maxsplit=1)
        label_dict[label]=actual
        label_idx[label]=idx
# -
# ### Transforms
# +
# transforms
size=224
# Imagenet Stats
# NOTE(review): these are the classic Caffe/VGG per-channel means on a 0-255
# scale, but transforms.ToTensor() below rescales pixels to [0, 1] before
# Normalize runs — so this normalization pushes images far off-scale.
# Confirm it matches how the target models were trained.
vgg_mean = [103.939, 116.779, 123.68]
preprocess=transforms.Compose([transforms.Resize((size,size)),
                               transforms.ToTensor(),
                               transforms.Normalize(vgg_mean,(0.5, 0.5, 0.5))])
# -
# ### Dataset and Dataloaders
#
# +
class CustomDataset(Dataset):
    """ImageNet subset dataset yielding (transformed_image, class_index) pairs.

    'train': images under <root_dir>/train/<wnid>/*, labelled by folder name.
    'valid': file list and labels read from the hard-coded
             'ILSVRC/LOC_val_solution.csv' — NOTE(review): root_dir is ignored
             for this subset; consider deriving the CSV path from it.
    The integer class index comes from the module-level `label_idx` mapping.
    """
    def __init__(self, subset, root_dir, transform=None):
        self.root_dir=root_dir
        self.transform=transform
        self.subset=subset
        if self.subset=='train':
            data_dir=os.path.join(self.root_dir,self.subset)
            self.images_fn=glob(f'{data_dir}/*/*')
            # The parent directory name is the WordNet id label.
            self.labels=[Path(fn).parent.name for fn in self.images_fn]
        elif subset =='valid':
            df=pd.read_csv('ILSVRC/LOC_val_solution.csv')
            # The first token of PredictionString is the ground-truth WordNet id.
            df['label']=df['PredictionString'].str.split(' ',n=1,expand=True)[0]
            df=df.drop(columns=['PredictionString'])
            self.images_fn='ILSVRC/valid/'+df['ImageId'].values+'.JPEG'
            self.labels=df['label']
        else:
            raise ValueError
        print(f" Number of instances in {self.subset} subset of Dataset: {len(self.images_fn)}")
    def __getitem__(self,idx):
        # Load one image, promote grayscale to RGB, apply the transform, and
        # map its WordNet id to the integer class index.
        fn=self.images_fn[idx]
        label=self.labels[idx]
        image=Image.open(fn)
        if image.getbands()[0] == 'L':
            image = image.convert('RGB')
        if self.transform:
            image = self.transform(image)
        # print(type(image))
        return image,label_idx[label]
    def __len__(self):
        return len(self.images_fn)
data_train=ImageFolder(root='ILSVRC/train',transform=preprocess)
class2idx=data_train.class_to_idx
data_valid=CustomDataset(subset='valid',root_dir=dataset_path,transform=preprocess)
train_num = len(data_train)
val_num = len(data_valid)
# -
# # Proposed Approach
#
#
# 
#
# - **Core idea is to model the distribution of universal adversarial perturbations for a given classifier.**
# - The image shows a batch of B random vectors {z}<sub>B</sub> transforming into perturbations {delta}<sub>B</sub> by G which get added to the batch of data samples {x}<sub>B</sub>.
# - The top portion shows adversarial batch (X<sub>A</sub>), bottom portion shows shuffled adversarial batch (X<sub>S</sub>) and middle portion shows the benign batch (X<sub>B</sub>). The Fooling objective Lf (eq. 2) and Diversity objective Ld (eq. 3) constitute the loss.
# ### Note
# - Note that the target CNN (f) is a trained classifier and its parameters are not updated during the proposed training. On the other hand, the parameters of generator (G) are randomly initialized and learned through backpropagating the loss. (Best viewed in color).
# ### Loss Functions/Objectives
#
# +
def fooling_objective(qc_):
    '''Fooling loss for the generator.

    Computes -log(1 - mean(qc')), where qc' holds, for each adversarial
    sample, the probability assigned to the class that was top-1 on the
    corresponding clean sample (qc' ---> qc_).

    Parameters:
        qc_ : tensor of per-sample confidences qc'
    Returns:
        -log(1 - mean(qc')) , mean(qc')
    '''
    mean_conf = qc_.mean()
    fooling_loss = -torch.log(1 - mean_conf)
    return fooling_loss, mean_conf
def diversity_objective(prob_vec_no_shuffle, prob_vec_shuffled):
    '''Mean cosine similarity between two batches of probability vectors.

    Parameters:
        prob_vec_no_shuffle : probabilities for the adversarial batch
        prob_vec_shuffled   : probabilities for the shuffled adversarial batch
    Returns :
        Mean row-wise cosine similarity between the two batches.
    '''
    row_similarity = torch.cosine_similarity(prob_vec_no_shuffle, prob_vec_shuffled)
    return row_similarity.mean()
## TODO
def intermediate_activation_objective(layer_name=None):
    '''Unimplemented placeholder for the intermediate-activation objective.

    Intended to extract the activations of an intermediate layer for:
    1. a batch of images (batch size 32) corrupted by a batch of perturbations
    2. the same images corrupted by the same perturbations in shuffled order
    (for the ResNet-50 architecture the intended layer is 'res4f').
    NOTE(review): reads the module-level global `arch`; the assignment to
    `layer_name` currently has no effect since the body ends in `pass`.
    '''
    if arch =='resnet50':
        layer_name='res4f'
    pass
# +
# Effect of ConvTranspose2d : combination of upsampling and convolution layers is equal to a strided
# convolutional layer. increase the spatial resolution of the tensor
# def __call__(self):
# Dont Override the __call__ method. Pytorch does forward and backward hooks required,
# Always use forward method to avoid any issues
# -
# ### Generator
# - Architecture of our generator (G) unchanged for different target CNN architectures
#
# 
# +
from torch import nn
ngf=128            # base feature-map width (referenced only in comments below)
nz= latent_dim=10  # latent vector length fed to the generator
e_lim = 10         # perturbation scale ε applied after Tanh
nc=3 # Number of Channels
# Fixed Architecture: Weights will be updated by Backprop.
class AdveraryGenerator(nn.Module):
    """Generator G: maps a latent vector z of shape (nz, 1, 1) to an
    image-sized perturbation scaled by e_lim.

    NOTE(review): the last stage applies ReLU before Tanh, which makes the
    output non-negative (in [0, e_lim)) rather than symmetric around zero —
    confirm this is intentional.
    """
    def __init__(self,e_lim):
        super(AdveraryGenerator, self).__init__()
        self.e_lim = e_lim  # scale applied in forward()
        # Stack of ConvTranspose2d stages upsampling the 1x1 latent to an image.
        self.main = nn.Sequential(
            nn.ConvTranspose2d( in_channels=nz,out_channels= 1024, kernel_size=4, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(1024),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(1024, 512, 4, 2, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d( 512, 256, 4, 2, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(256, 128, 4, 2, 2, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d( 128, 64, 4, 2, 2, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            # state size. (nc) x 64 x 64
            nn.ConvTranspose2d( 64, 3, 4, 4,4, bias=False),
            nn.BatchNorm2d(3),
            nn.ReLU(True),
            nn.Tanh()
        )
    def forward(self, x):
        return self.e_lim * self.main(x) # Scaling of ε
adversarygen=AdveraryGenerator(e_lim).to(device)
# -
# #### Debugging
#
if debug:
try:
from torchsummary import summary
summary(adversarygen,(nz,1,1))
except:
raise('Check torchsummary is installed. If not install using the command pip install torchsummary')
# ### Setting up Discriminator : Model : Architecture
#
# +
from torchvision.models import googlenet, vgg16 , vgg19, resnet152, resnet50
model_dict ={
'googlenet': googlenet,
'vgg16': vgg16 ,
'vgg19':vgg19,
'resnet152':resnet152,
'resnet50':resnet50
}
# -
# ### Choice of Hyperparameters
# - The architecture of the generator consists of 5 deconv layers. The final deconv layer is followed by a tanh non-linearity and scaling by epsillon (10)
#
# Get all Pretrained Weights:
# for arch in model_dict.keys():
# model=model_dict[arch](pretrained=True)
# +
# epsillon=10
# batch_size=32
# latent_dim = 10
img_h,img_w,img_c=(224,224,3)
latent_dim=10
arch='resnet50'
archs=model_dict.keys() # ['vgg-f','vgg16','vgg19','googlenet','resnet50','resnet152']
def get_bs(arch):
    """Return a safe batch size for training against the given target CNN.

    ResNets need more GPU memory here, so they get a smaller batch; on CPU a
    tiny batch is used to avoid running out of memory.

    Parameters:
        arch : architecture key, one of the supported model names
    Returns:
        int batch size
    Raises:
        ValueError: if `arch` is not a supported architecture.
    """
    supported = ('googlenet', 'vgg16', 'vgg19', 'resnet50', 'resnet152')
    # Fix: the original if/elif pair covered every possible input, so its
    # ValueError branch was unreachable and unknown archs silently got 64.
    if arch not in supported:
        raise ValueError(f'Architecture type not supported. Please choose one from the following {supported}')
    if not torch.cuda.is_available():
        return 8  # OOM Error otherwise on CPU
    return 32 if arch in ('resnet50', 'resnet152') else 64
get_bs(arch)
# -
model=model_dict[arch](pretrained=True)
model
# ### Other Utils
# +
def save_checkpoint(model, to_save, filename='checkpoint.pth'):
    """Persist the model's state_dict to `filename` when a new best was reached.

    Parameters:
        model    : torch module whose weights should be saved
        to_save  : True when the current metrics improved on the best so far
        filename : destination path for the checkpoint file
    """
    if not to_save:
        print ("=> Validation Accuracy did not improve")
        return
    print ("=> Saving a new best")
    torch.save(model.state_dict(), filename)  # save checkpoint
def save_perturbations(noise,arch,epoch,wabdb_flag=False):
    """Dump a batch of perturbations to disk (.npy for the batch plus one PNG each).

    A random 6-letter directory suffix keeps repeated runs from overwriting
    each other.
    NOTE(review): values are multiplied by 255 before the uint8 cast — confirm
    `noise` is in [0, 1] at this point.
    NOTE(review): `wabdb_flag` (sic) enables wandb logging, but `wandb` is not
    imported anywhere visible here — enabling it would raise NameError. The
    parameter name is kept to preserve the call interface.
    """
    rand_str= ''.join( random.choice(string.ascii_letters) for i in range(6))
    os.makedirs(f"{arch}-{rand_str}",exist_ok=True)
    # NCHW tensor -> NHWC numpy array for image saving.
    perturbations=noise.permute(0,2,3,1).cpu().detach().numpy()*255
    np.save(f'{arch}-{rand_str}/Perturbations_{arch}_{epoch}.npy', perturbations)
    for perturb_idx,perturbation in enumerate(perturbations[:,]):
        im = Image.fromarray(perturbation.astype(np.uint8))
        if wabdb_flag:
            wandb.log({"noise": [wandb.Image(im, caption=f"Noise_{arch}_{epoch}_{perturb_idx}")]})
        im.save(f'{arch}-{rand_str}/Perturbations_{arch}_{epoch}_{perturb_idx}.png')
# TODO
def visualize_perturbations():
    """Unimplemented placeholder: render saved perturbations in a grid.

    Open design questions left by the author are preserved below.
    """
    # Matplotlib subplot grid? 4x4 or 3x3?
    # Source the images from memory or from disk (keyed by epoch number)?
    pass
def get_preds(predictions,return_idx=False, k=1):
    """Return the top-k prediction scores (and optionally their class indices).

    Parameters:
        predictions: 2-D tensor of per-class scores, shape (batch, num_classes)
        return_idx: when True, also return the index tensor
        k: number of top entries to keep per row

    Returns:
        (batch, k) tensor of the top-k scores per row (descending), and,
        when ``return_idx`` is True, the matching (batch, k) index tensor.
    """
    idxs= torch.argsort(predictions,descending=True)[:,:k]
    # Bug fix: `predictions[:, idxs]` broadcast the 2-D index tensor and
    # produced a (batch, batch, k) result; `gather` selects row-wise.
    vals = predictions.gather(1, idxs)
    if return_idx:
        return vals, idxs
    return vals
# -
# #### Validating Model Utils
# +
# val_iterations = val_num/bs
def compute_fooling_rate(prob_adv,prob_real):
    '''Helper function to calculate mismatches in the top index vector
    for clean and adversarial batch
    Parameters:
        prob_adv : Index vector for adversarial batch
        prob_real : Index vector for clean batch
    Returns:
        Number of mismatch and its percentage
    '''
    size = prob_real.shape[0]
    # Count positions where the adversarial top-1 differs from the clean top-1.
    nfool = sum(1 for pos in range(size) if prob_real[pos] != prob_adv[pos])
    return nfool, 100 * float(nfool) / size
def validate_generator_old(noise,val_dl,val_iterations=10):
    """Deprecated validation loop kept for reference; superseded by validate_generator.

    Measures how often adding ``noise`` changes the discriminator's output over
    ``val_iterations`` passes of ``val_dl``. Relies on the notebook globals
    ``D_model``, ``device``, ``train_log`` and ``batch_size``.
    """
    total_fool=0
    print("############### VALIDATION PHASE STARTED ################")
    train_log.writelines("############### VALIDATION PHASE STARTED ################")
    for val_idx in range(val_iterations):
        for batch_idx, data in enumerate(val_dl):
            images = data[0].to(device)
            # labels = data[1].to(device)
            prob_vec_clean = F.softmax(D_model(images),dim=0) # Variable q
            prob_vec_no_shuffle = D_model(images + noise)
            # NOTE(review): these are probability vectors, not top-1 index
            # vectors -- compute_fooling_rate documents index vectors; confirm
            # intended semantics before resurrecting this function.
            nfool, _ = compute_fooling_rate(prob_vec_no_shuffle,prob_vec_clean)
            total_fool += nfool
    # Bug fix: the report used `foolr`, an undefined name that raised
    # NameError; report once after the loops using the computed rate.
    fool_rate = 100*float(total_fool)/(val_iterations*batch_size)
    print(f"Fooling rate: {fool_rate}. Total Items Fooled :{total_fool}")
    train_log.writelines(f"Fooling rate: {fool_rate}. Total Items Fooled :{total_fool}")
def validate_generator(noise,D_model,val_dl):
    """Compute the fooling rate of a fixed perturbation ``noise`` over ``val_dl``.

    Counts validation samples whose top-1 prediction changes once ``noise`` is
    added to the input. Relies on the notebook globals ``device`` and
    ``val_num`` (total size of the validation set).

    Returns:
        (fool_rate, total_fool): fooling percentage and raw mismatch count.
    """
    total_fool=0
    for batch_idx, data in tqdm(enumerate(val_dl),total = val_num//val_dl.batch_size):
        val_images = data[0].to(device)
        val_labels = data[1].to(device)
        # NOTE(review): softmax over dim=0 normalises across the batch, not the
        # classes; the per-row argmax used below is unaffected, but confirm
        # dim=1 was not intended.
        prob_vec_clean,clean_idx = get_preds(F.softmax(D_model(val_images),dim=0),return_idx=True) # Variable q
        prob_vec_no_shuffle,adv_idx = get_preds(F.softmax(D_model(val_images + noise),dim=0),return_idx=True)
        nfool, _ = compute_fooling_rate(adv_idx,clean_idx)
        total_fool += nfool
    fool_rate = 100*float(total_fool)/(val_num)
    return fool_rate,total_fool
# +
## Test Fooling Objective
# Two fake top-1 index batches (ImageNet has 1000 classes) for a quick
# smoke test of the fooling-objective helpers.
adv = torch.randint(0,1000,(32,1))
real = torch.randint(0,1000,(32,1))
# -
# #### Setup Wandb
# +
# Setup Wandb
import wandb
wandb.login()
wandb.init(project="NAG_Pytorch")
# -
# # Fit and Train the Generator
# Demo of the random 6-letter suffix used in checkpoint/perturbation file names.
[ random.choice(string.ascii_letters) for i in range(6)]
# +
def fit(nb_epochs,D_model,dls,optimizer,adversarygen=adversarygen):
    """Train the perturbation generator against a frozen discriminator.

    Parameters:
        nb_epochs: number of training epochs
        D_model: pretrained classifier to fool (put into eval mode; its
                 weights are never updated by `optimizer`)
        dls: [train_dataloader, val_dataloader]
        optimizer: optimizer over the generator's parameters
        adversarygen: the generator network (defaults to the notebook global)

    Relies on the notebook globals: device, bs, nz, arch, train_num,
    fooling_objective, diversity_objective, wandb, validate_generator.
    """
    # Set the Discriminator in Eval mode; Weights are fixed.
    train_dl,val_dl = dls
    D_model=D_model.to(device)
    D_model.eval()
    timestamp=datetime.datetime.now().strftime("%d%b%Y_%H_%M")
    train_log = open(f'train_log_{arch}_{timestamp}.txt','w')
    for epoch in tqdm(range(nb_epochs),total=nb_epochs):
        running_loss=0
        # Random suffix for this epoch's checkpoint file name.
        rand_str= ''.join( random.choice(string.ascii_letters) for i in range(6))
        train_log.writelines(f"############### TRAIN PHASE STARTED : {epoch}################")
        for batch_idx, data in tqdm(enumerate(train_dl),total = train_num//train_dl.batch_size):
            # Move Data and Labels to device(GPU)
            images = data[0].to(device)
            labels = data[1].to(device)
            # Generate the Adversarial Noise from Uniform Distribution U[-1,1]
            latent_seed = 2 * torch.rand(bs, nz, 1, 1, device=device,requires_grad=True) -1 # (r1 - r2) * torch.rand(a, b) + r2
            noise = adversarygen(latent_seed)
            optimizer.zero_grad()
            # XB = images
            #preds_XB = f(images)
            # NOTE(review): softmax over dim=0 normalises across the batch,
            # not the classes -- confirm dim=1 was not intended.
            prob_vec_clean = F.softmax(D_model(images),dim=0) # Variable q
            clean_preds ,clean_idx = get_preds(prob_vec_clean,return_idx=True,k=1)
            #XA = images+noise
            #preds_XA = f(images + noise)
            prob_vec_no_shuffle = D_model(images + noise)
            qc_ = F.softmax(prob_vec_no_shuffle,dim=0).gather(1,clean_idx) # Variable q'c
            # 1. fooling_objective: encourages G to generate perturbations that decrease confidence of benign predictions
            fool_obj, mean_qc_ = fooling_objective(qc_)
            # Perturbations are shuffled across the batch dimension to improve diversity
            #XS = images+ noise[torch.randperm(bs)]
            prob_vec_shuffled = D_model(images + noise[torch.randperm(bs)])
            # 2. encourages Generator to explore the space of perturbations and generate a diverse set of perturbations
            divesity_obj=diversity_objective(prob_vec_no_shuffle, prob_vec_shuffled)
            # Compute Total Loss
            total_loss = divesity_obj + fool_obj
            # Lets perform Backpropagation to compute Gradients and update the weights
            total_loss.backward()
            optimizer.step()
            # wandb Logging
            # perturbations=noise.permute(0,2,3,1).cpu().detach().numpy()*255
            # for perturb_idx,perturbation in enumerate(perturbations[:,]):
            #     im = Image.fromarray(perturbation.astype(np.uint8))
            #     wandb.log({"noise": [wandb.Image(im, caption=f"Noise_{arch}_{epoch}_{perturb_idx}")]})
            wandb.log({"fool_obj": fool_obj.item(),
                       "divesity_obj": divesity_obj.item(),
                       "total_loss":total_loss.item(),
                      })
            running_loss += total_loss.item()
            # Every 100 batches, validate with the most recent noise batch.
            if batch_idx!=0 and batch_idx % 100 ==0 :
                train_log.writelines(f"############### VALIDATION PHASE STARTED : {epoch}, Step : {int(batch_idx / 100)} ################")
                fool_rate,total_fool= validate_generator(noise,D_model,val_dl)
                print(f"Fooling rate: {fool_rate}. Total Items Fooled :{total_fool}")
                train_log.writelines(f"Fooling rate: {fool_rate}. Total Items Fooled :{total_fool}")
                print(f"Diversity Loss :{divesity_obj.item()} \n Fooling Loss: {fool_obj.item()} \n")
        print(f"Total Loss after Epoch No: {epoch +1} - {running_loss/(train_num//train_dl.batch_size)}")
        train_log.writelines(f"Loss after Epoch No: {epoch +1} is {running_loss/(train_num//train_dl.batch_size)}")
        # to_save can be any expression/condition that returns a bool
        save_checkpoint(adversarygen, to_save= True, filename=f'GeneratorW_{arch}_{epoch}_{rand_str}.pth')
        if epoch % 1 == 0:
            # save_perturbations(noise,arch,epoch)
            save_perturbations(noise,arch,epoch,wabdb_flag=True)
    train_log.close()
# -
# # Start Actual Training
# +
total_epochs = 20
lr = 1e-3
# Setting up Dataloaders
import time,gc
# for arch in model_dict.keys():
# This notebook run trains the generator against VGG16 only.
arch='vgg16'
start= time.time()
print(f"Training Generator for Arch {arch}")
model= model_dict[arch](pretrained=True)
# NOTE(review): the batch size is reduced 16 below the helper's value,
# presumably to leave GPU headroom -- confirm.
bs = get_bs(arch) - 16
print(bs)
# drop_last=True keeps every batch exactly `bs` items, matching the latent seed shape.
train_dl=DataLoader(data_train,batch_size=bs,shuffle=True,num_workers=4,pin_memory=True,drop_last=True)
val_dl=DataLoader(data_valid,batch_size=bs,shuffle=True,num_workers=4,pin_memory=True,drop_last=True)
dls = [train_dl,val_dl]
# Only the generator's parameters are optimised; the classifier stays frozen.
optimizer = optim.Adam(adversarygen.parameters(), lr=lr)
# del model, train_dl, val_dl,dls ,optimizer
# torch.cuda.empty_cache()
# gc.collect()
print(f"Elsasped Time {time.time()-start} Seconds")
# -
fit(nb_epochs=total_epochs,D_model=model,dls=dls,optimizer=optimizer)
# # Misc:
#
# +
# with tqdm(total = len(traindata)) as epoch_pbar:
# epoch_pbar.set_description(f'Epoch {epoch}')
# https://discuss.pytorch.org/t/training-a-model-via-a-train-method/58567
# -
# ##### Setup Caffenet and VGG-F (TODO)
#
# #### Loading VGG-F
# #### To do : Need to Write a Custom Model from scratch for VGG -F and load weights from caffe model
# - Refer Here : https://github.com/val-iisc/nag/blob/83564eb4a8b5177660e2f6566dd63faa16f76773/nets/vgg_f.py
# - https://github.com/val-iisc/nag/blob/83564eb4a8b5177660e2f6566dd63faa16f76773/misc/convert_weights.py
# - Here VGGF refers to VGG-Face model http://www.vlfeat.org/matconvnet/pretrained/.
# - How we can use that to classify Imagenet ?
# - load caffe prototxt and weights directly in pytorch --> https://github.com/marvis/pytorch-caffe
# - Convert Caffe models to Pytorch : https://github.com/vadimkantorov/caffemodel2pytorch
# #### Check this link for Conversion Tutorial : [Link](https://colab.research.google.com/drive/1i2dq6qctPvrLREhKOZNNBsNfKuaS0HYQ)
# ### Downloading Trained Weights from Google Drive (TODO)
#
# +
# #TODO
# # !pip install gdown
# import gdown
# import zipfile
# url = 'https://drive.google.com/uc?id=0B9P1L--7Wd2vU3VUVlFnbTgtS2c' # Need to update actual URL
# output = 'weights.zip'
# gdown.download(url, output, quiet=True)
# zipdata = zipfile.ZipFile('weights.zip')
# zipinfos = zipdata.infolist()
# for zipinfo in zipinfos:
# zipdata.extract(zipinfo)
# -
# ### Evaluating NAG performance across Models: For Tabular Column Generation
#
# # Question to be Answered
# - In the Repo code train_generator.py
# L241 : feature_loss = -10*tf.reduce_mean(tf.squared_difference(f1_res4f,f2_res4f))#feature_distance(f1_res4f,f2_res4f)
# # Why is it computed and multiplied by 10 ?
#
# ### Steps to evaluate the perturbations generated by Generator Network (TODO)
# arch='Fixed'
# for modelarch, model in model_dict.items():
# num_iteration = 10 # Blackbox Settings
# if modelarch == arch:
# num_iteration =100 # Whitebox Settings
# for i in range(num_iteration)
# 1. Load the Weights of the Generator
# 2. Generate a Perturbation using a random vector of dimension latent_dim,1
# 3. Add the noise to a sample image
#
# ## References:
# - GAN Architecture : Pytorch Tutorial
# - [Transpose Convolution Docs](https://pytorch.org/docs/stable/nn.html?highlight=convtranspose2d#torch.nn.ConvTranspose2d)
# +
# def fit(nb_epochs,D_model,dls,optimizer,adversarygen=adversarygen):
# # Set the Discriminator in Eval mode; Weights are fixed.
# train_dl,val_dl = dls
# D_model=D_model.to(device)
# D_model.eval()
# train_log = open(f'train_log_{arch}.txt','w')
# for epoch in tqdm(range(nb_epochs),total=nb_epochs):
# running_loss=0
# print(f"############### TRAIN PHASE STARTED : {epoch} ################")
# train_log.writelines(f"############### TRAIN PHASE STARTED : {epoch}################")
# tic=time.time()
# for batch_idx, data in tqdm(enumerate(train_dl),total = train_num//train_dl.batch_size):
# # Move Data and Labels to device(GPU)
# images = data[0].to(device)
# labels = data[1].to(device)
# # Generate the Adversarial Noise from Uniform Distribution U[-1,1]
# latent_seed = 2 * torch.rand(bs, nz, 1, 1, device=device,requires_grad=True) -1 # (r1 - r2) * torch.rand(a, b) + r2
# noise = adversarygen(latent_seed)
# optimizer.zero_grad()
# # XB = images
# #preds_XB = f(images)
# prob_vec_clean = F.softmax(D_model(images),dim=0) # Variable q
# _ ,clean_idx = get_preds(prob_vec_clean,return_idx=True,k=1)
# #XA = images+noise
# #preds_XA = f(images + noise)
# prob_vec_no_shuffle = D_model(images + noise)
# qc_ = F.softmax(prob_vec_no_shuffle,dim=0)[:,clean_idx] # Variable q'c
# # 1. fooling_objective: encourages G to generate perturbations that decrease confidence of benign predictions
# fool_obj, mean_qc_ = fooling_objective(qc_)
# # Perturbations are shuffled across the batch dimesion to improve diversity
# #XS = images+ noise[torch.randperm(bs)]
# prob_vec_shuffled = D_model(images + noise[torch.randperm(bs)])
# # 2. encourages Generator to explore the space of perturbations and generate a diverse set of perturbations
# divesity_obj=diversity_objective(prob_vec_no_shuffle, prob_vec_shuffled)
# # Compute Total Loss
# total_loss = divesity_obj+fool_obj
# # Lets perform Backpropagation to compute Gradients and update the weights
# total_loss.backward()
# optimizer.step()
# running_loss += total_loss.item()
# if batch_idx!=0 and batch_idx % 100 ==0 :
# print(f"############### VALIDATION PHASE STARTED : {epoch}, Step :{int(batch_idx / 100)} ################")
# train_log.writelines(f"############### VALIDATION PHASE STARTED : {epoch}, Step :{int(batch_idx / 100)} ################")
# ticval=time.time()
# fool_rate,total_fool= validate_generator(noise,D_model,val_dl)
# print(f"Fooling rate: {fool_rate}. Total Items Fooled :{total_fool}")
# train_log.writelines(f"Fooling rate: {fool_rate}. Total Items Fooled :{total_fool}")
# print(f"Time Elasped for Validating: {time.time()-ticval} Seconds")
# print(f"Loss after Epoch No: {epoch +1} is {running_loss}")
# train_log.writelines(f"Loss after Epoch No: {epoch +1} is {running_loss}")
# # to_save can be any expression/condition that returns a bool
# save_checkpoint(adversarygen, to_save= True, filename=f'GeneratorW_{arch}_{epoch}.pth')
# if epoch % 1 == 0:
# save_perturbations(noise,arch,epoch)
# print(f"Time Elasped for Trainning one Epoch : {time.time()-tic} Seconds")
# train_log.close()
| nbs/ColabVersion_Final-vgg16.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext watermark
# %watermark -a '<NAME>' -u -d -v -p numpy,xarray,scipy,pandas,sklearn,matplotlib,seaborn,qgrid,rpy2,libpgm,pgmpy,networkx,graphviz,pybnl,pytest
# +
# %matplotlib inline
import numpy as np, pandas as pd, xarray as xr, matplotlib.pyplot as plt, seaborn as sns
import sklearn, sklearn.pipeline
import networkx as nx, graphviz, networkx.algorithms.dag
import random
import itertools
# Wide display settings so the 80+ Ames columns are readable in one row.
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# pd.set_option('display.float_format', lambda x: '%.2f' % x)
np.set_printoptions(edgeitems=10)
np.set_printoptions(suppress=True)
# NOTE(review): np.core.arrayprint._line_width is a private API and may break
# across NumPy versions; np.set_printoptions(linewidth=180) is the public route.
np.core.arrayprint._line_width = 180
sns.set()
# +
from IPython.display import display, HTML
from IPython.display import display_html
def display_side_by_side(*args):
    """Render several DataFrames (or ndarrays) horizontally in one output cell.

    Each argument is converted to a DataFrame if needed, turned into HTML and
    displayed inline so the tables sit next to each other instead of stacking.
    """
    html_str=''
    for df in args:
        if type(df) == np.ndarray:
            df = pd.DataFrame(df)
        html_str+=df.to_html()
    # Bug fix: replacing the bare substring 'table' also rewrote the closing
    # '</table>' tags (and any cell text containing 'table'); only opening
    # tags should receive the inline style.
    html_str = html_str.replace('<table','<table style="display:inline"')
    # print(html_str)
    display_html(html_str,raw=True)
CSS = """
.output {
flex-direction: row;
}
"""
def display_graphs_side_by_side(*args):
    """Show several graphviz graphs next to each other in one HTML table row."""
    cells = ''.join('<td>' + g._repr_svg_() + '</td>' for g in args)
    display_html('<table><tr>' + cells + '</tr></table>', raw=True)
# Widen the notebook container so side-by-side output has room.
display(HTML("<style>.container { width:70% !important; }</style>"))
# -
# %load_ext rpy2.ipython
# %load_ext autoreload
# %autoreload 1
# %aimport pybnl.bn
# %aimport dsbasics.bin
# +
import locale
# R (via rpy2) expects the C locale for consistent parsing/formatting.
locale.setlocale(locale.LC_ALL, 'C')
import rpy2, rpy2.rinterface, rpy2.robjects, rpy2.robjects.packages, rpy2.robjects.lib, rpy2.robjects.lib.grid, \
    rpy2.robjects.lib.ggplot2, rpy2.robjects.pandas2ri, rpy2.interactive.process_revents, \
    rpy2.interactive, rpy2.robjects.lib.grdevices
# rpy2.interactive.process_revents.start()
# Enable automatic pandas <-> R data.frame conversion.
rpy2.robjects.pandas2ri.activate()
# # House Prices in Ames, Iowa
# * [Ames, Iowa: Alternative to the Boston Housing Data as an End of Semester Regression Project](http://ww2.amstat.org/publications/jse/v19n3/decock.pdf)
# * [AmesResidential.pdf](https://ww2.amstat.org/publications/jse/v19n3/decock/AmesResidential.pdf)
# * [DataDocumentation.txt](https://ww2.amstat.org/publications/jse/v19n3/decock/DataDocumentation.txt)
# * [AmesHousing.txt](https://ww2.amstat.org/publications/jse/v19n3/decock/AmesHousing.txt)
# * [AmesHousing.xls](http://www.amstat.org/publications/jse/v19n3/decock/AmesHousing.xls)
# * Also on [kaggle](https://www.kaggle.com/c/house-prices-advanced-regression-techniques)
# The below example reproduces the example from chapter 5 (page 79) in [Bayesian Networks and BayesiaLab: A Practical Introduction for Researchers](https://www.amazon.com/Bayesian-Networks-BayesiaLab-Introduction-Researchers/dp/0996533303).
# Load the raw Ames data (tab-separated, first column is the row index).
df = pd.read_csv('./AmesHousing.txt.gz', sep='\t', index_col=0)
# Zero-pad MS SubClass to 3 digits so it reads as a categorical code, not a number.
df['MS SubClass'] = df['MS SubClass'].apply(lambda x: '{0:0>3}'.format(x))
df.iloc[:5,:15]
df.columns
# Classify variables as discrete vs continuous and whether they contain nulls;
# levels_map holds the observed category levels per discrete column.
discrete_non_null, discrete_with_null, continuous_non_null, continuous_with_null, levels_map = pybnl.bn.discrete_and_continuous_variables_with_and_without_nulls(df, cutoff=30)
# discrete_non_null, discrete_with_null, continuous_non_null, continuous_with_null, levels_map
# +
ddf = df.copy()
#cat_columns = ['Alley', 'Bedroom AbvGr', 'Bldg Type', 'Bsmt Cond', ]
# Nominal categoricals (plus the two 'Overall' ratings, which become ordered
# below because their observed levels are numeric).
cat_columns = [
    'MS SubClass', 'MS Zoning', 'Street', 'Alley', 'Land Contour', 'Lot Config', 'Neighborhood', 'Condition 1', 'Condition 2', 'Bldg Type', 'House Style',
    'Roof Style', 'Roof Matl', 'Exterior 1st', 'Exterior 2nd', 'Mas Vnr Type', 'Foundation', 'Heating', 'Central Air', 'Garage Type', 'Misc Feature', 'Sale Type', 'Sale Condition'
] + [
    'Overall Qual', 'Overall Cond'
]
# Ordinal categoricals with levels listed best-to-worst (reversed on
# assignment so pandas stores them worst-to-best).
cat_columns_ordinal = [
    ('Lot Shape', ['Reg','IR1','IR2','IR3']),
    ('Utilities', ['AllPub','NoSewr','NoSeWa','ELO']),
    ('Land Slope', ['Gtl', 'Mod', 'Sev']),
    ('Exter Qual', ['Ex', 'Gd', 'TA', 'Fa', 'Po']),
    ('Exter Cond', ['Ex', 'Gd', 'TA', 'Fa', 'Po']),
    ('Bsmt Qual', ['Ex', 'Gd', 'TA', 'Fa', 'Po', 'NA']),
    ('Bsmt Cond', ['Ex', 'Gd', 'TA', 'Fa', 'Po', 'NA']),
    ('Bsmt Exposure', ['Gd', 'Av', 'Mn', 'No', 'NA']),
    ('BsmtFin Type 1', ['GLQ', 'ALQ', 'BLQ', 'Rec', 'LwQ', 'Unf', 'NA']),
    ('BsmtFin Type 2', ['GLQ', 'ALQ', 'BLQ', 'Rec', 'LwQ', 'Unf', 'NA']),
    ('Heating QC', ['Ex', 'Gd', 'TA', 'Fa', 'Po']),
    ('Electrical', ['SBrkr', 'FuseA', 'FuseF', 'FuseP', 'Mix']),
    ('Kitchen Qual', ['Ex', 'Gd', 'TA', 'Fa', 'Po']),
    ('Functional', ['Typ', 'Min1', 'Min2', 'Mod', 'Maj1', 'Maj2', 'Sev', 'Sal']),
    ('Fireplace Qu', ['Ex', 'Gd', 'TA', 'Fa', 'Po', 'NA']),
    ('Garage Finish', ['Fin', 'RFn', 'Unf', 'NA']),
    ('Garage Qual', ['Ex', 'Gd', 'TA', 'Fa', 'Po', 'NA']),
    ('Garage Cond', ['Ex', 'Gd', 'TA', 'Fa', 'Po', 'NA']),
    ('Paved Drive', ['Y', 'P', 'N']),
    ('Pool QC', ['Ex', 'Gd', 'TA', 'Fa', 'NA']),
    ('Fence', ['GdPrv', 'MnPrv', 'GdWo', 'MnWw', 'NA']),
]
continuous_columns = [
    'Lot Frontage', 'Lot Area', 'Mas Vnr Area', 'BsmtFin SF 1', 'BsmtFin SF 2', 'Bsmt Unf SF', 'Total Bsmt SF', '1st Flr SF', '2nd Flr SF', 'Low Qual Fin SF', 'Gr Liv Area', 'Garage Area', 'Wood Deck SF', 'Open Porch SF', 'Enclosed Porch', '3Ssn Porch',
    'Screen Porch', 'Pool Area', 'Misc Val', 'SalePrice'
]
discrete_columns = ['Year Built', 'Year Remod/Add', 'Bsmt Full Bath', 'Bsmt Half Bath', 'Full Bath', 'Half Bath', 'TotRms AbvGrd', 'Fireplaces', 'Garage Yr Blt', 'Garage Cars', 'Mo Sold', 'Yr Sold', 'Bedroom AbvGr', 'Kitchen AbvGr']# do not exist: 'Bedroom', 'Kitchen'
for col in cat_columns:
    levels = levels_map[col]
    # print('col: {}'.format(col))
    # if all([isinstance(level, (int, float)) for level in levels]):
    # Purely numeric level sets (e.g. 'Overall Qual') become ordered categoricals.
    if all([np.issubdtype(type(level), np.number) for level in levels]):
        # print('int, float column: {}'.format(col))
        levels = sorted(levels)
        ddf[col] = df[col].astype(pd.api.types.CategoricalDtype(levels, ordered=True))
    else:
        ddf[col] = df[col].astype(pd.api.types.CategoricalDtype(levels, ordered=False))
for col, levels in cat_columns_ordinal:
    ddf[col] = df[col].astype(pd.api.types.CategoricalDtype(levels[::-1], ordered=True))
for col in continuous_columns:
    ddf[col] = df[col].astype(float)
# Discrete numeric columns: keep as int unless nulls force a float dtype.
for col in discrete_columns:
    if pd.isnull(df[col]).any():
        ddf[col] = df[col].astype(float)
    else:
        ddf[col] = df[col].astype(int)
# col = 'Alley'
# ddf[col]
# ddf[~pd.isnull(ddf[col])][col]
# value = np.nan
# ddf.loc[df[col]==value,col]
# -
# [Working with Pandas: Fixing messy column names](https://medium.com/@chaimgluck1/working-with-pandas-fixing-messy-column-names-42a54a6659cd)
# Normalise column names for R/bnlearn: strip whitespace, replace blanks with
# underscores, drop parentheses.
# NOTE(review): str.replace('(', ...) relies on literal (non-regex) matching;
# newer pandas treat the pattern as a regex by default -- confirm pandas version
# or pass regex=False.
ddf.columns = ddf.columns.str.strip().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')
type(ddf.columns)
ddf.head()
# +
# ddf.to_hdf('AmesHousing.h5', 'AmesHousing',format='table', append=False)
# +
# pd.read_hdf('AmesHousing.h5', 'AmesHousing').head()
# -
# ## Treating Filtered Values ('FV')
# See page 84 in "Bayesian Networks and BayesiaLab"
# ### Bsmt fields
# Basement columns in raw (spaced) and normalised (underscored) spellings.
bsmt_fields_ = ['Bsmt Qual', 'Bsmt Cond', 'Bsmt Exposure', 'BsmtFin Type 1', 'BsmtFin Type 2', 'Bsmt Full Bath', 'Bsmt Half Bath',
                'BsmtFin SF 1', 'BsmtFin SF 2', 'Bsmt Unf SF', 'Total Bsmt SF']
bsmt_fields = pd.Index(bsmt_fields_).str.strip().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')
ddf[bsmt_fields].query('Bsmt_Qual == "NA"')
# ddf[ddf['Bsmt_Qual'] == 'NA'][bsmt_fields]
# df[bsmt_fields_][df['Bsmt Qual'] == 'NA']
# It seems that there are no filtered values for 'Bsmt' fields, e.g. each home contains a basement.
# [Querying for NaN and other names in Pandas](https://stackoverflow.com/questions/26535563/querying-for-nan-and-other-names-in-pandas)
ddf[bsmt_fields][pd.isnull(ddf.Bsmt_Qual)].head()
df[bsmt_fields_][pd.isnull(df['Bsmt Qual'])].head()
# But there are quite a lot of 'NaN' entries. Not sure why the data description contains an "NA" value as "No Basement", but no actual data-set uses it. Most likely these values are supposed to be "NA"
bsmt_na_fields = ['Bsmt_Qual', 'Bsmt_Cond', 'Bsmt_Exposure', 'BsmtFin_Type_1', 'BsmtFin_Type_2']
# Recode missing basement descriptors as the documented 'NA' (= no basement) level.
ddf.loc[pd.isnull(ddf.Bsmt_Qual), bsmt_na_fields] = "NA"
ddf[bsmt_fields].query('Bsmt_Qual == "NA"').head()
# ### Electrical field
ddf.Electrical.value_counts(dropna=False)
ddf[pd.isnull(ddf.Electrical)]
# The one NaN value seems to be a missing value
# ### Fireplaces
fireplaces_fields_ = ['Fireplaces', 'Fireplace Qu']
fireplaces_fields = pd.Index(fireplaces_fields_).str.strip().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')
ddf[fireplaces_fields].query('Fireplaces == 0').head()
# Zero fireplaces implies the quality field is the filtered value 'NA'.
ddf.loc[ddf.Fireplaces == 0,['Fireplace_Qu']] = 'NA'
ddf[fireplaces_fields].query('Fireplaces == 0').head()
# ### Garage fields
garage_fields_ = ['Garage Type', 'Garage Finish', 'Garage Cars', 'Garage Qual', 'Garage Cond', 'Garage Yr Blt', 'Garage Area']
garage_fields = pd.Index(garage_fields_).str.strip().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')
ddf[garage_fields][pd.isnull(ddf.Garage_Type)].head()
# 'NA' is not among the observed Garage_Type levels, so add it before assigning.
ddf['Garage_Type'] = ddf['Garage_Type'].astype(str)\
    .astype(pd.api.types.CategoricalDtype(set(list(ddf.Garage_Type.dtype.categories) + ['NA'])))
ddf.Garage_Type.dtype.categories
ddf.loc[pd.isnull(ddf.Garage_Type),['Garage_Type', 'Garage_Finish', 'Garage_Qual', 'Garage_Cond']] = 'NA'
# Sentinel year -1.0 marks 'no garage' while keeping the column numeric.
ddf.loc[ddf.Garage_Type == 'NA',['Garage_Yr_Blt']] = -1.0
#ddf[garage_fields][pd.isnull(ddf.Garage_Yr_Blt)]
#ddf['Garage_Yr_Blt'] = ddf['Garage_Yr_Blt'].astype(int)
ddf[garage_fields][ddf.Garage_Type == 'NA'].head()
# ### Mas Vnr fields
mas_vnr_fields_ = ['Mas Vnr Type', 'Mas Vnr Area']
mas_vnr_fields = pd.Index(mas_vnr_fields_).str.strip().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')
ddf[mas_vnr_fields][pd.isnull(ddf.Mas_Vnr_Type)].head()
ddf.Mas_Vnr_Type.dtype
# Missing veneer type means no veneer: type 'None', area 0.
ddf.loc[pd.isnull(ddf.Mas_Vnr_Type), ['Mas_Vnr_Type']] = 'None'
ddf.loc[ddf.Mas_Vnr_Type == 'None', ['Mas_Vnr_Area']] = 0.0
ddf[mas_vnr_fields][ddf.Mas_Vnr_Type == 'None'].head()
# ### Pool fields
pool_fields_ = ['Pool QC', 'Pool Area']
pool_fields = pd.Index(pool_fields_).str.strip().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')
ddf[pool_fields][pd.isnull(ddf.Pool_QC)].head()
ddf.loc[pd.isnull(ddf.Pool_QC), ['Pool_QC']] = 'NA'
ddf[pool_fields][ddf.Pool_QC == 'NA'].head()
# ### Fence field
fence_fields_ = ['Fence']
fence_fields = pd.Index(fence_fields_).str.strip().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')
ddf.loc[pd.isnull(ddf.Fence), ['Fence']] = 'NA'
ddf[fence_fields][ddf.Fence == 'NA'].head()
# ### Misc Feature field
misc_feature_fields_ = ['Misc Feature']
misc_feature_fields = pd.Index(misc_feature_fields_).str.strip().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')
# As with Garage_Type: add the 'NA' level before assigning it.
ddf['Misc_Feature'] = ddf['Misc_Feature'].astype(str)\
    .astype(pd.api.types.CategoricalDtype(set(list(ddf.Misc_Feature.dtype.categories) + ['NA'])))
ddf.loc[pd.isnull(ddf.Misc_Feature), ['Misc_Feature']] = 'NA'
ddf[misc_feature_fields][ddf.Misc_Feature == 'NA'].head()
# ### Check remaining nan fields
_, discrete_with_null_, _, continuous_with_null_, _ = pybnl.bn.discrete_and_continuous_variables_with_and_without_nulls(ddf, cutoff=30)
discrete_with_null_, continuous_with_null_
ddf[bsmt_fields][pd.isnull(ddf.Bsmt_Exposure)]
# Just set the few NaN values to 'No'
ddf.loc[pd.isnull(ddf.Bsmt_Exposure),['Bsmt_Exposure']] = 'No'
ddf[bsmt_fields][pd.isnull(ddf.BsmtFin_Type_2)]
# Just set the few NaN values to 'Unf'
ddf.loc[pd.isnull(ddf.BsmtFin_Type_2),['BsmtFin_Type_2']] = 'Unf'
ddf[bsmt_fields][pd.isnull(ddf.Bsmt_Full_Bath)]
ddf[bsmt_fields][pd.isnull(ddf.Bsmt_Half_Bath)]
# Just set all the NaN values to 0.0
ddf.loc[pd.isnull(ddf.Bsmt_Full_Bath),['Bsmt_Full_Bath','Bsmt_Half_Bath','BsmtFin_SF_1','BsmtFin_SF_2','Bsmt_Unf_SF','Total_Bsmt_SF']] = [0.0,0.0,0.0,0.0,0.0,0.0]
ddf[['Electrical']][pd.isnull(ddf.Electrical)]
# Just set this single NaN value to 'Mix'
ddf.loc[pd.isnull(ddf.Electrical),['Electrical']] = 'Mix'
# The remaining NaN garage fields seem to be really missing values so don't touch them.
ddf[garage_fields][pd.isnull(ddf.Garage_Finish)]
ddf[garage_fields][pd.isnull(ddf.Garage_Cars)]
ddf[garage_fields][pd.isnull(ddf.Garage_Qual)]
ddf[garage_fields][pd.isnull(ddf.Garage_Cond)]
# The Lot_Frontage NaN fields seem to be really missing values so don't touch them.
ddf[pd.isnull(ddf.Lot_Frontage)].head()
# Re-run the null scan to confirm only genuinely-missing fields remain.
_, discrete_with_null_, _, continuous_with_null_, _ = pybnl.bn.discrete_and_continuous_variables_with_and_without_nulls(ddf, cutoff=30)
discrete_with_null_, continuous_with_null_
# ## Binning / Discretization
ddf1 = ddf.copy()
# ### The target variable is SalePrice
ddf.SalePrice.describe()
# Discretise the target into 5 left-closed price bands.
# NOTE(review): np.PINF was removed in NumPy 2.0; np.inf is the portable spelling.
ddf1.SalePrice = pd.cut(ddf.SalePrice, [0.0,75000.0, 150000.0, 225000.0, 300000.0,np.PINF], right=False)
ddf1['SalePrice'].value_counts()
# ### Continuous, discrete and ordinal variables
target_variable_decision_tree_binning_variables_ = [c for c,r in cat_columns_ordinal] + continuous_columns + discrete_columns + ['Overall Qual', 'Overall Cond']
target_variable_decision_tree_binning_variables = pd.Index(target_variable_decision_tree_binning_variables_).str.strip().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')
# target_variable_decision_tree_binning_variables
# Supervised binning: a decision tree against SalePrice picks at most 3 bins
# per predictor.
tvbt = dsbasics.bin.TargetVariableDecisionTreeBinTransformer(max_leaf_nodes=3)
ddf1.loc[:,target_variable_decision_tree_binning_variables] = \
    tvbt.fit_transform(ddf[target_variable_decision_tree_binning_variables], ddf1.SalePrice)
ddf1.head()
len(df.columns),len(ddf1.columns)
# Compare the raw vs binned distribution of Overall_Qual.
ddf['Overall_Qual'].value_counts().sort_index().plot(kind='bar');
ddf1['Overall_Qual'].value_counts().sort_index().plot(kind='bar');
# ### Drop unused columns
# PID is a parcel identifier -- useless for modelling.
if any(ddf1.columns.isin(['PID'])):
    ddf1.drop('PID', axis=1, inplace=True)
ddf1.columns
# ### Convert interval indices to strings
ddf2_0 = pybnl.bn.convert_interval_index_categories_to_string_categories(ddf1,inplace=False)
# ### Rename columns to fit with R conventions
# R identifiers cannot start with a digit or contain '/'.
ddf2 = ddf2_0.rename(columns={
    "1st_Flr_SF":"X1st_Flr_SF",
    "2nd_Flr_SF":"X2nd_Flr_SF",
    "3Ssn_Porch":"X3Ssn_Porch",
    "Year_Remod/Add":"Year_Remod_Add"
})
ddf2.Bsmt_Full_Bath.dtype
ddf2.Bsmt_Full_Bath.value_counts(dropna=False)
# ## Naive Bayes Classifier
# ### bnlearn by hand
# #### Define net
# +
# Naive-Bayes structure: the class variable (SalePrice, last column) is the
# single parent of every feature column.
dg = nx.DiGraph()
dg.add_nodes_from(ddf2.columns.values)
in_vars = ddf2.columns.values[:-1]
out_var = ddf2.columns.values[-1:]
dg.add_edges_from(list(itertools.product(out_var, in_vars)))
# -
ns = pybnl.bn.digraph2netstruct(dg)
# ns.dot()
display(HTML(ns.dot()._repr_svg_()))
ddf2.columns
# Fit the discrete Bayes net with parametric EM (handles the remaining NaNs).
nb1 = pybnl.bn.ParametricEMNetAndDataDiscreteBayesNetwork(ddf2, dg=dg)
nb1.df.Bsmt_Full_Bath.dtype
print('self.df: {}'.format(nb1.df.Bsmt_Full_Bath.dtype.categories))
#nb1.df.Bsmt_Full_Bath.dtype.__class__
nb1.fit()
# +
# nb1.im_.imputed_['Bsmt_Full_Bath'].astype(str).values
# +
# nb1.im_.df['Bsmt_Full_Bath'].astype(str).values
# +
# nb1.im_.df.Bsmt_Full_Bath.head()
# +
# nb1.im_.imputed_.Bsmt_Full_Bath.head()
# +
# print(nb1.im_.r_df_.rx('Bsmt_Full_Bath'))
# -
# Expose the fitted R objects for the %%R cells below.
tmp_r_df_ = nb1.r_df_
# print(nb1.r_df_.rx('Bsmt_Full_Bath'))
# + magic_args="-i tmp_r_df_" language="R"
# is.factor(tmp_r_df_$Bsmt_Full_Bath)
# # levels(tmp_r_df_["Bsmt_Full_Bath"])
# #str(tmp_r_df_)
# -
tmp_rnet = nb1.f_.rnet
tmp_rfit = nb1.f_.rfit
# print(nb1.f_.rfit)
# + magic_args="-i tmp_rnet -i tmp_rfit -i tmp_r_df_" language="R"
# is.factor(tmp_r_df_$Bsmt_Full_Bath)
# saveRDS(tmp_rnet , 'ames_housing_rnet.rds')
# saveRDS(tmp_r_df_, 'ames_housing_rdf.rds')
# saveRDS(tmp_rfit, 'ames_housing_rfit.rds')
# + language="R"
# nrow(tmp_r_df_)
# -
# ### bnlearn via naive.bayes
# ### sklearn MultinomialNB
| examples/010-2018-06-17-ames-house-prices.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('base')
# language: python
# name: python397jvsc74a57bd042fd40e048e0585f88ec242f050f7ef0895cf845a8dd1159352394e5826cd102
# ---
# +
#default_exp test_utils
# + [markdown] tags=[]
# # Internal Testing Utilities
# > Utilities that help to test and display intermediate results of custom Preprocessors
# -
#export
from nbconvert import MarkdownExporter
from traitlets.config import Config
from fastcore.xtras import Path
#export
def run_preprocessor(pp, nbfile, template_file='ob.tpl', display_results=False):
    "Runs a preprocessor with the MarkdownExporter and optionally displays results."
    cfg = Config()
    cfg.MarkdownExporter.preprocessors = pp
    # Templates live in the 'templates' folder next to this module.
    cfg.MarkdownExporter.template_file = str(Path(__file__).parent/'templates/'/f"{template_file}")
    exporter = MarkdownExporter(config=cfg)
    result = exporter.from_filename(nbfile)
    if display_results:
        print(result[0])
    return result
#export
def show_plain_md(nbfile):
    "Print the plain Markdown export of *nbfile* with no custom preprocessors (baseline for comparison)."
    md = MarkdownExporter()
    print(md.from_filename(nbfile)[0])
# Smoke test: render the sample notebook with the stock exporter.
show_plain_md('test_files/hello_world.ipynb')
| nbs/test_utils.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Programming for Data Analysis Project 2018
#
# ### <NAME> G00281051
#
# #### Problem statement
#
# For this project you must create a data set by simulating a real-world phenomenon of your choosing. You may pick any phenomenon you wish – you might pick one that is of interest to you in your personal or professional life. Then, rather than collect data related to the phenomenon, you should model and synthesise such data using Python. We suggest you use the numpy.random package for this purpose.
#
# Specifically, in this project you should:
#
# 1. Choose a real-world phenomenon that can be measured and for which you could collect at least one-hundred data points across at least four different variables.
# 2. Investigate the types of variables involved, their likely distributions, and their relationships with each other.
# 3. Synthesise/simulate a data set as closely matching their properties as possible.
# 4. Detail your research and implement the simulation in a Jupyter notebook – the data set itself can simply be displayed in an output cell within the notebook.
#
# ### 1. Choose a real-world phenomenon that can be measured and for which you could collect at least one-hundred data points across at least four different variables.
#
# For the purpose of this project, I shall extract some wave buoy data from the [M6 weather buoy](http://www.marine.ie/Home/site-area/data-services/real-time-observations/irish-weather-buoy-network) off the west coast of Ireland. I surf occasionally, and many surfers, like myself, use weather buoy data in order to predict when there will be decent waves to surf. There are many online resources that provide such information, but I thought this may be an enjoyable exploration of raw data that is used every day, worldwide.
# +
# Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Downloaded hly62095.csv from https://data.gov.ie/dataset/hourly-data-for-buoy-m6
# Opened dataset in VSCode. It contains the label legend, so I have skipped these rows.
# I also only want to utilise 4 relevant columns of data, I'll use the 'usecols' argument:
# https://realpython.com/python-data-cleaning-numpy-pandas/#dropping-columns-in-a-dataframe
df = pd.read_csv("hly62095.csv", skiprows = 19, low_memory = False, usecols= ['date', 'dir', 'per', 'wavht'])
# Change the date column to a Pythonic datetime -
# reference: https://github.com/ianmcloughlin/jupyter-teaching-notebooks/raw/master/time-series.ipynb
# The raw 'date' strings are kept; the parsed timestamps go into a new 'datetime' column.
df['datetime'] = pd.to_datetime(df['date'])
# -
# Downloaded hly62095.csv from https://data.gov.ie/dataset/hourly-data-for-buoy-m6. Opened dataset in VSCode. It contains the label legend, so I have skipped these rows 1-19:
#
# ### Label legend
#
# ```
# 1. Station Name: M6
# 2. Station Height: 0 M
# 3. Latitude:52.990 ,Longitude: -15.870
# 4.
# 5.
# 6. date: - Date and Time (utc)
# 7. temp: - Air Temperature (C)
# 8. rhum: - Relative Humidity (%)
# 9. windsp: - Mean Wind Speed (kt)
# 10. dir: - Mean Wind Direction (degrees)
# 11. gust: - Maximum Gust (kt)
# 12. msl: - Mean Sea Level Pressure (hPa)
# 13. seatp: - Sea Temperature (C)
# 14. per: - Significant Wave Period (seconds)
# 15. wavht: - Significant Wave Height (m)
# 16. mxwav: - Individual Maximum Wave Height(m)
# 17. wvdir: - Wave Direction (degrees)
# 18. ind: - Indicator
# 19.
# 20. date,temp,rhum,wdsp,dir,gust,msl,seatp,per,wavht,mxwave,wvdir
# 21. 25-sep-2006 09:00,15.2, ,8.000,240.000, ,1007.2,15.4,6.000,1.5, ,
# 22. 25-sep-2006 10:00,15.2, ,8.000,220.000, ,1008.0,15.4,6.000,1.5, ,.........
#
# ```
# View DataFrame
df
# There are a significant number of missing datapoints, and it's a large sample, with 94248 rows. I'm going to explore this further, and extract the relevant data for September 2018. This will give me enough data to explore and simulate for this project.
# First, I'll describe the datatypes in the set.
df.describe()
# I want to view the data for September 2018. So I'll extract the relevant datapoints from this dataset.
# +
# Create a datetime index for a data frame: 30 daily timestamps covering September 2018.
# Adapted from: https://pandas.pydata.org/pandas-docs/stable/timeseries.html
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.date_range.html
rng = pd.date_range(start='1-sep-2018', periods=30, freq='D')
# -
rng
# I'm using 4 variables from the dataset. These are;
#
# 1. date: - Date and Time (utc)
# 2. dir: - Mean Wind Direction (degrees)
# 3. per: - Significant Wave Period (seconds) - This is important for quality waves!
# 4. wavht: - Significant Wave Height (m)
df.head(10)
# Next, I'll display the data columns from 1st September 2018. Since I've already removed rows 1-19, the row label numbers have been modified by pandas. In this case - I worked backwards to find the right label number.
#
# I'm going to name this smaller dataframe 'wavedata'.
# Rows from 1st September 2018 onward.  read_csv was called without an
# index_col, so the frame carries the default integer RangeIndex; .loc must
# therefore be given an *integer* label.  Slicing with the string '93530'
# raises a TypeError on an integer index, so use the numeric label instead.
wavedata = df.loc[93530:]
wavedata
# I'm now going to check for Null values in the data.
# checking for null values
# There are no null values in this dataframe, according to pandas!
wavedata.isnull().sum()
# ### 2. Investigate the types of variables involved, their likely distributions, and their relationships with each other.
#
# Now for some exploratory data analysis.
# Check datatypes
df.dtypes
# #### Explore distributions
# +
# %matplotlib inline
plt.figure(figsize=(26, 10))
plot = sns.scatterplot(x="datetime", y="wavht", hue='per', data=wavedata)
# -
# It looks like I've a problem with this data! It's mostly objects, not floats.
| Programming for Data Analysis Project 2018.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Smoke-test the similaritydf helper: build a pairwise similarity table for a
# mixed list of topics (several names were anonymised to '<NAME>' in this copy).
import similaritydf
from similaritydf import get_df
topics = ['Kygo','Odesza','<NAME>','<NAME>','Grum','Cascada','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','Chemistry','Biology','Physics','Periodic table']
test_df = get_df(topics)
def see_topic(df, val, topic_names=None):
    """Return the rows of *df* that involve one topic, most similar first.

    Parameters
    ----------
    df : pandas.DataFrame
        Pairwise similarity table with 'Topic 1', 'Topic 2' and
        'Probability' columns (as produced by ``get_df``).
    val : int
        Index of the topic of interest within *topic_names*.
    topic_names : sequence of str, optional
        Topic labels to index into.  Defaults to the module-level
        ``topics`` list, preserving the original behaviour while removing
        the hard dependency on that global.

    Returns
    -------
    pandas.DataFrame
        The matching rows sorted by 'Probability' in descending order.
    """
    names = topics if topic_names is None else topic_names
    # A row matches if the topic appears on either side of the pair.
    mask = (df['Topic 1'] == names[val]) | (df['Topic 2'] == names[val])
    return df[mask].sort_values('Probability', ascending=False)
# Display the similarity ranking for each of the 16 topics in turn
# (one cell per topic so each table renders in the notebook output).
see_topic(test_df, 0)
see_topic(test_df, 1)
see_topic(test_df, 2)
see_topic(test_df, 3)
see_topic(test_df, 4)
see_topic(test_df, 5)
see_topic(test_df, 6)
see_topic(test_df, 7)
see_topic(test_df, 8)
see_topic(test_df, 9)
see_topic(test_df, 10)
see_topic(test_df, 11)
see_topic(test_df, 12)
see_topic(test_df, 13)
see_topic(test_df, 14)
see_topic(test_df, 15)
| TEST03. Testing similaritydf to get dataframe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import math
import pandas
from datetime import datetime
import png
# +
infile = '../data/ice-dates.csv'
# YEAR1 is presumably the season's first year and CLOSEDDATE/OPENEDDATE the
# ice-over / ice-out dates for that season -- TODO confirm against the source.
data = pandas.read_csv(infile, usecols = ["YEAR1","CLOSEDDATE","OPENEDDATE"],
                       parse_dates = ["CLOSEDDATE","OPENEDDATE"])
# +
# Offsets are measured from 20 November of each season's first year, then
# floored into 3-day bins (the NORMOFFSET columns).
marker_month = "11"
marker_day = "20"
for state in ["CLOSED","OPENED"]:
    data[state+'OFFSET'] = data.apply(lambda x: x[state+'DATE'] - datetime.strptime(str(x['YEAR1'])+"-"+marker_month+"-"+marker_day,'%Y-%m-%d'), axis = 1)
    data[state+'NORMOFFSET'] = data.apply(lambda x: math.floor(x[state+'OFFSET'].total_seconds() / (60 * 60 * 24) / 3), axis = 1)
# +
#data.head(20)
# -
start_yr = int(data['YEAR1'].min())
end_yr = int(data['YEAR1'].max())
height = end_yr - start_yr + 1
delta_min = data['CLOSEDNORMOFFSET'].min()
delta_max = data['OPENEDNORMOFFSET'].max()
width = delta_max - delta_min
print(width, height)
# +
bg_color = (0, 0, 225)
ice_color = (250, 250, 250)
lt_blue = (209, 237, 242)
color_toggle = [bg_color, ice_color]
# -
years = list(data['YEAR1'].unique())
len(years)
# +
px = []
block_width = 3
block_height = 5
tmp_index = 5
stripe = []
for bwid in range(block_width*width):
stripe.extend(lt_blue)
for year in years:
change_points = []
for i,row in data[data.YEAR1 == year].iterrows():
for change_types in ['CLOSEDNORMOFFSET','OPENEDNORMOFFSET']:
change_points.append(row[change_types])
change_points.append(width+1)
point_index = 0
current_change_point = change_points[point_index]
color_index = 0
current_color = color_toggle[color_index]
row = []
for date in range(width):
if date >= current_change_point:
point_index = point_index + 1
current_change_point = change_points[point_index]
color_index = (color_index + 1) % 2
current_color = color_toggle[color_index]
for bw in range(block_width):
row.extend(current_color)
for bh in range(block_height):
#print(row)
px.append(row)
for bh in range(block_height):
#print(row)
px.append(stripe)
# -
import png
f = open('../exports/sample.png', 'wb')
w = png.Writer(width*block_width, height*block_height*2, greyscale = False)
w.write(f, px)
f.close()
stripe[:12]
data.head()
| scripts/saving-img.ipynb |
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/khbae/trading/blob/master/02_Pandas.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="rzORhdHAOYTL"
# # Pandas
# "pandas is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language."
# * https://pandas.pydata.org/
#
# The **pandas** package provides data structures for handling data frames and time-series data. Together with **numpy** arrays, we can use it to perform regression analysis or various financial analyses.
# + colab={} colab_type="code" id="hB7DHMq5OSGT"
# import numpy and pandas
import numpy as np
import pandas as pd
# + [markdown] colab_type="text" id="hrgpQxDTOiR6"
# ## Series
#
# Variables used in pandas consist of **series** and **dataframe**.
# * series: a column vector
# * dataframe: a set of column vectors
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="j3ETEBG8OjfQ" outputId="fd4415da-9fc3-4c09-babf-fa8d9cd16ced"
## Series sample: 10 random values in [0, 1)
series1 = pd.Series(np.random.rand(10),name = 'series1')
type(series1)
# + colab={"base_uri": "https://localhost:8080/", "height": 209} colab_type="code" id="tHBBGHwkP8TM" outputId="a1b0f490-b147-48c8-e914-6a5d7739f5eb"
# Vector multiplied by a constant (element-wise)
series1 * 1000
# + colab={"base_uri": "https://localhost:8080/", "height": 174} colab_type="code" id="TABNJ4F_QTXo" outputId="b1fb722c-d35c-4ab9-d972-24e79f1b105c"
# Summary Statistics
series1.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 209} colab_type="code" id="E3YCo1VKQb9S" outputId="e66e0728-75d9-48b5-887b-469eb906c2bc"
# Assign row names (index labels)
series1.index = ['a','b','c','d','e','f','g','h','i','j']
series1
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="mgyd3MZUQtUe" outputId="7d398476-66d5-427e-a768-8679540ef2fa"
# Show index in series
series1.index
# + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="NNBoX5x9Q_76" outputId="62375eb5-d762-41b6-9365-46456ca70cac"
# Choose data: by label, then by integer position
print(series1['g'])
print(series1[6])
# + [markdown] colab_type="text" id="Lk3MQBRcRlF3"
# ## dataframe
#
# A data frame can be said to be a combination of series, and has independent characteristics for each column.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="U7Qrfj-jRaTl" outputId="0593b798-4709-4ca6-fce9-53e1b7466ba0"
# Create dict
dict1 = {
    'column1' : [1,2,3,4,5],
    'column2' : ['amazon','ms','apple','google','samsung'],
    'returns' : np.random.rand(5)
}
type(dict1)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="rZ6B4GKOSFwy" outputId="4e7c71ac-6b55-46d4-bc26-887dec9ed6d8"
# Convert Dict to dataframe, indexed by 5 consecutive daily dates
dfram = pd.DataFrame(dict1,pd.date_range('2015-01-01',periods=5))
dfram
# + colab={"base_uri": "https://localhost:8080/", "height": 69} colab_type="code" id="fVRZA7tNUNji" outputId="5197ffc0-5429-4169-8548-bf7eb7cc07d4"
# data frame columns and index
dfram.index
# + [markdown] colab_type="text" id="Ll7cgw1sUgBs"
# iloc indexes a dataframe by integer position, taking row and column positions as arguments.
# + colab={"base_uri": "https://localhost:8080/", "height": 111} colab_type="code" id="M7ZEWXjaUhNm" outputId="5283d4b0-8a1d-49b4-bd99-8c8acde3ab13"
# iloc
dfram.iloc[1:3,0:3]
# + [markdown] colab_type="text" id="f5kf3ZYjVBI6"
# loc is another way to index a **dataframe**; it takes row labels and column labels as arguments.
# + colab={"base_uri": "https://localhost:8080/", "height": 111} colab_type="code" id="TehaqsrXUeqi" outputId="9323c346-95c8-4843-b122-e38734d11291"
# use to_datetime function
dfram.loc[pd.to_datetime(['2015-01-01','2015-01-03']),['column1','column2']]
# + colab={"base_uri": "https://localhost:8080/", "height": 359} colab_type="code" id="pt-G7Yr1Svis" outputId="aff44189-09f1-4172-8d29-04f41ea0390b"
# Combine Series : pd.concat function (column-wise, axis=1)
series1 = pd.Series(np.random.rand(10),name = 'series1')
series2 = pd.Series(np.random.rand(10),name = 'series2')
series3 = pd.Series(np.random.rand(10),name = 'series3')
dfram2 = pd.concat([series1,series2,series3],axis=1)
dfram2
# + [markdown] colab_type="text" id="9HJEFcWPWM0c"
# A **dataframe** can also be loaded directly from the web.  Two common
# sources are Yahoo/Google Finance (via pandas_datareader) and Quandl.
# + colab={"base_uri": "https://localhost:8080/", "height": 390} colab_type="code" id="qqqMLtdHT2yw" outputId="e8a3301a-15a2-444b-c795-792522f57091"
# !pip install -q pandas_datareader # installs the pandas_datareader package
import pandas_datareader.data as web
import datetime # used to build the start and end dates for the query
start = datetime.datetime(2017, 10, 19)
end = datetime.datetime(2017, 11, 1)
# NOTE(review): the 'google' data source was deprecated/removed in later
# pandas_datareader releases -- confirm against the pinned version.
aapl = web.DataReader('AAPL','google',start,end)
aapl
# + colab={"base_uri": "https://localhost:8080/", "height": 1041} colab_type="code" id="Z_Zob0p6Wcs-" outputId="94b10418-6041-4e65-fa61-5ca1d0ecba0d"
# !pip install -q quandl # installs the quandl package
import quandl
# quandl.get(quandl_data_code, periods, api_key)
# A Quandl request takes a dataset code, a date range, and the personal
# api_key issued on sign-up [ https://www.quandl.com/ ]
data = quandl.get("BCHARTS/BITSTAMPUSD",start_date="2017-12-01", end_date="2017-12-31") # this dataset can be fetched without an api_key
data
# + colab={"base_uri": "https://localhost:8080/", "height": 429} colab_type="code" id="sIUBKOHaXetc" outputId="56c24151-7da0-47ae-ad51-37d1ca4c55d2"
# Plot graph. Set title, xlabel, ylabel and xlim
type(data)
ax = data['Close'].plot()
ax.set_title('Title Here XXX')
ax.set_xlabel('xlabel Here YYY')
ax.set_ylabel('ylabel Here ZZZ')
# + colab={"base_uri": "https://localhost:8080/", "height": 69} colab_type="code" id="BFU3NSouXvMo" outputId="37e8ac76-0150-4454-dca5-5f6b0e7cc52f"
# Simple Linear Regression: scipy, stats, linregress
# (Close regressed on BTC volume; slope/stderr is the slope's t-statistic)
from scipy import stats
slope, intercept, rvalue, pvalue, stderr = stats.linregress(data['Volume (BTC)'],data['Close'])
print(slope)
print(stderr)
print(slope/stderr)
| 02_Pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="a91bea06a94ce97847cb46a9809e6b13e40bf2f8"
# ## Deep Learning Framework Power Scores 2018
# ## By <NAME>
#
# ### See [this Medium article](https://towardsdatascience.com/deep-learning-framework-power-scores-2018-23607ddf297a) for a discussion of the state of Python deep learning frameworks in 2018 featuring these charts.
#
# I'm going to use plotly and pandas to make interactive visuals for this project.
#
# Updated Sept. 20-21, 2018 to include Caffe, DL4J, Caffe2, and Chainer and several improved metrics.
# + [markdown] _uuid="5e57f4f0d92a885a98b55cd49a98096de5a8abcc"
# # Please upvote this Kaggle kernel if you find it helpful.
# + _uuid="f3fb44a1a0148c4dc68d3b600217db9f0efe790c"
# import the usual frameworks
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import collections
import warnings
from IPython.core.display import display, HTML  # NOTE(review): appears unused below
from sklearn.preprocessing import MinMaxScaler
import os
print(os.listdir("../input"))
# import plotly
import plotly.figure_factory as ff
import plotly.graph_objs as go
import plotly.offline as py
import plotly.tools as tls
# for color scales in plotly
import colorlover as cl
# define color scale https://plot.ly/ipython-notebooks/color-scales/
cs = cl.scales['10']['div']['RdYlGn'] # for most charts
cs7 = cl.scales['7']['qual']['Dark2'] # for stacked bar charts
# configure things
warnings.filterwarnings('ignore')
pd.options.display.float_format = '{:,.2f}'.format
pd.options.display.max_columns = 999
py.init_notebook_mode(connected=True)
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# + [markdown] _uuid="3e9147b30e8a4c03454e88fa2dbfac869cdb6796"
# List package versions for reproducibility.
# + _uuid="032debf495e88d72bcbf0c71e1ca575edecc237a"
# #!pip list
# + [markdown] _uuid="1c4b10672a59810b888587f3b9520098df1ee6d9"
# Read in the data from the csv. The Google sheet that holds the data is available [here](https://docs.google.com/spreadsheets/d/1mYfHMZfuXGpZ0ggBVDot3SJMU-VsCsEGceEL8xd1QBo/edit?usp=sharing).
# + _uuid="5c12e16726c30bfcdd3b0aacab86f0ab25a9ea64"
new_col_names = ['framework','indeed', 'monster', 'simply', 'linkedin', 'angel',
                 'usage', 'search', 'medium', 'books', 'arxiv', 'stars',
                 'watchers', 'forks', 'contribs',
                 ]
# Read only the 11-framework block of the exported sheet: skip the 4 header
# rows, name the columns ourselves, and index by framework name.
df = pd.read_csv('../input/ds13.csv',
                 skiprows=4,
                 header=None,
                 nrows=11,
                 thousands=',',
                 index_col=0,
                 names=new_col_names,
                 usecols=new_col_names,
                 )
df
# + [markdown] _uuid="e28ebf3e456dd61cc442fe31f812c92b78fedcac"
# Cool. We used the read_csv parameters to give us just what we wanted.
# + [markdown] _uuid="b2c570c88293185e4724b2caba4cd635bf1d0747"
# ## Basic Data Exploration
# Let's see what the data look like.
# + _uuid="77c7bdd69597287333d3ef931d4d44bf36c64b5c"
df.info()
# + _uuid="5557480378c71f18796789d5a52824536e3ca9ad"
df.describe()
# + [markdown] _uuid="d35b0dac3c8e6a8dcef9c0ea6cd921612394ec0c"
# Looks like pandas read the usage column as a string because of its percent sign. Let's make that a decimal.
# + _uuid="148b678b1f7aff56d45741b20726046c859d7ca8"
# Strip the '%' and convert to an integer percentage (0-100).
df['usage'] = pd.to_numeric(df['usage'].str.strip('%'))
df['usage'] = df['usage'].astype(int)
df
# + _uuid="85f78e428e45d7ace1460f80f2cac2e82ce58688"
df.info()
# + [markdown] _uuid="deb765bae37ab310f1478b6253b6a25146f5d22d"
# All ints! Great!
# + [markdown] _uuid="52d0441e5dab9b2de7bb0a2c75f3a22b62743adb"
# # Plotly
# Let's make interactive plots with plotly for each popularity category.
# + [markdown] _uuid="21f16d313950450044bd5715fb9e085386c05028"
# ## Online Job Listings
# I looked at how many times each framework appeared in searches on job listing websites. For more discussion see the Medium Article the accompanies this notebook here.
# + _uuid="16e9eda2dd2be652af28ea8e86ed5edea0fcbb92"
# sum groupby for the hiring columns
# (total mentions across the five job-listing sites)
df['hiring'] = df['indeed'] + df['monster'] + df['linkedin'] + df['simply'] + df['angel']
# + _uuid="b2b3713afedae43b037c82ce845a9285d9e77b82"
df
# + _uuid="a32c444f9c19dcd926b17c57ac42b9681682926f"
data = [go.Bar(
    x=df.index,
    y=df.hiring,
    marker=dict(color=cs),
)
]
layout = {'title': 'Online Job Listings',
          'xaxis': {'title': 'Framework'},
          'yaxis': {'title': "Quantity"},
          }
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] _uuid="99b617b23e56f5d4e853a2b47557b817739d43bd"
# That's just the aggregate listings. Let's plot the job listing mentions for each website in a stacked bar chart. This will take multiple traces.
# + _uuid="474b0ce5c9f1733190369f1b5e52360ebd941740"
y_indeed = df['indeed']
y_monster = df['monster']
y_simply = df['simply']
y_linkedin = df['linkedin']
y_angel = df['angel']
# + _uuid="489ab9534acbd314d2ed9d85fca1db6b372cd673"
indeed = go.Bar(x=df.index, y=y_indeed, name = 'Indeed')
simply = go.Bar(x=df.index, y=y_simply, name='Simply Hired')
monster = go.Bar(x=df.index, y=y_monster, name='Monster')
linked = go.Bar(x=df.index, y=y_linkedin, name='LinkedIn')
angel = go.Bar(x=df.index, y=y_angel, name='Angel List')
data = [linked, indeed, simply, monster, angel]
layout = go.Layout(
    barmode='stack',
    title='Online Job Listings',
    xaxis={'title': 'Framework'},
    yaxis={'title': 'Mentions', 'separatethousands': True},
    colorway=cs,
)
fig = go.Figure(data = data, layout = layout)
py.iplot(fig)
# + [markdown] _uuid="60e0d97cb36a3b7535c9a29b8e746a7e755065f5"
# Cool. Now let's see how this data looks with grouped bars instead of stacked bars by changing the barmode to "group".
# + _uuid="5a02824fc45a9eb5244f79294cf6c869932a0ae5"
indeed = go.Bar(x=df.index, y=y_indeed, name = "Indeed")
simply = go.Bar(x=df.index, y=y_simply, name="Simply Hired")
monster = go.Bar(x=df.index, y=y_monster, name="Monster")
linked = go.Bar(x=df.index, y=y_linkedin, name="LinkedIn")
angel = go.Bar(x=df.index, y=y_angel, name='Angel List')
data = [linked, indeed, simply, monster, angel]
layout = go.Layout(
    barmode='group',
    title="Online Job Listings",
    xaxis={'title': 'Framework'},
    yaxis={'title': "Listings", 'separatethousands': True,
           }
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] _uuid="5b6b9a752c28bff6a4f854b56a837da6a6c71838"
# ## KDnuggets Usage Survey
# Let's look at usage as reported in KDnuggets 2018 survey.
# + _uuid="4192746d4ecfbf4e8b99141e63ac015f3260f9a7"
# Make sure you have colorlover imported as cl for color scale
# NOTE(review): this cell is not idempotent -- re-running divides usage by
# 100 again.  Run it exactly once per kernel session.
df['usage'] = df['usage'] / 100
# + [markdown] _uuid="d9fcef25d82c5bf6a66a43fdad901e43aff8d8d1"
# ## Google Search Volume
# As of Sept. 15, 2018.
# (NOTE(review): this heading sits one cell early -- the chart right below is
# the KDnuggets usage survey; the search-volume chart follows it.)
# + _uuid="fd8ab68d6f03a9c70807cdc97a0bea407a5783c9"
data = [
    go.Bar(
        x=df.index,
        y=df['usage'],
        marker=dict(color=cs)
    )
]
layout = {
    'title': 'KDnuggets Usage Survey',
    'xaxis': {'title': 'Framework'},
    'yaxis': {'title': "% Respondents Used in Past Year", 'tickformat': '.0%'},
}
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + _uuid="a147da72b159c2b0f2b708af3692932d6408edae"
data = [
    go.Bar(
        x = df.index,
        y = df['search'],
        marker = dict(color=cs),
    )
]
layout = {
    'title': 'Google Search Volume',
    'xaxis': {'title': 'Framework'},
    'yaxis': {'title': "Relative Search Volume"},
}
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] _uuid="a6e06c4826a9ff29008d10929f414de1ef774f8a"
# ## Medium Articles
# Past 12 months.
# + _uuid="e70ab521830995f8e6e62516241f8f48f967cef0"
# Make sure you have colorlover imported as cl for color scale
# cs is defined in first cell
data = [
    go.Bar(
        x=df.index,
        y=df['medium'],
        marker=dict(color=cs) ,
    )
]
layout = {
    'title': 'Medium Articles',
    'xaxis': {'title': 'Framework'},
    'yaxis': {'title': "Articles"},
}
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] _uuid="11083db3187d0af7f0fffd278cd63c74f0276bad"
# ## Amazon Books
# + _uuid="a3c86aac9c2d0ee0c0de733ffac2a0602a7d14f1"
data = [
    go.Bar(
        x=df.index,
        y=df['books'],
        marker=dict(color=cs),
    )
]
layout = {
    'title': 'Amazon Books',
    'xaxis': {'title': 'Framework'},
    'yaxis': {'title': "Books"},
}
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] _uuid="b4b7c7d872126b440a6447a9a469360fe083caa3"
# ## ArXiv Articles
# Past 12 months.
# + _uuid="364eeff0093bc59938f254b1c112dcfb8944c963"
data = [
    go.Bar(
        x=df.index,
        y=df['arxiv'],
        marker=dict(color=cs),
    )
]
layout = {
    'title': 'ArXiv Articles',
    'xaxis': {'title': 'Framework'},
    'yaxis': {'title': "Articles"},
}
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] _uuid="1c91ee725e0f84dca231777f7e5d70ea1a5bbab3"
# # GitHub Activity
# Let's make another stacked bar chart of the four GitHub categories.
# + _uuid="13ee09fc5aa8b182ebc8a91ab76a75b17619f08a"
y_stars = df['stars']
y_watchers = df['watchers']
y_forks = df['forks']
y_contribs = df['contribs']
stars = go.Bar(x = df.index, y=y_stars, name="Stars")
watchers = go.Bar(x=df.index, y=y_watchers, name="Watchers")
forks = go.Bar(x=df.index, y=y_forks, name="Forks")
contribs = go.Bar(x=df.index, y=y_contribs, name="Contributors")
data = [stars, watchers, forks, contribs]
layout = go.Layout(barmode='stack',
                   title="GitHub Activity",
                   xaxis={'title': 'Framework'},
                   yaxis={
                       'title': "Quantity",
                       'separatethousands': True,
                   }
                   )
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] _uuid="e58b069e7dd390dfeb65b17bd23217a581c8c0ed"
# This configuration doesn't make the most sense, because there are going to be way more stars than contributors. It's not an apples to apples comparison. Let's try four subplots instead.
# + _uuid="815d4f533e7846be6c42c65e82dc34cc2610026e"
trace1 = go.Bar(
    x=df.index,
    y=df['stars'],
    name='Stars',
    marker=dict(color=cs),
)
trace2 = go.Bar(
    x=df.index,
    y=df['forks'],
    name ="Forks",
    marker=dict(color=cs)
)
trace3 = go.Bar(
    x=df.index,
    y=df['watchers'],
    name='Watchers',
    marker=dict(color=cs)
)
trace4 = go.Bar(
    x=df.index,
    y=df['contribs'],
    name='Contributors',
    marker=dict(color=cs),
)
# NOTE(review): tls.make_subplots is deprecated in newer plotly releases in
# favour of plotly.subplots.make_subplots -- confirm against the pinned version.
fig = tls.make_subplots(
    rows=2,
    cols=2,
    subplot_titles=(
        'Stars',
        'Forks',
        'Watchers',
        'Contributors',
    )
)
fig['layout']['yaxis3'].update(separatethousands = True)
fig['layout']['yaxis4'].update(separatethousands = True)
fig['layout']['yaxis2'].update(tickformat = ',k', separatethousands = True)
fig['layout']['yaxis1'].update(tickformat = ',k', separatethousands = True)
fig.append_trace(trace1, 1, 1)
fig.append_trace(trace2, 1, 2)
fig.append_trace(trace3, 2, 1)
fig.append_trace(trace4, 2, 2)
fig['layout'].update(title = 'GitHub Activity', showlegend = False)
py.iplot(fig)
# + [markdown] _uuid="8b2192e20eb9f82f1a5c57c035c6468bcdbf12bb"
# This presentation shows the information in a more comprehensible and appropriate format.
# + [markdown] _uuid="dc349da513db7f10d367354847998b49d324096f"
# # Scale and Aggregate for Power Scores
# Scale each column. For each column we'll use MinMaxScaler to subtract the minimum and divide by the original max - original min.
# + _uuid="c3d6ede7f80bd83a482197b8ea365650ec798bff"
df.info()
# + _uuid="90de34cb37e8c36b429e383c20704b6de2f9bc43"
# Min-max scale every column to [0, 1] so categories are comparable.
scale = MinMaxScaler()
scaled_df = pd.DataFrame(
    scale.fit_transform(df),
    columns = df.columns,
    index = df.index)
# + _uuid="ec2a78ffdafbe829292fd99225d48b2364437545"
scaled_df
# + [markdown] _uuid="1ad75d99ddc77a83f5d17c7db96788f6720a23f7"
# ### Scaled Online Job Listings
# Let's combine the scaled online job listing columns into a new column.
# + _uuid="6f645ff3d5e4d13e8bfb10d052a46d98a85342bd"
# Row-wise mean of the five scaled job-site columns.
scaled_df['hiring_score'] = scaled_df[['indeed', 'monster', 'simply', 'linkedin', 'angel']].mean(axis=1)
# + _uuid="0c94cdb2f2ce2047f0ad136f402b3a7283928b4e"
scaled_df
# + [markdown] _uuid="cfb9f50f8845ef8d5d3649c2d81ae263050611c9"
# Now we have a hiring score.
# + [markdown] _uuid="e786bb0202bc558790cebb551c658e04e54daaa6"
# ### Scaled GitHub Activity
#
# Let's combine the scaled GitHub columns into a new column.
# + _uuid="7f40874e9802d8835f35546ed7ed166a98186a11"
# Row-wise mean of the four scaled GitHub columns.
scaled_df['github_score'] = scaled_df[['stars', 'watchers', 'forks', 'contribs']].mean(axis=1)
# + _uuid="3ebc942d4e8968bebf9212a36d482c7a8317f319"
scaled_df
# + [markdown] _uuid="c475e5274adbf9edcd01501ec5cc918e4fb4a074"
# Now we have all our aggregate columns and are ready to turn to the weights.
# + [markdown] _uuid="3043fe82cdf2ccab22a9b5510d62516ccce7a36e"
# ## Weights
# + [markdown] _uuid="3afa636153bf9c65a95b01532c33f011b9754f15"
# Let's make a pie chart of weights by category.
# + _uuid="ff1a609cd3adaf98303b00140e301d14bd91ef58"
# Category weights (they sum to 1.0); job listings count the most.
weights = {'Online Job Listings ': .3,
           'KDnuggets Usage Survey': .2,
           'GitHub Activity': .1,
           'Google Search Volume': .1,
           'Medium Articles': .1,
           'Amazon Books': .1,
           'ArXiv Articles': .1 }
# + _uuid="c7a7dff08840d6df0088b18ca8612f4c7e489947"
# changing colors because we want to show these aren't the frameworks
weight_colors = cl.scales['7']['qual']['Set1']
# Two overlaid pie traces share the same data: one draws the labels outside
# the pie, the other draws the percentages inside it.
common_props = dict(
    labels = list(weights.keys()),
    values = list(weights.values()),
    textfont=dict(size=16),
    marker=dict(colors=weight_colors),
    hoverinfo='none',
    showlegend=False,
)
trace1 = go.Pie(
    **common_props,
    textinfo='label',
    textposition='outside',
)
trace2 = go.Pie(
    **common_props,
    textinfo='percent',
    textposition='inside',
)
layout = go.Layout(title = 'Weights by Category')
fig = go.Figure([trace1, trace2], layout=layout)
py.iplot(fig)
# + [markdown] _uuid="e2c84ca9733d8d3163aa0e8e74f1b4acdc844445"
# ## Weight the Categories
# + _uuid="f5d1eb479d788beef0aaecc0edbe4c7b260e47a9"
# NOTE(review): these constants duplicate the values in the `weights` dict
# above -- keep the two in sync if the weighting ever changes.
scaled_df['w_hiring'] = scaled_df['hiring_score'] * .3
scaled_df['w_usage'] = scaled_df['usage'] * .2
scaled_df['w_github'] = scaled_df['github_score'] * .1
scaled_df['w_search'] = scaled_df['search'] * .1
scaled_df['w_arxiv'] = scaled_df['arxiv'] * .1
scaled_df['w_books'] = scaled_df['books'] * .1
scaled_df['w_medium'] = scaled_df['medium'] * .1
# + _uuid="5e51a796d7aa45ee156e281829bb7cfae4115035"
# Keep only the weighted category columns.
weight_list = ['w_hiring', 'w_usage', 'w_github', 'w_search', 'w_arxiv', 'w_books', 'w_medium']
scaled_df = scaled_df[weight_list]
scaled_df
# + [markdown] _uuid="f00fe5959fb7e3576c92d3b2dfb22a0b0c6e4fc6"
# ## Power Scores
# Let's make the power score column by summing the seven category scores.
# + _uuid="61a6bcbfd5cb59c46e855367068dda6099748629"
scaled_df['ps'] = scaled_df[weight_list].sum(axis = 1)
scaled_df
# + [markdown] _uuid="b60aef0b45cceb6898344158b7ed889ef45115ac"
# Let's clean things up for publication
# + _uuid="9a60dab959409d1912113bd5304d133006efa1ae"
# Rescale to 0-100, round, and give the columns presentation names.
p_s_df = scaled_df * 100
p_s_df = p_s_df.round(2)
p_s_df.columns = ['Job Search Listings', 'Usage Survey', 'GitHub Activity', 'Search Volume', 'ArXiv Articles', 'Amazon Books', 'Medium Articles', 'Power Score']
p_s_df.rename_axis('Framework', inplace = True)
p_s_df
# + [markdown] _uuid="1c9f48ceb8f5d414a3480ec678fb41a5a628b066"
# Let's make a bar chart of the power scores.
# + _uuid="6f69cd7233e4d7af07b61cfda87145421d0fdf2e"
data = [
    go.Bar(
        x=scaled_df.index, # you can pass plotly the axis
        y=p_s_df['Power Score'],
        marker=dict(color=cs),
        text=p_s_df['Power Score'],
        textposition='outside',
        textfont=dict(size=10)
    )
]
layout = {
    'title': 'Deep Learning Framework Power Scores 2018',
    'xaxis': {'title': 'Framework'},
    'yaxis': {'title': "Score"}
}
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] _uuid="d9377852c504ffd1df737796f16fc22e850c776b"
# ### That's the end!
# ### See [this Medium article](https://towardsdatascience.com/deep-learning-framework-power-scores-2018-23607ddf297a) for a discussion of the state of Python deep learning frameworks in 2018 featuring these charts.
# ## Please upvote if you found this interesting or informative!
| deep learning power scores 9.25.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# name: python2
# ---
# + [markdown] colab_type="text" id="ZrwVQsM9TiUw"
# ##### Copyright 2019 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# + colab={} colab_type="code" id="CpDUTVKYTowI"
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="ltPJCG6pAUoc"
# # TFP Probabilistic Layers: Variational Auto Encoder
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_VAE.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_VAE.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="WRVR-tGTR31S"
# In this example we show how to fit a Variational Autoencoder using TFP's "probabilistic layers."
# + [markdown] colab_type="text" id="uiR4-VOt9NFX"
# ### Dependencies & Prerequisites
#
# + colab={} colab_type="code" id="Cg5t0lWXGY0z"
#@title Install { display-mode: "form" }
# Choose which TensorFlow build to install (Colab form widget).
TF_Installation = "Nightly" #@param ["Nightly", "Stable", "System"]
if TF_Installation == "Nightly":
    # !pip install -q tf-nightly
    print("Installation of `tf-nightly` complete.")
elif TF_Installation == "Stable":
    # !pip install -q --upgrade tensorflow
    print("Installation of `tensorflow` complete.")
elif TF_Installation == "System":
    # Use whatever TensorFlow the environment already provides.
    pass
else:
    raise ValueError("Selection Error: Please select a valid "
                     "installation option.")
# + colab={} colab_type="code" id="9clSiUTiT3G1"
#@title Install { display-mode: "form" }
# Same selection, for the TensorFlow Probability build.
TFP_Installation = "Nightly" #@param ["Nightly", "Stable", "System"]
if TFP_Installation == "Nightly":
    # !pip install -q tfp-nightly
    print("Installation of `tfp-nightly` complete.")
elif TFP_Installation == "Stable":
    # !pip install -q --upgrade tensorflow-probability
    print("Installation of `tensorflow-probability` complete.")
elif TFP_Installation == "System":
    pass
else:
    raise ValueError("Selection Error: Please select a valid "
                     "installation option.")
# + colab={} colab_type="code" id="kZ0MdF1j8WJf"
#@title Import { display-mode: "form" }
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
tf.enable_v2_behavior()
tfk = tf.keras
tfkl = tf.keras.layers
tfpl = tfp.layers
tfd = tfp.distributions
# + [markdown] colab_type="text" id="N8Shtn_e99XC"
# ### Load Dataset
# + colab={} colab_type="code" id="daPl6ycN9cD3"
datasets, datasets_info = tfds.load(name='mnist',
with_info=True,
as_supervised=False)
def _preprocess(sample):
    """Scale an MNIST image to [0, 1] and stochastically binarize it.

    The binarized image is returned twice because the autoencoder uses
    the same tensor as both input and reconstruction target.
    """
    scaled = tf.cast(sample['image'], tf.float32) / 255.  # Scale to unit interval.
    binarized = scaled < tf.random.uniform(tf.shape(scaled))  # Randomly binarize.
    return binarized, binarized
# Shuffle *before* batching so that individual examples (rather than whole
# 256-element batches) are randomized each epoch, and prefetch last so the
# input pipeline overlaps with training. The original pipeline shuffled
# after batch/prefetch, which only permutes pre-formed batches.
train_dataset = (datasets['train']
                 .map(_preprocess)
                 .shuffle(int(10e3))
                 .batch(256)
                 .prefetch(tf.data.experimental.AUTOTUNE))
eval_dataset = (datasets['test']
                .map(_preprocess)
                .batch(256)
                .prefetch(tf.data.experimental.AUTOTUNE))
# + [markdown] colab_type="text" id="CI-VFyp8-BIa"
# ### VAE Code Golf
# + [markdown] colab_type="text" id="MKgRI5eoS2rx"
# #### Specify model.
# + colab={} colab_type="code" id="rd3Voa64_Gtv"
input_shape = datasets_info.features['image'].shape
encoded_size = 16
base_depth = 32
# + colab={} colab_type="code" id="9d7Jbm66FN_u"
prior = tfd.Independent(tfd.Normal(loc=tf.zeros(encoded_size), scale=1),
reinterpreted_batch_ndims=1)
# + colab={} colab_type="code" id="eRHjRtAL-e33"
encoder = tfk.Sequential([
tfkl.InputLayer(input_shape=input_shape),
tfkl.Lambda(lambda x: tf.cast(x, tf.float32) - 0.5),
tfkl.Conv2D(base_depth, 5, strides=1,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(base_depth, 5, strides=2,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(2 * base_depth, 5, strides=1,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(2 * base_depth, 5, strides=2,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(4 * encoded_size, 7, strides=1,
padding='valid', activation=tf.nn.leaky_relu),
tfkl.Flatten(),
tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(encoded_size),
activation=None),
tfpl.MultivariateNormalTriL(
encoded_size,
activity_regularizer=tfpl.KLDivergenceRegularizer(prior)),
])
# + colab={} colab_type="code" id="baP--pt6-ewK"
decoder = tfk.Sequential([
tfkl.InputLayer(input_shape=[encoded_size]),
tfkl.Reshape([1, 1, encoded_size]),
tfkl.Conv2DTranspose(2 * base_depth, 7, strides=1,
padding='valid', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(2 * base_depth, 5, strides=1,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(2 * base_depth, 5, strides=2,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(base_depth, 5, strides=1,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(base_depth, 5, strides=2,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(base_depth, 5, strides=1,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(filters=1, kernel_size=5, strides=1,
padding='same', activation=None),
tfkl.Flatten(),
tfpl.IndependentBernoulli(input_shape, tfd.Bernoulli.logits),
])
# + colab={} colab_type="code" id="7itugvZVLyWL"
vae = tfk.Model(inputs=encoder.inputs,
outputs=decoder(encoder.outputs[0]))
# + [markdown] colab_type="text" id="-ckYuzfILkVb"
# #### Do inference.
# + colab={"height": 590} colab_type="code" executionInfo={"elapsed": 269250, "status": "ok", "timestamp": 1551300684365, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="e7f1u-Ya-axQ" outputId="e989c831-6075-4ebd-b628-b9f80e820a90"
negloglik = lambda x, rv_x: -rv_x.log_prob(x)
vae.compile(optimizer=tf.optimizers.Adam(learning_rate=1e-3),
loss=negloglik)
vae.fit(train_dataset,
epochs=15,
validation_data=eval_dataset)
# + [markdown] colab_type="text" id="hC4rNz9t_zpo"
# ### Look Ma, No ~~Hands~~Tensors!
# + colab={} colab_type="code" id="3ZqfOYMP_2p_"
# We'll just examine ten random digits.
# `Dataset.make_one_shot_iterator()` is a TF1 graph-mode API and is removed
# from datasets once `tf.enable_v2_behavior()` is active (see the Import
# cell); with eager execution we simply iterate the dataset directly.
x = next(iter(eval_dataset))[0][:10]
xhat = vae(x)
assert isinstance(xhat, tfd.Distribution)
# + cellView="form" colab={} colab_type="code" id="MM7wW4S2OrBt"
#@title Image Plot Util
import matplotlib.pyplot as plt
def display_imgs(x, y=None):
    """Display a row of grayscale images.

    Args:
      x: array-like of shape (n, height, width[, 1]); converted to a
        numpy array if needed.
      y: optional one-hot labels of shape (n, num_classes); if given,
        the argmax labels are shown as the figure title.
    """
    if not isinstance(x, (np.ndarray, np.generic)):
        x = np.array(x)
    plt.ioff()
    n = x.shape[0]
    # squeeze=False keeps `axs` a 2-D array so `axs.flat` works even when n == 1
    # (plt.subplots(1, 1) would otherwise return a bare Axes object).
    fig, axs = plt.subplots(1, n, figsize=(n, 1), squeeze=False)
    if y is not None:
        fig.suptitle(np.argmax(y, axis=1))
    # `range`, not the Python-2-only `xrange`, which raises NameError on Python 3.
    for i in range(n):
        axs.flat[i].imshow(x[i].squeeze(), interpolation='none', cmap='gray')
        axs.flat[i].axis('off')
    plt.show()
    plt.close()
    plt.ion()
# + colab={"height": 428} colab_type="code" executionInfo={"elapsed": 1554, "status": "ok", "timestamp": 1551300691476, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="ow7rfh6YLLx1" outputId="29c2f6a9-f53b-43bc-9d09-155a699d9be7"
print('Originals:')
display_imgs(x)
print('Decoded Random Samples:')
display_imgs(xhat.sample())
print('Decoded Modes:')
display_imgs(xhat.mode())
print('Decoded Means:')
display_imgs(xhat.mean())
# + colab={} colab_type="code" id="C3_5HPUCQpYO"
# Now, let's generate ten never-before-seen digits.
z = prior.sample(10)
xtilde = decoder(z)
assert isinstance(xtilde, tfd.Distribution)
# + colab={"height": 325} colab_type="code" executionInfo={"elapsed": 1159, "status": "ok", "timestamp": 1551300786264, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="_jMPwz8r9pYX" outputId="19f46823-9fa5-434b-a4f7-e9ffd4f250ab"
print('Randomly Generated Samples:')
display_imgs(xtilde.sample())
print('Randomly Generated Modes:')
display_imgs(xtilde.mode())
print('Randomly Generated Means:')
display_imgs(xtilde.mean())
| tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_VAE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Image Classification using Neural Network (NN) with Scikit-Learn
# !pip install opencv-python imutils scikit-learn
# +
import os
import random
import cv2
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
# -
# ## Loading Images
# %matplotlib inline
image_paths = list(paths.list_images('datasets/animals'))
print(image_paths)
random.seed(42)
random.shuffle(image_paths)
print(image_paths)
# +
image = cv2.imread(image_paths[2500])
plt.figure(figsize=(10, 10))
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(rgb_image);
# -
data = []
labels = []
# **Note:** Machine learning models take a *fixed size input*.
# Build fixed-size feature vectors: load every image and resize it to 32x32.
for image_path in image_paths:
    image = cv2.imread(image_path)
    # The class label is the image's parent directory name,
    # e.g. 'datasets/animals/cats/cat_001.jpg' -> 'cats'.
    label = image_path.split(os.path.sep)[-2]
    # INTER_AREA interpolation — OpenCV's recommended choice for shrinking.
    image = cv2.resize(image, (32, 32), interpolation=cv2.INTER_AREA)
    data.append(image)
    labels.append(label)
# Convert to numpy arrays: data has shape (n_images, 32, 32, 3).
data = np.array(data)
labels = np.array(labels)
plt.figure(figsize=(10, 10))
rgb_image = cv2.cvtColor(data[2500], cv2.COLOR_BGR2RGB)
plt.imshow(rgb_image);
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
data.shape
data = data.reshape((data.shape[0], 3072))
# Normalize images to the range [0, 1].
data = data.astype('float') / 255.0
data.shape
le = LabelEncoder()
labels = le.fit_transform(labels)
labels
le.classes_
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.25, random_state=30)
# ## Building a NN Model
nn = MLPClassifier()
nn.fit(X_train, y_train)
y_pred = nn.predict(X_test)
print(classification_report(y_test, y_pred, target_names=le.classes_))
| 03-nn-with-sklearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
# !pip show tensorboard
# + pycharm={"name": "#%%\n"}
# log_folder = '/reports/tensorboard_logs'
# %reload_ext tensorboard
# %tensorboard --logdir reports/tensorboard_logs
| unused/tensorboard.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Welcome to EPS 88
#
# This cell is Markdown text. It is the cell type where we can type text that isn't code. Go ahead and double click in this cell and you will see that you can edit it. **Type something here:**
# Let's get going right away by dealing with some data within this Jupyter notebook. The first bit of code you will run is in the cell below. This is a code cell rather than a markdown cell (you can change the cell type using the drop-down box above). You can either hit the play button above, or more efficiently press *shift+enter* on your keyboard to run the code.
# +
#This cell is a code cell. It is where we can type code that can be executed.
#The hashtag at the start of this line makes it so that this text is a comment not code.
import pandas as pd
# -
# The reason why we execute the code ```import pandas as pd``` is so that we can use the functions of the ```pandas``` library which provides really helpful data structures and data analysis tools. We are using the standard convention of importing it using the nickname ```pd```. One of the fantastic things about doing data analysis in Python is the availability of great data analysis tools such as ```pandas```. One of the frustrating things can be learning how to use these diverse tools and which to use when. You will get more and more comfortable with these tools as the term progresses.
# # Finding Birthquakes
#
# Your birthquake is the largest magnitude earthquake that occurred on the day you were born. In this in-class exercise, we are going to search an earthquake catalog and find your birthquake.
#
# To do so, we are going to download data from the US Geological Survey (USGS) Earthquake Hazards program. https://earthquake.usgs.gov
#
# We are going to use an API that lets us send an url to the USGS and get earthquake information for a set of specified parameters.
#
# ## Finding a birthquake
#
# Let's do it first for one of my favorite singers, <NAME>, born May 25, 1976. We will define his birthday in year-month-day format and the day after in the same format in order to make a url that gets data starting on 12 am on his birthday and ending 12 am of the next day. We are putting the quote marks (' ') around the dates so that they are **strings** (the python data type that is a sequence of text).
Codys_birthday = '1976-05-25'
day_after_Codys_birthday = '1976-05-26'
# What we just did in the code above is to define the variable ```Codys_birthday``` to be set to the string '1976-05-25'. If we run a code cell with just that variable, Jupyter will show the variable as the code output.
Codys_birthday
# Another way to see the variable is to tell python to print it using the ```print()``` function.
print(Codys_birthday)
# ### Defining Codys's birthday earthquake URL
#
# To make a url that we can send to the USGS and get back data, we need to insert these dates into the USGS earthquake API url format. We will define that standard_url as a string and then add the dates that were set above to be the starttime and endtime.
# +
standard_url = 'https://earthquake.usgs.gov/fdsnws/event/1/query?format=csv&orderby=magnitude'
Codys_birthquake_url = standard_url + '&starttime=' + Codys_birthday + '&endtime=' + day_after_Codys_birthday
Codys_birthquake_url
# -
# ### Getting Cody's birthday earthquakes
#
# We now have a url that we can use to get data from the USGS. We could cut and paste this url (without the quotation marks) into a web browser. Go ahead and give that a try.
#
# Alternatively, we can use the `pandas` functions that we imported at the top of this notebook to get these data. The standard way to use ```pandas``` is to import it with the shorthand ``pd``. We will use the ```pd.read_csv()``` function which will take the data that comes from the USGS url and make it into a DataFrame. A DataFrame is a data structure with columns of different data that are defined with column names.
Codys_birthday_earthquakes = pd.read_csv(Codys_birthquake_url)
# These data are sorted by magnitude with the largest magnitude earthquakes at top. Let's look at the first 5 rows of the DataFrame using the ```.head()``` function.
Codys_birthday_earthquakes.head()
# We can just look at the first row by applying the ```.loc``` function to the DataFrame and calling the index 0. Python is zero-indexed so the first row is row zero. We can apply .loc to the first row to see all the details about my birthquake.
Codys_birthday_earthquakes.loc[0]
# It can be useful to return a single value which can be done by calling both the the row and the column using the ```.loc``` function.
Codys_birthday_earthquakes.loc[0]['mag']
# When working with Python for this course, you are going to get errors. I get errors everyday. They can look intimidating, but they often are informative (particularly if you look at the bottom). This code cell below should result in an error. Go ahead and execute it and let's have a look at the result.
Codys_birthday_earthquakes.loc[0]['birthday_cake']
# #### Cody's birthquake
#
# The largest earthquake on May 25, 1976 was a magnitude (column `mag`) 5.3 that occurred off an island of Papua New Guinea (column `place`).
# ## Finding your birthquake or that of public person
# ### Enter your birthday
#
# Enter your birthday and the date after in year-mm-dd format.
# +
### Enter your birthday here within the quotes in year-mm-dd format
your_birthday = ''
day_after_your_birthday = ''
# -
# #### Defining your birthday earthquake URL
# +
standard_url = 'https://earthquake.usgs.gov/fdsnws/event/1/query?format=csv&orderby=magnitude'
your_birthquake_url = standard_url + '&starttime=' + your_birthday + '&endtime=' + day_after_your_birthday
your_birthquake_url
# -
# #### Getting your birthday earthquakes
your_birthday_earthquakes = pd.read_csv(your_birthquake_url)
# ### Looking at your birthday earthquakes
your_birthday_earthquakes.head()
your_birthday_earthquakes.loc[0]
# What is the magnitude of your birthquake? Where did it occur?
# # Group discussion on plate tectonic data
#
# Now we will test using zoom breakout rooms to get into groups.
#
# Once in groups, we are going to consider a hypothetical scenario:
#
# **Your group is is dropped on an Earth-sized exoplanet in a far-away star system. You have infinite resources and infinite technology and are tasked with acquiring data to determine whether the planet has active plate tectonics or not.**
#
# **To be an effective research team, you are going to want to know each other. So go ahead and introduce yourselves to each other. Tell each other what you know about your birthquake.**
#
# **Following introductions, brainstorm and discuss what data types do you develop and how do you interpret them to answer the research question?**
#
# **Nominate a spokesperson from your group that will report back your ideas for a whole class discussion.**
# Feel free to take notes in this markdown cell. Double click this line.
# ### Save and Submit Assignment
#
# Save your work by using the 'download as' option from the 'file' pulldown menu. You may save as http or pdf. Upload your saved file to bcourses for credit.
| test_folder/tests/W0_inclass_birthquake.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Konsep - konsep pemrograman lanjut
# ## Fitur - fitur *built-in*
# ### `seq()`: Mendefinisikan sikuen
seq(0,10, by=2)
seq(0,100, by = 10)
seq(0,30, by = 2)
# ### `sort()` : Mengurutkan vektor
v <- c(2,7,1,49,54,32)
v
sort(v) # dari kecil ke besar
sort(v, decreasing = T) # dari besar ke kecil
nama <- c('s', 'a', 'n', 'd', 'y')
nama
sort(nama)
nama <- c('s', 'a', 'n', 'd', 'Y')
sort(nama)
nama <- c('s', 'a', 'n', 'd', 'Y','A')
sort(nama)
# ### `rev()`: Membalikan elemen di dalam suatu objek
b <- seq(1,10)
b
rev(b)
d <- c('a','b','e','d')
d
rev(d)
# ### `str()`: Menunjukkan struktur dari suatu objek
str(b)
str(mtcars)
summary(mtcars)
# ### `append()`: Menggabungkan objek
v1 <- seq(1,5)
v2 <- seq(10,30, by=10)
append(v1,v2)
# ### Memeriksa dan mengonversi tipe data pada objek - objek R
v1
is.vector(v1)
is.data.frame(v1)
is.data.frame(mtcars)
as.list(v1)
as.matrix(v1)
# ## Fungsi - fungsi `apply`
v <- seq(10,50,by=10)
v
sample(v,2) # mengambil dua buah sampel acak dari vektor
sample(1:100,5)
v <- 1:5
v
# Add a random integer drawn uniformly from 1..100 to `x`.
tambah_acak <- function(x){
acak <- sample(1:100,1)
return(x + acak)
}
tambah_acak(10)
hasil <- tambah_acak(20)
hasil
# ### `lapply()`: dalam bentuk list
lapply(v, tambah_acak)
# outputnya dalam bentuk list
# ## `sapply()`: dalam bentuk vektor
sapply(v,tambah_acak)
v1 <- seq(5,25, by=5)
kuadrat <- function(bil){
return(bil^2)
}
kuadrat(5)
lapply(v, kuadrat)
sapply(v, kuadrat)
# ### Fungsi anonim
v
kuadrat <- function(bil){
return(bil^2)
}
sapply(v, function(bil){bil^2}) # fungsi anonim
# ### Fungsi `apply` dengan banyak *input*
v
tambah_dua_bil <- function(b1,b2){
return(b1+b2)
}
tambah_dua_bil(20,30)
sapply(v, tambah_dua_bil) # error
sapply(v, tambah_dua_bil, b2 = 10)
# ## Ekspresi regular : RegEx
txt <- "Halo semuanya! Selamat Pagi! Cuaca lagi bagus, nih buat touring."
txt
grepl("Halo",txt) # kata "Halo" ada di txt
grepl("Malam", txt)
grepl("halo", txt) # Sifatnya case-sensitive
v <- c('a','d','k', 'l','t','k')
grepl('k',v)
grep('k',v) # outputnya indeks
grep('a',v)
# ## Fungsi - fungsi matematika
# ### `abs()`: menghitung nilai absolut
abs(-2)
v <- c(-3,-5,7,10)
abs(v)
# ### `sum()`: menghitung penjumlahan seluruh elemen
sum(2,4,6)
v <- c(2,3,4,5)
sum(v)
# ### `mean()`: menghitung rata - rata aritmatika
mean(v)
mean(c(3,4,5))
# ### `round()`: membulatkan nilai
round(2.777645)
round(2.777645, digits=2)
round(2.777645, 4)
# ## *Dates* dan *Timestamps*
Sys.Date() # waktu saat ini
d <- Sys.Date()
d
class(d)
d <- '1993-03-13'
d
class(d)
# dikonversi menjadi date
b.day <- as.Date(d)
b.day
class(b.day)
as.Date('Mar-13-93') # format tidak sesuai
as.Date('Mar-13-93', format = '%b-%d-%y')
# * `%d`: hari (desimal)
# * `%m`: bulan (desimal)
# * `%b`: bulan (singkatan)
# * `%B`: bulan (tidak disingkat)
# * `%y`: tahun (2 digit)
# * `%Y`: tahun (4 digit)
as.Date('March,01,2009', format= "%B, %d, %Y")
# +
# POSIXct
# -
as.POSIXct('11:03:05', format='%H:%M:%S')
strptime('11:03:05', format = '%H:%M:%S') # lebih banyak dipakai di pemrograman R
| notebooks/08Progjut.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Customize Dfr-Browser
# This notebook will customize a dfr-browser created using the `create_dfrbrowser` notebook. You can only customize one dfr-browser at a time. For configuration options, see the "Configuration" cell below.
# ### Info
#
# __authors__ = '<NAME>, <NAME>'
# __copyright__ = 'copyright 2019, The WE1S Project'
# __license__ = 'GPL'
# __version__ = '2.0'
# __email__ = '<EMAIL>'
# ## Settings
# +
# Python imports
import datetime
import json
import os
import re
from pathlib import Path
from IPython.display import display, HTML
# Define paths
# current_dir = %pwd
project_dir = str(Path(current_dir).parent.parent)
name = os.path.basename(os.path.normpath(project_dir))
created = datetime.datetime.today().strftime('%Y-%m-%d')
# Display the project directory
display(HTML('<p style="color: green;">Setup complete.</p>'))
# -
# ## Dfr-Browser Settings
#
# You may only customize one dfr-browser at a time. Select the dfr-browser you want to customize in the cell below.
#
# To select your browser, navigate to your `modules/dfr_browser` inside your project directory. There, you should see directories named `topicsn`, where `n` is the number of topics you modeled (e.g. `topics10`, `topics25`). You should see one browser folder for each browser you produced in the `create_dfrbrowser` notebook. Assign the directory name of the browser you want to customize to the `selection` variable in the cell below. For example, if you wanted to customize a browser called `topics10`, your `selection` variable would look like this:
#
# `selection = 'topics10'`
#
# Please follow this format exactly.
selection = '' # E.g. 'topics100'
# Find paths to `info.json` and `dfb.min.js` files
info_file = current_dir + '/' + selection + '/data/info.json'
# ## Configuration
#
# Change the `set_` variable to `True` for any option you wish to configure in the below cell.
# +
# A title for the browser. More than 30 characters may not display well.
set_title = False
title = '' # E.g. 'The Guardian'
# Provide information about each contributor as shown below. Only the 'title' property is
# required. If there is one contributor, the second set of properties between {} can be
# deleted. Further sets of properties can be added if there are more contributors.
set_contributors = False
contributors = [
{
'title': '<NAME> first contributor',
'email': 'Email of first contributor'
},
{
'title': '<NAME> second contributor',
'email': 'Email of second contributor'
}
]
# A description of the topic model. Must be formatted in HTML between the sets of three
# quotation marks.
set_meta_info = False
meta_info = """<h2>About this Model</h2>"""
# A list of custom metadata fields to be displayed in dfr-browser document citations.
set_custom_metadata_fields = False
custom_metadata_fields = []
# The number of words to display in topic bubbles. Font sizes 5px to 11px will fit 8 words.
set_num_top_words = False
num_top_words = 5
# The font size range of the words in the topic bubbles.
set_size_range = False
size_range = [6, 14]
# The font size for topic labels shown when you mouse over a topic bubble. The default is 18.
set_name_size = False
name_size = 14
# The number of documents displayed in Topic View. The default is 20.
set_num_docs = False
num_docs = 30
# Replace the labels below with your own in the format shown. If you omit a label, it will be
# given a topic number. In other words, you don't have to label every topic.
# Note: Some labels may not fit in the topic bubbles. Labels look best if they contain short
# words separated by spaces. "Planes, Trains, and Automobiles" is an example of a label that
# will fit within the available space.
set_topic_labels = False
topic_labels = {
    '1': 'LABEL1',
    '2': 'LABEL2',
    '3': 'LABEL3'
}

# Ensure every topic label carries exactly one "<number>: " prefix: any
# existing numeric prefix is stripped before the canonical one is added.
# The pattern is a raw string — '[0-9]+:\s+' without the r-prefix is an
# invalid escape sequence and a SyntaxWarning on modern Python.
for key, value in topic_labels.items():
    topic_labels[key] = key + ': ' + re.sub(r'[0-9]+:\s+', '', value)
# -
# ## Run
#
# Modify the Dfr-browser's `info.json file`.
# +
# Read the default info.json file
try:
with open(info_file, 'r') as f:
info = json.loads(f.read())
except:
print('Could not open the info.json file. Please make sure that the file exists and that the path configuration is correct.')
# Convert Markdown to HTML
# Add custom properties
try:
info = {}
info['VIS'] = {
'condition': {
'spec': {
'field': 'date',
'n': 1,
'unit': 'year'
},
'type': 'time'
}
}
if set_title == True:
info['title'] = title
if set_contributors == True:
info['contributors'] = contributors
if set_meta_info == True:
info['meta_info'] = meta_info
if set_custom_metadata_fields == True:
info['VIS']['metadata'] = {'spec': {'extra_fields': custom_metadata_fields}}
if set_num_top_words == True:
if 'model_view' not in info['VIS']:
info['VIS']['model_view'] = {}
if 'plot' not in info['VIS']['model_view']:
info['VIS']['model_view']['plot'] = {}
info['VIS']['model_view']['plot']['words'] = num_top_words
if set_size_range == True:
if 'model_view' not in info['VIS']:
info['VIS']['model_view'] = {}
if 'plot' not in info['VIS']['model_view']:
info['VIS']['model_view']['plot'] = {}
info['VIS']['model_view']['plot']['size_range'] = size_range
if set_name_size == True:
if 'model_view' not in info['VIS']:
info['VIS']['model_view'] = {}
if 'plot' not in info['VIS']['model_view']:
info['VIS']['model_view']['plot'] = {}
info['VIS']['model_view']['plot']['name_size'] = name_size
if set_num_docs == True:
if 'topic_view' not in info['VIS']:
info['VIS']['topic_view'] = {}
info['VIS']['topic_view']['docs'] = num_docs
if set_topic_labels == True:
info['topic_labels'] = topic_labels
except:
display(HTML('<p style="color: red;">Could not modify the data. Please check that the formatting of your configuration values is correct.</p>'))
# Record project-level metadata gathered in the Settings cell.
try:
    info['description'] = meta_info
    info['name'] = name
    # Store the actual creation date computed in Settings — the original
    # assigned the literal string 'created' instead of the variable.
    info['created'] = created
    info['contributors'] = contributors
except Exception:
    display(HTML('<p style="color: red;">Could not modify the data. Please check that you have run both the Settings and Configuration cells without errors.</p>'))
# Save the modified info.json file
try:
with open(info_file, 'w') as f:
f.write(json.dumps(info, indent=2, sort_keys=False))
display(HTML('<p style="color: green;">Done! The new <a href="' + info_file + '" target="_blank">info.json</a> file is shown below. Reload your topic model browser to see the changes.</p>'))
print('\n-----------\n')
# Print the info.json file
print(json.dumps(info, indent=2, sort_keys=False))
except:
display(HTML('<p style="color: red;">Could not save the modifications to info.json. There may have been a formatting error or change to the file path.</p>'))
| src/templates/v0.1.9/modules/dfr_browser/customize_dfrbrowser.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
dict = {'Name': ['Tom', 'Dick', 'Harry'],
'Age': [24,35,29],
'Shoe Size':[7,8,9]}
df = pd.DataFrame(dict)
df
df.to_csv('people.csv')
my_data = pd.read_csv('people.csv', index_col=0)
my_data
income_data = pd.read_csv('/home/jatin/devel/jupyter/dataset/us-income-annual.csv', delimiter=';')
income_data
print(income_data.keys())
income_data['Ticker']
print(type(income_data['Report Date']), type(income_data['Report Date'].values))
my_set = set(income_data['Ticker'].values)
print(my_set)
print('*'*80)
print(income_data['Ticker'].unique())
isit2017 = (income_data['Fiscal Year'] == 2017)
print(isit2017) # This is a Boolean mask
x = income_data[isit2017]
print(x)
print(x['Ticker'])
income_data.sort_values(by=['Net Income'])
income_data['Net Income'].hist() # creating a raw histogram
# Plot graph with just 2017 data, logarithmic Y axis and more bins for greater resolution.
from matplotlib import pyplot as plt
income_data[income_data['Fiscal Year']==2017]['Net Income'].hist(bins=100, log=True)
plt.title('USA Corporate Net Income for 2017 Histogram')
plt.xlabel('Value in USD')
plt.ylabel('Number of Instances')
print("Max Value is:", income_data[income_data['Fiscal Year']==2017]['Net Income'].max())
income_data.describe()
| aiml2_numpy_basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from os import path
# +
L3_dir = "../../3LayerSapphire/MF/HWPSS_data/20_deg"
L4_dir = "HWPSS_data/20_deg/"
L5_dir = "../../5LayerSapphire/MF/HWPSS_data/20_deg"
for d in [L3_dir, L4_dir, L5_dir]:
file = path.join(d, "Trans.npy")
lables, freqs, A2up, A4up, A2pp, A4pp = np.load(file)
plt.plot(freqs, A2up * 2)
plt.legend()
plt.show()
# +
file= "test/0_deg/Trans.npy"
lables, freqs, A2up, A4up, A2pp, A4pp = np.load(file)
plt.plot(freqs, A2up * 2)
file= "HWPSS_data/0_deg/Trans.npy"
lables, freqs, A2up, A4up, A2pp, A4pp = np.load(file)
plt.plot(freqs, A2up * 2)
# -
| HWP/4LayerSapphire/MF/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Elementary functions 2
#
# Now that we know what an inverse function is, let us think about the inverse functions of elementary functions we explored in a previous notebook.
#
# (elfun2_roots)=
# ## Roots
# + tags=["hide-input"]
import numpy as np
import matplotlib.pyplot as plt
x1 = np.linspace(0, 10, 101)
x2 = np.linspace(-10, 10, 101)
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].plot(x1, x1**2, label=r'$G(x^2)$')
ax[0].plot(x1, np.sqrt(x1), label=r'$G(x^{1/2})$')
ax[0].set_xlim(0, 10)
ax[0].set_ylim(0, 10)
ax[1].plot(x2, x2**3, label=r'$G(x^3)$')
ax[1].plot(x2, np.cbrt(x2), label=r'$G(x^{1/3})$')
ax[1].set_ylim(-10, 10)
ax[1].set_yticks([-10, -5, 5, 10])
for i in range(2):
ax[i].plot(x2, x2, '--k', alpha=0.2)
ax[i].text(7.5, 7, 'y=x', alpha=0.5)
ax[i].set_aspect('equal')
ax[i].spines['left'].set_position('zero')
ax[i].spines['bottom'].set_position('zero')
ax[i].spines['right'].set_visible(False)
ax[i].spines['top'].set_visible(False)
ax[i].legend(loc='best')
plt.show()
# -
# (elfun2_log)=
# ## Logarithmic functions
#
# The exponential function $f: \mathbb{R} \to (0, + \infty), f(x)=a^x$ is an injection because it is strictly increasing and it is a surjection because its image is $f(\mathbb{R}) = (0, + \infty)$. Therefore, it is a bijection and the inverse function $f^{-1} : (0, + \infty) \to \mathbb{R}$ exists.
#
# We denote this inverse function as $\log_a x := f^{-1}(x)$ and we call it the **logarithm with base a**.
#
# We call a logarithm with base **$e$** the **natural logarithm** and define it as:
#
# $$ \ln x := \log_e x $$
#
# ```{admonition} Properties
#
# Domain: $(0, + \infty)$, range: $\mathbb{R}$
#
# $$ \log_a x = y \implies x = a^y $$
#
# $$ (f \circ f^{-1})(y) = a^{\log_a y} = y, \forall y > 0 $$
# $$ (f^{-1} \circ f)(x) = \log_a a^x = x, \forall x \in \mathbb{R} $$
#
# For all $x, y \in (0, + \infty), a, b, c \in (0, 1) \cup (1, + \infty)$:
#
# $$ \log_a 1 = 0 $$
#
# Since the exponential function maps a sum to a product, i.e. $a^{x+y} = a^x a^y$, the logarithmic function maps a product to a sum:
#
# $$ \begin{align}
# \log_a (xy) & = \log_a x + \log_a y \\
# \log_a \frac{x}{y} & = \log_a x - \log_a y
# \end{align}$$
#
# $$ \begin{align}
# \log_a x^\alpha & = \alpha \log_a x \\
# \log_{a^\beta} x & = \frac{1}{\beta} \log_a x
# \end{align} $$
#
# Change of base:
#
# $$ \log_b c = \frac{\log_a c}{\log_a b} $$
#
# ```
# + tags=["hide-input"]
x1 = np.linspace(-10, 10, 201)
x2 = np.linspace(0.001, 10, 301)
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].plot(x1, 0.5**x1, label=r'$0.5^x$')
ax[0].plot(x2, np.log(x2)/np.log(0.5), label=r'$\log_{0.5}x$')
ax[0].set_title('0 < a < 1')
ax[1].plot(x1, np.exp(x1), label=r'$e^x$')
ax[1].plot(x2, np.log(x2), label=r'$\ln x$')
ax[1].set_title('a > 1')
for i in range(2):
ax[i].plot(x1, x1, '--k', alpha=0.2)
ax[i].set_aspect('equal')
ax[i].spines['left'].set_position('zero')
ax[i].spines['bottom'].set_position('zero')
ax[i].spines['right'].set_visible(False)
ax[i].spines['top'].set_visible(False)
ax[i].legend(loc='upper right')
ax[i].set_ylim(-4, 10)
ax[i].set_xlim(-4, 10)
# -
# (elfun2_arcus)=
# ## Inverse trigonometric functions
#
# ```{index} Inverse trigonometric functions (arcus)
# ```
#
# **Inverse trigonometric functions** are also called **arcus functions** so we denote them with the prefix $\text{arc-}$, e.g. $\arcsin x$. They are also often denoted as $\sin^{-1} x$.
#
# Periodic functions are not injections so they do not have inverses. Therefore, we need to restrict them to parts of their natural domain where they are strictly monotonic - here they are injections.
#
# Then trigonometric functions with the following domain restrictions are bijections:
#
# $$ \begin{aligned}
# & \left. \sin \right|_{[-\frac{\pi}{2}, \frac{\pi}{2}]} : [-\frac{\pi}{2}, \frac{\pi}{2}] \to [-1, 1] \\
# & \left. \cos \right|_{[0, \pi]} : [0, \pi] \to [-1, 1] \\
# & \left. \tan \right|_{(-\frac{\pi}{2}, \frac{\pi}{2})} : (-\frac{\pi}{2}, \frac{\pi}{2}) \to \mathbb{R} \\
# & \left. \csc \right|_{(-\frac{\pi}{2}, 0) \cup (0, \frac{\pi}{2})} : (-\frac{\pi}{2}, 0) \cup (0, \frac{\pi}{2}) \to (- \infty, -1) \cup (-1, + \infty) \\
# & \left. \sec \right|_{(0, \frac{\pi}{2}) \cup (\frac{\pi}{2}, \pi)} : (0, \frac{\pi}{2}) \cup (\frac{\pi}{2}, \pi) \to (- \infty, -1) \cup (-1, + \infty) \\
# & \left. \cot \right|_{(0, \pi)} : (0, \pi) \to \mathbb{R}
# \end{aligned}$$
#
# so we can define their inverse functions:
#
# $$ \begin{aligned}
# & \arcsin : [-1, 1] \to [-\frac{\pi}{2}, \frac{\pi}{2}] \\
# & \arccos : [-1, 1] \to [0, \pi] \\
# & \arctan : \mathbb{R} \to (-\frac{\pi}{2}, \frac{\pi}{2}) \\
# & \text{arccsc} : (- \infty, -1) \cup (-1, + \infty) \to (-\frac{\pi}{2}, 0) \cup (0, \frac{\pi}{2}) \\
# & \text{arcsec} : (- \infty, -1) \cup (-1, + \infty) \to (0, \frac{\pi}{2}) \cup (\frac{\pi}{2}, \pi) \\
# & \text{arccot} : \mathbb{R} \to (0, \pi)
# \end{aligned}$$
# + tags=["hide-input"]
# for demonstration purposes will suppress warnings when plotting
# (1/x at x = 0 and arcsin/arccos outside [-1, 1] emit RuntimeWarnings)
import warnings; warnings.simplefilter('ignore')
# One tall panel on the left (arcsin/arccos), two stacked panels on the
# right (arccsc/arcsec on top, arctan/arccot below).
ax = [0, 0, 0]
fig = plt.figure(constrained_layout=True, figsize=(10, 8))
gs = plt.GridSpec(2, 2, width_ratios=[1.3, 3], height_ratios=[1, 1])
ax[0] = fig.add_subplot(gs[:, 0])
ax[1] = fig.add_subplot(gs[0, 1])
ax[2] = fig.add_subplot(gs[1, 1])
# arcsin and arccos are defined on [-1, 1]
x = np.linspace(-1, 1, 101)
ax[0].plot(x, np.arcsin(x), label=r'$\arcsin x$')
ax[0].plot(x, np.arccos(x), label=r'$\arccos x$')
# arccsc x = arcsin(1/x), arcsec x = arccos(1/x); their domain excludes
# (-1, 1), so include a point at x = 0 (which evaluates to nan) to stop
# matplotlib joining the two disconnected branches with a spurious line.
x = np.concatenate((np.linspace(-5, -1, 100), [0], np.linspace(1, 5, 100)))
ax[1].plot(x, np.arcsin(1/x), label=r'arccsc $x$')
ax[1].plot(x, np.arccos(1/x), label=r'arcsec $x$')
# arctan is defined on all of R; arccot x = pi/2 - arctan x
x = np.linspace(-10, 10, 201)
ax[2].plot(x, np.arctan(x), label=r'$\arctan x$')
ax[2].plot(x, np.pi/2 - np.arctan(x), label=r'arccot $x$')
for i in range(3):
    # textbook-style axes: spines through the origin, no top/right frame
    ax[i].spines['left'].set_position('zero')
    ax[i].spines['bottom'].set_position('zero')
    ax[i].spines['right'].set_visible(False)
    ax[i].spines['top'].set_visible(False)
    ax[i].legend(loc='best')
    # y ticks at multiples of pi/2; raw strings throughout — '\p' in a
    # plain string literal is an invalid escape sequence (the first tick
    # label was previously missing the r prefix, a DeprecationWarning)
    ax[i].set_yticks(np.pi * np.array([-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]))
    ax[i].set_yticklabels([r'$-2\pi $', r'$-\frac{3}{2} \pi $', r'$-\pi$',
                           r'$-\frac{1}{2}\pi$', '0', r'$\frac{1}{2}\pi$',
                           r'$\pi$', r'$\frac{3}{2}\pi$', r'$2\pi$'])
ax[0].set_xlim(-1.2, 1.2)
ax[0].set_ylim(-1.8, 3.3)
ax[1].set_xlim(-5, 5)
ax[1].set_ylim(-1.7, 3.3)
ax[2].set_ylim(-1.7, 3.3)
plt.show()
# -
# ```{admonition} Properties
#
# For a more complete list of properties and their visualisation, visit [Wikipedia](https://en.wikipedia.org/wiki/Inverse_trigonometric_functions#Relationships_between_trigonometric_functions_and_inverse_trigonometric_functions). Here we will name a few.
#
# Complementary angles:
#
# $$ \arccos x = \frac{\pi}{2} - \arcsin x, \quad \text{arccot} x = \frac{\pi}{2} - \arctan x, \quad \text{arccsc} x = \frac{\pi}{2} - \text{arcsec} x $$
#
# Reciprocal arguments:
#
# $$ \begin{aligned}
# \arccos \left( \frac{1}{x} \right) = \text{arcsec} x, \quad & \quad \text{arcsec} \left( \frac{1}{x} \right) = \arccos x \\
# \arcsin \left( \frac{1}{x} \right) = \text{arccsc} x, \quad & \quad \text{arccsc} \left( \frac{1}{x} \right) = \arcsin x \\
# \text{if } x > 0: \arctan \left( \frac{1}{x} \right) = \text{arccot} x, \quad & \quad \text{arccot} \left( \frac{1}{x} \right) = \arctan x \\
# \text{if } x < 0: \arctan \left( \frac{1}{x} \right) = \text{arccot} x - \pi, \quad & \quad \text{arccot} \left( \frac{1}{x} \right) = \arctan x + \pi \\
# \end{aligned} $$
#
# ```
# (elfun2_area)=
# ## Inverse hyperbolic functions
#
# ```{index} Inverse hyperbolic functions (area)
# ```
#
# **Inverse hyperbolic functions** are also called **area functions** so we denote them with the prefix $\text{ar-}$, e.g. $\text{arsinh}\ x$. They are also often denoted as $\sinh^{-1} x$, for example.
#
# Hyperbolic functions
#
# $$ \begin{align}
# \sinh : \mathbb{R} \to \mathbb{R} \\
# \left. \cosh \right|_{[0, + \infty)} : [0, + \infty) \to [1, + \infty) \\
# \tanh : \mathbb{R} \to (-1, 1) \\
# \coth : \mathbb{R} \backslash \{ 0 \} \to (- \infty, -1) \cup (1, + \infty)
# \end{align} $$
#
# are bijections, so we can define their inverses:
#
# $$ \begin{align}
# \text{arsinh} := \sinh ^{-1} : \mathbb{R} \to \mathbb{R} \\
# \text{arcosh} := \left ( \left. \cosh \right|_{[0, + \infty)} \right)^{-1} : [1, + \infty) \to [0, + \infty) \\
# \text{artanh} := \tanh^{-1} : (-1, 1) \to \mathbb{R} \\
# \text{arcoth} := \coth^{-1} : (- \infty, -1) \cup (1, + \infty) \to \mathbb{R} \backslash \{ 0 \}
# \end{align} $$
#
# where
#
# $$
# \text{arsinh} x = \ln ( x + \sqrt{x^2 + 1} ), \qquad \text{arcosh} x = \ln (x + \sqrt{x^2 - 1}), \\
# \text{artanh} x = \frac{1}{2} \ln \left( \frac{1+x}{1-x} \right ), \qquad \text{arcoth} x = \frac{1}{2} \ln \left( \frac{x+1}{x-1} \right )
# $$
#
# ```{admonition} Derivation of arcosh $x$
#
# For demonstration purposes let us derive the above equation for $\text{arcosh} x$. Recall that $\cosh x = \frac{e^x + e^{-x}}{2}$. Let that equal some $y$, so that $\frac{e^x + e^{-x}}{2} = y$ or $e^x + e^{-x} - 2y = 0$. Now let $u = e^x$ and multiply both sides by $u$. We get:
#
# $$ u^2 - 2uy + 1 = 0, $$
#
# which is a simple quadratic equation with solutions:
#
# $$ u_{1,2} = y \pm \sqrt{y^2 - 1}. $$
#
# But since $u = e^x > 0$ the only possible solution is
#
# $$ u = e^x = y + \sqrt{y^2 - 1}. $$
#
# We take the natural logarithm of both sides:
#
# $$ x = \ln \left ( y + \sqrt{y^2 -1} \right ) $$
#
# ```
# + tags=["hide-input"]
fig = plt.figure(figsize=(10, 8))
ax = plt.gca()
# arsinh x = ln(x + sqrt(x^2 + 1)), defined on all of R
x = np.linspace(-4, 4, 201)
plt.plot(x, np.log(x + np.sqrt(x**2 + 1)), label=r'arsinh $x$')
# arcosh x = ln(x + sqrt(x^2 - 1)), defined on [1, +inf)
x = np.linspace(1, 4, 101)
plt.plot(x, np.log(x + np.sqrt(x**2 - 1)), label=r'arcosh $x$')
# artanh x = ln((1+x)/(1-x))/2, defined on (-1, 1); stop just short of
# the endpoints where the function diverges
x = np.linspace(-0.999, 0.999, 200)
plt.plot(x, np.log((1+x)/(1-x)) / 2, label=r'artanh $x$')
# arcoth x = ln((x+1)/(x-1))/2, defined on (-inf, -1) u (1, +inf);
# include a point at x = 0 (log of a negative number gives nan) so that
# matplotlib does not join the two disconnected branches with a line
x = np.concatenate((np.linspace(-4, -1.001, 100), [0], np.linspace(1.001, 4, 100)))
# fixed label: this curve is arcoth, it was previously mislabelled 'artanh'
plt.plot(x, np.log((x+1)/(x-1)) / 2, label=r'arcoth $x$')
ax.spines['left'].set_position('zero')
ax.spines['bottom'].set_position('zero')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_aspect('equal')
ax.legend(loc='best')
plt.show()
# -
| notebooks/c_mathematics/sets_and_functions/5_elementary_functions_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/ShapeAndSpace/shape-and-space.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# + tags=["hide-input"]
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
# + tags=["hide-input"] language="html"
# <style>
# .output_wrapper button.btn.btn-default,
# .output_wrapper .ui-dialog-titlebar {
# display: none;
# }
# </style>
# -
#
# # Shape and Space: Geometry
# + tags=["hide-input"]
import matplotlib.pyplot as plt
import ipywidgets
from ipywidgets import widgets, interact, interact_manual, Button, Layout
from IPython.display import Javascript, display
# -
# Geometry is one of the most valuable topics of everyday life. It is important for driving, cooking, drawing, decorating, and so many other tasks. It is also instinctive to human beings. Even babies use geometry when deciding that circular pieces fit in circular holes. So, this notebook is describing the math behind natural activities.
#
# In this notebook, you will learn how to define and explain length, width. You will also learn how to explain height, depth, thickness, perimeter and circumference. This notebook will help introduce and explain the metric units. These topics will be taught through visual aids and interactive models.
# ## Background On Shapes
#
# To start describing geometric shapes, we will first have to decide in which way the shapes appear:
#
# ## Two Dimensions
#
# In this case, objects appear flat. The following shapes illustrate this idea:
# <img src="images/2dObjectsEmpty.png">
# While these objects may seem very simple, they are a good way to introduce *length*, *width*, *perimeter*, and *circumference*.
# ## Length and Width
# The **length** of some **rectangle** is the *longest* side, (Or *edge*), of it. But, the **width**, is the *shortest* side of a **rectangle**. A useful way to remember this is to think of **L**ength as **L**ong.
#
# We can label the rectangular shape with its length and width:
# <img src="images/2dObjectsRectanglesHeight.png">
# Now, you may be wondering, "How do we fill out the **length** and **width** of a **square**, where all sides are equal?" Because all sides are equal, this means that length and width are *interchangeable*. So, we can have either of the following:
# <img src="images/2dObjectsAllSquares.png">
# Thus, we've found the **width** and **length** of all the rectangular shapes:
# <img src="images/2dObjectsSquares.png">
# ### Practice Question 1
#
# Given the following **rectangle**:
# <img src="images/PracticeQuestion1.png">
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("In the picture above, which edge is the width?")
# Multiple-choice quiz: ipywidgets re-runs the decorated callback on every
# selection and prints feedback for the chosen answer.
@interact(answer =widgets.RadioButtons(
    options=["A", "B"],
    value=None,  # start unselected so no feedback shows initially
    description="Answer: ",
    disabled=False
))
def reflective_angle_question(answer):
    # answer is None until the reader picks an option; no branch fires then.
    # NOTE: the backslash continuation is inside the string literal, so the
    # source indentation of the second line is embedded in the printed text.
    if answer=="A":
        print("Correct!\
        \nThe width is the shortest side of this rectangle.")
    elif answer =="B":
        print("Incorrect.\
        \nRemember, the width of a rectangle depends on its orientation.")
# -
# ## Perimeter
#
# With this information we can now find the perimeter of a **rectangle** or **square**. The perimeter is the total length of all sides of a **rectangular shape**.
#
# To find the **perimeter** of a **rectangle**, one must *add all the sides* together. The formula for the perimeter of a rectangle is given by
#
# $$ (Length) + (Length) + (Width) + (Width) = (Perimeter) $$
# But, since we know that:
#
# $$ (Length) + (Length) = 2 \times (Length), $$
# And that:
#
# $$ (Width) + (Width) = 2 \times (Width), $$
#
# We can simplify the formula to look like:
#
# $$\ 2 \times (Width) + 2 \times (Length) = (Perimeter). $$
# Now, we have a simplified formula for finding the perimeter of any **rectangle**.
#
# Finding the **perimeter** of any **square** is even simpler since we know that all sides have equal length. So, the formula is even simpler. You can either write:
#
# $$\ 4 \times (Width) = (Perimeter), $$
# Or:
#
# $$ 4 \times (Length) = (Perimeter). $$
#
# ### Practice Question 2
#
# Given the following **rectangle**:
# <img src="images/PracticeQuestion2.png">
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("What is the perimeter?")
# Free-text quiz: ipywidgets re-runs the decorated callback on every edit.
@interact(answer = widgets.Text(
    placeholder = "Enter your number here",
    description='Answer: ',
    disabled=False
))
def reflective_angle_question(answer):
    # Strip all spaces so entries such as " 12 " still match.
    answer = answer.replace(" ", "")
    if answer == "12":
        print("Correct!\
        \nThe perimeter is 2*(2) + 2*(4), which equals 12.")
    elif answer == "":
        # Empty field: prompt rather than mark the reader wrong.
        print("Type the perimeter of the rectangle.")
    else:
        # Fixed typo in the user-facing hint: "calulating" -> "calculating".
        print("Incorrect.\
        \nRemember, the formula for calculating the perimeter is 2*(width) + 2*(length).")
# -
# ## Diameter And Radius
#
# So far we've been focusing on rectangular objects, but now we must focus on circular shapes. We should know some useful facts about circles already, like how to find the **diameter** and **radius**, but we'll do a quick refresher on the subject.
#
# To find either of these values, we begin by locating the exact middle of the circle.
# <img src="images/CircleMiddle.png">
# Next, if we drew a straight line from the middle to an edge of the circle, we'd get the **radius**:
# <img src="images/CircleRadius.png">
# We get the **diameter** if we draw a line starting from any edge of the circle to the middle, and then to another edge:
# <img src="images/CircleDiameter.png">
# It is also important to note that the **radius** of any circle is *half* the size of the **diameter**:
#
# $$ \frac{Diameter}{2} = (Radius) $$
#
# You can think of the **diameter** as being *twice* the **radius**.
#
# $$ 2 \times (Radius) = (Diameter) $$
#
# ## Circumference
#
# Now that we've found the radius and the diameter, we can begin to look at how to find the **circumference** of the circle.
#
# First, we will define what the circumference actually is. The **circumference** is the total length around the circle:
# <img src="images/CircleCircumference.png">
# To find the **circumference** of any circle, we first have to find either the **radius** or the **diameter**. Then, we can use either of the following formulas to calculate the circumference.
#
# If we only have the **radius**, we can use the following formula to calculate the **circumference**:
#
# $$\ 2 \times \pi \times (Radius) = (Circumference) $$
#
# If we only have the **diameter**, we can use the following formula to calculate the **circumference**:
#
# $$\ \pi \times (Diameter) = (Circumference) $$
# Now we have a formula we can use to calculate the **circumference** of a circle!
# ### Practice Question 3
#
# Given the following **circle**:
# <img src="images/PracticeQuestion3.png">
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("What is the circumference? (Round to the nearest whole number)")
# Free-text quiz: ipywidgets re-runs the decorated callback on every edit.
@interact(answer =widgets.Text(
    placeholder = "Enter your number here",
    description='Answer: ',
    disabled=False
))
def reflective_angle_question(answer):
    # Strip all spaces so entries such as " 44 " still match.
    answer = answer.replace(" ", "")
    if answer == "44":
        print("Correct!\
        \nThe circumference is 2 * pi * (7). This equals 43.982297... which is rounded to 44.")
    elif answer == "":
        # Empty field: prompt rather than mark the reader wrong.
        print("Type the circumference of the circle.")
    else:
        print("Incorrect.\
        \nRemember, we are only given the radius here.")
# -
# ## Three Dimensions
#
# In three dimensions objects appear to have depth. Suppose you are looking at the following rectangle from the black circle:
# <img src="images/3dObjectsRectangle.png">
# One can see that a 3 dimensional rectangle has many more unique edges than a 2 dimensional one. To account for these changes humans came up with new terms to help describe these shapes.
# ## Height
#
# The **height** of a rectangle helps describe how **tall** some object is. This is the edge that goes vertically from the top of the shape to the bottom. A useful way to remember this is to think heigh**t** is for **t**all.
# <img src="images/3dObjectsRectangleHeight.png">
# We can also find the height of a 2 dimensional shape:
# <img src="images/2dObjectsRectanglesHeight.png">
# ## Depth
#
# The **depth** of a rectangle is how **deep** an object goes in. This is the edge that goes away from you. A useful way to remember this is to to think **d**epth is for **d**eep.
# <img src="images/3dObjectsRectangleDepth.png">
# ## Width
#
# The **width** of a 3 dimensional rectangle is the edge that's facing you. It's usually the one directly at the bottom of where you're facing.
# <img src="images/3dObjectsRectangleWidth.png">
# ## Thickness
#
# The **thickness** of a 3 dimensional shape is how deep some face of the shape is. A good way to picture this is to think about the walls of your house. The distance between the inside wall and the outer wall is the thickness of your house. This idea can be applied to all types of boxes and shapes, even lines:
# <img src="images/LineThickness.png">
# ## Cube
#
# As you may have guessed, the process of deciding **height**, **depth**, **thickness**, and **width** is the same for a cube:
# <img src="images/3dObjectsSquare.png">
# The only difference here is that the length of every edge is the same.
# ### Practice Question 4
#
# You view the following **rectangle** from the black circle:
# <img src="images/PracticeQuestion4.png">
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("Which edge is the depth?")
# Multiple-choice quiz: ipywidgets re-runs the decorated callback on every
# selection; each branch prints feedback for the chosen letter.
@interact(answer =widgets.RadioButtons(
    options=["A", "B", "C"],
    value=None,  # start unselected so no feedback shows initially
    description="Answer: ",
    disabled=False
))
def reflective_angle_question(answer):
    if answer=="A":
        print("Incorrect.\
        \nRemember, the depth is how deep, or far back an object goes, from your perspective.")
    elif answer =="B":
        print("Correct!\
        \nThe depth is how deep, or far back an object goes, from your perspective.")
    elif answer =="C":
        print("Incorrect.\
        \nRemember, the depth is how deep, or far back an object goes, from your perspective.")
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("Which edge is the height?")
# Multiple-choice quiz: ipywidgets re-runs the decorated callback on every
# selection; each branch prints feedback for the chosen letter.
@interact(answer =widgets.RadioButtons(
    options=["A", "B", "C"],
    value=None,  # start unselected so no feedback shows initially
    description="Answer: ",
    disabled=False
))
def reflective_angle_question(answer):
    if answer=="A":
        print("Incorrect.\
        \nRemember, the height is the edge that goes vertically, or top to bottom.")
    elif answer =="B":
        print("Incorrect.\
        \nRemember, the height is the edge that goes vertically, or top to bottom.")
    elif answer =="C":
        print("Correct!\
        \nThe height is the side that goes from top to bottom.")
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("Which edge is the width?")
# Multiple-choice quiz: ipywidgets re-runs the decorated callback on every
# selection; each branch prints feedback for the chosen letter.
@interact(answer =widgets.RadioButtons(
    options=["A", "B", "C"],
    value=None,  # start unselected so no feedback shows initially
    description="Answer: ",
    disabled=False
))
def reflective_angle_question(answer):
    if answer=="A":
        print("Correct!\
        \nThe width is the edge of the object that is closest to you, from your perspective.")
    elif answer =="B":
        print("Incorrect.\
        \nRemember, the width is the edge of the object that is closest to you, from your perspective.")
    elif answer =="C":
        print("Incorrect.\
        \nRemember, the width is the edge of the object that is closest to you, from your perspective.")
# -
# ## Measurements
#
# The last topic we will introduce in this section is the topic of **measurements**.
#
# This topic is important to every day life as it simplifies a lot of human activities. Think about the length of one of your nails. That length can't be treated the same way as the length it takes to go to the Moon from Earth. In other, 'mathier', words, we can't calculate long distances using the same methods we calculate small ones. Otherwise we'd get huge numbers for the long stuff, or tiny ones for the small stuff.
#
# So, humans came up with a way to solve this problem, and they created the **metric units**. These metric units help to calculate lengths. There are 7 of these units we need to focus on: **Kilometres**, **hectometres**, **decametres**, **metres**, **decimetres**, **centimetres**, and **millimetres**.
#
# While this may seem like a lot of units to memorize, they'll become very intuitive by the end of this lesson.
#
# The following is a diagram of how these units all interact together. It shows what **1 metre** would look like in each of the units. Make sure to take your time studying it.
# <img src="images/LadderMethod.png">
# As you may be able to tell from the diagram, these units interact with each other in a special way. This concept is explained in the following two ways:
#
# **GOING UP each step is 10 times BIGGER than the last step:** This means that each time you go up a step, your measurement becomes longer. For example, 10 metres, (*m*), is equal to 1 decametre, (*dam*). So, each time you go up a step, you must *divide by 10.*
#
# **GOING DOWN each step is 10 times SMALLER than the last step:** This means that each time you go down a step, your measurement becomes smaller. For example, 1 metre, (*m*), is equal to 10 decimetres, (*dm*). So, each time you go down a step, you must *multiply by 10.*
#
# So, try to remember: *Going down is multiply, Going up is divide.*
# ### A Useful Way To Memorize
#
# We can use a phrase to help remember each of the measurements and where they go on the stairs is:
#
# **"King Henry Died By Drinking Chocolate Milk."**
#
# Then, we know that:
#
# **K**ing is for **K**ilometres.
#
# **H**enry is for **H**ectometres.
#
# **D**ied is for **D**ecametres.
#
# **B**y is for **B**ase or **Metres**.
#
# **D**rinking is for **D**ecimetres.
#
# **C**hocolate is for **C**entimetres.
#
# **M**ilk is for **M**illimetres.
# ### Some Example Questions
#
# Suppose you had 5 **decametres**, (*dam*), and you wanted to turn them into **kilometres**, (*km*). We know that going from **decametres** to **kilometres** is going **UP** 2 steps, so we must divide the amount of decametres by 10 twice. This is because:
#
# $$ \frac{Decametre}{10} = (Hectometre) $$
#
# And:
#
# $$\ \frac{Hectometre}{10} = (Kilometre) $$
# Then, we can find the answer by writing:
#
# $$\ \frac{Decametres}{10 \times 10} = \frac{Decametres}{100} = (Kilometres) $$
#
# So:
#
# $$\ \frac{5}{100} = 0.05 $$
#
# **ANSWER:** 5 *dam* = 0.05 *km*.
# Suppose instead that you had 7 **centimetres**, (*cm*), and you wanted to turn them into **millimetres**, (*mm*). We know that going from **centimetres** to **milimetres** is going **DOWN** 1 step, so we must multiply the amount of centimetres by 10 once. This is because:
#
# $$\ (Centimetre) \times 10 = (Millimetre) $$
# Then, we can find the answer by doing:
# $$ 7 \times 10 = (70) $$
# **ANSWER:** 7 *cm* = 70 *mm*
# ### Practice Question 5
#
# Please answer the following true or false questions.
# + tags=["hide-input"]
print("True or False: 33 decimetres greater than 0.032 decametres.")
# True/false quiz: ipywidgets re-runs the decorated callback on selection.
@interact(answer = widgets.RadioButtons(
    options=['True', 'False'],
    description='Answer:',
    value = None,  # start unselected so no feedback shows initially
    disabled=False
))
def reflective_angle_question(answer):
    # 33 dm = 0.33 dam (two ladder steps up), so "True" is correct.
    if answer=="True":
        print("Correct!\
        \n33 decimetres is 0.33 decametres, which is greater than the 0.032 decametres.")
    elif answer =="False":
        print("Incorrect.\
        \nRemember, going from decimetres to decametres is going up 2 steps.")
# + tags=["hide-input"]
print("True or False: 0.4 hectometres greater than 43 metres.")
# True/false quiz: ipywidgets re-runs the decorated callback on selection.
@interact(answer = widgets.RadioButtons(
    options=['True', 'False'],
    description='Answer:',
    value = None,  # start unselected so no feedback shows initially
    disabled=False
))
def reflective_angle_question(answer):
    # 0.4 hm = 40 m (two ladder steps down), so "False" is correct.
    if answer=="True":
        print("Incorrect.\
        \nRemember, going from hectometres to metres is going down 2 steps.")
    elif answer =="False":
        # Fixed feedback: it previously said "0.4 decimetres", but the
        # question (and the arithmetic, 0.4 * 100 = 40) is about hectometres.
        print("Correct!\
        \n0.4 hectometres is 40 metres, which is less than the 43 metres.")
# + tags=["hide-input"]
print("True or False: 3852 milimetres greater than 30 metres.")
# True/false quiz: ipywidgets re-runs the decorated callback on selection.
# NOTE(review): "milimetres" is misspelled (should be "millimetres") in both
# the question and the feedback strings — user-facing text, worth fixing.
@interact(answer = widgets.RadioButtons(
    options=['True', 'False'],
    description='Answer:',
    value = None,  # start unselected so no feedback shows initially
    disabled=False
))
def reflective_angle_question(answer):
    # 3852 mm = 3.852 m (three ladder steps up), so "False" is correct.
    if answer=="True":
        print("Incorrect.\
        \nRemember, going from milimetres to metres is going up 3 steps.")
    elif answer =="False":
        print("Correct!\
        \n3852 milimetres is 3.852 metres, which is less than the 30 metres.")
# -
# ### Practice Question 6
#
# The average conventional freight trains in the US are about **2 kilometres** long.
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("How long would the train be in metres?")
# Free-text quiz: ipywidgets re-runs the decorated callback on every edit.
@interact(answer = widgets.Text(
    placeholder = "Enter your number here",
    description = 'Answer: ',
    disabled = False
))
def reflective_angle_question(answer):
    # Strip all spaces so entries such as "2 000" still match.
    answer = answer.replace(" ", "")
    if answer == "2000":
        # Fixed step count: kilometres -> metres is 3 ladder steps, not 4
        # (consistent with the 2 * 10 * 10 * 10 shown in the feedback).
        print("Correct!\
        \nThe train is 2 kilometres long, and going from kilometres to metres is going down 3 steps. \
        \nThen, we do 2 * 10 * 10 * 10. This equals 2000 metres")
    elif answer == "":
        # Empty field: prompt rather than mark the reader wrong.
        print("Type the length of the train in metres.")
    else:
        print("Incorrect.\
        \nRemember, going from kilometres to metres is going down 3 steps.")
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("How long would the train be in decametres?")
# Free-text quiz: ipywidgets re-runs the decorated callback on every edit.
@interact(answer = widgets.Text(
    placeholder = "Enter your number here",
    description = 'Answer: ',
    disabled = False
))
def reflective_angle_question(answer):
    # Strip all spaces so entries such as " 200 " still match.
    answer = answer.replace(" ", "")
    if answer == "200":
        # Added the missing "!" so the feedback matches every sibling cell.
        print("Correct!\
        \nThe train is 2 kilometres long, and going from kilometres to decametres is going down 2 steps. \
        \nThen, we do 2 * 10 * 10. This equals 200 decametres.")
    elif answer == "":
        # Empty field: prompt rather than mark the reader wrong.
        print("Type the length of the train in decametres.")
    else:
        print("Incorrect.\
        \nRemember, going from kilometres to decametres is going down 2 steps.")
# -
# ## Calculator For Measurements
#
# The following is an interactive calculator for going from any of the 7 units to another unit:
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
from IPython.display import display
# List of all possible measurement types, largest unit first.
MeasureList = ['Kilometres', 'Hectometres', 'Decametres', 'Metres', \
               'Decimetres', 'Centimetres', 'Millimetres']
# Used to ensure the description text doesn't get cut off.
style = {'description_width': 'initial'}
# Let user enter the numeric value to convert.
# Bounded between 0 and 9999 to make sure it's positive.
initVal = widgets.BoundedFloatText(value = 0, \
                                   min = 0, \
                                   max = 9999, \
                                   description = 'Measurement Value:', \
                                   style = style)
# Let user choose the initial measurement.
initMeas = widgets.SelectionSlider(options = MeasureList, \
                                   value = 'Kilometres', \
                                   description = 'Initial Measurement:', \
                                   style = style)
# Let user choose the final measurement.
finMeas = widgets.SelectionSlider(options = MeasureList, \
                                  value = 'Kilometres', \
                                  description = 'Final Measurement:', \
                                  style = style)
# Displaying all the previously created widgets in order.
display(initVal)
display(initMeas)
display(finMeas)
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
from IPython.display import display
import decimal
#"MeasureDict" assigns a numeric value to each of the units of measurement. This will help us to calculate
# going from one unit to another.
# Larger units get larger keys, so (initial key - final key) is the number of
# ladder steps DOWN from the initial unit to the final one.
MeasureDict = {'Kilometres' : 7, 'Hectometres': 6, 'Decametres': 5,\
               'Metres': 4, 'Decimetres': 3, 'Centimetres': 2, 'Millimetres': 1}
#"calculate_final" will determine what the final value obtained from the new measurement type.
def calculate_final(initV, initM, finM):
#Create the global value finV. This will store the final measurement value.
global finV
#Find the difference, (diff), between initM and finM and store it in diff. Two cases exist:
# 1. diff is less than or equal to 0:
# In this case the final measurement type is smaller than or equal to the initial measurement type.
# Therefore, must divide by 10 |diff| amount of times, (Or simply multiply 10 ** diff)
#
# 2. diff is greater than 0:
# In this case the final measurement type is greater than or equal to the final measurement type.
# Therefore, must multiply by 10 diff amount of times, (Or simply multiply 10 ** diff)
#
#Thus, we can use 10**diff as it works for both cases.
diff = initM - finM
finV = initV * 10**diff
#Create a button to calculate the change in measurement.
button = widgets.Button(description="Calculate")
display(button)
def on_button_clicked(b):
    # Read the three widgets, run the conversion and print the result.
    #Get the initial value inputed as a float (BoundedFloatText).
    initV = initVal.value
    #Get the initial measurement inputed as a string.
    initM = initMeas.value
    #Get the final measurement inputed as a string.
    finM = finMeas.value
    #Get the key value of the initial measurement type.
    initMeasKey = MeasureDict.get(initM)
    #Get the key value of the final measurement type.
    finMeasKey = MeasureDict.get(finM)
    #Calculate the final value; the result is stored in the global finV.
    calculate_final(initV, initMeasKey, finMeasKey)
    #checkFp is finV rounded to 4 decimal places; comparing it with finV tells
    # us whether the exact result survives a short fixed-point format (very
    # small results would otherwise be printed in scientific notation).
    checkFp = float(format(finV, '.4f'))
    #If rounding lost nothing, print finV directly.
    if (checkFp - finV) == 0:
        print("Your initial measurement of " + str(initV) + " " + initM + \
              " becomes " + str(finV) + " " + finM + ".")
    #Otherwise print finV formatted to 6 decimal places (with thousands
    # separators) to keep the output in plain decimal notation.
    else:
        print("Your initial measurement of " + str(initV) + " " + initM + \
              " becomes " + format(finV, ',.6f') + " " + finM + ".")
button.on_click(on_button_clicked)
# -
#
# <h1 align='center'> Exercises </h1>
# ## Question 1
#
# Given the following **circle**:
# <img src="images/Question1.png">
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("What is the radius? (Round to the nearest CENTIMETRE)")
# Free-text quiz: ipywidgets re-runs the decorated callback on every edit.
@interact(answer =widgets.Text(
    placeholder = "Enter your number here",
    description='Answer: ',
    disabled=False
))
def reflective_angle_question(answer):
    # Strip all spaces so entries such as " 3 " still match.
    answer = answer.replace(" ", "")
    if answer == "3":
        print("Correct!\
        \nThe radius is 19 ÷ (2 * pi). This equals 3.02394... cm which is rounded to 3 cm.")
    elif answer == "":
        # Empty field: prompt rather than mark the reader wrong.
        print("Type the radius of the circle.")
    else:
        print("Incorrect.\
        \nRemember, in this question we are given the circumference. \
        \nTry to find a way to go from circumference to radius.")
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("What is the diameter? (Round to the nearest METRE)")
# Free-text quiz: ipywidgets re-runs the decorated callback on every edit.
@interact(answer =widgets.Text(
    placeholder = "Enter your number here",
    description = 'Answer: ',
    disabled = False
))
def reflective_angle_question(answer):
    # Strip all spaces so entries such as " 0.06 " still match.
    answer = answer.replace(" " , "")
    if answer == "0.06":
        print("Correct!\
        \nThe diameter is twice the radius, which we found to be 3cm in the last question. \
        \nThen, we divide the diameter by 100 to get it to 0.06m.")
    elif answer == "":
        # Empty field: prompt rather than mark the reader wrong.
        print("Type the diameter of the circle.")
    else:
        print("Incorrect.\
        \nRemember, in this question we want the nearest metre. \
        \nTry using the relationship between radius and diameter.")
# -
# ## Question 2
#
# Bob is a farmer who built a perfectly square fence that had a perimeter of 32 metres. Suppose that Bob wanted to build the largest circular fence he could inside the square fence.
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("What is the circumference of the circular fence? (Round to the nearest METRE)")
# Free-text quiz: ipywidgets re-runs the decorated callback on every edit.
@interact(answer =widgets.Text(
    placeholder = "Enter your number here",
    description='Answer: ',
    disabled=False
))
def reflective_angle_question(answer):
    # Strip all spaces so entries such as " 25 " still match.
    answer = answer.replace(" ", "")
    if answer == "25":
        print("Correct!\
        \nThe diameter of the new fence would be 32 ÷ 4, which equals 8. \
        \nThen, the circumference is 8 * pi. This equals 25.13274...m, or 25m.")
    elif answer == "":
        # Empty field: prompt rather than mark the reader wrong.
        print("Type the circumference of the circle.")
    else:
        print("Incorrect.\
        \nRemember, in this question we are given the perimeter of a square fence. \
        \nTry finding how long the edge of the square fence is, and then use that as the diameter for the new fence.")
# -
# ## Question 3
# You view the following **rectangle** from the black circle:
# <img src="images/Question3.png">
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("What is the height of the rectangle? (Give the value in DECAMETRES)")
# Free-text quiz: ipywidgets re-runs the decorated callback on every edit.
@interact(answer =widgets.Text(
    placeholder = "Enter your number here",
    description='Answer: ',
    disabled=False
))
def reflective_angle_question(answer):
    # Strip all spaces so entries such as " 1.24 " still match.
    answer = answer.replace(" ", "")
    if answer == "1.24":
        print("Correct!\
        \nThe height is equal to 12.4 metres. \
        \nThen, we divide the height by 10 to get 1.24 decametres.")
    elif answer == "":
        # Empty field: prompt rather than mark the reader wrong.
        print("Type the height of the rectangle.")
    else:
        # Fixed garbled hint: was "the length to the from the bottom".
        print("Incorrect.\
        \nRemember, in this question we want the value in decametres. \
        \nTry using the length from the bottom to the top of the rectangle.")
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("What is the width of the rectangle? (Give the value in CENTIMETRES)")
# Free-text quiz: ipywidgets re-runs the decorated callback on every edit.
@interact(answer =widgets.Text(
    placeholder = "Enter your number here",
    description='Answer: ',
    disabled=False
))
def reflective_angle_question(answer):
    # Strip all spaces so entries such as " 660 " still match.
    answer = answer.replace(" ", "")
    if answer == "660":
        print("Correct!\
        \nThe width is equal to 6.6 metres. \
        \nThen, we multiply the width by 100 to get 660 centimetres.")
    elif answer == "":
        # Empty field: prompt rather than mark the reader wrong.
        print("Type the width of the rectangle.")
    else:
        print("Incorrect.\
        \nRemember, in this question we want the value in centimetres. \
        \nTry using the length at the bottom of the rectangle.")
# + tags=["hide-input"]
from ipywidgets import interact_manual,widgets
print("What is the depth of the rectangle? (Give the value in HECTOMETRES)")
# Free-text quiz: ipywidgets re-runs the decorated callback on every edit.
@interact(answer = widgets.Text(
    placeholder = "Enter your number here",
    description= 'Answer: ',
    disabled= False
))
def reflective_angle_question(answer):
    # Strip all spaces so entries such as " 0.034 " still match.
    answer = answer.replace(" " , "")
    if answer == "0.034":
        print("Correct!\
        \nThe depth is equal to 3.4 metres. \
        \nThen, we divide the depth by 100 to get 0.034 hectometres.")
    elif answer == "":
        # Fixed prompt: this question asks for the depth, not the width.
        print("Type the depth of the rectangle.")
    else:
        print("Incorrect.\
        \nRemember, in this question we want the value in hectometres. \
        \nTry using the length at the side of the rectangle.")
# -
#
# <h1 align='center'> Summary </h1>
# + [markdown] slideshow={"slide_type": "slide"}
# You will now know how to find **length**, **width**, **perimeter** and **circumference**. Additionally, you will know how to find **height**, **depth**, and **thickness**. You have also been introduced to the **metric units**, and how to use them.
# -
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| _build/jupyter_execute/curriculum-notebooks/Mathematics/ShapeAndSpace/shape-and-space.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_teachopencadd)
# language: python
# name: conda_teachopencadd
# ---
# # トークトリアル 5
#
# # 化合物クラスタリング
#
# #### Developed in the CADD seminars 2017 and 2018, AG Volkamer, Charité/FU Berlin
#
# <NAME> and <NAME>
# ## このトークトリアルの目的
#
# 類似の化合物は同じターゲット分子に結合し、類似の効果を示すかもしれません。この類似性質原則(similar property principle、SPP)に基づき、化合物類似度をつかって、クラスタリングによる化合物集団(chemical group)の構築を行うことができます。そのようなクラスタリングによって、より大きなスクリーニング化合物セットから、さらなる実験を行う対象となる化合物の多様性のあるセットを選ぶことができます。
#
# ## 学習の目標
#
# このトークトリアルでは、以下についてさらに学びます:
#
# * 化合物をグループ化する方法と、多様性のある化合物セットを選ぶ方法
# * クラスタリングアルゴリズム2つの簡潔な紹介
# * サンプル化合物セットへのButinaクラスタリングアルゴリズムの適用
#
# ### 理論
#
# * クラスタリングとJarvis-Patrickアルゴリズムの紹介
# * Butinaクラスタリングの詳細な説明
# * 多様性のある化合物の選択
#
# ### 実践
#
# * Butinaクラスタリングと化合物選択の例
#
# ## レファレンス
#
# * <NAME>. Unsupervised Data Base Clustering Based on Daylight’s Fingerprint and Tanimoto Similarity: A Fast and Automated Way To Cluster Small and Large Data Set. J. Chem. Inf. Comput. Sci. 1999.
# * Leach, <NAME>., Gillet, <NAME>. An Introduction to Chemoinformatics. 2003
# * Jarvis-Patrickクラスタリング: http://www.improvedoutcomes.com/docs/WebSiteDocs/Clustering/Jarvis-Patrick_Clustering_Overview.htm
# * TDT チュートリアル: https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb
# * rdkitクラスタリングドキュメンテーション: http://rdkit.org/docs/Cookbook.html#clustering-molecules
# _____________________________________________________________________________________________________________________
#
#
# ## 理論
#
# ### クラスタリングとJarvis-Patrickアルゴリズムの紹介
#
# [クラスタリング(Clustering)](https://en.wikipedia.org/wiki/Cluster_analysis) は「ある一連のものをグループ化するタスクで、(クラスターと呼ばれる)同じグループに属するものが(ある基準において)、他のグループ(クラスター)に属するものと比較して、お互いにより似通っているようにグループ化するタスク」と定義することができます。
#
# 医薬品研究における化合物クラスタリングは化合物間の化学的、構造的類似度にしばしば基づいており、共通する特徴をもつグループをみつけたり、さらに解析を行うための多様性がある代表的な化合物セットをデザインするために行われます。
#
# 一般的な手順:
#
# * 近接するポイント間の類似度によってデータをクラスタリングすることに基づく手法
# * ケモインフォマティクスにおいて、化合物は化合物フィンガープリントでエンコードされ、類似度はタニモト類似度によって表現されることが多いです(**トークトリアル4**参照)
# * 念のため再確認:フィンガープリントはバイナリのベクトルで、各ビットはある特定の部分構造フラグメントが分子の中にあるか無いかを示します。
# * 類似度(あるいは距離)行列: バイナリのフィンガープリントによって表現された分子のペアの類似度はタニモト係数を使って定量化されることがもっとも多く、これは共通する特徴(ビット)の数を評価する値です。
# * タニモト係数の値の範囲は0(類似性なし)から1(類似度が高い)となっています。
#
# 利用可能なクラスタリングアルゴリズムはたくさんありますが、中でも[Jarvis-Patrickクラスタリング](http://www.improvedoutcomes.com/docs/WebSiteDocs/Clustering/Jarvis-Patrick_Clustering_Overview.htm) は医薬品の分野において最も広く使われているアルゴリズムの一つです。
#
# Jarvis-Patrickクラスタリングアルゴリズムは2つのパラメーター、KとK<sub>min</sub>で定義されています:
# * 各分子のK最近傍のセットを計算
# * 2つの化合物が次の場合、同じクラスターに含める
# * 最近傍のリストにお互い含まれている場合
# * K最近傍のうち少なくともK<sub>min</sub>を共通に持つ場合
#
# Jarvis-Patrickクラスタリングアルゴリズムは決定的アルゴリズムで、巨大な化合物セットを数時間のうちに処理することができます。しかしながら、この手法の悪い側面として、大きく不均質なクラスタを生み出す傾向があります(Butinaクラスタリングの参考文献を参照)。
#
# 他のクラスタリングアルゴリズムについて知りたければ [scikit-learn clustering module](http://scikit-learn.org/stable/modules/clustering.html) を参照してください。
# ### Butinaクラスタリングの詳細な説明
#
# Butinaクラスタリング([*J. Chem. Inf. Model.*(1999), 39(4), 747](https://pubs.acs.org/doi/abs/10.1021/ci9803381))はより小さいが均質なクラスターを見つけるために開発されました。(少なくとも)クラスターの中心が、クラスター内の他の全ての分子と、与えられた閾値以上に類似していること、を前提条件とします。
#
# このクラスタリングアプローチには鍵となるステップがあります(下のフローチャート参照してください):
#
# #### 1. データの準備と化合物エンコーディング
# * 化合物類似性を見つけるために、入力データの化合物(例 SMILESとしてあたえられたもの)は化合物フィンガープリントとしてエンコードされます。例えば、RDK5フィンガープリントは、よく知られている[Daylight Fingerprint](/http://www.daylight.com/dayhtml/doc/theory/theory.finger.html)(のオリジナル文献で使われていたもの)に似たサブグラフに基づく(subgraph-based)フィンガープリントです。
#
# #### 2. タニモト類似度(あるいは距離)行列
# * 2つのフィンガープリント間の類似度はタニモト係数を使って計算します
# * 全ての可能な分子/フィンガープリントのペア間のタニモト類似度を要素とする行列です( n x n 類似度行列、n=化合物数、上三角行列のみが使われます)
# * 同様に、距離行列が計算できます(1 - 類似度)
#
# #### 3. 化合物クラスタリング:中心(centroids)と排除球(exclusion spheres)
#
# 注:化合物は次の場合に同じクラスターに含められます。(距離行列を使用する場合)クラスターのセントロイドからの最長距離が特定のカットオフ値以下の時、あるいは、(類似度行列を使用する場合)最小の類似度が特定のカットオフ値以上のときです。
#
# * **潜在的なクラスター中心の発見**
# * クラスター中心は、与えられたクラスターの中で最も近傍化合物の数が多い分子です。
# * 近傍化合物の割り当て(Annotate neighbors):各分子について、与えられた閾値以下のタニモト距離に含まれる全ての分子の数を数えます。
# * 化合物を近傍化合物の数によって降順に並べ替え、クラスター中心となりうるもの(即ち、近傍の数が最大となる化合物)をファイルの一番上に置くようにします。
#
# * **排除球(exclusion shpheres)に基づくクラスタリング**
# * 並べ替えたリスト内の最初の分子(中心、centroid)から始めます。
# * クラスタリングに使われたカットオフ値以上のタニモトインデックスをもつ全ての化合物をクラスターのメンバーとします(類似度の場合)
# * 考慮しているクラスターのメンバーとなった各化合物にフラグをたて、以降の比較の対象から取り除きます。したがって、フラグがたてたれた化合物は他のクラスターの中心やメンバーとなることはできません。この手順は新しく作成したクラスターの周りに排除球を置くようなものです。
# * リストの最初の化合物について全ての近傍化合物がみつかったら、リストトップの最初に利用可能な(即ちフラグが立っていない)化合物を新しいクラスター中心とします。
# * リストのフラグの立っていない全ての化合物について、上から順に同じ手順を繰り返していきます。
# * クラスタリング手順の最後までフラグの立てられなかった化合物はシングルトン(singleton)となります。
# * シングルトンとして割り当てられた化合物の中には、指定したタニモト類似度インデックスで近傍となる化合物をもつものもありえますが、これらの近傍化合物は、クラスター中心に基づく枠組みによってすでに排除球に含められていることに注意してください。
# Display the Butina-clustering flowchart PDF inline in the notebook.
# (Kept as a bare cell expression so Jupyter renders the IFrame.)
from IPython.display import IFrame
IFrame('./images/butina_full.pdf', width=600, height=300)
# *Figure 1:* Butinaクラスタリングアルゴリズムの理論的な例(Calvinna Caswaraによる)
# ### 多様な化合物の選択
#
# 代表的な化合物セットを見つけるという考え方が、医薬品業界ではしばしば使われます。
#
# * 例えば、バーチャルスクリーニングキャンペーンを行なったものの、リソースが非常に限られており、確認のためのアッセイ実験で評価可能な化合物数が少ないとしましょう。
# * このスクリーニングから得られる情報を最大限のものとするために、多様性のある化合物セットを選びたいと思います。そのためには、活性を有する可能性がある化合物のリストの中で、各ケミカルシリーズを代表する一つを選択します。
#
# 別のシナリオとしては、構造活性相関、即ち化合物の小さな構造変化がin vitro活性にどう影響を与えるか?、について情報を得るために一連の化合物を選択する、というシナリオが考えられます。
# ## 実践
#
# ### Butinaクラスタリングアルゴリズムの使用例
# [<NAME> と <NAME>によるTDT tutorial notebook](https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb)の例にしたがって適用してみます。
# #### 1. データの読み込みとフィンガープリントの計算
# ここではデータの準備とフィンガープリントの計算を行います。
# パッケージのimport
import pandas as pd
import numpy
import matplotlib.pyplot as plt
import time
import random
from random import choices
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit import DataStructs
from rdkit.DataStructs import cDataStructs
from rdkit.ML.Cluster import Butina
from rdkit.Chem import Draw
from rdkit.Chem import rdFingerprintGenerator
from rdkit.Chem.Draw import IPythonConsole
# Load the data and have a look at it.
# Filtered data set extracted in talktorial 2.
compound_df= pd.read_csv('../data/T2/EGFR_compounds_lipinski.csv',sep=";", index_col=0)
print('data frame shape:',compound_df.shape)
compound_df.head()
# Create RDKit molecules from the SMILES and store them, each paired
# with its ChEMBL ID, in a list of (mol, id) tuples.
mols = [
    (Chem.MolFromSmiles(compound_df['smiles'][i]),
     compound_df['molecule_chembl_id'][i])
    for i in compound_df.index
]
mols[0:5]
# +
# Create fingerprints for all molecules.
rdkit_gen = rdFingerprintGenerator.GetRDKitFPGenerator(maxPath=5)
fingerprints = [rdkit_gen.GetFingerprint(m) for m,idx in mols]
# How many compounds/fingerprints do we have?
print('Number of compounds converted:',len(fingerprints))
print('Fingerprint length per compound:',len(fingerprints[0]))
# -
# #### 2. タニモト類似度と距離行列
# フィンガープリントが生成できたので、次の段階に進みます。クラスター中心となる可能性があるものの同定です。このために、タニモト類似度と距離行列を計算する関数を定義します。
# Compute the distance matrix for a list of fingerprints.
def Tanimoto_distance_matrix(fp_list):
    """Return the flattened lower-triangle Tanimoto distance matrix of fp_list."""
    distances = []
    for pos in range(1, len(fp_list)):
        # Bulk similarity of fingerprint `pos` against all earlier ones.
        sims = DataStructs.BulkTanimotoSimilarity(fp_list[pos], fp_list[:pos])
        # We need a distance matrix, so convert every similarity to 1 - x.
        distances.extend(1 - s for s in sims)
    return distances
# [RDKitクックブック:分子のクラスタリング](http://rdkit.org/docs/Cookbook.html#clustering-molecules)も参照してください。
# Example: calculate the similarity of a single pair of fingerprints.
sim = DataStructs.TanimotoSimilarity(fingerprints[0],fingerprints[1])
print ('Tanimoto similarity: %4.2f, distance: %4.2f' %(sim,1-sim))
# Example: calculate the distance matrix (distance = 1 - similarity).
Tanimoto_distance_matrix(fingerprints)[0:5]
# +
# Side note: this looks like a flat list rather than a matrix,
# but it is the triangular matrix stored in list form.
n = len(fingerprints)
# Number of elements in the triangular matrix: n*(n-1)/2.
elem_triangular_matr = (n*(n-1))/2
print(int(elem_triangular_matr), len(Tanimoto_distance_matrix(fingerprints)))
# -
# #### 3. 化合物クラスタリング:中心(centroids)と排除球(exclusion spheres)
# ここでは、化合物のクラスター化を行い結果を眺めます。
# Clustering function.
# Input: fingerprints and a clustering threshold (distance cutoff).
def ClusterFps(fps, cutoff=0.2):
    """Butina-cluster the fingerprints at the given distance cutoff."""
    # Butina expects the flattened lower-triangle Tanimoto distance matrix.
    dists = Tanimoto_distance_matrix(fps)
    return Butina.ClusterData(dists, len(fps), cutoff, isDistData=True)
# Cluster the compounds by fingerprint similarity.
# +
# Run the clustering procedure for the data set.
clusters = ClusterFps(fingerprints,cutoff=0.3)
# Give a short report about the number of clusters and their sizes.
num_clust_g1 = len([c for c in clusters if len(c) == 1])
num_clust_g5 = len([c for c in clusters if len(c) > 5])
num_clust_g25 = len([c for c in clusters if len(c) > 25])
num_clust_g100 = len([c for c in clusters if len(c) > 100])
print("total # clusters: ", len(clusters))
print("# clusters with only 1 compound: ", num_clust_g1)
print("# clusters with >5 compounds: ", num_clust_g5)
print("# clusters with >25 compounds: ", num_clust_g25)
print("# clusters with >100 compounds: ", num_clust_g100)
# -
# **訳注(04/2020)**
#
# ここで実行しているRDKitの[Butinaモジュール](https://www.rdkit.org/docs/source/rdkit.ML.Cluster.Butina.html)の戻り値はクラスターの情報を含むタプルのタプルです。
#
# ( (cluster1_elem1, cluster1_elem2, …), (cluster2_elem1, cluster2_elem2, …), …)
#
# という形式で、タプル内タプルの最初の`clusterX_elem1`は各クラスターXのcentroidとなっています。
# 上記では`len()`でクラスターのサイズを取得しています。
#
# **訳注ここまで**
# Plot the size of the clusters (largest first; the last cluster is omitted
# from the bar range by the slice below).
fig = plt.figure(1, figsize=(10, 4))
plt1 = plt.subplot(111)
plt.axis([0, len(clusters), 0, len(clusters[0])+1])
plt.xlabel('Cluster index', fontsize=20)
plt.ylabel('Number of molecules', fontsize=20)
plt.tick_params(labelsize=16)
plt1.bar(range(1, len(clusters)), [len(c) for c in clusters[:len(clusters)-1]], lw=0)
plt.show()
# #### 合理的なカットオフ値を選ぶにはどうすればよいか?
#
# クラスタリングの結果はユーザーの選んだ閾値に依存するので、カットオフ値の選択についてより詳細に見てみたいと思います。
# Scan distance cutoffs from 0.0 to 0.9 and plot the resulting cluster-size
# distribution for each, to help pick a sensible threshold.
for i in numpy.arange(0., 1.0, 0.1):
    clusters = ClusterFps(fingerprints,cutoff=i)
    fig = plt.figure(1, figsize=(10, 4))
    plt1 = plt.subplot(111)
    plt.axis([0, len(clusters), 0, len(clusters[0])+1])
    plt.xlabel('Cluster index', fontsize=20)
    plt.ylabel('Number of molecules', fontsize=20)
    plt.tick_params(labelsize=16)
    plt.title('Threshold: '+str('%3.1f' %i), fontsize=20)
    plt1.bar(range(1, len(clusters)), [len(c) for c in clusters[:len(clusters)-1]], lw=0)
    plt.show()
# 結果を見ると、閾値(距離のカットオフ)が高いほど、より多くの化合物が類似しているとみなされ、したがってより少ないクラスター数にクラスタリングされます。閾値が低くなると、より多数の小さなクラスターとシングルトン(singleton)が現れます。
#
# * 距離のカットオフ値がより小さくなるほど、一つのクラスターに所属する化合物は互いにより類似していることが求められます。
#
# 上のプロットをもとにして、私たちは距離の閾値0.2を選択しました。シングルトンの数が多くなく、クラスターのサイズも大き過ぎず、分布は滑らかになっています。
# +
dist_co = 0.2
clusters = ClusterFps(fingerprints,cutoff=dist_co)
# Plot the cluster sizes - and save the figure.
fig = plt.figure(1, figsize=(8, 2.5))
plt1 = plt.subplot(111)
plt.axis([0, len(clusters), 0, len(clusters[0])+1])
plt.xlabel('Cluster index', fontsize=20)
plt.ylabel('# molecules', fontsize=20)
plt.tick_params(labelsize=16)
plt1.bar(range(1, len(clusters)), [len(c) for c in clusters[:len(clusters)-1]], lw=0)
plt.title('Threshold: '+str('%3.1f' %dist_co), fontsize=20)
plt.savefig("../data/T5/cluster_dist_cutoff_%4.2f.png" %dist_co, dpi=300, bbox_inches="tight", transparent=True)
print('Number of clusters %d from %d molecules at distance cut-off %4.2f' %(len(clusters), len(mols), dist_co))
print('Number of molecules in largest cluster:', len(clusters[0]))
print('Similarity between two random points in same cluster %4.2f'%DataStructs.TanimotoSimilarity(fingerprints[clusters[0][0]],fingerprints[clusters[0][1]]))
print('Similarity between two random points in different cluster %4.2f'%DataStructs.TanimotoSimilarity(fingerprints[clusters[0][0]],fingerprints[clusters[1][0]]))
# -
# ### クラスターの可視化
# #### 最大のクラスターからの化合物例10個
#
# 最初の最もサイズの大きいクラスターから、初めの10化合物をより詳細に見てみましょう。
print ('Ten molecules from largest cluster:')
# Draw the molecules (with their ChEMBL IDs as legends).
Draw.MolsToGridImage([mols[i][0] for i in clusters[0][:10]],
                     legends=[mols[i][1] for i in clusters[0][:10]],
                     molsPerRow=5)
# +
# Save the molecules of the largest cluster for the MCS analysis in a later
# talktorial (the original note says talktorial 9; the translator suspects
# talktorial 6 — TODO confirm).
w = Chem.SDWriter('../data/T5/molSet_largestCluster.sdf')
# Prepare the data: attach the ChEMBL ID as the molecule name.
tmp_mols=[]
for i in clusters[0]:
    tmp = mols[i][0]
    tmp.SetProp("_Name",mols[i][1])
    tmp_mols.append(tmp)
# Write out the data.
for m in tmp_mols: w.write(m)
# BUG FIX: close the writer so the SD file is flushed to disk
# (the original never closed it — a resource leak).
w.close()
# -
# #### 2番目に大きいクラスターからの化合物例10個
print ('Ten molecules from second largest cluster:')
# Draw the molecules (with their ChEMBL IDs as legends).
Draw.MolsToGridImage([mols[i][0] for i in clusters[1][:10]],
                     legends=[mols[i][1] for i in clusters[1][:10]],
                     molsPerRow=5)
# それぞれのクラスターの最初の10化合物は実際にお互いに類似しており、多くが共通の骨格を共有しています(視覚的に判断する限り)。
#
# 化合物セットの最大共通部分構造(maximum common substructure、MCS)を計算する方法について、もっと知りたければ**トークトリアル6**を参照してください。
#
# #### 最初の10クラスターからの例
#
# 比較のため、最初の10クラスターのクラスター中心を見てみます。
print ('Ten molecules from first 10 clusters:')
# Draw the cluster centres (first molecule of each cluster).
Draw.MolsToGridImage([mols[clusters[i][0]][0] for i in range(10)],
                     legends=[mols[clusters[i][0]][1] for i in range(10)],
                     molsPerRow=5)
# 最初の3つのクラスターから取り出したクラスター中心をSVGファイルとして保存。
# +
# Generate the image for the first three cluster centres.
img = Draw.MolsToGridImage([mols[clusters[i][0]][0] for i in range(0,3)],
                           legends=["Cluster "+str(i) for i in range(1,4)],
                           subImgSize=(200,200), useSVG=True)

# Get the SVG data.
molsvg = img.data

# Make the opaque background transparent and enlarge the font size.
# (Dropped the stray trailing semicolons — not idiomatic Python.)
molsvg = molsvg.replace("opacity:1.0", "opacity:0.0")
molsvg = molsvg.replace("12px", "20px")

# Save the altered SVG data to a file; the context manager guarantees
# the file is closed even if the write fails.
with open("../data/T5/cluster_representatives.svg", "w") as f:
    f.write(molsvg)
# -
# まだいくらか類似性が残っているのが見てとれますが、明らかに、異なるクラスターの中心同士は、一つのクラスター内の化合物同士と比較して、類似性が低くなっています。
# #### クラスター内タニモト類似度
#
# クラスター内のタニモト類似度をみてみることもできます。
# Function to compute the Tanimoto similarity of all fingerprint pairs
# within each cluster.
def IntraTanimoto(fps_clusters):
    """Return, per cluster, the list of pairwise Tanimoto similarities."""
    # Tanimoto_distance_matrix yields distances; similarity = 1 - distance.
    return [[1 - d for d in Tanimoto_distance_matrix(cluster_fps)]
            for cluster_fps in fps_clusters]
# Recompute the fingerprints for the first 10 clusters.
mol_fps_per_cluster=[]
for c in clusters[:10]:
    mol_fps_per_cluster.append([rdkit_gen.GetFingerprint(mols[i][0]) for i in c])
# Compute the intra-cluster similarities.
intra_sim = IntraTanimoto(mol_fps_per_cluster)
# Violin plot of the intra-cluster similarities.
pos = list(range(10))
labels = pos
plt.figure(1, figsize=(10, 5))
ax = plt.subplot(111)
r = plt.violinplot(intra_sim, pos, showmeans=True, showmedians=True, showextrema=False)
ax.set_xticks(pos)
ax.set_xticklabels(labels)
ax.set_yticks(numpy.arange(0.6, 1., 0.1))
ax.set_title('Intra-cluster Tanimoto similarity', fontsize=13)
r['cmeans'].set_color('red')
# mean = red, median = blue
# ### 化合物選択
#
# 以下では、**多様性のある**サブセットとして**最大 1000化合物**の最終リストを取り出します。
#
# このため、最大サイズのクラスターから始めて最大1000化合物を取り出すまで、各クラスターのクラスター中心(即ち、各クラスターの最初の分子)をとりだして、各クラスターのクラスター中心と最も似ている10化合物(あるいは、クラスター化合物が10以下のときは50%の化合物)を選びます。これにより、各クラスターの代表が得られます。
#
# この化合物選択の目的は、確認用アッセイ実験に提案するためのより小さな化合物セットの多様性を確保することです。
#
# 選択の手順は[<NAME> と <NAME>によるTDT tutorial notebook](https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb)からとりました。ノートブックで述べられているように、このアプローチの背景にあるアイデアは、確認アッセイ実験の結果からSARを取得しつつ(より大きなクラスターの非常に類似した化合物群を保ちつつ)、多様性を確保する(各クラスターの代表を組み入れる)ことです。
# Get the cluster centres.
# The centre of each cluster is its first molecule.
clus_center = [mols[c[0]] for c in clusters]
# How many cluster centres/clusters do we have?
print('Number of cluster centers: ', len(clus_center))
# Sort the clusters by size, and the compounds inside each cluster by
# their similarity to the cluster centre.
clusters_sort = []
for c in clusters:
    if len(c) < 2:
        continue  # skip singletons
    # Fingerprints for every member of this cluster.
    fps_clust = [rdkit_gen.GetFingerprint(mols[i][0]) for i in c]
    # Similarity of all members to the cluster centre (the first molecule).
    sims = DataStructs.BulkTanimotoSimilarity(fps_clust[0], fps_clust[1:])
    # Pair each similarity with its compound index (centre excluded!) and
    # rank most-similar first.
    ranked = sorted(zip(sims, c[1:]), reverse=True)
    # Store (cluster size, member indices ordered by similarity).
    clusters_sort.append((len(ranked), [idx for _, idx in ranked]))
# Sort the clusters themselves by size, descending.
clusters_sort.sort(reverse=True)
# Pick a maximum of 1000 compounds.
# Count the selected molecules; take the cluster centres first.
sel_molecules = clus_center.copy()

# Starting with the biggest cluster, take 10 compounds (or at most 50%)
# from each cluster.
index = 0
diff = 1000 - len(sel_molecules)
while diff > 0 and index < len(clusters_sort):
    # Get the similarity-sorted member indices of the current cluster.
    tmp_cluster = clusters_sort[index][1]
    # If the cluster holds more than 10 compounds, take exactly 10.
    if clusters_sort[index][0] > 10:
        num_compounds = 10
    # Otherwise take half of the compounds (rounded up).
    else:
        # BUG FIX: the original used len(c) — a stale loop variable left over
        # from an earlier cell — instead of the size of the current cluster.
        num_compounds = int(0.5 * clusters_sort[index][0]) + 1
    if num_compounds > diff:
        num_compounds = diff
    # Append the picked (mol, ChEMBL ID) tuples to the selection.
    sel_molecules += [mols[i] for i in tmp_cluster[:num_compounds]]
    index += 1
    diff = 1000 - len(sel_molecules)
print('# Selected molecules: '+str(len(sel_molecules)))
# この多様性のある化合物のセットは実験による評価に用いることができます。
# **訳注(2020/04)**
#
# 上記で選択された化合物は**1132**個となっており、意図した最大1000個とはなっていません。オリジナルのノートブックと異なり、現在(2020年04月)の取得データをもとにすると、クラスター数が**1132**個となり、クラスター数の時点で1000個を超えています(オリジナルは**988**個)。ですので、上記コードのクラスター中心を取り出した`sel_molecules`が1000以上(`diff <0`) となり、`while`条件文以降が実行されないという結果となっています。以下、コードが正しく機能するか確認のため、取り出す化合物の最大数を1200個にして実行して見ます。
# + code_folding=[7]
# Same selection procedure with a 1200-compound budget (translator's check).
sel_molecules2 = clus_center.copy()
index = 0
diff = 1200 - len(sel_molecules2)
while diff > 0 and index < len(clusters_sort):
    tmp_cluster = clusters_sort[index][1]
    if clusters_sort[index][0] > 10:
        num_compounds = 10
    else:
        # BUG FIX: use the current cluster's size instead of the stale loop
        # variable `c` left over from an earlier cell.
        num_compounds = int(0.5 * clusters_sort[index][0]) + 1
    if num_compounds > diff:
        num_compounds = diff
    sel_molecules2 += [mols[i] for i in tmp_cluster[:num_compounds]]
    index += 1
    diff = 1200 - len(sel_molecules2)
print('# Selected molecules: '+str(len(sel_molecules2)))
# -
# 指定した1200個の化合物を取り出すことができました。他の検証方法としてはクラスタリングの閾値を0.2よりも大きくしてクラスター数を1000個以下とすることがあげられると思います。
#
# **訳注ここまで**
# ### (追加情報:実行時間)
#
# トークトリアルの終わりに、データセットのサイズが変わった時に、Butinaクラスタリングの実行時間がどのように変わるかみてみましょう。
# Reuse the old data set for the runtime measurements below.
sampled_mols = mols.copy()
# より大きなデータセットを試してみることもできますが、10000以上のデータポイントの時点で非常に大きなメモリと時間がかかりはじめます(これがここで止めた理由です。)
#
# Helper function to measure the clustering runtime.
def MeasureRuntime(sampled_mols):
    """Return the seconds spent fingerprinting and clustering `sampled_mols`."""
    # perf_counter is a monotonic clock and the recommended way to measure
    # elapsed time (time.time() can jump when the system clock is adjusted).
    start_time = time.perf_counter()
    sampled_fingerprints = [rdkit_gen.GetFingerprint(m) for m, idx in sampled_mols]
    # Run the clustering on the sample (the clusters themselves are unused).
    ClusterFps(sampled_fingerprints, cutoff=0.3)
    return time.perf_counter() - start_time
dsize=[100, 500, 1000, 2000, 4000, 6000, 8000, 10000]
runtimes=[]
# Draw random samples (with replacement) of increasing size and time each run.
for s in dsize:
    tmp_set = [sampled_mols[i] for i in sorted(numpy.random.choice(range(len(sampled_mols)), size=s))]
    tmp_t= MeasureRuntime(tmp_set)
    print('Dataset size %d, time %4.2f seconds' %(s, tmp_t))
    runtimes.append(tmp_t)
plt.plot(dsize, runtimes, 'g^')
plt.title('Runtime measurement of Butina Clustering with different dataset sizes')
plt.xlabel('# Molecules in data set')
plt.ylabel('Runtime in seconds')
plt.show()
# ## クイズ
# * 化合物のクラスタリングはなぜ重要なのでしょうか?
# * 化合物セットのクラスタリングにどのアルゴリズムを使うことができますか?また、アルゴリズムの背景になる一般的な考え方はどのようなものでしょうか?
# * 他のクラスタリングアルゴリズムを知っていますか?
| 5_compound_clustering/T5_compound_clustering_JP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/huggingface/transformers/blob/master/notebooks/03-pipelines.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] pycharm={"name": "#%% md\n"} id="qUU7wy-brl_H" colab_type="text"
# ## How can I leverage State-of-the-Art Natural Language Models with only one line of code ?
# + [markdown] pycharm={"name": "#%% md\n"} id="-HLOHXuArl_L" colab_type="text"
# Newly introduced in transformers v2.3.0, **pipelines** provides a high-level, easy to use,
# API for doing inference over a variety of downstream-tasks, including:
#
# - ***Sentence Classification _(Sentiment Analysis)_***: Indicate if the overall sentence is either positive or negative, i.e. *binary classification task* or *logitic regression task*.
# - ***Token Classification (Named Entity Recognition, Part-of-Speech tagging)***: For each sub-entities _(*tokens*)_ in the input, assign them a label, i.e. classification task.
# - ***Question-Answering***: Provided a tuple (`question`, `context`) the model should find the span of text in `content` answering the `question`.
# - ***Mask-Filling***: Suggests possible word(s) to fill the masked input with respect to the provided `context`.
# - ***Summarization***: Summarizes the ``input`` article to a shorter article.
# - ***Translation***: Translates the input from a language to another language.
# - ***Feature Extraction***: Maps the input to a higher, multi-dimensional space learned from the data.
#
# Pipelines encapsulate the overall process of every NLP process:
#
# 1. *Tokenization*: Split the initial input into multiple sub-entities with ... properties (i.e. tokens).
# 2. *Inference*: Maps every tokens into a more meaningful representation.
# 3. *Decoding*: Use the above representation to generate and/or extract the final output for the underlying task.
#
# The overall API is exposed to the end-user through the `pipeline()` method with the following
# structure:
#
# ```python
# from transformers import pipeline
#
# # Using default model and tokenizer for the task
# pipeline("<task-name>")
#
# # Using a user-specified model
# pipeline("<task-name>", model="<model_name>")
#
# # Using custom model/tokenizer as str
# pipeline('<task-name>', model='<model name>', tokenizer='<tokenizer_name>')
# ```
# + pycharm={"name": "#%% code\n"} id="4maAknWNrl_N" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="467e3cc8-a069-47da-8029-86e4142c7dde"
# !pip install -q transformers
# + pycharm={"is_executing": false, "name": "#%% code \n"} id="uKaqzCh6rl_V" colab_type="code" colab={}
from __future__ import print_function
import ipywidgets as widgets
from transformers import pipeline
# + [markdown] pycharm={"name": "#%% md\n"} id="uDPZ42Uerl_b" colab_type="text"
# ## 1. Sentence Classification - Sentiment Analysis
# + pycharm={"is_executing": false, "name": "#%% code\n"} id="AMRXHQw9rl_d" colab_type="code" outputId="a7a10851-b71e-4553-9afc-04066120410d" colab={"base_uri": "https://localhost:8080/", "height": 83, "referenced_widgets": ["4bab5df43b3c46caadf48e264344ab42", "9b426c68631f4bb288e2ca79aad9f9d9", "6902104f7ec143519fb1a6ab9363d4a0", "c133fb34fe2a4aba8a6b233671af8b04", "e3f72d443a74414ca62c2b848d34b125", "<KEY>", "ad84da685cf44abb90d17d9d2e023b48", "a246f9eea2d7440cb979e728741d2e32"]}
# Build the default sentiment-analysis pipeline and classify one sentence.
nlp_sentence_classif = pipeline('sentiment-analysis')
nlp_sentence_classif('Such a nice weather outside !')
# + [markdown] pycharm={"name": "#%% md\n"} id="RY8aUJTvrl_k" colab_type="text"
# ## 2. Token Classification - Named Entity Recognition
# + pycharm={"is_executing": false, "name": "#%% code\n"} id="B3BDRX_Krl_n" colab_type="code" outputId="a6b90b11-a272-4ecb-960d-4c682551b399" colab={"base_uri": "https://localhost:8080/", "height": 185, "referenced_widgets": ["451464c936444ba5a652b46c1b4f9931", "<KEY>", "b6e1a2e57f4948a39283f1370352612c", "9d4941ebdfa64978b47232f6e5908d97", "<KEY>", "<KEY>", "405afa5bb8b840d8bc0850e02f593ce4", "78c718e3d5fa4cb892217260bea6d540"]}
# Build the default named-entity-recognition pipeline and tag a sentence.
nlp_token_class = pipeline('ner')
nlp_token_class('Hugging Face is a French company based in New-York.')
# + [markdown] id="qIvUFEVarl_s" colab_type="text"
# ## 3. Question Answering
# + pycharm={"is_executing": false, "name": "#%% code\n"} id="ND_8LzQKrl_u" colab_type="code" outputId="c59ae695-c465-4de6-fa6e-181d8f1a3992" colab={"base_uri": "https://localhost:8080/", "height": 117, "referenced_widgets": ["7d66a4534c164d2f9493fc0467abebbd", "7a15588f85b14f2b93e32b4c0442fa1b", "213567d815894ca08041f6d682ced3c9", "ee6c95e700e64d0a9ebec2c1545dd083", "3e556abf5c4a4ee69d52366fd59471b2", "876b2eba73fa46a6a941d2e3a8a975ad", "<KEY>", "67cbaa1f55d24e62ad6b022af36bca56"]}
# Build the default question-answering pipeline; it extracts the answer
# span from the given context.
nlp_qa = pipeline('question-answering')
nlp_qa(context='Hugging Face is a French company based in New-York.', question='Where is based Hugging Face ?')
# + [markdown] id="9W_CnP5Zrl_2" colab_type="text"
# ## 4. Text Generation - Mask Filling
# + pycharm={"is_executing": false, "name": "#%% code\n"} id="zpJQ2HXNrl_4" colab_type="code" outputId="3fb62e7a-25a6-4b06-ced8-51eb8aa6bf33" colab={"base_uri": "https://localhost:8080/", "height": 321, "referenced_widgets": ["58669943d3064f309436157270544c08", "3eff293c2b554d85aefaea863e29b678", "d0b9925f3dde46008bf186cf5ef7722d", "427e07ce24a442af84ddc71f9463fdff", "1eb2fa080ec44f8c8d5f6f52900277ab", "23377596349e40a89ea57c8558660073", "a35703cc8ff44e93a8c0eb413caddc40", "9df7014c99b343f3b178fa020ff56010"]}
# Build the default fill-mask pipeline and suggest words for the masked slot.
nlp_fill = pipeline('fill-mask')
nlp_fill('Hugging Face is a French company based in ' + nlp_fill.tokenizer.mask_token)
# + [markdown] id="Fbs9t1KvrzDy" colab_type="text"
# ## 5. Summarization
#
# Summarization is currently supported by `Bart` and `T5`.
# + id="8BaOgzi1u1Yc" colab_type="code" outputId="2168e437-cfba-4247-a38c-07f02f555c6e" colab={"base_uri": "https://localhost:8080/", "height": 88}
TEXT_TO_SUMMARIZE = """
New York (CNN)When <NAME> was 23 years old, she got married in Westchester County, New York.
A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband.
Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared "I do" five more times, sometimes only within two weeks of each other.
In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her "first and only" marriage.
Barrientos, now 39, is facing two criminal counts of "offering a false instrument for filing in the first degree," referring to her false statements on the
2010 marriage license application, according to court documents.
Prosecutors said the marriages were part of an immigration scam.
On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, <NAME>, who declined to comment further.
After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective
<NAME>, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002.
All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say.
Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages.
Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted.
The case was referred to the Bronx District Attorney\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\'s
Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt, Turkey, Georgia, Pakistan and Mali.
Her eighth husband, <NAME>, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force.
If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.
"""
# Build the default summarization pipeline and summarize the article above.
summarizer = pipeline('summarization')
summarizer(TEXT_TO_SUMMARIZE)
# + [markdown] id="u5JA6IJsr-G0" colab_type="text"
# ## 6. Translation
#
# Translation is currently supported by `T5` for the language mappings English-to-French (`translation_en_to_fr`), English-to-German (`translation_en_to_de`) and English-to-Romanian (`translation_en_to_ro`).
# + id="8FwayP4nwV3Z" colab_type="code" outputId="66956816-c924-4718-fe58-cabef7d51974" colab={"base_uri": "https://localhost:8080/", "height": 83, "referenced_widgets": ["57e8c36594d043c581c766b434037771", "82760185d5c14a808cbf6639b589f249", "f2a1b430594b4736879cdff4ec532098", "c81338551e60474fab9e9950fe5df294", "98563b405bd043a9a301a43909e43157", "<KEY>", "ad78042ee71a41fd989e4b4ce9d2e3c1", "40c8d2617f3d4c84b923b140456fa5da"]}
# English to French
translator = pipeline('translation_en_to_fr')
translator("HuggingFace is a French company that is based in New York City. HuggingFace's mission is to solve NLP one commit at a time")
# + colab_type="code" id="ra0-WfznwoIW" outputId="278a3d5f-cc42-40bc-a9db-c92ec5a3a2f0" colab={"base_uri": "https://localhost:8080/", "height": 83, "referenced_widgets": ["311a65b811964ebfa2c064eb348b3ce9", "5a2032c44d0e4f8cbaf512e6c29214cd", "54d1ff55e0094a4fa2b62ecdfb428328", "2e45f2d7d65246ecb8d6e666d026ac13", "e05c0ec3b49e4d4990a943d428532fb0", "39721262fc1e4456966d92fabe0f54ea", "4486f8a2efc34b9aab3864eb5ad2ba48", "d6228324f3444aa6bd1323d65ae4ff75"]}
# English to German
translator = pipeline('translation_en_to_de')
translator("The history of natural language processing (NLP) generally started in the 1950s, although work can be found from earlier periods.")
# + [markdown] id="qPUpg0M8hCtB" colab_type="text"
# ## 7. Text Generation
#
# Text generation is currently supported by GPT-2, OpenAi-GPT, TransfoXL, XLNet, CTRL and Reformer.
# + id="5pKfxTxohXuZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 120, "referenced_widgets": ["3c86415352574190b71e1fe5a15d36f1", "dd2c9dd935754cf2802233053554c21c", "8ae3be32d9c845e59fdb1c47884d48aa", "4dea0031f3554752ad5aad01fe516a60", "1efb96d931a446de92f1930b973ae846", "6a4f5aab5ba949fd860b5a35bba7db9c", "4b02b2e964ad49af9f7ce7023131ceb8", "0ae8a68c3668401da8d8a6d5ec9cac8f"]} outputId="8705f6b4-2413-4ac6-f72d-e5ecce160662"
# Build the default text-generation pipeline and complete the prompt.
text_generator = pipeline("text-generation")
text_generator("Today is a beautiful day and I will")
# + [markdown] id="Utmldmetrl_9" colab_type="text"
# ## 8. Projection - Features Extraction
# + pycharm={"is_executing": false, "name": "#%% code\n"} id="O4SjR1QQrl__" colab_type="code" outputId="2ce966d5-7a89-4488-d48f-626d1c2a8222" colab={"base_uri": "https://localhost:8080/", "height": 83, "referenced_widgets": ["fd44cf6ab17e4b768b2e1d5cb8ce5af9", "b8c0ea31578d4eaaa69251d0004fd8c6", "2015cd9c1da9467290ecd9019af231eb", "17bacdaee55b43e8977c4dfe4f7245bb", "879ef9e1a0e94f3d96ed56fb4bae64b8", "7ab70324d42647acac5020b387955caf", "31d97ecf78fa412c99e6659196d82828", "c6be5d48ec3c4c799d1445607e5f1ac6"]}
# Feature-extraction pipeline: map the sentence to per-token embedding vectors.
import numpy as np
nlp_features = pipeline('feature-extraction')
output = nlp_features('Hugging Face is a French company based in Paris')
np.array(output).shape # (Samples, Tokens, Vector Size)
# + [markdown] pycharm={"name": "#%% md\n"} id="02j8km8YrmAE" colab_type="text"
# Alright ! Now you have a nice picture of what is possible through transformers' pipelines, and there is more
# to come in future releases.
#
# In the meantime, you can try the different pipelines with your own inputs
# + pycharm={"is_executing": false, "name": "#%% code\n"} id="yFlBPQHtrmAH" colab_type="code" outputId="03cc3207-a7e8-49fd-904a-63a7a1d0eb7a" colab={"base_uri": "https://localhost:8080/", "height": 116, "referenced_widgets": ["0bd407b4975f49c3827aede14c59501c", "3f5406df699e44f5b60678c1c13500f5", "17768469581445b68246ed308ce69326", "74cbcbae5cac4f12abf080a38390f05c", "62b10ca525cc4ac68f3a006434eb7416", "211109537fbe4e60b89a238c89db1346"]}
task = widgets.Dropdown(
options=['sentiment-analysis', 'ner', 'fill_mask'],
value='ner',
description='Task:',
disabled=False
)
input = widgets.Text(
value='',
placeholder='Enter something',
description='Your input:',
disabled=False
)
def forward(_):
    """Dispatch the submitted text to the pipeline selected in `task` and print the result."""
    text = input.value
    if not text:
        return
    if task.value == 'ner':
        result = nlp_token_class(text)
    elif task.value == 'sentiment-analysis':
        result = nlp_sentence_classif(text)
    elif '<mask>' in text:
        result = nlp_fill(text)
    else:
        # fill-mask needs a mask token; append one when the user omitted it
        result = nlp_fill(text + ' <mask>')
    print(result)

input.on_submit(forward)
display(task, input)
# + pycharm={"is_executing": false, "name": "#%% Question Answering\n"} id="GCoKbBTYrmAN" colab_type="code" outputId="57c3a647-160a-4b3a-e852-e7a1daf1294a" colab={"base_uri": "https://localhost:8080/", "height": 143, "referenced_widgets": ["d79946ac16ea4855a0bbe2ca2a4d4bf5", "ab5774ac19f84ab18ddf09a63433df00", "a02164204f0f43668bc36a907e720af7", "3b12aec414b14221ad2a11dfd975faa0", "d305ba1662e3466c93ab5cca7ebf8f33", "879f7a3747ad455d810c7a29918648ee"]}
# Interactive question-answering demo; `widgets`, `display`, and the `nlp_qa`
# pipeline are all defined earlier in the notebook.
context = widgets.Textarea(
    value='Einstein is famous for the general theory of relativity',
    placeholder='Enter something',
    description='Context:',
    disabled=False
)
query = widgets.Text(
    value='Why is Einstein famous for ?',
    placeholder='Enter something',
    description='Question:',
    disabled=False
)
def forward(_):
    # Run QA only when both the context passage and the question are non-empty.
    if len(context.value) > 0 and len(query.value) > 0:
        output = nlp_qa(question=query.value, context=context.value)
        print(output)
# Re-run QA every time the question field is submitted (Enter key).
query.on_submit(forward)
display(context, query)
| methods/transformers/notebooks/03-pipelines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.11 64-bit (''myenv'': conda)'
# language: python
# name: python3811jvsc74a57bd0f5712b28ab533ddcd3a93c4a815f0ece6a0b0b411aefcf33cd4d282335a68ea6
# ---
import torch,torchvision
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
import numpy as np
import pandas as pd
import wandb
import os, json, cv2, random
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor,DefaultTrainer
from detectron2.config import get_cfg
from detectron2.structures import BoxMode
from tqdm import tqdm
import matplotlib.pyplot as plt
from detectron2.utils.visualizer import ColorMode
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
PROJECT_NAME = 'Car-Object-Detection-V5-Learning-Detectron2-V2'
# Bounding boxes per image; expects columns image, xmin, ymin, xmax, ymax.
data = pd.read_csv('./data.csv')
data
# Example box in XYXY pixel coordinates, converted to x/y/width/height.
xmin,ymin,xmax,ymax = 281,187,327,223
x = xmin
y = ymin
w = xmax - xmin
h = ymax - ymin
img = cv2.imread('./data/vid_4_1000.jpg')
# numpy image slicing is [rows, cols] == [y, x].
crop = img[y:y+h,x:x+w]
plt.imshow(crop)
cv2.imwrite('./crop.png',crop)
# NOTE: cv2.rectangle draws in place, so `img` is modified by this call too.
cv2.imwrite('./box.png',cv2.rectangle(img,(x,y),(x+w,y+h),(200,0,0),2))
plt.imshow(cv2.rectangle(img,(x,y),(x+w,y+h),(200,0,0),2))
def load_data():
    """Build a detectron2-style dataset list from the `data` DataFrame.

    Each record carries the image path and size plus a single XYXY car box
    (category 0). Images are opened once per row just to read their size.
    """
    records = []
    for image_id in tqdm(range(len(data))):
        row = data.iloc[image_id]
        path = f'./data/{row["image"]}'
        img_h, img_w = cv2.imread(path).shape[:2]
        annotation = {
            'bbox': [row['xmin'], row['ymin'], row['xmax'], row['ymax']],
            'bbox_mode': BoxMode.XYXY_ABS,
            'category_id': 0,
        }
        records.append({
            'height': img_h,
            'width': img_w,
            'image_id': image_id,
            'file_name': path,
            'annotations': [annotation],
        })
    return records
labels = ['car']
# Register the dataset and its class names with detectron2's catalogs.
DatasetCatalog.register('data',lambda : load_data())
MetadataCatalog.get('data').set(thing_classes=labels)
metadata = MetadataCatalog.get('data')
model = "COCO-Detection/faster_rcnn_R_50_C4_3x.yaml"
wandb.init(project=PROJECT_NAME,name='baseline',sync_tensorboard=True)
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(model))
# BUG FIX: DATASETS.TRAIN must be a tuple/list of dataset names;
# ('data') is just the string 'data', not a one-element tuple.
cfg.DATASETS.TRAIN = ('data',)
cfg.DATASETS.TEST = ()
cfg.SOLVER.MAX_ITER = 2500
cfg.SOLVER.BASE_LR = 0.00025
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(labels)
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
cfg.SOLVER.STEPS = []  # no LR decay steps
# BUG FIX: the worker count lives under DATALOADER; setting
# cfg.DATASETS.NUM_WORKERS creates an unused key that detectron2 ignores.
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model)
cfg.TEST.EVAL_PERIOD = 250
# trainer = DefaultTrainer(cfg)
# trainer.resume_or_load(resume=False)
# trainer.train()
# Inference with the locally trained checkpoint.
cfg.MODEL.WEIGHTS = './output/model_final.pth'
# BUG FIX: the test-time score threshold is a ROI_HEADS option;
# cfg.SOLVER.SCORE_THRESH_TEST is not a real detectron2 config key.
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.25
img = cv2.imread('./data/vid_4_1000.jpg')
predictor = DefaultPredictor(cfg)
# Visualizer expects RGB; OpenCV loads BGR, hence the channel flip.
v = Visualizer(img[:,:,::-1],metadata=metadata)
v = v.draw_instance_predictions(predictor(img)['instances'].to('cpu'))
v = v.get_image()[:,:,::-1]
plt.figure(figsize=(12,6))
plt.imshow(v)
plt.savefig('./pred.png')
plt.close()
wandb.log({'Img':wandb.Image(cv2.imread('./pred.png'))})
# COCO-style evaluation over the registered dataset.
evaluator = COCOEvaluator('data',output_dir='./output/')
val_loader = build_detection_test_loader(cfg,'data')
metrics = inference_on_dataset(predictor.model,val_loader,evaluator)
wandb.log(metrics)
# Persist artifacts. torch.save pickles arbitrary Python objects, so the
# .pt/.pth pairs below are simply duplicate pickles of the same object.
torch.save(cfg,'cfg.pt')
torch.save(cfg,'cfg.pth')
torch.save(predictor,'predictor.pt')
torch.save(predictor,'predictor.pth')
torch.save(evaluator,'evaluator.pt')
torch.save(evaluator,'evaluator.pth')
torch.save(v,'img.pt')
torch.save(v,'img.pth')
torch.save(model,'model.pt')
torch.save(model,'model.pth')
torch.save(labels,'labels.pt')
torch.save(labels,'labels.pth')
torch.save(metrics,'metrics.pt')
torch.save(metrics,'metrics.pth')
| 00.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Training with DenseNet-121
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import torch
from torch import nn
from torchvision import datasets, models, transforms
from torch import optim
import torch.nn.functional as F
import helper
# +
# Loading images
dataset = 'Cat_Dog_Data'
# Define Train Set and Test Set, and perform Transforms.
# Training data gets random augmentation (rotation, flips, color jitter);
# the test set only gets the deterministic resize + normalization
# (the mean/std values are the standard ImageNet statistics).
train_transforms = transforms.Compose([transforms.Resize((224, 224)),
                                       transforms.RandomRotation(30),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.RandomVerticalFlip(),
                                       transforms.ColorJitter(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])
                                      ])
test_transforms = transforms.Compose([transforms.Resize((224, 224)),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])
                                     ])
# Expects Cat_Dog_Data/train and Cat_Dog_Data/test in ImageFolder layout
# (one subdirectory per class).
train_data = datasets.ImageFolder(dataset + '/train', transform = train_transforms)
test_data = datasets.ImageFolder(dataset + '/test', transform = test_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size = 64, shuffle = True)
testloader = torch.utils.data.DataLoader(test_data, batch_size = 64)
# +
# View images
# Pull one batch from the training loader and display the first four images.
data_iter = iter(trainloader)
images, labels = next(data_iter)
fig, axes = plt.subplots(figsize=(10,4), ncols=4)
for i in range(4):
    ax = axes[i]
    # helper.imshow renders a tensor image; normalize=False presumably skips
    # un-normalization -- confirm against the helper module.
    helper.imshow(images[i], ax=ax, normalize=False)
# +
# Run on GPU if possible.
# BUG FIX: "cude" was a typo; torch.device("cude") raises as soon as a
# tensor is moved to it.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Define my model: a frozen ResNet-50 backbone with a new 2-class head.
model = models.resnet50(pretrained=True)
for param in model.parameters():
    param.requires_grad = False

# BUG FIX: ResNet-50's final layer is `model.fc` and has 2048 input features
# (1024 and `.classifier` apply to DenseNet-121). Assigning to
# `model.classifier` on a ResNet only creates an unused attribute, so the new
# head would never be part of the forward pass.
my_classifier = nn.Sequential(nn.Linear(2048, 512),
                              nn.ReLU(),
                              nn.Dropout(p=0.2),
                              nn.Linear(512, 256),
                              nn.ReLU(),
                              nn.Dropout(p=0.2),
                              nn.Linear(256, 2),
                              nn.LogSoftmax(dim=1))
model.fc = my_classifier

criterion = nn.NLLLoss()
# Optimize only the new head; the backbone is frozen above.
optimizer = optim.Adam(model.fc.parameters(), lr=0.005)
model.to(device)
# +
# Training Process: one pass over the training data per epoch, followed by a
# full evaluation pass (the `for ... else` runs once after the inner loop).
epochs = 1
steps = 0
running_loss = 0
train_losses, test_losses = [], [] # for graphing purposes
for e in range(epochs):
    for images, labels in trainloader:
        steps += 1
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        # BUG FIX: forward through the full model (backbone + head), not the
        # bare classifier head applied to raw images.
        log_prob = model(images)
        loss = criterion(log_prob, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        test_loss = 0
        accuracy = 0
        with torch.no_grad():
            model.eval()
            for images, labels in testloader:
                # BUG FIX: test batches must also be moved to `device`.
                images, labels = images.to(device), labels.to(device)
                # BUG FIX: was `lob_prob_test = my_classifier(images)` -- a
                # typo'd name and the bare head instead of the full model.
                log_prob_test = model(images)
                loss = criterion(log_prob_test, labels)
                test_loss += loss.item()
                prob = torch.exp(log_prob_test)
                # BUG FIX: `ps` was undefined; topk belongs on `prob`.
                top_prob, top_class = prob.topk(1, dim=1)
                equality = top_class == labels.view(top_class.shape)
                accuracy += torch.mean(equality.type(torch.FloatTensor)).item()
        model.train()
        train_losses.append(running_loss/len(trainloader))
        test_losses.append(test_loss/len(testloader))
        print("Epoch: {}/{}.. ".format(e+1, epochs),
              "Training Loss: {:.3f}.. ".format(running_loss/len(trainloader)),
              "Test Loss: {:.3f}.. ".format(test_loss/len(testloader)),
              "Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
        # BUG FIX: reset so each epoch reports its own average, not a
        # cumulative total (no effect with epochs == 1).
        running_loss = 0
# -
| intro-to-pytorch/.ipynb_checkpoints/Practice_Transfer Learning with DenseNet-121_Cat_Dog Classification-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--NAVIGATION-->
# < [错误和调试](01.06-Errors-and-Debugging.ipynb) | [目录](Index.ipynb) | [更多IPython资源](01.08-More-IPython-Resources.ipynb) >
#
# <a href="https://colab.research.google.com/github/wangyingsm/Python-Data-Science-Handbook/blob/master/notebooks/01.07-Timing-and-Profiling.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
#
# # Profiling and Timing Code
#
# # 性能测算和计时
# > In the process of developing code and creating data processing pipelines, there are often trade-offs you can make between various implementations.
# Early in developing your algorithm, it can be counterproductive to worry about such things. As Donald Knuth famously quipped, "We should forget about small efficiencies, say about 97% of the time: premature optimization is the root of all evil."
#
# 在开发阶段以及创建数据处理任务流时,经常都会出现多种可能的实现方案,每种都有各自优缺点,你需要在这之中进行权衡。在开发你的算法的早期阶段,过于关注性能很可能会影响你的实现效率。正如高德纳(译者注:<NAME>,《计算机程序设计艺术》作者,最年轻的ACM图灵奖获得者,计算机算法泰山北斗)的名言:“我们应该忘掉那些小的效率问题,在绝大部分情况下:过早的优化是所有罪恶之源。”
#
# > But once you have your code working, it can be useful to dig into its efficiency a bit.
# Sometimes it's useful to check the execution time of a given command or set of commands; other times it's useful to dig into a multiline process and determine where the bottleneck lies in some complicated series of operations.
# IPython provides access to a wide array of functionality for this kind of timing and profiling of code.
# Here we'll discuss the following IPython magic commands:
#
# > - ``%time``: Time the execution of a single statement
# > - ``%timeit``: Time repeated execution of a single statement for more accuracy
# > - ``%prun``: Run code with the profiler
# > - ``%lprun``: Run code with the line-by-line profiler
# > - ``%memit``: Measure the memory use of a single statement
# > - ``%mprun``: Run code with the line-by-line memory profiler
#
# 但是,一旦你的代码已经开始工作了,那么你就应该开始深入的考虑一下性能问题了。有时你会需要检查一行代码或者一系列代码的执行时间;有时你又需要对多个线程进行研究,找到一系列复杂操作当中的瓶颈所在。IPython提供了这类计时或性能测算的丰富功能。本章节中我们会讨论下述的IPython魔术指令:
#
# - ``%time``: 测量单条语句的执行时间
# - ``%timeit``: 对单条语句进行多次重复执行,并测量平均执行时间,以获得更加准确的结果
# - ``%prun``: 执行代码,并使用性能测算工具进行测算
# - ``%lprun``: 执行代码,并使用单条语句性能测算工具进行测算
# - ``%memit``: 测量单条语句的内存占用情况
# - ``%mprun``: 执行代码,并使用单条语句内存测算工具进行测算
#
# > The last four commands are not bundled with IPython–you'll need to get the ``line_profiler`` and ``memory_profiler`` extensions, which we will discuss in the following sections.
#
# 后面四个指令并不是随着IPython一起安装的,你需要去获取安装`line_profiler`和`memory_profiler`扩展,我们会在下面小节中介绍。
# ## Timing Code Snippets: ``%timeit`` and ``%time``
#
# ## 代码计时工具:`%timeit` 和 `%time`
#
# > We saw the ``%timeit`` line-magic and ``%%timeit`` cell-magic in the introduction to magic functions in [IPython Magic Commands](01.03-Magic-Commands.ipynb); it can be used to time the repeated execution of snippets of code:
#
# 我们在[IPython魔术命令](01.03-Magic-Commands.ipynb)中已经介绍过`%timeit`行魔术指令和`%%timeit`块魔术指令;它们用来对于代码(块)进行重复执行,并测量执行时间:
# %timeit sum(range(100))
# > Note that because this operation is so fast, ``%timeit`` automatically does a large number of repetitions.
# For slower commands, ``%timeit`` will automatically adjust and perform fewer repetitions:
#
# 这里说明一下,因为这个操作是非常快速的,因此`%timeit`自动做了很多次的重复执行。如果换成一个执行慢的操作,`%timeit`会自动调整(减少)重复次数。
# %%timeit
# Same computation as the nested accumulation loop, written as a single
# generator expression over the (i, j) grid.
total = sum(i * (-1) ** j for i in range(1000) for j in range(1000))
# > Sometimes repeating an operation is not the best option.
# For example, if we have a list that we'd like to sort, we might be misled by a repeated operation.
# Sorting a pre-sorted list is much faster than sorting an unsorted list, so the repetition will skew the result:
#
# 值得注意的是,有些情况下,重复多次执行反而会得出一个错误的测量数据。例如,我们有一个列表,希望对它进行排序,重复执行的结果会明显的误导我们。因为对一个已经排好序的列表执行排序是非常快的,因此在第一次执行完成之后,后面重复进行排序的测量数据都是错误的:
import random

# 100k uniform floats in [0, 1). Note that `%timeit L.sort()` re-sorts an
# already-sorted list after its first run, which skews the measurement.
L = [random.random() for _ in range(100000)]
# %timeit L.sort()
# > For this, the ``%time`` magic function may be a better choice. It also is a good choice for longer-running commands, when short, system-related delays are unlikely to affect the result.
# Let's time the sorting of an unsorted and a presorted list:
#
# 在这种情况下,`%time`魔术指令可能会是一个更好的选择。对于一个执行时间较长的操作来说,它也更加适用,因为与系统相关的那些持续时间很短的延迟将不会对结果产生什么影响。让我们对一个未排序和一个已排序的列表进行排序,并观察执行时间:
import random

# Fresh random list, timed once unsorted and once pre-sorted with %time,
# which runs each statement a single time (unlike %timeit).
L = [random.random() for _ in range(100000)]
print("sorting an unsorted list:")
# %time L.sort()
print("sorting an already sorted list:")
# %time L.sort()
# > Notice how much faster the presorted list is to sort, but notice also how much longer the timing takes with ``%time`` versus ``%timeit``, even for the presorted list!
# This is a result of the fact that ``%timeit`` does some clever things under the hood to prevent system calls from interfering with the timing.
# For example, it prevents cleanup of unused Python objects (known as *garbage collection*) which might otherwise affect the timing.
# For this reason, ``%timeit`` results are usually noticeably faster than ``%time`` results.
#
# 你应该首先注意到的是对于未排序的列表和对于已排序的列表进行排序的执行时间差别(译者注:在我的笔记本上,接近5倍的时间)。而且你还需要了解`%time`和`%timeit`执行的区别,即使都是使用已经排好序的列表的情况下。这是因为`%timeit`会使用一种额外的机制来防止系统调用影响计时的结果。例如,它会阻止Python解析器清理不再使用的对象(也被称为*垃圾收集*),否则垃圾收集会影响计时的结果。因此,我们认为通常情况下`%timeit`的结果都会比`%time`的结果要快。
#
# > For ``%time`` as with ``%timeit``, using the double-percent-sign cell magic syntax allows timing of multiline scripts:
#
# 对于`%time`和`%timeit`指令,使用两个百分号可以对一段代码进行计时:
# %%time
total = 0
# 1000x1000 iterations purely as a workload for %%time; for each i the
# alternating (-1)**j terms over an even range cancel, so total ends at 0.
for i in range(1000):
    for j in range(1000):
        total += i * (-1) ** j
# > For more information on ``%time`` and ``%timeit``, as well as their available options, use the IPython help functionality (i.e., type ``%time?`` at the IPython prompt).
#
# 更多关于`%time`和`%timeit`的资料,包括它们的选项,可以使用IPython的帮助功能(如在IPython提示符下键入`%time?`)进行查看。
# ## Profiling Full Scripts: ``%prun``
#
# ## 脚本代码块性能测算:`%prun`
#
# > A program is made of many single statements, and sometimes timing these statements in context is more important than timing them on their own.
# Python contains a built-in code profiler (which you can read about in the Python documentation), but IPython offers a much more convenient way to use this profiler, in the form of the magic function ``%prun``.
#
# 一个程序都是有很多条代码组成的,有的时候对整段代码块性能进行测算比对每条代码进行计时要更加重要。Python自带一个內建的代码性能测算工具(你可以在Python文档中找到它),而IPython提供了一个更加简便的方式来使用这个测算工具,使用`%prun`魔术指令。
#
# > By way of example, we'll define a simple function that does some calculations:
#
# 我们定义一个简单的函数作为例子:
def sum_of_lists(N):
    """Sum five throwaway lists of ``j ^ (j >> i)`` for ``j`` in ``range(N)``.

    Deliberately allocation-heavy so that profilers (%prun, %lprun) have
    something interesting to report.
    """
    grand_total = 0
    for shift in range(5):
        values = [j ^ (j >> shift) for j in range(N)]
        grand_total += sum(values)
    return grand_total
# > Now we can call ``%prun`` with a function call to see the profiled results:
#
# 然后我们就可以使用`%prun`来调用这个函数,并查看测算的结果:
# %prun sum_of_lists(1000000)
# > In the notebook, the output is printed to the pager, and looks something like this:
#
# ```
# 14 function calls in 0.714 seconds
#
# Ordered by: internal time
#
# ncalls tottime percall cumtime percall filename:lineno(function)
# 5 0.599 0.120 0.599 0.120 <ipython-input-19>:4(<listcomp>)
# 5 0.064 0.013 0.064 0.013 {built-in method sum}
# 1 0.036 0.036 0.699 0.699 <ipython-input-19>:1(sum_of_lists)
# 1 0.014 0.014 0.714 0.714 <string>:1(<module>)
# 1 0.000 0.000 0.714 0.714 {built-in method exec}
# ```
#
# 在译者的笔记本上,这个指令的结果输出如下:
#
# ```
# 14 function calls in 0.500 seconds
#
# Ordered by: internal time
#
# ncalls tottime percall cumtime percall filename:lineno(function)
# 5 0.440 0.088 0.440 0.088 <ipython-input-8-f105717832a2>:4(<listcomp>)
# 5 0.027 0.005 0.027 0.005 {built-in method builtins.sum}
# 1 0.025 0.025 0.492 0.492 <ipython-input-8-f105717832a2>:1(sum_of_lists)
# 1 0.008 0.008 0.500 0.500 <string>:1(<module>)
# 1 0.000 0.000 0.500 0.500 {built-in method builtins.exec}
# 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
# ```
#
# > The result is a table that indicates, in order of total time on each function call, where the execution is spending the most time. In this case, the bulk of execution time is in the list comprehension inside ``sum_of_lists``.
# From here, we could start thinking about what changes we might make to improve the performance in the algorithm.
#
# 这个结果的表格,使用的是每个函数调用执行总时间进行排序(从大到小)。从上面的结果可以看出,绝大部分的执行时间都发生在函数`sum_of_lists`中的列表解析之上。然后,我们就可以知道如果需要优化这段代码的性能,可以从哪个方面开始着手了。
#
# > For more information on ``%prun``, as well as its available options, use the IPython help functionality (i.e., type ``%prun?`` at the IPython prompt).
#
# 更多关于`%prun`的资料,包括它的选项,可以使用IPython的帮助功能(在IPython提示符下键入`%prun?`)进行查看。
# ## Line-By-Line Profiling with ``%lprun``
#
# ## 使用`%lprun`对单条代码执行性能进行测算
#
# > The function-by-function profiling of ``%prun`` is useful, but sometimes it's more convenient to have a line-by-line profile report.
# This is not built into Python or IPython, but there is a ``line_profiler`` package available for installation that can do this.
# Start by using Python's packaging tool, ``pip``, to install the ``line_profiler`` package:
#
# 刚才介绍的对于整个函数进行测算的`%prun`很有用,但是有时能对单条代码进行性能测算会更加方便我们调优。这个功能不是内置在Python或者IPython里的,你需要安装一个第三方包`line_profiler`来完成这项任务。使用Python包管理工具`pip`可以很容易地安装`line_profiler`包:
#
# ```
# $ pip install line_profiler
# ```
#
# > Next, you can use IPython to load the ``line_profiler`` IPython extension, offered as part of this package:
#
# 然后,你可以使用IPython来载入`line_profiler`扩展模块:
# %load_ext line_profiler
# > Now the ``%lprun`` command will do a line-by-line profiling of any function–in this case, we need to tell it explicitly which functions we're interested in profiling:
#
# 然后`%lprun`魔术指令就可以对任何函数进行单行的性能测算了,我们需要明确指出要对哪个函数进行性能测算:
# %lprun -f sum_of_lists sum_of_lists(5000)
# > As before, the notebook sends the result to the pager, but it looks something like this:
#
# ```
# Timer unit: 1e-06 s
#
# Total time: 0.009382 s
# File: <ipython-input-19-fa2be176cc3e>
# Function: sum_of_lists at line 1
#
# Line # Hits Time Per Hit % Time Line Contents
# ==============================================================
# 1 def sum_of_lists(N):
# 2 1 2 2.0 0.0 total = 0
# 3 6 8 1.3 0.1 for i in range(5):
# 4 5 9001 1800.2 95.9 L = [j ^ (j >> i) for j in range(N)]
# 5 5 371 74.2 4.0 total += sum(L)
# 6 1 0 0.0 0.0 return total
# ```
#
# 像刚才一样,notebook会在一个弹出页面中展示结果,在译者的笔记本上执行效果如下:
#
# ```
# Timer unit: 1e-06 s
#
# Total time: 0.007372 s
# File: <ipython-input-7-f105717832a2>
# Function: sum_of_lists at line 1
#
# Line # Hits Time Per Hit % Time Line Contents
# ==============================================================
# 1 def sum_of_lists(N):
# 2 1 2.0 2.0 0.0 total = 0
# 3 6 9.0 1.5 0.1 for i in range(5):
# 4 5 7114.0 1422.8 96.5 L = [j ^ (j >> i) for j in range(N)]
# 5 5 246.0 49.2 3.3 total += sum(L)
# 6 1 1.0 1.0 0.0 return total
# ```
#
# > The information at the top gives us the key to reading the results: the time is reported in microseconds and we can see where the program is spending the most time.
# At this point, we may be able to use this information to modify aspects of the script and make it perform better for our desired use case.
#
# 结果第一行给我们提供了下面表中的时间单位:微秒,我们可以从中看到函数中哪一行执行花了最多时间。然后,我们就可以根据这些信息对我们的代码进行调优,以达到我们需要的性能指标。
#
# > For more information on ``%lprun``, as well as its available options, use the IPython help functionality (i.e., type ``%lprun?`` at the IPython prompt).
#
# 更多关于`%lprun`的资料,包括它的选项,可以使用IPython的帮助功能(在IPython提示符下键入`%lprun?`)进行查看。
# ## Profiling Memory Use: ``%memit`` and ``%mprun``
#
# ## 测算内存使用:`%memit` 和 `%mprun`
#
# > Another aspect of profiling is the amount of memory an operation uses.
# This can be evaluated with another IPython extension, the ``memory_profiler``.
# As with the ``line_profiler``, we start by ``pip``-installing the extension:
#
# 对于性能测算来说,还有一个方面需要我们注意的是操作使用的内存大小。这需要用到另外一个IPython的扩展模块`memory_profiler`。就像`line_profiler`那样,我们可以使用`pip`安装这个扩展模块:
#
# ```
# $ pip install memory_profiler
# ```
#
# > Then we can use IPython to load the extension:
#
# 然后将扩展模块加载到IPython中:
# %load_ext memory_profiler
# > The memory profiler extension contains two useful magic functions: the ``%memit`` magic (which offers a memory-measuring equivalent of ``%timeit``) and the ``%mprun`` function (which offers a memory-measuring equivalent of ``%lprun``).
# The ``%memit`` function can be used rather simply:
#
# 内存性能测算工具`memory_profiler`包括两个有用的魔术指令:`%memit`(提供了与`%timeit`等同的内存测算功能)和`%mprun`(提供了与`%lprun`等同的内存测算功能)。`%memit`的用法非常简单:
# %memit sum_of_lists(1000000)
# > We see that this function uses about 100 MB of memory.
#
# 我们可以看到这个函数使用了约100MB的内存。
#
# > For a line-by-line description of memory use, we can use the ``%mprun`` magic.
# Unfortunately, this magic works only for functions defined in separate modules rather than the notebook itself, so we'll start by using the ``%%file`` magic to create a simple module called ``mprun_demo.py``, which contains our ``sum_of_lists`` function, with one addition that will make our memory profiling results more clear:
#
# 对于单行代码的内存使用测算,我们可以使用`%mprun`魔术指令。不幸的是,这个魔术指令只能应用在独立模块里面的函数上,而不能应用在notebook本身。因此我们需要使用`%%file`魔术指令来创建一个简单的模块,模块的名称为`mprun_demo.py`,该模块定义了前面的`sum_of_lists`函数,在这个例子中,我们加了一行代码,来让我们的内存测算结果更加的明显:
# %%file mprun_demo.py
def sum_of_lists(N):
    """Memory-profiling demo: sum five lists of ``j ^ (j >> i)``, freeing each.

    The explicit ``del`` drops the list reference so the allocate/release
    cycle shows up clearly in %mprun's per-line report.
    """
    acc = 0
    for shift in range(5):
        chunk = [j ^ (j >> shift) for j in range(N)]
        acc += sum(chunk)
        del chunk  # release the list's memory before the next iteration
    return acc
# > We can now import the new version of this function and run the memory line profiler:
#
# 下面我们可以载入这个模块,然后使用内存测算工具对改写后的函数进行单条代码的内存性能测算:
from mprun_demo import sum_of_lists
# %mprun -f sum_of_lists sum_of_lists(1000000)
# > The result, printed to the pager, gives us a summary of the memory use of the function, and looks something like this:
#
# 在弹出页面中展示的结果给我们大概描述了函数中每行代码内存的使用情况,在译者笔记本上结果如下:
#
# ```
# Filename: ./mprun_demo.py
#
# Line # Mem usage Increment Line Contents
# ================================================
# 4 71.9 MiB 0.0 MiB L = [j ^ (j >> i) for j in range(N)]
#
#
# Filename: ./mprun_demo.py
#
# Line # Mem usage Increment Line Contents
# ================================================
# 1 39.0 MiB 0.0 MiB def sum_of_lists(N):
# 2 39.0 MiB 0.0 MiB total = 0
# 3 46.5 MiB 7.5 MiB for i in range(5):
# 4 71.9 MiB 25.4 MiB L = [j ^ (j >> i) for j in range(N)]
# 5 71.9 MiB 0.0 MiB total += sum(L)
# 6 46.5 MiB -25.4 MiB del L # remove reference to L
# 7 39.1 MiB -7.4 MiB return total
# ```
#
# > Here the ``Increment`` column tells us how much each line affects the total memory budget: observe that when we create and delete the list ``L``, we are adding about 25 MB of memory usage.
# This is on top of the background memory usage from the Python interpreter itself.
#
# 这里的`Increment`列告诉我们函数的每一行怎样影响到了总内存的使用量:观察一下当我们使用列表解析创建`L`和使用`del`删除`L`时发生的情况,这里会有大约25MB内存的使用变化。这是在Python解析器本身占用的基本内存基础上我们函数使用到的内存用量。
#
# > For more information on ``%memit`` and ``%mprun``, as well as their available options, use the IPython help functionality (i.e., type ``%memit?`` at the IPython prompt).
#
# 更多关于`%memit`和`mprun`的资料,包括它们的选项,可以使用IPython的帮助功能(在IPython提示符下键入`%memit?`或`%mprun?`)进行查看。
# <!--NAVIGATION-->
# < [错误和调试](01.06-Errors-and-Debugging.ipynb) | [目录](Index.ipynb) | [更多IPython资源](01.08-More-IPython-Resources.ipynb) >
#
# <a href="https://colab.research.google.com/github/wangyingsm/Python-Data-Science-Handbook/blob/master/notebooks/01.07-Timing-and-Profiling.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
#
| Python-Data-Science-Handbook/notebooks/01.07-Timing-and-Profiling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="YSE5OTApBGX5"
# ## Cross-validation
# It's a method to evaluate model instead of tuning the model (grid search).
# + [markdown] id="h9SdKLWpBGX6"
# ### Example based on k-NN classifier with fruit dataset (2 features)
# + id="MEl1kK_4BGX6" outputId="d81b4bf8-0e38-4860-8c63-412d95fe443a"
from sklearn.model_selection import cross_val_score

# k-NN classifier evaluated with default cross-validation on the 2-feature
# fruit dataset (X_fruits_2d / y_fruits_2d are defined earlier in the notebook).
clf = KNeighborsClassifier(n_neighbors = 5)
# BUG FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in
# pandas 1.0; to_numpy() is the supported replacement returning the same array.
X = X_fruits_2d.to_numpy()
y = y_fruits_2d.to_numpy()
cv_scores = cross_val_score(clf, X, y)
print('Cross-validation scores (3-fold):', cv_scores)
print('Mean cross-validation score (3-fold): {:.3f}'
     .format(np.mean(cv_scores)))
# + [markdown] id="GVMoWE_CBGX6"
# ### A note on performing cross-validation for more advanced scenarios.
#
# In some cases (e.g. when feature values have very different ranges), we've seen the need to scale or normalize the training and test sets before use with a classifier. The proper way to do cross-validation when you need to scale the data is *not* to scale the entire dataset with a single transform, since this will indirectly leak information into the training data about the whole dataset, including the test data (see the lecture on data leakage later in the course). **Instead, scaling/normalizing must be computed and applied for each cross-validation fold separately.** To do this, the easiest way in scikit-learn is to use *pipelines*. While these are beyond the scope of this course, further information is available in the scikit-learn documentation here:
#
# http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html
#
# or the Pipeline section in the recommended textbook: Introduction to Machine Learning with Python by <NAME> and <NAME> (O'Reilly Media).
# + [markdown] id="y29FFDXxBGX7"
# ## Validation curve example
# + id="uXaU8o3sBGX7"
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
# Sweep SVC's gamma over 4 log-spaced values from 1e-3 to 1e3, scoring each
# with 3-fold cross-validation; X and y come from the previous section.
param_range = np.logspace(-3, 3, 4)
train_scores, test_scores = validation_curve(SVC(), X, y,
                                            param_name='gamma',
                                            param_range=param_range, cv=3)
# + id="AgNq28j-BGX7" outputId="ee572c74-a3cc-410f-8376-386950172406"
# Per-fold training scores: one row per gamma value, one column per CV fold.
print(train_scores)
# + id="JFbfnLorBGX8" outputId="f1087e75-f106-4ce0-afa1-b6bf4963da70"
# Per-fold validation scores, same layout as train_scores.
print(test_scores)
# + id="t-wxX8rSBGX8" outputId="8c1ee1ee-bcd7-43b8-9aa5-fa17840e6266"
# This code based on scikit-learn validation_plot example
# See: http://scikit-learn.org/stable/auto_examples/model_selection/plot_validation_curve.html
# Plot mean train/CV score vs. gamma with +/- one std shaded bands.
plt.figure()
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title('Validation Curve with SVM')
# BUG FIX: '\g' is an invalid escape sequence (DeprecationWarning today, a
# SyntaxError in future Python); a raw string yields the intended LaTeX label.
plt.xlabel(r'$\gamma$ (gamma)')
plt.ylabel('Score')
plt.ylim(0.0, 1.1)
lw = 2
# Log-scale x axis because gamma spans several orders of magnitude.
plt.semilogx(param_range, train_scores_mean, label='Training score',
            color='darkorange', lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
                train_scores_mean + train_scores_std, alpha=0.2,
                color='darkorange', lw=lw)
plt.semilogx(param_range, test_scores_mean, label='Cross-validation score',
            color='navy', lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
                test_scores_mean + test_scores_std, alpha=0.2,
                color='navy', lw=lw)
plt.legend(loc='best')
plt.show()
| Cross_Validation/Cross_Validation&Validation_Curve.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quickly Detecting Anomalies in Web site traffic
#
# ---
#
# ***Final Project, FAES BIOF 309 Introduction to Python, Fall 2018***
#
# **<NAME>, <EMAIL>**
#
# ## Overview
#
# The purpose of this project is to provide a faster way to visualize
# data than (repeatedly) importing data into Excel and creating a
# chart. This will save me time!
# ## Background
#
# One of my work tasks is to provide statistics about Web site use.
# Over the years, my institute has mandated various Web analytics software
# including: awstats, WebTrends and Google Analytics.
#
# Each piece of software analyzes and displays results differently.
# So switching to different Web analytics software usually creates the
# appearance of a massive increase or decrease in traffic.
#
# I need a way to tell whether there has been a legitimate unexpected
# change in Web site traffic. Unique IP addresses per day turns out
# to be a reasonably reliable indicator.
#
# Anomalies can also happen if the Web site is the subject of a denial
# of service attack or if an event of national or international interest
# is related to content on the Web site. Having a way to quickly visualize
# the number of unique IP addresses daily over time will help me quickly
# spot anomalies.
# ## Demo
#
# My data for this project contains the number of unique IP addresses
# accessing a Web site each day. A few lines of data follows:
# ```
# 06/01/2018|5565|515120|515120|515120
# 06/02/2018|4801|518657|518657|518657
# 06/03/2018|4069|451881|451881|451881
# 06/04/2018|4859|493762|493762|493762
# 06/05/2018|4816|514587|514587|514587
# ```
#
# - We are interested in the first two columns. We want to ignore the rest of the columns.
#
# - The first column contains dates, but they are in the form MM/DD/YYYY rather than YYYY-MM-DD.
#
# - There are no column headers in this data.
#
# - The columns are separated by the "pipe" character rather than commas, spaces or tabs.
# Import the necessary packages
import os
# Decide later whether urllib.request package or requests package is preferred.
# For now, go with requests package and print the version to satisfy my curiosity.
import requests
# A bare expression like this displays its value when run as a notebook cell.
requests.__version__
# List the version of the numpy package, just to satisfy my curiosity
import numpy as np
np.__version__
# Thanks to Martin for requirements.txt that imports pandas into Binder!
import pandas as pd
pd.__version__
# In case of error in Binder, check requirements.txt for missing matplotlib line.
import matplotlib.pyplot as plt
# May not need either of the following. Experiment and test...
# %matplotlib inline
# # %matplotlib notebook
from datetime import datetime
# Where to find the data files.
# data_dir: a local folder, fine when developing/running locally.
# url_path: a remote copy of the same files, needed because Google
# Colaboratory and Binder cannot see files in a local directory.
data_dir = "./data/raw/"
url_path = 'https://raw.githubusercontent.com/BIOF309/group-project-marie_gallagher_final_project/master/notebooks/data/raw/'

# Select the data source: "data_dir" when working locally,
# "url_path" when running under Colaboratory or Binder.
use_data_from = "url_path"

# Guard against a typo in the value of use_data_from.
if use_data_from not in ("url_path", "data_dir"):
    raise ValueError("use_data_from should be either 'url_path' or 'data_dir'")

# Optional: show the available raw data files (local mode only).
if use_data_from == "data_dir":
    print(os.listdir(data_dir))

# Pick the data file to plot and build its full local path or URL.
file_name = "log_daily_ip_201806a.txt"
if use_data_from == "data_dir":
    daily_ip_file = data_dir + file_name
else:
    daily_ip_file = url_path + file_name
# Optional: display the first few lines of the daily_ip_file.
# Fixes vs the original version:
# - no explicit close() inside the 'with' block (the context manager
#   already closes the file; the old call was redundant),
# - the file handle no longer shadows the builtin name 'file',
# - manual while-loops with counters replaced by idiomatic iteration.
# Output is unchanged: the first lines_to_display lines are printed.
lines_to_display = 3
if use_data_from == "data_dir":
    with open(daily_ip_file) as data_file:
        for _ in range(lines_to_display):
            print(data_file.readline())
elif use_data_from == "url_path":
    # Thanks to https://stackoverflow.com/questions/1393324/in-python-given-a-url-to-a-text-file-what-is-the-simplest-way-to-read-the-cont
    response = requests.get(daily_ip_file)
    raw_data_lines = response.text.split('\n')
    for line in raw_data_lines[:lines_to_display]:
        print(line)
# Read the data file into a pandas dataframe, df.
# The columns separator is '|'.
# There is no header row.
# We only need the first two columns.
# pd.read_csv accepts either a local path or a URL, so this works for
# both values of use_data_from.
df = pd.read_csv(daily_ip_file, sep="|", header=None, usecols=[0,1])
# Optional: display the first few rows of df. There should now be only two columns, not five.
print(df.head(3))
# Give the dataframe's columns descriptive names.
# Column 0 holds date strings (mm/dd/yyyy), column 1 the daily count of
# unique IP addresses.
df.columns = ["dates", "unique_ips"]
# Optional: make sure the column names have changed. Display information about the columns.
print(df.info())
# Optional: what is the type of the df.dates column, first row?
print(type(df.dates[0]))
# +
# Convert the "object" data in the df.dates column to date/time objects so they will be treated as such.
# Thanks to <NAME> for this function adapted from
# https://github.com/burkesquires/python_biologist/blob/master/05_python_data_analysis
# Define a function to convert a string (m/d/yyyy) to a date
def string_to_date(date_text):
    """Parse a date string in mm/dd/yyyy format into a datetime object.

    Example: string_to_date("12/25/2018") -> datetime(2018, 12, 25).
    Raises ValueError if date_text does not match the expected format.
    """
    fmt = "%m/%d/%Y"
    return datetime.strptime(date_text, fmt)
# Run the string_to_date function on every date string and overwrite the df.dates column
df.dates = df.dates.apply(string_to_date)
# -
# Optional: what is the type of the df.dates column, first row, now? Before it was str.
print(type(df.dates[0]))
# Optional: Display information about the columns. Now df.dates is datetime64 instead of object.
print(df.info())
# Optional: what does the data in the df.dates column look like now? yyyy-mm-dd instead of mm/dd/yyyy!
print(df.dates.head(3))
# +
# Caution:
# Don't do this: df = df.set_index('dates')
# Don't set the index to 'dates'. datetime64 is not iterable. You may regret this later.
# Experiment in the future.
# -
# Display a quick plot.
# The days (not the df index 0, 1, 2...) should display on the x axis
# The unique_ips will be used on the y axis.
# figsize changes the size to 12 (x) by 3 (y).
# title adds the title to the plot.
# The trailing semicolon suppresses the textual repr of the Axes object
# in notebook output.
df.plot.bar(x='dates',figsize=(12,3),title=daily_ip_file,legend=False);
# What was the median number (half above, half below) of unique IP addresses accessed in a day?
print(df.unique_ips.median())
# What was the lowest number of unique IP addresses in a day?
print(df.unique_ips.min())
# What day is associated with the lowest number of unique IP addresses?
# The day(s) when df.unique_ips.min() == df.unique_ips
# (boolean-mask indexing; can return several rows when there is a tie)
print(df.dates[df.unique_ips==df.unique_ips.min()])
# What was the highest number of unique IP addresses in a day?
print(df.unique_ips.max())
# What day is associated with the highest number of IP addresses?
# The day(s) when df.unique_ips.max() == df.unique_ips
print(df.dates[df.unique_ips==df.unique_ips.max()])
# ## Future
#
# 1. I will incorporate this project into my work immediately. (Until now, I imported the data into Excel and made a graph.)
#
# 2. Break my program into functions and restructure my project files
#
# 3. Scrub IP addresses from raw log files and extract data from them
#
# 4. List the most accessed URLs on days with high IP address counts
#
# 5. List the top referrers on days with high IP address counts
# ## Acknowledgments and Thanks!
#
# BIOF 309 Instructors
# * <NAME>
# * <NAME>
# * <NAME>
#
# BIOF 309 Class
# * Helpful questions
#
# NIAID Scientific Programming Seminars through CIT
# * <NAME>
| notebooks/01_plot_unique_ips.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:immune-evolution]
# language: python
# name: conda-env-immune-evolution-py
# ---
# # Imports
# +
# Python standard library
from collections import defaultdict
import glob
import os
# Third-party libraries
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import screed
# Local python files
from path_constants import (
FIGURE_FOLDER,
DATA_FOLDER,
ORPHEUM_BENCHMARKING_FOLDER,
MAMMALIA_BUSCO_SUBSET_FOLDER,
)
from nb_utils import describe
from plot_constants import (
PROTEIN_COLOR,
DAYHOFF_COLOR,
PEPTIDE_MOLTYPE_PALETTE,
PEPTIDE_MOLTYPE_ORDER,
PEPTIDE_ALPHABET_KSIZES,
PEPTIDE_ALPHABET_PALETTES,
)
from mya_utils import (
clean_common_names_of_species,
BUSCO_MAMMALIA_SPECIES,
MYA_COLOR_KWARGS,
MYA_ORDER,
)
# -
# ## Get number of protein sequences per species
# +
# Count protein sequences (FASTA records) per species and store the
# count in a new "proteome_size" column.
proteome_folder = MAMMALIA_BUSCO_SUBSET_FOLDER
busco_mammalia_species = BUSCO_MAMMALIA_SPECIES.copy()
col = "proteome_size"
busco_mammalia_species[col] = 0
for fasta in glob.glob(os.path.join(proteome_folder, "*.fasta")):
    # The species key is the token after the last "__" in the filename,
    # with the extension stripped (filenames look like "...__<species>.fasta").
    species_lower = os.path.basename(fasta).split("__")[-1].split(".")[0]
    # screed.open iterates FASTA records, so summing 1s counts sequences.
    proteome_size = sum(1 for _ in screed.open(fasta))
    busco_mammalia_species.loc[species_lower, col] = proteome_size
# Sort species by "mya" (presumably divergence time in million years
# ago -- TODO confirm against mya_utils).
busco_mammalia_species = busco_mammalia_species.sort_values("mya")
describe(busco_mammalia_species)
# -
# ## Reindex BUSCO mammalia series to use common name
# The ROC AUC table identifies species by common name, so reindex on it
# to make the joins below work.
busco_mammalia_common = busco_mammalia_species.reset_index().set_index("common_name")
# ## Read ROC AUC csv
# +
csv = os.path.join(
    ORPHEUM_BENCHMARKING_FOLDER,
    "busco_mammalia_coding_scores_roc_auc_score.csv",
)
combined_roc_auc_score = pd.read_csv(csv)
# combined_roc_auc_score = combined_roc_auc_score.join(distance_from_human_mya, on='species')
# Normalize species names so they match busco_mammalia_common's index.
combined_roc_auc_score.species = combined_roc_auc_score.species.map(
    clean_common_names_of_species
)
# Everything decreases after k > 21
combined_roc_auc_score = combined_roc_auc_score.query("ksize <= 21")
# Attach the per-species metadata (proteome_size, mya, ...) to each row.
combined_roc_auc_score = combined_roc_auc_score.join(
    busco_mammalia_common, on="species"
)
combined_roc_auc_score.head()
# -
# Best ROC AUC over all ksizes for each (species, alphabet) pair.
max_roc_scores = combined_roc_auc_score.groupby(
    ['species', 'alphabet']).score_value.max()
max_roc_scores.name = 'max_roc_auc'
max_roc_scores = max_roc_scores.reset_index()
max_roc_scores = max_roc_scores.join(busco_mammalia_common, on='species')
describe(max_roc_scores)
max_roc_scores.query('max_roc_auc > 0.6')
# # Fig 2C,D
# ## Plot ROC AUC showing argmax for each divergence time -- both protein and dayhoff
# +
n_lines_plotted = defaultdict(int)

def plot_argmax(x, y, *args, **kwargs):
    """Drop a dashed vertical marker at the x position where y peaks.

    Markers that land on the same x are shifted right by 1/20 per
    marker (tracked in the module-level n_lines_plotted counter) so
    overlapping lines stay distinguishable. Extra *args/**kwargs are
    forwarded to plt.vlines.
    """
    global n_lines_plotted
    peak_x = x[y.idxmax()]
    shifted_x = peak_x + n_lines_plotted[peak_x] / 20
    plt.vlines(shifted_x, 0, y.max(), *args,
               linestyle='--', linewidth=1, **kwargs)
    n_lines_plotted[peak_x] += 1
def pointplot_like(x, y, *args, **kwargs):
    """Like a pointplot, but x is true values not categorical.

    Draws the per-x mean of y as a connected 'o-' line, plus a vertical
    mean +/- std bar at each x position. Extra *args/**kwargs are
    forwarded to plt.plot.

    Fix: the original paired x.unique() (appearance order) with
    y.groupby(x).mean() (sorted by group key), which misaligns points
    and error bars whenever x is not already sorted. The x grid is now
    taken from the groupby index itself, so means/stds always line up
    with their true x.
    """
    means = y.groupby(x).mean()
    stds = y.groupby(x).std()
    grid = means.index.to_numpy()
    plt.plot(grid, means.to_numpy(), 'o-', *args, **kwargs)
    for x_pos, y_mean, y_sd in zip(grid, means.to_numpy(), stds.to_numpy()):
        plt.plot([x_pos, x_pos], [y_mean - y_sd, y_mean + y_sd], *args, **kwargs)
# Keep only species with reasonably complete proteomes.
data = combined_roc_auc_score.query('proteome_size > 2500')
# One facet per alphabet; one hue level per divergence time (mya),
# colored with the viridis palette.
g = sns.FacetGrid(
    data=data,
    height=2.5, aspect=1.5,
    col='alphabet',
    sharex=False,
    hue='mya',
    palette='viridis',
    hue_order=sorted(data['mya'].unique())
)
# g.map(sns.scatterplot, 'ksize', 'score_value')
g.map(pointplot_like, 'ksize', 'score_value')
g.add_legend()
# Dashed vertical line at the best-scoring ksize for each hue level.
g.map(plot_argmax, 'ksize', 'score_value')
g.set(ylabel='ROC AUC',
      ylim=(0, 1),
      # xticks=[5, 6, 7, 8, 9, 10, 15, 20]
     )
# Custom x ticks per facet: the protein and dayhoff alphabets use
# different ksize ranges, so each facet gets its own tick set.
for ax in g.axes.flat:
    if 'protein' in ax.get_title():
        ax.set(xticks=[5, 6, 7, 8, 9, 10, 15, 20])
    else:
        ax.set(xticks=[5, 10, 15, 16, 17, 18, 19, 20])
g.set_titles('{col_name}')
pdf = os.path.join(
    FIGURE_FOLDER,
    'leaftea_translate__human_mouse__roc_auc__protein_dayhoff.pdf')
g.savefig(pdf)
# -
# Slice out the two highlighted parameter choices: dayhoff k=17 and protein k=8.
combined_roc_auc_score_dayhoff17_protein8 = combined_roc_auc_score.query('(alphabet == "dayhoff" and ksize == 17) or (alphabet == "protein" and ksize == 8)')
describe(combined_roc_auc_score_dayhoff17_protein8)
combined_roc_auc_score_dayhoff17_protein8.query('proteome_size > 2000').groupby(['alphabet', 'mya']).score_value.mean()
| notebooks/figure_2C-D.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Data preparation for modeling
# ## Environment Set-Up
#
# ### Load relevant Python Packages
# +
# Importing the most important modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from matplotlib import pyplot
# For data reading and data export
import os, glob
from pathlib import Path
from pandas import read_csv
# For data cleaning
from datetime import datetime
# For data exploration
# For data visualisation - WordCloud
from wordcloud import WordCloud
# -
# ## Loading Datasets & Preparation for Modeling
#data has been saved using a .pkl file.
# NOTE: unpickling executes arbitrary code on load -- only open pickle
# files you created yourself.
path = './data/df_ride.pkl'
df = pd.read_pickle(path)
df.head(2)
# Never a bad idea - Quickly check the number of rows (observations) and columns (features) in the df
df.shape
# Let us check some basic information for the columns (for example the data types)
df.info()
# _________
# ## Data Cleaning / Feature Engineering
# The following is pretty much the same as in [CyPer_EDA](CyPer_EDA.ipynb) with just small but important differences for the following predictive modeling.
#To work with the `When` column and the data in it we have to transform the data type.
#We do this with `pd.to_datetime`, which converts the column into *datetime*.
df.When = pd.to_datetime(df.When)
# Convert timestamp ('When') to datetime object
# Extract additional features from timestamp column
# NOTE: 'When' is already datetime64 after the conversion above, so the
# pd.to_datetime in the 'timestamp' lambda is a harmless no-op.
df = df.assign(
    timestamp = lambda x: pd.to_datetime(x['When']),
    date = lambda x: x['When'].dt.date,
    year = lambda x: x['When'].dt.year,
    month = lambda x: x['When'].dt.month,
    day = lambda x: x['When'].dt.day,
    dayofyear = lambda x: x['When'].dt.dayofyear,
    hour = lambda x: x['When'].dt.hour,
    minute = lambda x: x['When'].dt.minute,
    second = lambda x: x['When'].dt.second,
)
# Rename columns in a more pythonic way
# (snake_case with units kept in the name, e.g. 'Dist km' -> 'dist_km';
# note that 'filename' is repurposed as the cyclist identifier)
df = df.rename(columns={
    'Type': 'type',
    'Gear': 'gear',
    'Name': 'name',
    'Dist km': 'dist_km',
    'Elv m': 'elv_m',
    'Elapsed Time': 'elapsed_time',
    'Moving Time': 'moving_time',
    'Start Time': 'start_time',
    'Day of Week': 'day_of_week',
    'Speed km/h': 'speed_km/h',
    'Max Speed km/h': 'max_speed_km/h',
    'Pace /km': 'pace_min/km',
    'Max Pace /km': 'max_pace_min/km',
    'Pace /100m': 'pace_min/100m',
    'Max Pace /100m': 'max_pace_min/100m',
    'Pwr W': 'power_W',
    'Weighted Avg Pwr W': 'weighted_avg_power_W',
    'Max Pwr W': 'max_power_W',
    'Cad': 'cad',
    'Heart': 'heart',
    'Max Heart': 'max_heart',
    'Elv High m': 'elv_high_m',
    'Elv Low m': 'elv_low_m',
    'Efficiency %': 'efficiency_%',
    'Elev/Dist m/km': 'elv/dist_m/km',
    'Elev/Time m/h': 'elv/time_m/h',
    'W/HR': 'w/hr',
    'Speed/HR': 'speed/hr',
    'Temp °C': 'temp_°C',
    'Cal': 'cal',
    'Energy kJ': 'energy_kJ',
    'Dist start to end km': 'dist_start_to_end_km',
    'H/R Zone 1': 'h/r_zone1',
    'H/R Zone 2': 'h/r_zone2',
    'H/R Zone 3': 'h/r_zone3',
    'H/R Zone 4': 'h/r_zone4',
    'H/R Zone 5': 'h/r_zone5',
    'Power 0W': 'power_0W',
    'Power 0-50W': 'power_0-50W',
    'Power 50-100W': 'power_50-100W',
    'Power 100-150W': 'power_100-150W',
    'Power 150-200W': 'power_150-200W',
    'Power 200-250W': 'power_200-250W',
    'Power 250-300W': 'power_250-300W',
    'Power 300-350W': 'power_300-350W',
    'Power 350-400W': 'power_350-400W',
    'Power 400-450W': 'power_400-450W',
    'Power 450W+': 'power_450W+',
    'Description': 'description',
    'City': 'city',
    'State': 'state',
    'Device/App': 'device/app',
    'Activity Id': 'activity_id',
    'filename': 'cyclist_id'
    }
)
# Convert and transform data
# (distances m -> km, speeds m/s -> km/h, rounding for readability)
df['dist_km'] = (df['dist_km'] / 1000).round(2)
df['speed_km/h'] = (df['speed_km/h'] * 3.6).round(1)
df['max_speed_km/h'] = (df['max_speed_km/h'] * 3.6).round(1)
df['power_W'] = df['power_W'].round(0)
df['heart'] = df['heart'].round(0)
df['temp_°C'] = df['temp_°C'].round(0)
df['efficiency_%'] = df['efficiency_%'].round(2)
df['dist_start_to_end_km'] = (df['dist_start_to_end_km'] / 1000).round(2)
# drop duplicates (each activity_id is kept once)
df.drop_duplicates(subset="activity_id", inplace=True)
# dist_km has 2902 (1.0%) zeros
# drop all rows with a 'dist_km' of 0
df = df[df.dist_km != 0]
# 'speed_km/h' has 2937 (1.1%) zeros
# drop all rows with a 'speed_km/h' of 0
df = df[df['speed_km/h'] != 0]
# delete all activities with an average speed > 70km/h
df = df[df['speed_km/h'] <= 70]
# delete all rows where 'elv_m' is 0 and the activity is before 01.02.2018
df = df.drop(df[(df['elv_m'] == 0) & (df['When'] <= '2018-02-01')].index)
# delete all rows with a temperature greater than 70°C
df = df[df['temp_°C'] <= 70]
# replace all NaN in 'temp_°C' with median of 'temp_°C'
df['temp_°C'] = df['temp_°C'].fillna((df['temp_°C'].median()))
# keep only rows with a heart-rate value; replace zeros with the median
df = df[df['heart'].notna()]
df['heart'] = df['heart'].mask(df['heart'] == 0, df['heart'].median(skipna=True))
# replace all 0 in 'energy_kJ' with mean of 'energy_kJ'
df['energy_kJ'] = df['energy_kJ'].mask(df['energy_kJ'] == 0, df['energy_kJ'].mean(skipna=True))
# replace all 0 in 'cal' with mean of 'cal'
df['cal'] = df['cal'].mask(df['cal'] == 0, df['cal'].mean(skipna=True))
# replace all 0 in 'cad' with median of 'cad'
df['cad'] = df['cad'].mask(df['cad'] == 0, df['cad'].median(skipna=True))
# delete all activities with dist_km < 2km (rows with dist_km <= 1.99 are dropped)
df = df.drop(df[(df['dist_km'] <= 1.99)].index)
# replace all NaN in 'elv_high_m' with mean of 'elv_high_m'
df['elv_high_m'] = df['elv_high_m'].fillna((df['elv_high_m'].mean()))
# replace all NaN in 'elv_low_m' with mean of 'elv_low_m'
df['elv_low_m'] = df['elv_low_m'].fillna((df['elv_low_m'].mean()))
# exclude all rows without input for power_... and h/r_zone...
# (keep rows where at least one heart-rate zone is non-zero)
df = df[df[['h/r_zone1', 'h/r_zone2', 'h/r_zone3', 'h/r_zone4', 'h/r_zone5']].ne(0, axis=0).any(axis=1)]
#Drop columns with too many missing values
#max_power_W has 194319 (69.6%) missing values
#weighted_avg_power_W has 193704 (69.4%) zeros
df.drop(['When', 'type', 'gear', 'city', 'state', 'device/app', 'activity_id', 'cyclist_id', 'power_0W', 'power_0-50W', 'power_50-100W', 'power_100-150W',
         'power_150-200W', 'power_200-250W', 'power_250-300W', 'power_300-350W','max_pace_min/km','max_pace_min/100m',
         'power_350-400W', 'power_400-450W','date', 'timestamp', 'power_450W+', 'max_power_W', 'weighted_avg_power_W', 'name', 'dist_start_to_end_km', 'max_speed_km/h', 'w/hr'], axis=1, inplace=True)
df.reset_index(drop=True, inplace=True)
# Checking for NaN values in the df_model data frame
df.isna().values.any()
# Find out the lines with inf values
df[(df == np.inf).any(axis=1)]
# delete the lines with inf values
# NOTE(review): hard-coded positional indices are fragile -- if any
# upstream filter changes, different rows get dropped; consider
# dropping by the inf mask instead.
df = df.drop(df.index[[83530,84042]])
# delete all rows with 0 in 'power_W'
df = df[df['power_W'] != 0]
# ### Create Dummy Variable
# The `"day_of_week"` column is categorical, not numeric. So we have to convert that to a one-hot/dummy:
# create dummy variables (drop_first=True avoids perfect
# multicollinearity between the dummy columns)
day_of_week_dummies = pd.get_dummies(df['day_of_week'], prefix='day_of_week', drop_first=True)
df = pd.concat([df.drop(['day_of_week'], axis = 1), day_of_week_dummies], axis = 1)
# ## Export of Data
# The data modification ends at this point. The DataFrame (df) is organized, cleaned and modified. For purity and performance reasons, the Machine Learning part of this project will be continued in another Jupyter Notebook. The interface between the Jupyter Notebooks will be given using a `.pkl`file.
# - `df_model.pkl` --> DataFrame with only the most important features ready for computational demanding ML models
df_model = df.copy()
# +
# save the cleaned df as .pkl file
path = './data/df_model.pkl'
df_model.to_pickle(path)
# Sanity check: confirm the pickle file now exists on disk
Path(path).is_file()
print('This cell was last run on: ')
print(datetime.now())
| CyPer_DP4Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Problem Set 3
# ## Learning from data [TIF285], Chalmers, Fall 2019
#
# Last revised: 09-Oct-2019 by <NAME> [<EMAIL>]
# ## Instructions
# - See deadline on the course web page
# - This problem set is performed individually (but collaboration is encouraged) and contains a number of basic and extra problems; you can choose which and how many to work on.
# - See examination rules on the course web page.
# - Hand-in is performed through the following **two** actions:
# - Upload of your solution in the form of a jupyter notebook, or python code, via Canvas.
# - Answer the corresponding questions on OpenTA.
#
# Note that the hand-in is not complete, and will not be graded, if any of those actions is not performed.
# Fill your personal details
# - Name: **Lastname, Firstname**
# - Personnummer: **YYMMDD-XXXX**
# <br/>
# (civic registration number)
# ## Problems
# The 10 basic points of this problem set are distributed over four problems:
# 1. Assigning probabilities for a hundred-sided die (2 basic points)
# 2. Chi-squared hypothesis testing (2 basic points)
# 3. Gaussian process regression (3 basic points)
# 4. Neural network classification (3 basic points)
#
# Each of them will be presented in a separate jupyter notebook.
# # Problem 1: Assigning probabilities for a hundred-sided die
# ### (2 basic points)
# Consider a hundred-sided die (labeled with 1, 2, 3, ..., 100) for which you know that the average result is 10.
#
# Use the principle of maximum entropy to assign the probabilities $\{ p_i \}_{i=1}^{100}$ for the outcomes of a die roll.
#
# *Hint: Use the method of Lagrange multipliers. See, e.g., Gregory section 8.6-7.*
#
# *There are two constraints from the known information: the normalization of the probabilities $\sum_i p_i = 1$ and the average result $\sum_i i p_i = 10$. In lack of other information, it is best to set the Lebesque measure $m_i = \mathrm{constant}$.*
# ### Task
# * Assign the probabilities and make a bar plot.
# * The distribution looks like an exponential $e^{-\lambda i}$. What is its decay constant $\lambda$?
| doc/pub/ProblemSets/Set3/TIF285_ProblemSet3-Problem1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <center> <font size=5> <h1>Define working environment</h1> </font> </center>
# The following cells are used to:
# - Import needed libraries
# - Set the environment variables for Python, Anaconda, GRASS GIS and R statistical computing
# - Define the ["GRASSDATA" folder](https://grass.osgeo.org/grass73/manuals/helptext.html), the name of "location" and "mapset" where you will to work.
# **Import libraries**
# +
## Import libraries needed for setting parameters of operating system
import os
import sys
## Import library for temporary files creation
import tempfile
## Import Pandas library
import pandas as pd
## Import Numpy library
import numpy
## Import Psycopg2 library (interection with postgres database)
import psycopg2 as pg
# Import Math library (usefull for rounding number, e.g.)
import math
## Import Subprocess + subprocess.call
import subprocess
from subprocess import call, Popen, PIPE, STDOUT
# -
# <center> <font size=3> <h3>Environment variables when working on Linux Mint</h3> </font> </center>
# **Set 'Python' and 'GRASS GIS' environment variables**
# Here, we set [the environment variables allowing to use of GRASS GIS](https://grass.osgeo.org/grass64/manuals/variables.html) inside this Jupyter notebook. Please change the directory path according to your own system configuration.
# +
### Define GRASS GIS environment variables for LINUX UBUNTU Mint 18.1 (Serena)
# NOTE: all paths below are machine-specific -- adapt them to your own
# GRASS installation before running.
# Check if the environment variables exist and create them (empty) if they don't.
if not 'PYTHONPATH' in os.environ:
    os.environ['PYTHONPATH']=''
if not 'LD_LIBRARY_PATH' in os.environ:
    os.environ['LD_LIBRARY_PATH']=''
# Set environmental variables
os.environ['GISBASE'] = '/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu'
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'bin')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'script')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'lib')
#os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python')
os.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python')
os.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python','grass')
os.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python','grass','script')
os.environ['PYTHONLIB'] = '/usr/lib/python2.7'
os.environ['LD_LIBRARY_PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'lib')
os.environ['GIS_LOCK'] = '$$'
os.environ['GISRC'] = os.path.join(os.environ['HOME'],'.grass7','rc')
# NOTE(review): the '.grass7/addons' entry is appended to PATH twice
# (here and two lines below) -- harmless, but probably unintended.
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons','bin')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons','scripts')
## Define GRASS-Python environment
sys.path.append(os.path.join(os.environ['GISBASE'],'etc','python'))
# -
# **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**
# **Display current environment variables of your computer**
## Display the current defined environment variables
# (Python 2 print statement; lists every variable, including the GRASS
# ones set above)
for key in os.environ.keys():
    print "%s = %s \t" % (key,os.environ[key])
# **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**
# <center> <font size=5> <h1>User inputs</h1> </font> </center>
## Define an empty dictionary for saving user inputs
user={}
# Here after:
# - Enter the path to the directory you want to use as "[GRASSDATA](https://grass.osgeo.org/programming7/loc_struct.png)".
# - Enter the name of the location in which you want to work and its projection information in [EPSG code](http://spatialreference.org/ref/epsg/) format. Please note that the GRASSDATA folder and locations will be automatically created if not existing yet. If the location name already exists, the projection information will not be used.
# - Enter the name you want for the mapsets which will be used later for Unsupervised Segmentation Parameter Optimization (USPO), Segmentation and Classification steps.
# +
## Enter the path to GRASSDATA folder
user["gisdb"] = "/media/tais/My_Book_1/MAUPP/Traitement/Ouagadougou/Segmentation_fullAOI_localapproach/GRASSDATA"
## Enter the name of the location (existing or for a new one)
user["location"] = "Ouaga_32630"
## Enter the EPSG code for this location (32630 = WGS 84 / UTM zone 30N)
user["locationepsg"] = "32630"
## Enter the name of the mapset to use for segmentation
user["segmentation_mapsetname"] = "LOCAL_SEGMENT"
## Enter the name of the mapset to use for classification
user["classificationA_mapsetname"] = "CLASSIF"
# -
# **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**
# <center> <font size=5> <h1>Define GRASSDATA folder and create GRASS' location and mapsets</h1> </font> </center>
# Here after, the python script will check if GRASSDATA folder, locations and mapsets already exist. If not, they will be automatically created.
# **Import GRASS Python packages**
# +
## Import libraries needed to launch GRASS GIS in the jupyter notebook
import grass.script.setup as gsetup
## Import libraries needed to call GRASS using Python
import grass.script as grass
# -
# **Define GRASSDATA folder and create location and mapsets**
## Automatic creation of GRASSDATA folder
if os.path.exists(user["gisdb"]):
    print "GRASSDATA folder already exist"
else:
    os.makedirs(user["gisdb"])
    print "GRASSDATA folder created in "+user["gisdb"]
## Automatic creation of GRASS location if it doesn't exist
# (the location is created with the EPSG code given in the user inputs)
if os.path.exists(os.path.join(user["gisdb"],user["location"])):
    print "Location "+user["location"]+" already exist"
else :
    grass.core.create_location(user["gisdb"], user["location"], epsg=user["locationepsg"], overwrite=False)
    print "Location "+user["location"]+" created"
# +
### Automatic creation of GRASS GIS mapsets
## Import library for file copying
import shutil
mapsetname=user["classificationA_mapsetname"]
if os.path.exists(os.path.join(user["gisdb"],user["location"],mapsetname)):
if not os.path.exists(os.path.join(user["gisdb"],user["location"],mapsetname,'WIND')):
print "WARNING: '"+mapsetname+"' mapset already exist, but a 'WIND' file is missing. Please solve this issue."
else: print "'"+mapsetname+"' mapset already exist"
else:
os.makedirs(os.path.join(user["gisdb"],user["location"],mapsetname))
shutil.copy(os.path.join(user["gisdb"],user["location"],'PERMANENT','WIND'),os.path.join(user["gisdb"],user["location"],mapsetname,'WIND'))
print "'"+mapsetname+"' mapset created in location '"+user["location"]+"'"
# +
### Automatic creation of GRASS GIS mapsets
## Import library for file copying
import shutil
mapsetname=user["segmentation_mapsetname"]
if os.path.exists(os.path.join(user["gisdb"],user["location"],mapsetname)):
if not os.path.exists(os.path.join(user["gisdb"],user["location"],mapsetname,'WIND')):
print "WARNING: '"+mapsetname+"' mapset already exist, but a 'WIND' file is missing. Please solve this issue."
else: print "'"+mapsetname+"' mapset already exist"
else:
os.makedirs(os.path.join(user["gisdb"],user["location"],mapsetname))
shutil.copy(os.path.join(user["gisdb"],user["location"],'PERMANENT','WIND'),os.path.join(user["gisdb"],user["location"],mapsetname,'WIND'))
print "'"+mapsetname+"' mapset created in location '"+user["location"]+"'"
# -
# **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**
# <center> <font size=5> <h1>Define functions</h1> </font> </center>
# This section of the notebook is dedicated to defining functions which will then be called later in the script. If you want to create your own functions, define them here.
# ### Function for computing processing time
# The "print_processing_time" is used to calculate and display the processing time for various stages of the processing chain. At the beginning of each major step, the current time is stored in a new variable, using [time.time() function](https://docs.python.org/2/library/time.html). At the end of the stage in question, the "print_processing_time" function is called and takes as argument the name of this new variable containing the recorded time at the beginning of the stage, and an output message.
# +
## Import library for managing time in python
import time
## Function "print_processing_time()" compute processing time and printing it.
# The argument "begintime" wait for a variable containing the begintime (result of time.time()) of the process for which to compute processing time.
# The argument "printmessage" wait for a string format with information about the process.
def print_processing_time(begintime, printmessage):
    """Return printmessage followed by the time elapsed since begintime.

    begintime: a time.time() timestamp taken when the process started.
    printmessage: a string prepended to the formatted duration.

    The elapsed time is split into days/hours/minutes/seconds with
    divmod; only the units relevant to the magnitude are included
    (e.g. under one minute only seconds are shown).
    """
    elapsed = time.time() - begintime
    days, rem = divmod(elapsed, 86400)
    hours, rem = divmod(rem, 3600)
    minutes, rem = divmod(rem, 60)
    days, hours, minutes = int(days), int(hours), int(minutes)
    seconds = round(rem, 1)
    # Build the leading units, largest first, matching the magnitude.
    prefix = ""
    if elapsed >= 86400:
        prefix += str(days) + " days, "
    if elapsed >= 3600:
        prefix += str(hours) + " hours and "
    if elapsed >= 60:
        prefix += str(minutes) + " minutes and "
    return str(printmessage) + prefix + str(seconds) + " seconds"
# -
# ### Function for Postgres database vaccum
# Do a VACUUM on the current Postgresql database
def vacuum(db):
    """Run VACUUM on the database behind connection *db*.

    VACUUM cannot run inside a transaction block, so the connection's
    isolation level is temporarily set to 0 (autocommit) and restored
    afterwards, even if the query fails.

    Fix: the original executed the query on a *global* cursor ``cur``
    (only created much later in the script, possibly on a different
    connection state); a cursor is now opened on *db* itself.
    """
    old_isolation_level = db.isolation_level
    db.set_isolation_level(0)
    cur = db.cursor()
    try:
        cur.execute("VACUUM")
    finally:
        cur.close()
        db.set_isolation_level(old_isolation_level)
# **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**
## Saving current time for processing time management
# (pass this to print_processing_time() later to report total run time)
begintime_full=time.time()
# **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**
# # Set parameter for Postgresql database connection
# User for postgresql connection
dbuser="tais"
# Password of user (placeholder -- set your real password here)
dbpassword="<PASSWORD>"
# Host of database
host="localhost"
# Name of the new database
dbname="ouaga_fullaoi_localsegment"
# Set name of schema for objects statistics
stat_schema="statistics"
# Set name of schema for samples
sample_schema="samples"
# Set name of schema for classification optical only
classifA_schema="classif_A"
# Set name of schema for classification optical and nDSM
classifB_schema="classif_B"
# Set name of schema for classification SAR only
classifC_schema="classif_C"
# Set name of schema for classification SAR and optical
classifD_schema="classif_D"
# Set name of table with statistics of segments - FOR OPTICAL
object_stats_optical="object_stats_optical"
# Set name of table with statistics of segments - FOR SAR
object_stats_sar="object_stats_sar"
# Set name of table with all the samples
samples_labels="sample_labels"
# Set name of table with samples without outliers
samples_labels_ok="sample_labels_ok"
# Set name of table with extension of the AOI where SAR and optical exists
sar_aoi="sar_aoi"
# Set name of table with samples without outliers in SAR AOI
sample_sar="sample_sar"
# Set name of table with samples without outliers in SAR AOI - Test set
sample_test="sample_test"
# Set name of table with samples without outliers in SAR AOI - Training set
sample_training="sample_training"
# Set name of table with results of classification
classif="classif"
# Set name of table with results of classification and ground truth
groundtruth_classif="groundtruth_classif"
# <center> <font size=5> <h1>Select samples in segmentation layer</h1> </font> </center>
# **Launch GRASS GIS working session**
# +
### Automatic creation of GRASS GIS mapsets
# (same create-if-missing logic as the earlier mapset cells, applied to
# the SAR-only classification mapset)
## Import library for file copying
import shutil
## Set the name of the mapset in which to work
mapsetname=classifC_schema
if os.path.exists(os.path.join(user["gisdb"],user["location"],mapsetname)):
    if not os.path.exists(os.path.join(user["gisdb"],user["location"],mapsetname,'WIND')):
        print "WARNING: '"+mapsetname+"' mapset already exist, but a 'WIND' file is missing. Please solve this issue."
    else: print "'"+mapsetname+"' mapset already exist"
else:
    os.makedirs(os.path.join(user["gisdb"],user["location"],mapsetname))
    shutil.copy(os.path.join(user["gisdb"],user["location"],'PERMANENT','WIND'),os.path.join(user["gisdb"],user["location"],mapsetname,'WIND'))
    print "'"+mapsetname+"' mapset created in location '"+user["location"]+"'"
# -
## Launch GRASS GIS working session in the mapset
if os.path.exists(os.path.join(user["gisdb"],user["location"],mapsetname)):
    gsetup.init(os.environ['GISBASE'], user["gisdb"], user["location"], mapsetname)
    print "You are now working in mapset '"+mapsetname+"'"
else:
    print "'"+mapsetname+"' mapset doesn't exists in "+user["gisdb"]
# ### Create new schema
# +
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
# Connect to postgres database
# (fix: use the connection parameters defined at the top of the file instead
# of hard-coded duplicates that can drift out of sync with dbuser/dbpassword/host)
db=None
db=pg.connect(dbname=dbname, user=dbuser, password=dbpassword, host=host)
# Allow to create a new database
db.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
# Open a cursor and create the classification schema
cur=db.cursor()
#cur.execute('DROP SCHEMA IF EXISTS '+classifC_schema+' CASCADE') #Comment this to avoid deleting existing DB
try:
    cur.execute('CREATE SCHEMA '+classifC_schema)
except Exception as e:
    # Schema probably already exists; report and continue
    print ("Exception occured : "+str(e))
cur.close()
db.close()
# -
# ## Define the folder where to save the results and create it if necessary
# In the next cell, please adapt the path to the directory where you want to save the .csv output of i.segment.uspo.
general_path="/media/tais/My_Book_1/MAUPP/Traitement/Ouagadougou/Segmentation_fullAOI_localapproach/Results/CLASSIF"
resultfolder=os.path.join(general_path, classifC_schema)
## Create the folder if does not exists
if not os.path.exists(resultfolder):
    os.makedirs(resultfolder)
    print "Folder '"+resultfolder+"' created"
## Subfolder holding the classification outputs
outputfolder=os.path.join(resultfolder, "classification")
## Create the folder if does not exists
if not os.path.exists(outputfolder):
    os.makedirs(outputfolder)
    print "Folder '"+outputfolder+"' created"
# ## Create a README.txt file informing about the classification methodology
## Set-up a message
message=""
message+=classifC_schema+" : Classification using SAR only"
## Write it in the .txt file ('with' guarantees the handle is closed even on error)
with open(os.path.join(outputfolder,"readme.txt"),'w') as f:
    f.write(message)
# ## Display test and training samples
# Connect to an existing database
db=pg.connect(database=dbname, user=dbuser, password=<PASSWORD>, host=host)
# Open a cursor to perform database operations
cur=db.cursor()
# Query to find the number of row in the test samples
query="SELECT class, min(class_num) as class_num, count(*) FROM "+sample_schema+"."+sample_test+" \
GROUP BY class \
ORDER BY max(class_num) ASC"
# Execute query through panda
df_countperclass=pd.read_sql(query, db)
# Show dataframe
df_countperclass.head(5000)
# Print the total number in sample
print "Test samples counts "+str(sum(df_countperclass['count']))+" points."
# Query to find the number of row in the sample table
query="SELECT class, min(class_num) as class_num, count(*) FROM "+sample_schema+"."+sample_training+" \
GROUP BY class \
ORDER BY max(class_num) ASC"
# Execute query through panda
df_countperclass=pd.read_sql(query, db)
# Show dataframe
df_countperclass.head(5000)
# Print the total number in sample
print "Training samples counts "+str(sum(df_countperclass['count']))+" points."
# ## Create csv with statistics for training
# Drop table if exists:
cur.execute("DROP TABLE IF EXISTS "+classifC_schema+"."+sample_training)
# Make the changes to the database persistent
db.commit()
# Create the training table: join the training samples to their shape
# statistics (optical table) and then to their SAR statistics, by segment id.
query="CREATE TABLE "+classifC_schema+"."+sample_training+" AS(\
WITH shape_stats AS(\
SELECT a.seg_id, a.class_num, b.area, b.perimeter, b.compact_circle, b.compact_square, b.fd \
FROM "+sample_schema+"."+sample_training+" AS a \
INNER JOIN "+stat_schema+"."+object_stats_optical+" AS b \
ON a.seg_id=b.cat)\
\
SELECT a.*, c.* \
FROM shape_stats AS a \
INNER JOIN "+stat_schema+"."+object_stats_sar+" AS c \
ON a.seg_id=c.cat)"
# Execute the query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# Drop some non wanted columns
query="ALTER TABLE "+classifC_schema+"."+sample_training+" DROP COLUMN cat"
# Execute the query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# Query to find the number of row in the sample table
query="SELECT * FROM "+classifC_schema+"."+sample_training
# Execute query through panda
df_countperclass=pd.read_sql(query, db)
# Show dataframe
df_countperclass.head(10)
# ### Export training sample as .csv for archive
outputfolder
## Define the path to the .csv output
sample_training_csv=os.path.join(outputfolder,"sample_training.csv")
# Connect to an existing database
# (fix: 'password=<PASSWORD>' was an invalid bare placeholder; use dbpassword)
db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host)
# Open a cursor to perform database operations
cur=db.cursor()
#### Export as .csv
# Query
query="COPY "+classifC_schema+"."+sample_training+" TO '"+sample_training_csv+"' DELIMITER ',' CSV HEADER"
# Execute the COPY query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# Close cursor and communication with the database
cur.close()
db.close()
# <center> <font size=5> <h1>Feature selection</h1> </font> </center>
# ### Import and load extension rpy2
import rpy2
# %load_ext rpy2.ipython
# ### Import training set in R
# + magic_args="-i sample_training_csv -o training" language="R"
# # Import training
# training <- read.csv(sample_training_csv, sep=",", header=TRUE, row.names=1)
# training$class_num <- as.factor(training$class_num)
# -
## Preview the training dataframe transferred back from R
training.head()
# + magic_args="-o colname" language="R"
# colname<-colnames(training)
# -
# Create a list with the column name
print list(colname)
# Set the name of the column with class
class_column='class_num'
# Set the name of the column with the first feature to be used
first_feature='area'
# Set the name of the column with the last feature to be used
last_feature='w11_sm_perc_90'
# + magic_args="-i class_column,first_feature,last_feature -o features" language="R"
#
# # Save the index of the column
# classnumindex=match(c(class_column),colname)
# # Save the index where object's features to be used start
# startindex=match(c(first_feature),colname)
# # Save the index where object's features to be used stop
# stopindex=match(c(last_feature),colname)
#
# # Create dataframe with only columns to be used features
# temp1<-as.data.frame(training[classnumindex])
# temp2<-as.data.frame(training[startindex:stopindex])
# merged_df=merge(temp1,temp2, by=0, all=TRUE)
# features<-transform(merged_df, row.names=Row.names, Row.names=NULL)
# # Set class column as factor
# features$class_num <- as.factor(features$class_num)
# -
## Preview the class + features dataframe built in R
features.head()
# + language="R"
# # Instal library
# if(!is.element('parallel', installed.packages()[,1])){
# cat('\n\nInstalling parallel package from CRAN')
# chooseCRANmirror(ind=1)
# install.packages('parallel')}
#
# if(!is.element('doParallel', installed.packages()[,1])){
# cat('\n\nInstalling doParallel package from CRAN')
# chooseCRANmirror(ind=1)
# install.packages('doParallel')}
#
# if(!is.element('VSURF', installed.packages()[,1])){
# cat('\n\nInstalling VSURF package from CRAN')
# chooseCRANmirror(ind=1)
# install.packages('VSURF')}
#
# library(parallel)
# require(parallel)
# library(doParallel)
# require(doParallel)
# library(VSURF)
# require(VSURF)
# + magic_args="-i class_column,first_feature,last_feature -o Surf" language="R"
#
# # Save column names
# colname<-colnames(features)
# # Save the index of the column
# classnumindex<-match(c(class_column),colname)
# # Save the index where object's features to be used start
# startindex<-match(c(first_feature),colname)
# # Save the index where object's features to be used stop
# stopindex<-match(c(last_feature),colname)
#
# # Set number of cores to use
# usedcores=detectCores()-5
# registerDoParallel(usedcores)
# # Feature Selection using VSURF
# Surf=VSURF(features[startindex:stopindex],features$class_num, parallel=TRUE, ncores=usedcores)
# -
## Set the path to the output .txt file with summary of VSURF
output_summary=os.path.join(outputfolder,"VSURF_summary.txt")
# + magic_args="-i output_summary" language="R"
#
# # Print and save summary of VSRUF
# summaryVSRUF<-summary(Surf)
# output<-capture.output(summary(Surf))
# print (summaryVSRUF)
# cat(output,file=output_summary,sep="\n")
# -
## Paths for the ranked feature lists produced by VSURF
## (interpretation step and prediction step)
output_interp=os.path.join(outputfolder,"VSURF_interp.csv")
output_pred=os.path.join(outputfolder,"VSURF_pred.csv")
# + magic_args="-i output_interp,output_pred -o Surf_interp_features,Surf_pred_features" language="R"
#
# # Save interpretation step and prediction step results
# Surf_interp<-Surf$varselect.interp
# Surf_pred<-Surf$varselect.pred
#
# # Save name of feature in a list ranked with the same index as VSURF output
# colname<-colnames(features[startindex:stopindex])
#
# # Declare empty variables of type 'character'
# Surf_interp_features <- character()
#
# # Loop on indexes of features selecteds by VSURF to find the corresponding feature name
# count<-0
# for (x in Surf_interp){
# count<-count+1
# Surf_interp_features[count]<-colname[x]
# }
#
# # Write CSV
# write.csv(Surf_interp_features, file=output_interp, row.names=TRUE)
#
# # Declare empty variables of type 'character'
# Surf_pred_features <- character()
#
# # Loop on indexes of features selecteds by VSURF to find the corresponding feature name
# count<-0
# for (x in Surf_pred){
# count<-count+1
# Surf_pred_features[count]<-colname[x]
# }
#
# # Write CSV
# write.csv(Surf_pred_features, file=output_pred, row.names=TRUE)
# -
## Print features selected by VSURF at interpretation step
print "Interpretation results"
for i in range(len(Surf_interp_features)):
    print str(i+1)+" "+Surf_interp_features[i]
## Print features selected by VSURF at prediction step
print "Prediction results"
for i in range(len(Surf_pred_features)):
    print str(i+1)+" "+Surf_pred_features[i]
## Set the path to the output plot (PDF)
outputgraph=os.path.join(outputfolder,"VSURF_opt_plot.pdf")
# + magic_args="-i outputgraph" language="R"
#
# # Export the plot in pdf
# VSURF_plot_export=outputgraph
# pdf(VSURF_plot_export)
# plot(Surf, var.names = FALSE,
# nvar.interp = length(Surf_pred))
# dev.off()
# -
## Display graphs (requires the 'Wand' ImageMagick binding)
import wand
from wand.image import Image as WImage
img = WImage(filename=outputgraph)
img
# <center> <font size=5> <h1>Classification using Random Forest</h1> </font> </center>
# ### Prepare .csv for classification (training set and object to classify)
# Connect to an existing database
# (fix: 'password=<PASSWORD>' was an invalid bare placeholder; use dbpassword)
db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host)
# Open a cursor to perform database operations
cur=db.cursor()
# #### Export training set as .csv
### Set if all feature have to be used (vsurfonly=False)
### or only those resulting from prediction step of VSURF (vsurfonly=True)
vsurfonly=True
# +
## Save list of features to be used
if vsurfonly:
features_for_classif=list(Surf_pred_features)
else:
features_for_classif=list(features.columns.values)[1:]
## Split features in to a list for Geometrical and Optical features and another for SAR features
features_for_classif_optical=[]
features_for_classif_sar=[]
for f in features_for_classif:
if f.split("_")[0] in ("w3","w5","w7","w11","w13","w15","w17"):
features_for_classif_sar.append(f)
else:
features_for_classif_optical.append(f)
# -
print "Optical and geometric features to be used :\n"+','.join(features_for_classif_optical)
print "\n"
print "SAR features to be used :\n"+','.join(features_for_classif_sar)
print "\n"
print "All features to be used :\n"+','.join(features_for_classif)
## Define the path to the .csv
training_csv=os.path.join(outputfolder,"training_csv.csv")
#### Export as .csv
# Query
query="COPY (\
SELECT seg_id, class_num, "+", ".join(features_for_classif)+" \
FROM "+classifC_schema+"."+sample_training+") TO '"+training_csv+"' DELIMITER ',' CSV HEADER"
# Execute the COPY query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# #### Evaluate number of objects to be classified and number of loops to create
## Evaluate the number of row in 'object_stat' table (planner estimate, not exact)
query="SELECT reltuples::bigint AS estimated_object \
FROM pg_class \
WHERE oid='"+stat_schema+"."+object_stats_optical+"'::regclass"
# Execute query through panda
df=pd.read_sql(query, db)
# Save estimated number of objects
estimate=list(df['estimated_object'])[0]
# Print
print "The table contains "+str(estimate)+" rows (estimate)."
# Define number of object to classify on each loop
nbobjloop=1000000
# Add 5% of estimated number of rows to be sure to well cover the full dataset
remaining_rows=int(estimate*1.05)
# Define number of loops for classification
import math
loops=int(math.ceil(remaining_rows/(nbobjloop*1.0)))
## Print what is going to happen
print "Classification will be made on "+str(loops)+" loop(s)"
# # Create multiple .csv with the statistics of segments
# #### Creating a new temporary table containing objects to be classified with all the statistics needed for classification
# Query to create the temporary table (join optical and SAR statistics per segment)
query="CREATE TABLE "+classifC_schema+".temp1 AS(\
WITH \
optistats AS(\
SELECT cat, "+", ".join(features_for_classif_optical)+" \
FROM "+stat_schema+"."+object_stats_optical+"), \
\
sarstats AS(\
SELECT cat, "+", ".join(features_for_classif_sar)+" \
FROM "+stat_schema+"."+object_stats_sar+"),\
\
allstats AS(\
SELECT a.cat AS acat, a.*, b.* \
FROM optistats AS a \
INNER JOIN sarstats AS b \
ON a.cat=b.cat)\
\
SELECT acat AS cat, "+", ".join(features_for_classif)+" \
FROM allstats ORDER BY cat)"
# Execute the query
cur.execute(query)
db.commit()
# Define starting offset and limit (for the first loop)
offset=0
# Declare an empty list containing the paths to .csv
list_objstat_csv=[]
# Export object statistics on multiple .csv files, nbobjloop rows per file
for loop in range(loops):
    ## Define the path to the .csv
    objstat_csv=os.path.join(outputfolder,"objects_stats_"+str(loop+1)+".csv")
    list_objstat_csv.append(objstat_csv)
    # Query to export as .csv
    query="COPY (SELECT * FROM "+classifC_schema+".temp1 ORDER BY cat \
OFFSET "+str(offset)+" LIMIT "+str(nbobjloop)+") TO '"+objstat_csv+"' DELIMITER ',' CSV HEADER"
    # Execute the query
    cur.execute(query)
    db.commit()
    # Print
    print "Rows between "+str(offset)+" and "+str(offset+nbobjloop)+" exported in .csv file '"+objstat_csv+"'"
    # Update offset and limit
    offset+=nbobjloop
# Drop table if exists:
cur.execute("DROP TABLE IF EXISTS "+classifC_schema+".temp1")
# Make the changes to the database persistent
db.commit()
# Close cursor and communication with the database
cur.close()
db.close()
# + language="R"
#
# # Install package
# if(!is.element('caret', installed.packages()[,1])){
# cat('\n\nInstalling caret package from CRAN')
# chooseCRANmirror(ind=1)
# install.packages('caret')}
# # Install package
# if(!is.element('randomForest', installed.packages()[,1])){
# cat('\n\nInstalling randomForest package from CRAN')
# chooseCRANmirror(ind=1)
# install.packages('randomForest')}
# # Install package
# if(!is.element('pROC', installed.packages()[,1])){
# cat('\n\nInstalling pROC package from CRAN')
# chooseCRANmirror(ind=1)
# install.packages('pROC')}
# # Install package
# if(!is.element('e1071', installed.packages()[,1])){
# cat('\n\nInstalling e1071 package from CRAN')
# chooseCRANmirror(ind=1)
# install.packages('e1071')}
# + language="R"
#
# # Load libraries
# library(parallel)
# require(parallel)
# library(doParallel)
# require(doParallel)
# library(caret)
# require(caret)
# library(randomForest)
# require(randomForest)
# library(e1071)
# require(e1071)
# -
# ## Train Random Forest model
## Set the path to the .csv holding cross-validated accuracy/kappa of the RF model
output_accuracy=os.path.join(outputfolder,"accuracy_RF.csv")
# + magic_args="-i training_csv,output_accuracy" language="R"
#
# # Set number of cores to use
# usedcores=detectCores()-5
# registerDoParallel(usedcores)
#
# # Import training
# training <- read.csv(training_csv, sep=",", header=TRUE, row.names=1)
# # Define factor
# training$class_num <- as.factor(training$class_num)
#
# # Cross-validation setting
# MyFolds.cv <- createMultiFolds(training$class_num, k=5, times=10)
# MyControl.cv <- trainControl(method='repeatedCV', index=MyFolds.cv, allowParallel = TRUE)
#
# # Train Random Forest
# rfModel <- train(class_num~.,training,method='rf', trControl=MyControl.cv,tuneLength=10)
# resamps.cv <- rfModel$resample
# accuracy_means <- mean(resamps.cv$Accuracy)
# kappa_means <- mean(resamps.cv$Kappa)
# df_means <- data.frame(method='rf',accuracy=accuracy_means, kappa=kappa_means)
# write.csv(df_means, output_accuracy, row.names=FALSE, quote=FALSE)
# -
#### Show mean accuracy results from cross-validation for tuning
## Import .csv file
accuracy=pd.read_csv(output_accuracy, sep=',',header=0)
## Display table
accuracy.head(15)
## Set the paths to the outputs of random forest classification
## (variable importance plot, model summary, cross-validation tuning plot)
output_varimp=os.path.join(outputfolder,"VariablesImportance_RF.pdf")
output_rfmodel=os.path.join(outputfolder,"RF_model.txt")
output_cv=os.path.join(outputfolder,"RF_cv.pdf")
# + magic_args="-i output_varimp,output_rfmodel,output_cv" language="R"
#
# # Plot variable importance
# library(pROC)
# importance <- varImp(rfModel, scale=FALSE)
# pdf(output_varimp, width = 11, height = ncol(features)/6 )
# print(plot(importance))
# dev.off()
#
# # Show final model
# tmp<-rfModel$finalModel
# sink(output_rfmodel)
# print(tmp)
# sink()
#
# # Plot cross validation tuning results
# pdf(output_cv)
# print(plot(rfModel))
# dev.off()
# -
#### Import classifiers tuning parameters and confusion matrix
## Open file
classifier_runs = open(output_rfmodel, 'r')
## Read file
print classifier_runs.read()
# The following cell displays pdf figures. Please first install the **Wand** package using the following command in the terminal :
#
# ``` sudo pip install Wand ```
## Display variable importance plot
import wand
from wand.image import Image as WImage
img = WImage(filename=output_varimp)
img
## Display cross-validation tuning plot
import wand
from wand.image import Image as WImage
img = WImage(filename=output_cv)
img
# ## Predict classes for objects
# + magic_args="-i list_objstat_csv" language="R"
#
# # Classification using loop
# for(i in 1:length(list_objstat_csv)) {
# filepath<-list_objstat_csv[i]
# # Import object statistics as dataframe
# features <- read.csv(filepath, sep=",", header=TRUE, row.names=1)
# # Predict class
# predicted <- data.frame(predict(rfModel, features))
# name<-paste('resultsdf',i,sep='')
# assign(name,data.frame(id=rownames(features), predicted))
# rm(features)
# rm(predicted)
# gc()
# }
# + language="R"
#
# # Merge predictions in one single dataframe
# listdf<-lapply(ls(pattern = "resultsdf*"), get)
# rf_predictions<-do.call(rbind,listdf)
# -
# ### Save training set and predictions in .csv files
## Define outputfile for .csv with training data for R
trainingset_R=os.path.join(outputfolder,"RF_trainingset_R.csv")
## Define outputfile for .csv with predictions of classification
classif_results=os.path.join(outputfolder,"RF_classif_results_R.csv")
# + magic_args="-i trainingset_R,classif_results" language="R"
# write.csv(training, file=trainingset_R, row.names=FALSE, quote=FALSE)
# write.csv(rf_predictions, file=classif_results, row.names=FALSE, quote=FALSE)
# + language="R"
#
# # Remove variables not needed anymore
# rm(training)
# gc()
#
# rm(list=ls(pattern = "resultsdf*"))
# rm(listdf)
# gc()
# -
# ### Delete csv files with object statistics for each loop of classification
## Remove the per-loop statistics exports now that classification is done
for objstat_path in list_objstat_csv:
    os.remove(objstat_path)
# <center> <font size=3> <h2>Import classification results in postgis </h2> </font> </center>
# Connect to an existing database
# (fix: 'password=<PASSWORD>' was an invalid bare placeholder; use dbpassword)
db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host)
# Open a cursor to perform database operations
cur=db.cursor()
# ### Import predictions of Random forest at level 2
# +
# Drop table if exists:
cur.execute("DROP TABLE IF EXISTS "+classifC_schema+"."+classif)
# Make the changes to the database persistent
db.commit()
# Create new table
query="CREATE TABLE "+classifC_schema+"."+classif+" (seg_id integer PRIMARY KEY, rf_pred_l2 integer)"
# Execute the CREATE TABLE query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# Create query for copy data from csv
query="COPY "+classifC_schema+"."+classif+" FROM '"+str(classif_results)+"' HEADER DELIMITER ',' CSV"
# Execute the COPY FROM CSV query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# -
# ### Merge classes of level 2 to reach level 2b
## Add column for prediction at level 2b
query="ALTER TABLE "+classifC_schema+"."+classif+" ADD COLUMN rf_pred_l2b integer"
# Execute the query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# Update the value (classes 21 and 22 merge into 20)
query="UPDATE "+classifC_schema+"."+classif+" SET \
rf_pred_l2b = '20' WHERE rf_pred_l2 IN ('21','22')"
# Execute the query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# Update the value (classes 32, 33 and 34 merge into 30)
query="UPDATE "+classifC_schema+"."+classif+" SET \
rf_pred_l2b = '30' WHERE rf_pred_l2 IN ('32','33','34')"
# Execute the query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# Update the value (remaining classes keep their level-2 code)
query="UPDATE "+classifC_schema+"."+classif+" SET \
rf_pred_l2b = rf_pred_l2 WHERE rf_pred_l2b IS NULL"
# Execute the query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# ### Merge classes of level 2 to reach level 1
## Add column for prediction at level 1
query="ALTER TABLE "+classifC_schema+"."+classif+" ADD COLUMN rf_pred_l1 integer"
# Execute the query
cur.execute(query)
## Update column for prediction at level 1
## (level-1 code is the first digit of the level-2 code: integer division by 10)
query="UPDATE "+classifC_schema+"."+classif+" SET rf_pred_l1=rf_pred_l2/10"
# Execute the query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# ### Export to csv for archive
## Define path of .csv file for saving predictions at the different levels
outputfile=classif_results.split(".")[0]+'_diff_levels.csv'
#### Export as .csv
# Query
query="COPY "+classifC_schema+"."+classif+" TO '"+outputfile+"' DELIMITER ',' CSV HEADER"
# Execute the COPY query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# Close cursor and communication with the database
cur.close()
db.close()
# <center> <font size=3> <h2>Accuracy assessment</h2> </font> </center>
# ### Change the outputfolder
# +
## Folder in which to save the accuracy assessment outputs
outputfolder=os.path.join(resultfolder,"accuracy_assess")
## Create the folder if does not exists
if not os.path.exists(outputfolder):
    os.makedirs(outputfolder)
    print "Folder '"+outputfolder+"' created"
# -
# ## Join sample test and classifier predictions in postgresql
# Connect to an existing database
# (fix: 'password=<PASSWORD>' was an invalid bare placeholder; use dbpassword)
db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host)
# Open a cursor to perform database operations
cur=db.cursor()
# Build a query to drop view if exists
query="DROP TABLE IF EXISTS "+classifC_schema+"."+groundtruth_classif
# Execute the query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
#### Join sample_test and classification results (LEFT JOIN keeps every test
#### point even if its segment received no prediction)
query="CREATE TABLE "+classifC_schema+"."+groundtruth_classif+" AS (\
SELECT a.cat_point, a.id, \
a.class AS level2_label, \
a.class_b AS level2b_label, \
a.class_l1 AS level1_label, \
a.class_num::integer AS level2_groundtr, \
a.class_num_b::integer AS level2b_groundtr, \
a.class_num_l1::integer AS level1_groundtr, \
c.rf_pred_l2 AS rf_level2, \
c.rf_pred_l2b AS rf_level2b, \
c.rf_pred_l1 AS rf_level1 \
FROM "+sample_schema+"."+sample_test+" AS a \
LEFT JOIN "+classifC_schema+"."+classif+" AS c ON a.seg_id = c.seg_id)"
# Execute the query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# +
#### Load ground truth and predictions for all levels
# Query
query="SELECT * FROM "+classifC_schema+"."+groundtruth_classif+" ORDER BY id"
# Execute query through panda
df=pd.read_sql(query, db)
# Show dataframe
df.head(20)
# +
# Save ground truth - Level2
groundtruth_L2=list(df['level2_groundtr'])
# Save ground truth - Level2b
groundtruth_L2b=list(df['level2b_groundtr'])
# Save ground truth - Level1
groundtruth_L1=list(df['level1_groundtr'])
# Save predictions - level2
prediction_L2=list(df['rf_level2'])
# Save predictions - level2b
prediction_L2b=list(df['rf_level2b'])
# Save predictions - level1
prediction_L1=list(df['rf_level1'])
# -
# Display number of objects in test sample
nrows=len(groundtruth_L2)
print nrows
## Sort each (ground truth, prediction) pair list jointly so both stay aligned.
# Level 2
groundtruth_L2,prediction_L2=zip(*sorted(zip(groundtruth_L2,prediction_L2)))
# Level 2b
groundtruth_L2b,prediction_L2b=zip(*sorted(zip(groundtruth_L2b,prediction_L2b)))
# Level 1
groundtruth_L1,prediction_L1=zip(*sorted(zip(groundtruth_L1,prediction_L1)))
# +
## Check that ground truth and predictions contain the same distinct class values
## (refactor: the same check was copy-pasted three times)
def _assert_same_distinct_values(truth, pred):
    # Abort the notebook if the two lists do not share the same set of distinct values
    tmp1=list(set(truth))
    tmp1.sort()
    tmp2=list(set(pred))
    tmp2.sort()
    if tmp1 != tmp2:
        sys.exit('WARNING: Lists contain different distinct values. Please check before continue')

_assert_same_distinct_values(groundtruth_L2, prediction_L2)
_assert_same_distinct_values(groundtruth_L2b, prediction_L2b)
_assert_same_distinct_values(groundtruth_L1, prediction_L1)
# +
#### Save ground truth, optical_predic, opticalsar_predic as lists, for Level-1 and Level-2
# Query
query="SELECT DISTINCT level2_groundtr, level2_label \
FROM "+classifC_schema+"."+groundtruth_classif+" ORDER BY level2_groundtr"
# Execute query through panda
df=pd.read_sql(query, db)
# Save ground truth classes labels - Level2
classes_L2=list(df['level2_label'])
# Query
query="SELECT DISTINCT level2b_groundtr, level2b_label \
FROM "+classifC_schema+"."+groundtruth_classif+" ORDER BY level2b_groundtr"
# Execute query through panda
df=pd.read_sql(query, db)
# Save ground truth classes labels - Level2
classes_L2b=list(df['level2b_label'])
# Query
query="SELECT DISTINCT level1_groundtr, level1_label \
FROM "+classifC_schema+"."+groundtruth_classif+" ORDER BY level1_groundtr"
# Execute query through panda
df=pd.read_sql(query, db)
# Save ground truth classes labels - Level2
classes_L1=list(df['level1_label'])
# -
print "Level 2 classes:\n\n"+'\n'.join(classes_L2)
print "Level 2 classes:\n\n"+'\n'.join(classes_L2b)
print "Level 2 classes:\n\n"+'\n'.join(classes_L1)
# <center> <font size=4> <h2>Confusion matrix</h2> </font> </center>
# The classification performance evaluation is made mainly using [Scikit-learn](http://scikit-learn.org/stable/modules/model_evaluation.html).
#
# The trick to plot the confusion matrix was found [here](http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py).
## Import libraries
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
import numpy as np
# %matplotlib inline
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Render a confusion matrix as a matplotlib image with per-cell labels.

    Set `normalize=True` to display row-normalized proportions (rounded to
    two decimals) instead of raw counts.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, ha='right', rotation=45)
    plt.yticks(ticks, classes)
    if normalize:
        # Divide each row by its total so cells show class-wise proportions
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    # Annotate every cell; switch to white text on dark (high-value) cells
    cutoff = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        label = round(cm[row, col], 2) if normalize else cm[row, col]
        plt.text(col, row, label,
                 horizontalalignment="center",
                 color="white" if cm[row, col] > cutoff else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# ### Level 2
# Compute confusion matrix
cnf_matrix_L2=confusion_matrix(groundtruth_L2, prediction_L2)
# +
## Set the path to the output
output_rowconfmat=os.path.join(outputfolder,"rowconfusionmatrix_L2.txt")
## Export the raw confusion matrix
## (fix: 'numpy.savetxt' raised NameError - the module is imported as 'np';
## also np.int is just a deprecated alias of the builtin int)
np.savetxt(output_rowconfmat, cnf_matrix_L2.astype(int), fmt='%d', delimiter=",")
# +
# Plot non-normalized confusion matrix
fig_cm=plt.figure(figsize=(15,10))
plot_confusion_matrix(cnf_matrix_L2, classes=classes_L2,
                      title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
fig_cm_normal=plt.figure(figsize=(15,10))
plot_confusion_matrix(cnf_matrix_L2, classes=classes_L2, normalize=True,
                      title='Normalized confusion matrix')
plt.show()
# -
## Set the paths to the outputs (one PDF with both figures, two PNGs)
output_confmat_pdf=os.path.join(outputfolder,"confusionmatrix_L2.pdf")
output_confmatA_png=os.path.join(outputfolder,"confusionmatrixA_L2.png")
output_confmatB_png=os.path.join(outputfolder,"confusionmatrixB_L2.png")
# +
# Export in PDF
from matplotlib.backends.backend_pdf import PdfPages
pp=PdfPages(output_confmat_pdf)
pp.savefig(fig_cm)
pp.savefig(fig_cm_normal)
pp.close()
# Export in PNG
fig_cm.savefig(output_confmatA_png, format='png', dpi=300)
fig_cm_normal.savefig(output_confmatB_png, format='png', dpi=300)
# -
# ### Level 2b
# Compute confusion matrix
cnf_matrix_L2b=confusion_matrix(groundtruth_L2b, prediction_L2b)
# +
## Set the path to the output
output_rowconfmat=os.path.join(outputfolder,"rowconfusionmatrix_L2b.txt")
## Export the raw confusion matrix
## (fix: 'numpy.savetxt' raised NameError - the module is imported as 'np';
## also np.int is just a deprecated alias of the builtin int)
np.savetxt(output_rowconfmat, cnf_matrix_L2b.astype(int), fmt='%d', delimiter=",")
# +
# Plot non-normalized confusion matrix
fig_cm=plt.figure(figsize=(15,10))
plot_confusion_matrix(cnf_matrix_L2b, classes=classes_L2b,
                      title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
fig_cm_normal=plt.figure(figsize=(15,10))
plot_confusion_matrix(cnf_matrix_L2b, classes=classes_L2b, normalize=True,
                      title='Normalized confusion matrix')
plt.show()
# -
## Set the paths to the outputs (one PDF with both figures, two PNGs)
output_confmat_pdf=os.path.join(outputfolder,"confusionmatrix_L2b.pdf")
output_confmatA_png=os.path.join(outputfolder,"confusionmatrixA_L2b.png")
output_confmatB_png=os.path.join(outputfolder,"confusionmatrixB_L2b.png")
# +
# Export in PDF
from matplotlib.backends.backend_pdf import PdfPages
pp=PdfPages(output_confmat_pdf)
pp.savefig(fig_cm)
pp.savefig(fig_cm_normal)
pp.close()
# Export in PNG
fig_cm.savefig(output_confmatA_png, format='png', dpi=300)
fig_cm_normal.savefig(output_confmatB_png, format='png', dpi=300)
# -
# ### Level 1
# Compute confusion matrix
cnf_matrix_L1=confusion_matrix(groundtruth_L1, prediction_L1)
# +
## Set the path to the output
output_rowconfmat=os.path.join(outputfolder,"rowconfusionmatrix_L1.txt")
## Export the raw confusion matrix
## (fix: 'numpy.savetxt' raised NameError - the module is imported as 'np';
## also np.int is just a deprecated alias of the builtin int)
np.savetxt(output_rowconfmat, cnf_matrix_L1.astype(int), fmt='%d', delimiter=",")
# +
# Plot non-normalized confusion matrix
fig_cm=plt.figure(figsize=(10,7))
plot_confusion_matrix(cnf_matrix_L1, classes=classes_L1,
                      title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
fig_cm_normal=plt.figure(figsize=(10,7))
plot_confusion_matrix(cnf_matrix_L1, classes=classes_L1, normalize=True,
                      title='Normalized confusion matrix')
plt.show()
# -
## Set the paths to the outputs (one PDF with both figures, two PNGs)
output_confmat_pdf=os.path.join(outputfolder,"confusionmatrix_L1.pdf")
output_confmatA_png=os.path.join(outputfolder,"confusionmatrixA_L1.png")
output_confmatB_png=os.path.join(outputfolder,"confusionmatrixB_L1.png")
# +
# Export in PDF
from matplotlib.backends.backend_pdf import PdfPages
pp=PdfPages(output_confmat_pdf)
pp.savefig(fig_cm)
pp.savefig(fig_cm_normal)
pp.close()
# Export in PNG
fig_cm.savefig(output_confmatA_png, format='png', dpi=300)
fig_cm_normal.savefig(output_confmatB_png, format='png', dpi=300)
# -
# <center> <font size=4> <h2>Accuracy measures</h2> </font> </center>
# ## Classification report
# TODO: Find a way to compute commission and omission errors / User's and Producer's accuracies
# +
## Import libraries
from sklearn.metrics import accuracy_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score
# -
# #### Level 2
## Set the path to the output report file
output=os.path.join(outputfolder,"RF_classif_repport_L2.txt")
# +
# Define dataset to take into account
y_true=groundtruth_L2
y_pred=prediction_L2
class_label=classes_L2
# Compute overall accuracy (fraction of correctly predicted samples)
accuracy=accuracy_score(y_true, y_pred, normalize=True)
# Compute Cohen's Kappa (agreement corrected for chance)
cohen_kappa=cohen_kappa_score(y_true, y_pred)
# Compute f1-score weighted by class support (robust to class imbalance)
f_1=f1_score(y_true, y_pred, average='weighted')
# Compute 'classification report' (per-class precision / recall / f1)
classif_report=classification_report(y_true, y_pred, target_names=class_label)
# Save as .txt file -- 'with' guarantees the handle is closed even on error
# (the original opened/closed manually and leaked the handle on failure);
# also fixes the 'evalutation' typo in the written report text.
with open(output, 'w') as f:
    f.write("Performance evaluation: \n")
    f.write("Overall Accuracy: "+str(accuracy)+"\n")
    f.write("Cohen's Kappa: "+str(cohen_kappa)+"\n")
    f.write("F1-score: "+str(f_1)+"\n")
    f.write("\n\n")
    f.write("Classification report: \n "+classif_report)
# Show file content
with open(output,'r') as f:
    print (f.read())
# -
# #### Level 2b
## Set the path to the output report file
output=os.path.join(outputfolder,"RF_classif_repport_L2b.txt")
# +
# Define dataset to take into account
y_true=groundtruth_L2b
y_pred=prediction_L2b
class_label=classes_L2b
# Compute overall accuracy (fraction of correctly predicted samples)
accuracy=accuracy_score(y_true, y_pred, normalize=True)
# Compute Cohen's Kappa (agreement corrected for chance)
cohen_kappa=cohen_kappa_score(y_true, y_pred)
# Compute f1-score weighted by class support (robust to class imbalance)
f_1=f1_score(y_true, y_pred, average='weighted')
# Compute 'classification report' (per-class precision / recall / f1)
classif_report=classification_report(y_true, y_pred, target_names=class_label)
# Save as .txt file -- 'with' guarantees the handle is closed even on error
# (the original opened/closed manually and leaked the handle on failure);
# also fixes the 'evalutation' typo in the written report text.
with open(output, 'w') as f:
    f.write("Performance evaluation: \n")
    f.write("Overall Accuracy: "+str(accuracy)+"\n")
    f.write("Cohen's Kappa: "+str(cohen_kappa)+"\n")
    f.write("F1-score: "+str(f_1)+"\n")
    f.write("\n\n")
    f.write("Classification report: \n "+classif_report)
# Show file content
with open(output,'r') as f:
    print (f.read())
# -
# #### Level 1
## Set the path to the output report file
output=os.path.join(outputfolder,"RF_classif_repport_L1.txt")
# +
# Define dataset to take into account
y_true=groundtruth_L1
y_pred=prediction_L1
class_label=classes_L1
# Compute overall accuracy (fraction of correctly predicted samples)
accuracy=accuracy_score(y_true, y_pred, normalize=True)
# Compute Cohen's Kappa (agreement corrected for chance)
cohen_kappa=cohen_kappa_score(y_true, y_pred)
# Compute f1-score weighted by class support (robust to class imbalance)
f_1=f1_score(y_true, y_pred, average='weighted')
# Compute 'classification report' (per-class precision / recall / f1)
classif_report=classification_report(y_true, y_pred, target_names=class_label)
# Save as .txt file -- 'with' guarantees the handle is closed even on error
# (the original opened/closed manually and leaked the handle on failure);
# also fixes the 'evalutation' typo in the written report text.
with open(output, 'w') as f:
    f.write("Performance evaluation: \n")
    f.write("Overall Accuracy: "+str(accuracy)+"\n")
    f.write("Cohen's Kappa: "+str(cohen_kappa)+"\n")
    f.write("F1-score: "+str(f_1)+"\n")
    f.write("\n\n")
    f.write("Classification report: \n "+classif_report)
# Show file content
with open(output,'r') as f:
    print (f.read())
# -
# <center> <font size=3> <h2>Reclass segmentation rasters with classification results</h2> </font> </center>
## Print current mapset (Python 2 print statement -- this file is Python 2)
print "You are currently working in the <"+str(mapsetname)+"> mapset"
# #### Change the outputfolder
# +
## Folder in which save processing time output
outputfolder=os.path.join(resultfolder,"classified_rasters")
## Create the folder if does not exists
if not os.path.exists(outputfolder):
    os.makedirs(outputfolder)
    print "Folder '"+outputfolder+"' created"
# -
# ## Reclass segmentation raster
# Here after, the segment raster will be reclassed (using r.reclass) with the classification results.
# Connect to an existing database
# NOTE(review): <PASSWORD> is a redacted placeholder and is NOT valid Python --
# substitute the real credential (ideally read from an environment variable,
# never hard-coded) before running this cell.
db=pg.connect(database=dbname, user=dbuser, password=<PASSWORD>, host=host)
# Open a cursor to perform database operations
cur=db.cursor()
# +
print "Start creating classified rasters"
classif_suffix=('l2','l2b','l1')
for x in classif_suffix:
## Set name for raster with the prediction at level 2
prediction_raster='rf_classif_'+x
#### Save 'seg_id' and the corresponding prediction for in a list
query="SELECT seg_id, rf_pred_"+x+" FROM "+classifC_schema+"."+classif
df=pd.read_sql(query, db)
# Save seg_id of objects in a list
listsegid=list(df['seg_id'])
# Save predictions at level_2 in a list
listpredict=list(df['rf_pred_'+x])
if len(listsegid) <> len(listpredict):
sys.exit('WARNING: lenght of lists containing segid and prediction are not the same. Please check before continue')
else:
nrows=len(listsegid)
print str(nrows)+" segment are going to be reclassified"
##### Reclassify raster
## Create a temporary 'reclass_rule.csv' file
temprulecsv=os.path.join(tempfile.gettempdir(),"reclass_rules.csv") # Define the csv output file name
f = open(temprulecsv, 'w')
# Write rules in the csv file
for i in range(0,nrows-1):
f.write(str(listsegid[i]))
f.write("=")
f.write(str(listpredict[i]))
f.write("\n")
f.write("*")
f.write("=")
f.write("NULL")
f.close()
## Reclass segments raster layer to keep only outliers segments, using the reclas_rule.csv file (create temporary raster)
grass.run_command('g.region', overwrite=True, raster="segments"+"@"+user["classificationA_mapsetname"])
print ("Working on <"+prediction_raster+">: Reclassify original segment layer")
grass.run_command('r.reclass', overwrite=True, input="segments"+"@"+user["classificationA_mapsetname"],
output=prediction_raster, rules=temprulecsv)
os.remove(temprulecsv)
#### Create 'real raster'
#### Make a copy of the classified maps of faster display in GRASS GIS
## Saving current time for processing time management
print ("Working on <"+prediction_raster+">: Make hard copy of the reclassified layer")
## Set computational region
grass.run_command('g.region', overwrite=True, raster="segments"+"@"+user["classificationA_mapsetname"])
## Create the same raster with r.mapcalc
formula=prediction_raster+"_temp="+prediction_raster
grass.mapcalc(formula, overwrite=True)
## Rename the new raster with the name of the original one (will be overwrited)
print ("Working on <"+prediction_raster+">: Renaming layer")
renameformula=prediction_raster+"_temp,"+prediction_raster
grass.run_command('g.rename', overwrite=True, raster=renameformula)
# -
# ### Change color table and export in .tif
# +
# RGB color rules for each classification level, one "class_value R:G:B" rule
# per line. The rule text is identical to the original per-level definitions.
color_rules_by_level={
    'l2': ("11 227:26:28", "12 255:141:1", "13 94:221:227", "14 102:102:102",
           "21 246:194:142", "22 211:217:173", "31 0:128:0", "32 189:255:185",
           "33 88:190:141", "34 29:220:0", "41 30:30:192", "51 0:0:0"),
    'l2b': ("11 227:26:28", "12 255:141:1", "13 94:221:227", "14 102:102:102",
            "20 211:217:173", "30 29:220:0", "31 0:128:0", "41 30:30:192",
            "51 0:0:0"),
    'l1': ("1 227:26:28", "2 211:217:173", "3 29:220:0", "4 30:30:192",
           "5 0:0:0"),
}
# Write one 'color_table_<level>.txt' file per level into the output folder
# (each rule line is newline-terminated, including the last one).
for level in ('l2','l2b','l1'):
    color_table=os.path.join(outputfolder,"color_table_"+level+".txt")
    f = open(color_table, 'w')
    f.write("\n".join(color_rules_by_level[level])+"\n")
    f.close()
# -
# Create a list with classified raster
# (grass.list_strings returns names in 'name@mapset' form -- see the split below)
classifiedraster_list=grass.list_strings("rast", pattern="rf_classif_*", flag='r', mapset=classifC_schema)
print classifiedraster_list
for prediction_raster in classifiedraster_list:
    ## Apply new color the existing GRASS colortable (for faster display in GRASS map display)
    # Derive the level suffix ('l2', 'l2b' or 'l1') from the raster name to
    # pick the matching color table written earlier in the output folder.
    suffix=prediction_raster.split("@")[0].split("_")[-1]
    color_table_file=os.path.join(outputfolder,"color_table_"+suffix+".txt")
    grass.run_command('r.colors', map=prediction_raster, rules=color_table_file)
# #### Export of classification raster
# +
## Saving current time for processing time management
print ("Export classified raster maps on " + time.ctime())
begintime_exportraster=time.time()
# Export every classified raster as a GeoTIFF in the output folder
for prediction_raster in classifiedraster_list:
    # NOTE(review): prediction_raster still contains the '@mapset' suffix here,
    # so the .tif filename will too -- confirm this is intended.
    outputname=os.path.join(outputfolder,prediction_raster+".tif")
    grass.run_command('g.region', overwrite=True, raster=prediction_raster)
    grass.run_command('r.out.gdal', overwrite=True, input=prediction_raster, output=outputname, format='GTiff')
## Compute processing time and print it
print_processing_time(begintime_exportraster, "Classified raster maps exported in ")
# -
# <left> <font size=4> <b> End of classification part </b> </font> </left>
# Final timing report for the whole processing chain
print("The script ends at "+ time.ctime())
print_processing_time(begintime_segmentation_full, "Entire process has been achieved in ")
# **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**
| Classification/.ipynb_checkpoints/Classif_C_SAR-checkpoint.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++17
// language: C++17
// name: xcpp17
// ---
// # Iteration über eine Liste
// <div class="prereq">
// <h3>Was man wissen sollte</h3>
// <div>
// Um über eine Liste iterieren zu können, benötigen wir eine
// <a class="prereq" href="/user-redirect/algoviz/lessons/06_ADT/01_Liste.ipynb">Liste</a>.
// </div>
// </div>
// <div class="slideshow 06_ADT/02_Iteration/slides.json">Die Iteration über eine Liste</div>
// Für unsere Experimente benötigen wir eine Liste.
// Headers for console output and the doubly linked std::list.
#include <iostream>
#include <list>
using namespace std;
// +
// Build a list containing the values 0..19 (appended at the back).
list<int> liste = list<int>();
for ( int i = 0; i < 20; i++ ) {
    liste.push_back(i);
}
// -
// Die Iteration erfolgt im Grunde genauso, wie wir das bereits für Vektoren gesehen haben. Und statt der in der Slideshow vorgestellten Operationen verwendet C++ die Inkrement- und Dekrement-Operatoren `++` und `--`. Insgesamt ergeben sich die folgenden Ersetzungen:
//
// - `it.toFirst()` wird zu `it = list.begin()`
// - `it.toLast()` wird zu `it = list.end()`
// - `it.toNext()` entspricht `it++`
// - `it.toPrev()` entspricht `it--`
// - `it.hasNext() == false` entspricht in etwa `it == list.end()`
// - `it.hasPrev() == false` entspricht in etwa `it == list.begin()`
// - `it.getData()`entspricht `*it` (Dereferenzierung)
//
// Damit können wir die Liste jetzt iterieren.
// +
list<int>::iterator it = liste.begin(); // Der Typ kann auch durch auto ersetzt werden
while ( it != liste.end() ) {
cout << *it << " ";
it++;
}
// -
// Oder auch rückwärts:
// +
list<int>::iterator it = liste.end(); // Der Typ kann auch durch auto ersetzt werden
while ( it != liste.begin() ) {
cout << *it << " ";
it--;
}
// -
// Wie bei Vektoren kann man die Iteration in C++ auch mit einer For-Schleife durchführen.
// Range-based for loop: the most compact way to visit every element.
for ( int value : liste ) {
    cout << value << " ";
}
// ## Ein Beispiel - Die Maximumsuche
//
// Wir können die Iteration genauso nutzen, wie den Durchlauf von Arrays. Z.B. um das Maximum in einer
// Liste von zufälligen Zahlen zu finden.
// +
#include <iostream>
#include <list>
using namespace std;
list<int> liste = list<int>();
srand(time(NULL));
// Wir befüllen die Liste mit zufälligen Zahlen.
for ( int i = 0 ; i < 100 ; i++ ) {
liste.push_back(rand());
}
// Hier beginnt die Maximumsuche
// Hole das erste Element
int max = liste.front();
for ( int i : liste ) {
if ( i > max ) {
max = i;
}
}
// Und das Ergebnis ...
cout << max << endl;
// -
// ## [Alles hat ein Ende ...](https://www.youtube.com/watch?v=a4JSE32fuOc)
//
// In diesem Abschnitt werfen wir einen Blick auf die konkreten Iteratoren, die die Klasse `list` in C++ zur Verfügung stellt. Speziell geht es um die Enden der Liste, die einige Besonderheiten aufweisen. Und diese technischen Details müssen unter Umständen in Programmen berücksichtigt werden.
//
// Was passiert eigentlich, wenn man den Iterator, den beim Aufruf von `liste.end()` erhält? Probieren wir es aus. Erstmal bereiten wir eine Liste vor.
// +
#include <iostream>
#include <list>
using namespace std;
// Fresh list for the end()/begin() experiments below.
list<int> liste = list<int>();
srand(time(NULL));  // seeding is harmless but unused here: the loop inserts 0..9, not random numbers
// Fill the list with the values 0..9.
// (The original comment claimed random numbers, but push_back(i) inserts the
// loop index itself -- which the discussion below relies on.)
for ( int i = 0 ; i < 10 ; i++ ) {
    liste.push_back(i);
}
// -
// Jetzt holen wir uns das "Ende".
// Grab the past-the-end iterator; it refers to the list's sentinel, not a real element.
auto it = liste.end();
// Als erstes schauen wir nach, auf welchen Wert der Iterator verweist.
*it  // undefined behavior -- shown here deliberately as a demonstration
// In der Regel sollte jetzt ein Wert erscheinen, der **nicht** zwischen 0 und 9 liegt (durch Zufall kann es zwar einer der Werte sein, aber das wäre wirklich ein ziemlicher Zufall). Tatsächlich zeigt der Iterator zur Zeit auf kein gültiges Element. Daher ist nicht garantiert, welchen Wert der Zugriff ergibt.
//
// Aber wenn wir einen Schritt zurück gehen, dann sollten wir beim letzten definierten Element, also der 9, ankommen.
// +
it--;
*it  // now the last real element (9)
// -
// Das passt. Was geschieht jetzt aber, wenn wir wieder einen Schritt vorgehen.
it++;
*it  // back at the sentinel: the value is unspecified again
// Wie es scheint sind wir wieder beim **undefinierten** Ende. Und was passiert, wenn wir noch einen Schritt vorgehen?
it++;
*it  // on this implementation the iterator wraps around to the first element (still UB per the standard)
// Wir scheinen wieder vorne angekommen zu sein. Machen wir das nochmal.
it++;
*it  // second element
// Die 1, wie erwartet. D.h. wenn wir `end()` erreicht haben und den Iterator weitersetzen, dann kommen wir wieder beim ersten Element an. Wir laufen also im Kreis.
//
// Probieren wir aus, ob das auch mit `front()` und rückwärts laufen funktioniert.
it = liste.begin();
it--;  // stepping back from begin() lands on the sentinel again
*it
// Der gleiche Wert, wie vorhin. Und noch einen Schritt.
it--;
*it  // wrapped around to the back of the list
// **Merke: Die Liste in C++ hat nur ein Ende!** Man erreicht es, wenn man mit einem Iterator über das letzte Element hinausläuft, oder vom ersten Element rückwärts geht. Läuft man über das Ende hinaus, kommt man am anderen Ende der Liste an.
// <div class="followup">
// <h3>Wo es weiter geht</h3>
// <div>
// Mit Hilfe der Iteratoren kann man Positionen in einer Liste beschreiben. Das ermöglicht auch
// das
// <a class="followup" href="/user-redirect/algoviz/lessons/06_ADT/03_EinfuegenLoeschen.ipynb">Einfügen und Löschen</a> von Elementen.
// </div>
// </div>
| lessons/06_ADT/02_Iteration.ipynb |