code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# https://stackoverflow.com/questions/3241929/python-find-dominant-most-common-color-in-an-image
from __future__ import print_function
import binascii
import struct
from PIL import Image
import numpy as np
import scipy
import scipy.misc
import scipy.cluster
NUM_CLUSTERS = 5
image_filename = "super_condesed_always_recycle_art/composites/composite_393.jpg"
print('reading image')
im = Image.open(image_filename)
im = im.resize((150, 150)) # optional, to reduce time
ar = np.asarray(im)
shape = ar.shape
ar = ar.reshape(scipy.product(shape[:2]), shape[2]).astype(float)
print('finding clusters')
codes, dist = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
print('cluster centres:\n', codes)
vecs, dist = scipy.cluster.vq.vq(ar, codes) # assign codes
counts, bins = scipy.histogram(vecs, len(codes)) # count occurrences
index_max = scipy.argmax(counts) # find most frequent
peak = codes[index_max]
colour = binascii.hexlify(bytearray(int(c) for c in peak)).decode('ascii')
print('most frequent is %s (#%s)' % (peak, colour))
# -
# bonus: save image using only the N most common colours
import imageio
c = ar.copy()
for i, code in enumerate(codes):
    # FIX: scipy.r_ / scipy.where were removed from SciPy; a plain boolean
    # mask assigns the cluster centre to every pixel in cluster i.
    c[vecs == i, :] = code
# split("/")[2] == the image basename for this fixed 3-part path.
imageio.imwrite(image_filename.split("/")[2] + '_clusters.png',
                c.reshape(*shape).astype(np.uint8))
print('saved clustered image')
# ! pip install colorthief
from colorthief import ColorThief

# Sample the image with ColorThief: one dominant colour plus a palette.
# quality=1 is the slowest/most accurate sampling setting.
color_thief = ColorThief(image_filename)
dominant_color = color_thief.get_color(quality=1)
print(dominant_color)
print(color_thief.get_color(quality=1))
print(color_thief.get_palette(quality=1))
# +
# Render the extracted palette as a single row of colour swatches.
# https://stackoverflow.com/questions/51728957/create-a-color-palette-image-from-a-list-of-rgb-color
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

# palette = [(82, 129, 169), (218, 223, 224), (147, 172, 193), (168, 197, 215), (117, 170, 212)]
palette = color_thief.get_palette(quality=1)
# Shape (1, n_colors, 3) so imshow draws one pixel-row of swatches.
palette = np.array(palette)[np.newaxis, :, :]

plt.imshow(palette);
plt.axis('off');
# BUG FIX: both savefig calls must come *before* plt.show() -- with the
# inline backend the figure is closed after show(), so the original
# trailing savefig wrote out a blank canvas.
plt.savefig(image_filename.split("/")[2] + "_color_palette_test.jpg", pad_inches=0)
plt.savefig(image_filename.split("/")[2] + "_color_palette.jpg")
plt.show();
# -
import matplotlib.pyplot as plt

# Plot three coincident points with a diamond marker in a custom RGB colour
# (matplotlib expects channel values in [0, 1]).
X = [1, 1, 1]
Y = [1, 1, 1]
crimson = (188 / 255.0, 32 / 255.0, 75 / 255.0)
plt.plot(X, Y, marker='d', color=crimson)
# ! pip install webcolors
import webcolors

webcolors.rgb_to_name((0, 0, 0))      # exact match -> 'black'
webcolors.name_to_hex('deepskyblue')

dominant_color = color_thief.get_color(quality=1)
dominant_color
# BUG FIX: rgb_to_name() raises ValueError for any RGB triple without an
# exact CSS name -- which is the common case for a sampled dominant colour.
try:
    print(webcolors.rgb_to_name(dominant_color))
except ValueError:
    print('no exact CSS colour name for', dominant_color)

# Legacy HTML5 parsing never fails: any string maps to *some* colour.
webcolors.html5_parse_legacy_color('chucknorris')
#HTML5SimpleColor(red=192, blue=0, green=0)
|
Finding_Dominant_Image_Color_Experiments.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.10.2 64-bit
# language: python
# name: python3
# ---
# # Install Python
# This tutorial will teach you on how to install python3.
#
# ## Checking to see if Python is already installed
#
# If you're on macOS or Linux, you probably already have Python 3 installed, but just to be sure, open the terminal and type:
#
# ```
# python3 --version
# ```
#
# this should give you the output of something like:
#
# ```
# Python 3.9.10
# ```
#
# If that doesn't happen for you, it means you don't have it installed.
#
# ## Installing python
# OK, so you don't have Python installed — no worries, this section will tell you how to install it.
#
# first go to the pythons official website, click [HERE](https://www.python.org/) to go to the official website of python.
#
# Then, you need to go to the downloads section and install python, it should automatically tell you which os you have and automatically pick the right version of python(by version I mean the different aspects of your computer like what cpu you have and what is your operating system).
#
# Then just run the file downloaded, and there you go! you have python installed.
# If you want to confirm, go back to the first sub-section above to check whether Python is installed on your computer.
|
src/tutorial/1. Installation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# +
# Load the 2017 enrollment/disenrollment PUF and normalise its key columns.
data_path = "../data/data_2017/"
# '*' marks suppressed cell counts in the PUF -> read as NaN, then fill '0'.
enroll_df = pd.read_csv(data_path + '2017-Enrollment-Disenrollment-PUF.csv',na_values=['*'],encoding='cp1252')
enroll_df = enroll_df[['HIOS ID','Policy County FIPS Code','Ever Enrolled Count']].fillna('0')
#clean the columns
# FIPS codes arrive as floats (e.g. 1001.0) -> canonicalise to integer strings.
enroll_df['Policy County FIPS Code'] = enroll_df['Policy County FIPS Code'].apply( lambda x : str(int(x)) )
# Counts may contain thousands separators (e.g. "1,234") -> strip and cast to float.
enroll_df['Ever Enrolled Count'] = enroll_df['Ever Enrolled Count'].apply(lambda x: float(str(x).replace(',','')) )
# -
# # enrollment with issuer characteristics
# +
# Build a per-(issuer, county) table of plan characteristics from the
# service-area PUF joined with the processed issuer-characteristics file.
service_df = pd.read_csv(data_path + 'Service_Area_PUF_2017.csv', encoding='cp1252')
service_df = service_df[['County','ServiceAreaId','IssuerId']]
service_df = service_df.drop_duplicates()
issuer_df = pd.read_csv('../data/processed_data/issuer_characteristics_2017.csv')
#fix fips
# County FIPS arrives as float with NaNs -> fill then canonicalise to int strings.
service_df['County'] = service_df['County'].fillna(0)
service_df['County'] = service_df['County'].apply( lambda x : str(int(x)) )
# +
issuer_service = issuer_df.merge(service_df, how='inner', on=['ServiceAreaId', 'IssuerId'])
issuer_service = issuer_service[ ~issuer_service['County'].isna() ]
#reorder the columns, drop service area
col_order= ['IssuerId','County'] + list(issuer_service.columns)[2:-1]
issuer_service = issuer_service[ col_order ]
#group by county
#pre_cols = list(issuer_service.columns)
# Collapse in two stages: median within each plan (StandardComponentId),
# then mean across plans per issuer/county; the plan count is kept separately.
issuer_service = issuer_service.groupby(['IssuerId','County','StandardComponentId','StateCode'],as_index=False).median()
issuer_service_count = issuer_service.groupby(['IssuerId','County','StateCode'],as_index=False)['StandardComponentId'].count()
issuer_service = issuer_service.groupby(['IssuerId','County','StateCode'],as_index=False).mean()
# NOTE(review): positional assignment -- relies on both groupbys over the same
# keys producing identically ordered rows, which holds here.
issuer_service['Plan Counts'] = issuer_service_count['StandardComponentId']
issuer_service = issuer_service.rename(columns={'StateCode':'State'})
#post_cols = list(issuer_service.columns)
print(issuer_service.shape)
#print(len(pre_cols),len(post_cols))
# -
# Left-join issuer characteristics onto every enrollment row (county x issuer).
enroll_issuer = enroll_df.merge(issuer_service, how='left', right_on=['County', 'IssuerId'],
                                left_on=['Policy County FIPS Code','HIOS ID'])
# enroll_issuer.to_csv('test3.csv')
enroll_issuer['County'] = enroll_issuer['Policy County FIPS Code']
enroll_issuer['IssuerId'] = enroll_issuer['HIOS ID']
enroll_issuer = enroll_issuer.fillna(0)
print(enroll_issuer.shape)
# # Merge with county characteristics
# +
county = pd.read_csv('../data/processed_data/county_characteristics_2017.csv')
county['County'] = county['County'].apply(lambda x : str(x))
result = enroll_issuer.merge(county, how='left', left_on='County', right_on='County')
result =result[~result['County'].isna()]
print(result.shape)
# +
# Everything except the join keys is treated as a numeric feature column.
all_cols = list(result.columns)
keys = ['HIOS ID','Policy County FIPS Code','IssuerId','County','State']
for key in keys:
    all_cols.remove(key)
#delete bad columns/clean up census data
all_cols2 = []
for col in all_cols:
    #fix cols from census data
    # NOTE(review): this replaces '-' and 'N' *anywhere* in the string with '0';
    # assumes they only occur as whole-cell missing-value markers -- negative
    # numbers or values like "1-2" would be silently mangled. TODO confirm.
    result[col] = result[col].apply(lambda x : float(str(x).replace('-','0').replace('N','0')))
    #clean up cols with no variance
    if result[col].std() > 0:
        all_cols2.append(col)
result[keys + all_cols2].to_csv('../data/processed_data/merged_characteristics_2017.csv',index=False)
# -
# # Summary stats
result['Ever Enrolled Count'].sum()
result['Policy County FIPS Code'].nunique()
result['IssuerId'].nunique()
col_names = pd.Series(result.columns)
#col_names.to_csv('col_names.csv')
# Per-column null counts, as a quick completeness check.
result1 = pd.DataFrame(result.isnull().sum())
result1
# Align the 2017 file's columns with the multi-year merged file: any column
# present there but missing here is added as all-zero (except the
# non-numeric 'County Name').
merged_characteristics_2017 = pd.read_csv('../data/processed_data/merged_characteristics_2017.csv')
list2 = list(merged_characteristics_2017.columns)
merged_characteristics = pd.read_csv('../data/processed_data/merged_characteristics.csv')
list1 = list(merged_characteristics.columns)
list_missing = list(set(list1).difference(list2))
print("Missing values in first list:", list_missing)
for col in list_missing:
    if col!= 'County Name':
        merged_characteristics_2017[col] = 0
merged_characteristics_2017
merged_characteristics_2017.to_csv('../data/processed_data/merged_characteristics_2017.csv',index=False)
|
preprocess_2017/merge_characteristics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:metis] *
# language: python
# name: conda-env-metis-py
# ---
# ## Data Sources:
#
# [ENGIE’s first open data windfarm](https://opendata-renewables.engie.com/)
#
# [La Haute Borne Data (2013-2016)](https://opendata-renewables.engie.com/explore/dataset/d543716b-368d-4c53-8fb1-55addbe8d3ad/information)
#
# [La Haute Borne Data (2017-2020)](https://opendata-renewables.engie.com/explore/dataset/01c55756-5cd6-4f60-9f63-2d771bb25a1a/table)
#
# - [Data Descriptions](https://opendata-renewables.engie.com/explore/dataset/39490fd2-04a2-4622-9042-ce4dd34c2a58/information)
# ## Anomaly Detection:
#
# [Wondering how to build an anomaly detection model?](https://towardsdatascience.com/wondering-how-to-build-an-anomaly-detection-model-87d28e50309)
#
# [Best clustering algorithms for anomaly detection](https://towardsdatascience.com/best-clustering-algorithms-for-anomaly-detection-d5b7412537c8)
#
# [An Awesome Tutorial to Learn Outlier Detection in Python using PyOD Library](https://www.analyticsvidhya.com/blog/2019/02/outlier-detection-python-pyod/)
#
# [Anomaly Detection for Dummies](https://towardsdatascience.com/anomaly-detection-for-dummies-15f148e559c1)
#
# [Anomaly Detection : Isolation Forest with Statistical Rules](https://towardsdatascience.com/isolation-forest-with-statistical-rules-4dd27dad2da9)
#
# [Anomaly Detection Using Isolation Forest in Python](https://blog.paperspace.com/anomaly-detection-isolation-forest/)
#
# Paper on [Isolation Forest for Anomaly Detection](http://www.ncsa.illinois.edu/Conferences/LSST18/assets/pdfs/hariri_forest.pdf)
#
# Explanation of [Isolation Forest Parameters](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.IsolationForest.html)
#
# <NAME>'s 'Outlier Analysis' book: [first chapter](http://www.charuaggarwal.net/outlierbook.pdf)
# ## Time Series:
#
# [How a Kalman filter works, in pictures](https://www.bzarg.com/p/how-a-kalman-filter-works-in-pictures/)
#
# [Kalman Filters: A step by step implementation guide in python](https://towardsdatascience.com/kalman-filters-a-step-by-step-implementation-guide-in-python-91e7e123b968)
#
# [Time Series Analysis & Climate Change](https://towardsdatascience.com/time-series-analysis-and-climate-change-7bb4371021e)
#
# [Analysis of Time Series Data on Wind Turbine Loads](http://www.ce.utexas.edu/prof/manuel/papers/trexreport_aaronsterns.pdf)
#
# [Holt Winters](https://www.statsmodels.org/stable/examples/notebooks/generated/exponential_smoothing.html)
#
# [How to Build Exponential Smoothing Models Using Python: Simple Exponential Smoothing, Holt, and Holt-Winters](https://medium.com/datadriveninvestor/how-to-build-exponential-smoothing-models-using-python-simple-exponential-smoothing-holt-and-da371189e1a1)
#
# [Stack Overflow](https://stackoverflow.com/questions/50785479/holt-winters-time-series-forecasting-with-statsmodels)
#
# [statsmodels.tsa.holtwinters.ExponentialSmoothing](https://www.statsmodels.org/dev/generated/statsmodels.tsa.holtwinters.ExponentialSmoothing.html)
# ## Wind Turbines:
#
# [Turbine Size](https://oneenergy.com/wind-knowledge/wind-turbine-information/)
#
# [Predicting Wind Turbine Blade Erosion using Machine Learning](https://scholar.smu.edu/cgi/viewcontent.cgi?article=1110&context=datasciencereview)
#
# [Guidelines for a preliminary windfarm data-driven analysis](https://medium.com/@mbonanomi/guidelines-for-a-preliminary-windfarm-data-driven-analysis-f4793f840ef2)
#
# [Welcome to windrose’s documentation!](https://windrose.readthedocs.io/en/latest/)
#
# [Weather.us](https://weather.us/model-charts/euro/europe/wind-mean-direction.html)
# ## Open license:
# - [LICENCE OUVERTE / OPEN LICENCE](https://www.etalab.gouv.fr/wp-content/uploads/2017/04/ETALAB-Licence-Ouverte-v2.0.pdf)
|
Project_5/References.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # #MakeoverMonday - How many hours of sleep do children get?
# > Visualising the recommended amount of sleep vs the actual amount.
#
# - toc: false
# - badges: true
# - comments: true
# - categories: [makeovermonday, altair, python, visualisation]
# - image: images/sleep.png
# ## How many hours of sleep do children get?
#
# Savvysleeper.com surveyed 1,000 parents on their children's sleeping pattern. They found out that on average none of the grades from Kindergarten to 12th grade meets the CDC's recommendation of 9 to 12 hours of sleep for 6- to 12-year olds and 8 to 10 hours of sleep for 13- to 18-year olds.
#
# Source: https://savvysleeper.org/costing-kids-sleep/
# hide
import pandas as pd
import altair as alt
# hide
df = pd.read_excel("Week9MM.xlsx")
df
# hide
# Convert dataset into long and thin format: one row per (Grade, Category).
df.set_index("Grade", inplace = True)
df = df.stack(level=-1)
df
# hide
# Convert series back to df
df = df.to_frame().reset_index()
df
# hide
# Change column names to strings so they can be renamed
df.columns = df.columns.astype(str)
# hide
# Rename columns: 'level_1' is the stacked column label, '0' the stacked value.
df.rename(columns={"level_1": "Category", "0": "Value"}, inplace = True)
df
# +
# Layered bar chart: bar1 (white, outlined) shows the recommended hours,
# bar2 (blue) overlays the actual average hours, one row per grade.
bar1 = (alt.Chart(df).mark_bar(fill='white', stroke='#376c94').encode(
    alt.X('Value:Q'),
    alt.Y('Grade:N',
          # Explicit sort keeps grades in school order, not alphabetical.
          sort=[
              'Kindergarten', 'First grade', 'Second grade', 'Third grade',
              'Fourth grade', 'Fifth grade', 'Sixth grade', 'Seventh grade',
              'Eighth grade', 'Ninth grade', '10th grade', '11th grade',
              '12th grade'
          ]), tooltip = ['Grade', 'Value']).transform_filter(
    alt.FieldEqualPredicate(field='Category', equal='Hours Needed')))

bar2 = (alt.Chart(df).mark_bar(color='#376c94').encode(
    alt.X('Value:Q', title="Hours of sleep"),
    alt.Y('Grade:N',
          title=None,
          sort=[
              'Kindergarten', 'First grade', 'Second grade', 'Third grade',
              'Fourth grade', 'Fifth grade', 'Sixth grade', 'Seventh grade',
              'Eighth grade', 'Ninth grade', '10th grade', '11th grade',
              '12th grade'
          ]), tooltip = ['Grade', 'Value']).transform_filter(
    alt.FieldEqualPredicate(field='Category',
                            equal='Hours Averaged')))

# Compose the two layers; strip the grid and left-align y-axis labels.
(bar1 + bar2).configure_axis(grid=False).properties(
    title={
        "text": "How many hours of sleep do children get?",
        "subtitle": "Blue: hours averaged | White: hours needed"
    }).configure_view(
    strokeOpacity=0).configure_axisY( # left aligns y-axis labels
        titleAngle=0,
        titleY=-10,
        titleX=-60,
        labelPadding=90,
        labelAlign='left')
# -
|
_notebooks/2020-05-26-Hours-of-Sleep.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LIST
# Exercise the core list methods on a small sample record.
lst=["<NAME>",20,9246903888,"<EMAIL>"]
lst
# +
# Methods demonstrated: append, index, pop, insert, count.
# -
lst.append("Hyderabad")       # grow the list at the end
lst
# +
lst.index(9246903888)         # position of the phone number
lst.pop(3)                    # drop the e-mail entry
lst.insert(3,"Engineering")   # put a new value in its place
# -
lst
lst.count(20)                 # how many times 20 occurs
#
# # DICTIONARIES
# FIX: renamed `dict`/`dict1` -> `person`/`extra`; shadowing the builtin
# `dict` would break any later dict(...) call in the session.
person={"Name":"<NAME>","Age":20,"Location":"Hyderabad","PhNo":9246903888}
person
# +
# Methods demonstrated: get, keys, items, pop, update.
# +
person.get("Name")
# -
person.keys()
person.items()
person.pop("Age")
# Note the key case differs from 'PhNo', so update() *adds* a new key
# rather than replacing the existing one.
extra={"Phno":924690388}
person.update(extra)
person
# # SET
# Duplicate elements collapse on construction.
st={"Pranay",9,2,4,6,9,0,3,8,8,8,"Hyderabad"}
st
# +
# Methods demonstrated: add, difference, intersection, isdisjoint, union.
# -
st.add(20)
st
st1={9,9,4,9,4,0,3,8,8,8}
print(st.difference(st1))     # members of st not in st1
print(st.intersection(st1))   # shared members
print(st.isdisjoint(st1))     # False: the sets overlap
st.union(st1)
# # TUPLES
# Tuples are immutable; only the lookup methods index and count exist.
tup=("Pranay",20,"Hyderabad","Jubilee hills","Hyderabad")
tup
# +
# Methods demonstrated: index, count.
# -
tup.index(20)           # first position of 20
tup.count("Hyderabad")  # "Hyderabad" appears twice
# # STRINGS
# FIX: renamed `str` -> `s`; shadowing the builtin `str` would break any
# later str(...) call in the session.
s="<NAME>"
s
# +
# Methods demonstrated: capitalize, center, islower, swapcase, startswith.
# -
s.capitalize()
s.center(14)            # pad with spaces to total width 14
s.islower()
s.swapcase()
s.startswith("Pranay")
|
Assignment 1 Day-2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Developing the DataGenerator
# +
import os
import numpy as np
import pandas as pd
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.utils import Sequence
# Inspired by https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
class DataGenerator(Sequence):
    '''Generates batches of .npy arrays for a Conv2D model.

    Expects files laid out as data_dir/<subdir>/<file>.npy, where each
    sub-directory name is a class label.
    '''

    def __init__(self, data_dir, include=None, batch_size=32,
                 dim=(128,640), n_channels=1, test=False):
        '''
        Parameters
        ----------
        data_dir : str
            Path to data split (training, validation, or test)
        include : list or None
            Subdirectories to include
            if None, include all
        batch_size : int
            Number of files to return at a time
            Auto set to 1 if test=True
        dim : tuple
            Dimension of arrays to read in
        n_channels : int
            Number of color channels for image array
        test : bool
            If test split, store labels, do not shuffle indices,
            and set batch_size to 1
        '''
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.dim = dim
        self.n_channels = n_channels
        self.test = test
        self.include = include
        self.test_labels = None
        self.label_dict = self.__get_label_dict()
        self.files = self.__get_files()
        self.n_classes = len(self.label_dict)  # Number of sub dirs
        self.on_epoch_end()  # populates self.indexes
        if self.test:
            # One one-hot row per file so predictions can be compared later.
            self.test_labels = np.empty((len(self.files), self.n_classes), dtype=int)
            self.batch_size = 1

    def __len__(self):
        '''Denotes the number of batches per epoch (partial batch dropped).'''
        return int(np.floor(len(self.files) / self.batch_size))

    def __getitem__(self, index):
        '''Generate one batch of data.'''
        # Generate indexes of the batch
        idxs = self.indexes[index*self.batch_size : (index+1)*self.batch_size]
        # Find list of IDs
        file_list = self.files[idxs]
        # Generate data
        X, y = self.__data_generation(file_list)
        if self.test:
            # Record one-hot labels in file order for later evaluation.
            self.test_labels[idxs,] = y
        return X, y

    def on_epoch_end(self):
        '''Updates indexes after each epoch.'''
        self.indexes = np.arange(len(self.files))
        if not self.test:
            np.random.shuffle(self.indexes)  # Shuffles in place

    def __get_files(self):
        '''Get all "<subdir>/<file>" paths from subdirectories of data_dir.'''
        subdirs = [k for k in self.label_dict.keys()]
        all_files = []
        for subdir in subdirs:
            full_dir = os.path.join(self.data_dir, subdir)
            files = os.listdir(full_dir)
            for file in files:
                all_files.append(os.path.join(subdir, file))
        return np.array(all_files)

    def __get_label_dict(self):
        '''
        Create dict of labels from sub directories
        {Genre : int}
        '''
        subdirs = sorted(os.listdir(self.data_dir))
        # Only include specific sub dirs
        if self.include:
            subdirs = [s for s in subdirs if s in self.include]
        labels = np.arange(len(subdirs))
        return {k:v for k,v in zip(subdirs, labels)}

    def __data_generation(self, file_list):
        '''
        Generates data containing batch_size samples

        Parameters
        ----------
        file_list : list or np.array
            List of files to retrieve/process/load

        Returns
        -------
        X : (n_samples, *dim, n_channels)
        '''
        X = np.empty((self.batch_size, *self.dim, self.n_channels))
        y = np.empty((self.batch_size), dtype=int)
        for i, file in enumerate(file_list):
            npy = np.load(os.path.join(self.data_dir, file))
            # FIX: paths are built with os.path.join above, so split on
            # os.sep -- the original split('/') broke on Windows.
            target = file.split(os.sep)[0]
            label = self.label_dict[target]
            X[i,] = npy[:,:,None]  # Create extra dim for channel
            y[i,] = label
        return X, to_categorical(y, num_classes=self.n_classes, dtype='int')
# -
# Smoke-test the generator on the training split (2 genres -> 2 classes).
datagen = DataGenerator('../data/training', include=['Rock', 'Hip-Hop'], batch_size=64, dim=(128,640),
                        n_channels=1, test=False)
len(datagen)
i = datagen.__getitem__(1)
i[0].shape
# Expect (batch, height, width, channels).
i[0].shape == (64, 128, 640, 1)
i[1][:4]
datagen.label_dict
# Validation generator uses *all* sub-directories (include=None).
valid_datagen = DataGenerator('../data/validation', batch_size=64, dim=(128,640),
                              n_channels=1, test=False)
len(valid_datagen)
# ## Test DataGenerator on model
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, TimeDistributed, LSTM, Dropout, Activation
from tensorflow.keras.layers import MaxPooling2D, Flatten, Conv1D, \
Conv2D, BatchNormalization, MaxPooling1D, \
GlobalAveragePooling2D
from tensorflow.keras.layers import Lambda, ELU
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau
from tensorflow.keras import backend
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras import regularizers
# +
# Build model: small CNN with global-average pooling and a dense head.
model = Sequential()
model.add(Conv2D(32, (9, 9), input_shape = (128, 640, 1),
                 padding='same',
                 activation='relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.3))
model.add(Dense(units = 256, activation = 'relu'))
model.add(Dropout(0.3))
model.add(Dense(units = 128, activation = 'relu'))
model.add(Dropout(0.3))
model.add(Dense(units = 64, activation = 'relu'))
model.add(Dropout(0.3))
# NOTE(review): 3 output units, but the training generator above is built
# with include=['Rock', 'Hip-Hop'] (2 classes) -- confirm the intended
# class count; a mismatch will fail at fit time.
model.add(Dense(units = 3, activation = 'softmax'))

# FIX: `lr` was deprecated and later removed from Keras optimizers;
# `learning_rate` is the supported keyword.
opt = Adam(learning_rate=0.001)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
# -
model.summary()
# +
# Save the best weights by validation loss; decay the LR when it plateaus.
checkpoint_callback = ModelCheckpoint('../models/model2_with_datagen_best_val_loss.h5',
                                      monitor='val_loss', mode='min',
                                      save_best_only=True, verbose=1)
reducelr_callback = ReduceLROnPlateau(monitor='val_loss', mode='min', factor=0.8,
                                      patience=2, min_delta=0.005, verbose=1)
callbacks_list = [checkpoint_callback, reducelr_callback]
# -
# FIX: Model.fit_generator is deprecated (removed in TF 2.6+);
# Model.fit accepts Sequence generators directly.
history = model.fit(datagen, epochs=25,
                    validation_data=valid_datagen, verbose=1,
                    callbacks=callbacks_list)
# +
import matplotlib.pyplot as plt

def save_summary_plots(history, savepath=None, dpi=200):
    '''Plot (and optionally save) training/validation accuracy and loss curves.

    Parameters
    ----------
    history : keras History object (exposes a .history dict)
    savepath : str or None
        Directory to save the figures into; if None, figures are only shown.
    dpi : int
        Resolution for both figures.
    '''
    # List all data in history
    print(history.history.keys())
    # Summarize history for accuracy
    plt.figure(figsize=(4,3), dpi=dpi)
    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title('Model Accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='best')
    if savepath:
        # FIX: the original saved to f'' -- an *empty* filename; give the
        # accuracy figure an explicit name inside savepath.
        plt.savefig(os.path.join(savepath, 'accuracy.png'), dpi=dpi)
    plt.show()
    # Summarize history for loss
    plt.figure(figsize=(4,3), dpi=dpi)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='best')
    if savepath:
        # FIX: the original passed savepath itself as the filename, which
        # conflicts with using it as a directory above; save a distinct file.
        plt.savefig(os.path.join(savepath, 'loss.png'), dpi=dpi)
    plt.show()
# -
# FIX: the function defined above is save_summary_plots;
# show_summary_stats does not exist and raised NameError.
save_summary_plots(history)

test_datagen = DataGenerator('../data/test', batch_size=64, dim=(128,640),
                             n_channels=1, test=True)
# FIX: predict_generator is deprecated (removed in TF 2.6+);
# Model.predict accepts Sequence generators directly.
y_pred = model.predict(test_datagen)
y_pred = np.argmax(y_pred, axis=1)
y_pred
# test_labels was filled (one-hot) while iterating the test generator.
y_true = test_datagen.test_labels
y_true = np.argmax(y_true, axis=1)
y_true
target_names = sorted(test_datagen.label_dict.keys())
# +
from sklearn.metrics import classification_report
print(classification_report(y_true, y_pred, target_names=target_names))
# -
# FIX: evaluate_generator is deprecated; Model.evaluate accepts generators.
model.evaluate(test_datagen)
from sklearn.metrics import confusion_matrix
from matplotlib import cm
import seaborn as sns
# +
# Confusion-matrix heatmap; transposed so rows = predicted, columns = actual.
mat = confusion_matrix(y_true, y_pred)
fig, ax = plt.subplots(figsize=(10,10))
sns.heatmap(mat.T, square=True, annot=True, fmt='d',
            cbar=True, cmap=cm.Reds,
            xticklabels=target_names,
            yticklabels=target_names,
            ax=ax)
plt.xticks(rotation=45)
plt.yticks(rotation=45)
plt.xlabel('Actual', size=15)
plt.ylabel('Predicted', size=15);
# -
# ## Scrap
# NOTE(review): `filepath` is not assigned until the cell further down
# (filepath = 'data/test'); running this section top-to-bottom raises
# NameError on the next line.
subdirs = sorted(os.listdir(filepath))
labels = np.arange(len(subdirs))
{k:v for k,v in zip(subdirs, labels)}
# Hard-coded genre -> label mapping used by the scrap data generation below.
label_dict = {
    'Rock': 0,
    'Instrumental': 1,
    'Hip-Hop': 2,
    'Folk': 3,
    'International': 4,
    'Electronic': 5,
    'Experimental': 6,
    'Pop': 7
}
len(label_dict)
# +
# Create list of filenames in a directory
filepath = 'data/test'
def __get_files(filepath):
    '''Return an array of "<subdir>/<file>" paths found under filepath.'''
    subdirs = os.listdir(filepath)
    all_files = []
    for subdir in subdirs:
        full_dir = os.path.join(filepath, subdir)
        files = os.listdir(full_dir)
        for file in files:
            all_files.append(os.path.join(subdir, file))
    return np.array(all_files)
# -
all_files = __get_files(filepath)
idxs = [0,33,99]
all_files[idxs]
# +
# Infer the array shape from the first file; fixed batch of 5 for this run.
batch_size = 5
npy = np.load(os.path.join(filepath, all_files[0]))
shape = npy.shape
channels = 1
n_classes = 2
# -
def __data_generation(all_files):
    '''Load the first batch_size files into X; integer labels into y.'''
    X = np.empty((batch_size, *shape, channels))
    y = np.empty((batch_size), dtype=int)
    for i, file in enumerate(all_files[:batch_size]):
        npy = np.load(os.path.join(filepath, file))
        # NOTE(review): split('/') assumes POSIX separators; the class
        # version of this code should use os.sep instead.
        target = file.split('/')[0]
        label = label_dict[target]
        X[i,] = npy[:,:,None]
        y[i,] = label
    return X, y
    # to_categorical(y, num_classes=n_classes, dtype='int')
X, y = __data_generation(all_files)
y
to_categorical(y, num_classes=5)
X = np.empty((batch_size, *shape, channels))
all_files[0].split('/')[0]
|
nbs/DataGenerator_and_model2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Stellar Classification
#
# <img src="data/image.png"/>
#
# In this project, I'm going to train some Machine Learning models to classify stellar into star, galaxy or quasar based on their spectral characteristics, using a dataset downloaded from Kaggle Plataform provided by fedesoriano. The dataset was released by Sloan Digital Sky Survey DR17 under public domain.
#
#
# ## About the dataset
#
# "Stellar classification, in astronomy, is the classification of stars based on their spectral characteristics. The classification scheme of galaxies, quasars, and stars is one of the most fundamental in astronomy. The early cataloguing of stars and their distribution in the sky has led to the understanding that they make up our own galaxy and, following the distinction that Andromeda was a separate galaxy to our own, numerous galaxies began to be surveyed as more powerful telescopes were built. This dataset aims to classify stars, galaxies, and quasars based on their spectral characteristics."
#
# ***Identifying galaxies, quasars, and stars with machine learning: A new catalogue of classifications for 111 million SDSS sources without spectra. <NAME>, <NAME>, <NAME> and <NAME>; A&A, 639 (2020) A84; DOI: https://doi.org/10.1051/0004-6361/201936770***
#
# ## Content
#
# The data consists of **100,000 observations** of space taken by the SDSS (Sloan Digital Sky Survey). Every observation is described by **17 feature columns** and **1 class column** which identifies it to be either a **star, galaxy or quasar**.
#
# Attributes:
#
# - **obj_ID** = Object Identifier, the unique value that identifies the object in the image catalog used by the CAS
# - **alpha** = Right Ascension angle (at J2000 epoch)
# - **delta** = Declination angle (at J2000 epoch)
# - **u** = Ultraviolet filter in the photometric system
# - **g** = Green filter in the photometric system
# - **r** = Red filter in the photometric system
# - **i** = Near Infrared filter in the photometric system
# - **z** = Infrared filter in the photometric system
# - **run_ID** = Run Number used to identify the specific scan
# - **rerun_ID** = Rerun Number to specify how the image was processed
# - **cam_col** = Camera column to identify the scanline within the run
# - **field_ID** = Field number to identify each field
# - **spec_obj_ID** = Unique ID used for optical spectroscopic objects (this means that 2 different observations with the same - spec_obj_ID must share the output class)
# - **class** = object class (galaxy, star or quasar object)
# - **redshift** = redshift value based on the increase in wavelength
# - **plate** = plate ID, identifies each plate in SDSS
# - **MJD** = Modified Julian Date, used to indicate when a given piece of SDSS data was taken
# - **fiber_ID** = fiber ID that identifies the fiber that pointed the light at the focal plane in each observation
#
# ## Citations
#
# fedesoriano. (January 2022). Stellar Classification Dataset - SDSS17. Retrieved [Date Retrieved] from https://www.kaggle.com/fedesoriano/stellar-classification-dataset-sdss17.
# # Import, Analyse and Preprocess data
import pandas as pd
data = pd.read_csv('data/star_classification.csv')
data.info()
data.head()
# Drop pure-identifier columns that carry no physical signal.
train_df = data.drop(['obj_ID', 'run_ID', 'rerun_ID', 'cam_col', 'field_ID', 'spec_obj_ID', 'fiber_ID'], axis=1)
import seaborn as sns
sns.boxplot(data=train_df)
train_df.columns
# Pairwise scatter plots of the features, coloured by target class.
sns.pairplot(data=train_df[['alpha', 'delta', 'u', 'g', 'r', 'class']], hue='class', palette='plasma')
sns.pairplot(data=train_df[['i', 'z', 'redshift', 'plate', 'MJD', 'class']], hue='class', palette='coolwarm')
train_df[['alpha', 'delta', 'u', 'g', 'r', 'i', 'z', 'redshift', 'plate', 'MJD']].hist(figsize=(18, 12), color='y');
# Class-balance check.
sns.countplot(train_df['class'])
train_df['class'].value_counts()
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X = train_df.drop('class', axis=1)
# Encode the target as integers:
# 'GALAXY' : 0, 'STAR' : 1, 'QSO' : 2
y = train_df['class'].map({'GALAXY' : 0, 'STAR' : 1, 'QSO' : 2})
y[:10]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
stdScaler = StandardScaler()
# +
numeric_cols = ['alpha', 'delta', 'u', 'g', 'r', 'i', 'z', 'redshift', 'plate', 'MJD']
# Fit the scaler on the training split only, then apply to test (no leakage).
X_train = pd.DataFrame(stdScaler.fit_transform(X_train), columns=numeric_cols)
X_test = pd.DataFrame(stdScaler.transform(X_test), columns=numeric_cols)
# -
X_train.head()
sns.boxplot(data=X_train[['alpha', 'delta', 'r', 'i', 'redshift', 'plate', 'MJD']])
sns.boxplot(data=X_train[['u', 'g', 'z']])
from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay
import matplotlib.pyplot as plt
# # Decision Tree Model
from sklearn.tree import DecisionTreeClassifier, plot_tree
# min_samples_leaf regularises the tree; fixed random_state for reproducibility.
dt = DecisionTreeClassifier(min_samples_leaf=14, random_state=1)
dt.fit(X_train, y_train)
dt.score(X_test, y_test)
pred_dt = dt.predict(X_test)
print(classification_report(y_test, pred_dt))
cmdt = confusion_matrix(y_test, pred_dt)
# 'GALAXY' : 0, 'STAR' : 1, 'QSO' : 2
ConfusionMatrixDisplay(confusion_matrix=cmdt, display_labels=['GALAXY', 'STAR', 'QSO']).plot(cmap=plt.cm.Blues);
# # Random Forest Model
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(min_samples_leaf=18, n_estimators=400, random_state=1)
rf.fit(X_train, y_train)
rf.score(X_test, y_test)
pred_rf = rf.predict(X_test)
print(classification_report(y_test, pred_rf))
cmrf = confusion_matrix(y_test, pred_rf)
# 'GALAXY' : 0, 'STAR' : 1, 'QSO' : 2
ConfusionMatrixDisplay(confusion_matrix=cmrf, display_labels=['GALAXY', 'STAR', 'QSO']).plot(cmap=plt.cm.Blues);
# # Extra Tree Model
from sklearn.ensemble import ExtraTreesClassifier
ex = ExtraTreesClassifier(n_estimators=400, min_samples_leaf=18, random_state=1)
ex.fit(X_train, y_train)
ex.score(X_test, y_test)
pred_ex = ex.predict(X_test)
print(classification_report(y_test, pred_ex))
cmex = confusion_matrix(y_test, pred_ex)
# 'GALAXY' : 0, 'STAR' : 1, 'QSO' : 2
ConfusionMatrixDisplay(confusion_matrix=cmex, display_labels=['GALAXY', 'STAR', 'QSO']).plot(cmap=plt.cm.Blues);
# # XGBoost Model
from xgboost import XGBClassifier
# NOTE(review): use_label_encoder is deprecated in newer xgboost releases
# and can be dropped once the pinned version allows -- verify before removing.
xgb = XGBClassifier(n_estimators=400,
                    use_label_encoder=False,
                    random_state=1, objective='multi:softmax',
                    num_class=3,
                    eval_metric = 'mlogloss')
xgb.fit(X_train, y_train)
xgb.score(X_test, y_test)
pred_xgb = xgb.predict(X_test)
print(classification_report(y_test, pred_xgb))
cmxgb = confusion_matrix(y_test, pred_xgb)
# 'GALAXY' : 0, 'STAR' : 1, 'QSO' : 2
ConfusionMatrixDisplay(confusion_matrix=cmxgb, display_labels=['GALAXY', 'STAR', 'QSO']).plot(cmap=plt.cm.Blues);
# # Adaboost Model
from sklearn.ensemble import AdaBoostClassifier
# Default AdaBoost hyperparameters.
ada = AdaBoostClassifier()
ada.fit(X_train, y_train)
ada.score(X_test, y_test)
pred_ada = ada.predict(X_test)
print(classification_report(y_test, pred_ada))
cmada = confusion_matrix(y_test, pred_ada)
# 'GALAXY' : 0, 'STAR' : 1, 'QSO' : 2
ConfusionMatrixDisplay(confusion_matrix=cmada, display_labels=['GALAXY', 'STAR', 'QSO']).plot(cmap=plt.cm.Blues);
|
Stellar Classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Load the dataset. The file `BL-Flickr-Images-Book.csv` is a CSV file containing information about books from the British Library.
df = pd.read_csv('BL-Flickr-Images-Book.csv')
df.head()
# Inspect the columns list.
df.columns
# Drop some columns that are not of interest.
# (inplace=True mutates df directly; axis=1 means we drop columns, not rows)
to_drop = ['Edition Statement',
           'Corporate Author',
           'Corporate Contributors',
           'Former owner',
           'Engraver',
           'Contributors',
           'Issuance type',
           'Shelfmarks']
df.drop(to_drop, inplace=True, axis=1)
df.head()
# Check if the `Identifier` column contains entirely unique values.
# is_unique returning True means it can safely serve as the index.
df['Identifier'].is_unique
# Set the index to be the `Identifier` column.
df = df.set_index('Identifier')
df.head()
# We can retrieve columns by referencing the index using `.loc[ ... ]`.
df.loc[206]
# If we look at one particular column, `Date of Publication`, we can see that there are a few different ways that dates are expressed in the dataset.
df.loc[1905:, 'Date of Publication'] # this selects record ID 1905 to the end of the dataset, for the given column only
# We can use the regular expression `^(\d{4})` to select only the publication year from the data. We can test it on a copy of just that column.
# (expand=False returns a Series instead of a one-column DataFrame)
extr = df['Date of Publication'].str.extract(r'^(\d{4})', expand=False)
extr.loc[1905:]
# Now we know that it works, we can reset the column contents using the cleaned version.
df['Date of Publication'] = df['Date of Publication'].str.extract(r'^(\d{4})', expand=False)
df.loc[1905:]
# You probably noticed that there's a lot of null values (`NaN` - Not a Number) in the column. We can calculate the percentage of null values by summing up the null values and dividing by the number of rows in the dataset and multiply by 100.
df['Date of Publication'].isnull().sum() / len(df) * 100
# Let's take a look at another column now, `Place of Publication`.
df['Place of Publication'].head(10)
# We can see, there are also problems with this column! By inspecting these 10 rows, we can see that places that are supposed to be London and Oxford might be formatted differently. Let's also inspect two specific records.
df.loc[4157862]
df.loc[4159587]
# We can see from these records that these two books were published in the same place, but one `Place of Publication` entry uses hyphens while the other does not.
#
# One strategy to fix all of these is to look for all instances where `London` appears inside the text and replace it specifically with the normalised text as only `London`, and the same for `Oxford`. We can also try and clean up the hyphens by replacing those with a single space.
# Nested np.where: London wins over Oxford, everything else just loses hyphens.
df['Place of Publication'] = np.where(df['Place of Publication'].str.contains('London'), 'London',
                                      np.where(df['Place of Publication'].str.contains('Oxford'), 'Oxford',
                                               df['Place of Publication'].str.replace('-', ' ')))
df.head(10)
# At this point, we have cleaned things up quite nicely, but it is by no means complete. We can check all the different place labels used by extracting a list of the unique values of that column. When we do this, we see that there is actually quite a lot more work needed to completely clean up that column.
pd.Series(df['Place of Publication'].unique())
# Let's take a look at a different example file to clean up, `university_towns.txt`. Note that this is a text file with lines of text, and not a CSV file representing a table.
# NOTE(review): this handle is opened for a quick peek and never closed -
# consider `with open(...)` here.
uni_towns = open('university_towns.txt')
uni_towns.readlines()[:20]
# What we can see from looking at the first 20 lines of the file is that we have section labels that are names of US states with the text `[edit]` attached to it, followed by a list of names of university towns with the names of the universities that are situated in each of those towns in parentheses. We can take advantage of this pattern to preprocess the file into a CSV format.
# Rebind the name to collect (state, town) tuples; `state` carries the most
# recently seen `[edit]` header line forward to each following town line.
uni_towns = []
with open('university_towns.txt') as file:
    for line in file:
        if '[edit]' in line:
            state = line
        else:
            uni_towns.append((state, line))
uni_towns[:20]
# We can now load this into a DataFrame.
uni_towns_df = pd.DataFrame(uni_towns, columns=['State', 'RegionName'])
uni_towns_df
# We can see now that the cells in our DataFrame need a lot more cleaning up. We could have done this in the loop we used above, but the Pandas library makes it very easy using the `applymap()` function.
#
# First we define a function that can independently clean up one cell's contents.
def get_citystate(item):
    """Return *item* truncated at the first ' (' or '[' marker.

    Strips the '(University ...)' annotation from town rows and the
    '[edit]' suffix from state rows; values with neither marker are
    returned unchanged. The ' (' marker takes precedence when both occur.
    """
    for marker in (' (', '['):
        cut_at = item.find(marker)
        if cut_at != -1:
            return item[:cut_at]
    return item
# Let's test this on a couple of example strings.
get_citystate('Wyoming[edit]\n')
get_citystate('Eau Claire (University of Wisconsin–Eau Claire)\n')
# With `applymap()` we can pass the function name so that it runs the function on all cells in the DataFrame.
# NOTE(review): DataFrame.applymap was deprecated in pandas 2.1 in favour of
# DataFrame.map - fine on older pandas, confirm the installed version.
uni_towns_df = uni_towns_df.applymap(get_citystate)
uni_towns_df
# Sometimes datasets that you work with will either have column names that are not easy to understand, or unimportant information in the first or last few rows, for example definitions of terms in the dataset or some footnotes. In these cases we can rename columns or drop certain rows.
#
# Let's take a look at another example dataset in `olympics.csv`.
# NOTE(review): the `uni_towns` name is reused here for a completely different
# file, and the handle is never closed - a fresh name inside a `with` block
# would be clearer.
uni_towns = open('olympics.csv')
uni_towns.readlines()[:20]
# It looks like we can load this into a DataFrame straight away, so we will do that.
olympics_df = pd.read_csv('olympics.csv')
olympics_df.head()
# We can see that the top of this dataset is definitely messy! The first row seems to be an index of the columns (0, 1, 2, 3, etc.) and then the row that should be used as the column names looks like when it was written to disk that the special characters were not rendered correctly, and requires renaming across them.
#
# Pandas gives us an easy way to set the correct header. When we use `read_csv()` we can provide an extra parameter to tell it which row to use as the header.
olympics_df = pd.read_csv('olympics.csv', header=1)
olympics_df.head()
# Now, we can rename the column headers using the DataFrame's `rename()` method. This function allows us to rename any axis based on a mapping, which is represented using a Python dictionary (a `dict` datatype).
# Map the mangled CSV headers to readable names; the `.1` / `.2` suffixes are
# pandas' duplicate-column disambiguation (presumably the Winter-Olympics and
# combined-totals column groups - verify against the raw file).
name_mapping = {
    'Unnamed: 0': 'Country',
    '? Summer': 'Summer Olympics',
    '01 !': 'Gold',
    '02 !': 'Silver',
    '03 !': 'Bronze',
    '? Winter': 'Winter Olympics',
    '01 !.1': 'Gold.1',
    '02 !.1': 'Silver.1',
    '03 !.1': 'Bronze.1',
    '? Games': '# Games',
    '01 !.2': 'Gold.2',
    '02 !.2': 'Silver.2',
    '03 !.2': 'Bronze.2'
}
olympics_df = olympics_df.rename(columns=name_mapping)
olympics_df.head()
|
Data_cleaning_with_np_and_pd.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="images/generalassembly-open-graph.png" width="240" height="240" align="left"/>
# # Data architecture notebook
# **Author: <NAME>**
# <br> May 2019 | Chicago, IL.
#
# ### Table of contents
# - [Overview](#ov)
# - [Importing libraries](#imp)
# - [Merging reviews and business data](#me)
# - [Construction of distance matrix](#dist)
# - [Construction of category matrix](#cat)
# - [Script for data architecture as numpy array](#da)
# ### Overview<a id="ov"></a>
# This notebook presents in detail the script used to create the proposed data architecture for businesses in the "area of interest" of "Las Vegas Strip", as explained in the Readme.
#
# There are (4) key steps to compile the desired data architecture:
#
# - **(1)** Combine the features from multiple dataframes,
# - **(2)** Construction of a distance matrix to mask businesses given a distance criterion (radius of influence),
# - **(3)** Construction of a category matrix to mask businesses given a business-category criterion,
# - **(4)** Use the distance and category matrices to loop through specific distance and category bins to build the 4D tensor for the CNN model.
#
# The loop on step (4) to build the data architecture for the CNN model was created using numpy, given that it surpassed the capacity of pandas to manage large sets of multidimensional data.
# ### Importing libraries<a id="imp"></a>
# In this section we outline the initial code needed to run this workbook. If this code returns an error we recommend the reader to verify that the most up to date version of the libraries mentioned below have been installed in their computers. For a guideline on python installation of modules please refer to the __[official documentation](https://docs.python.org/3/installing/)__.
# +
#Importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import random
#Time library
import time
#Setting max rows and columns
pd.set_option('display.max_columns', 10000)
pd.set_option('display.max_rows', 10000)
#Import library for calculating distance between lat and long
import utm
#Import combination and permutation library
from itertools import combinations
# Import Gensim for Wrod2Vec similarity
import gensim
from gensim.models.word2vec import Word2Vec
#Import Standard Scaler
from sklearn.preprocessing import StandardScaler
# -
# ### Merging reviews and business data <a id="imp"></a>
# In this section we merge the dataframes that were generated on the preprocessing and data extraction notebooks by business id for the "area of interest" of "Las Vegas Strip".
# +
#Read the data from the CSV folder, business dataframe created on the data extraction process
df_business = pd.read_csv('./csv_data/business.csv').drop(columns = 'Unnamed: 0')
#Select relevant columns to build the data architecture, these are lat and long for distance metrics (Y dimension),
#categories for similarity metrics (X dimension),
#and the features we want to model with (such as stars, and review count)
df = df_business[['business_id','latitude','longitude','is_open','categories','stars','review_count','city']]
df = df.set_index('business_id') #Set the index to be the business id
#Visualize the head
df.head()
# -
# We will limit the dataframe to the "Las Vegas Strip", which is the area of interest for this particular analysis. The code below limits the dataframe for businesses within such geographical area.
# **Visual representation of the "area of interest" of the "Las Vegas Strip"**
#
# <img src="images/las_Vegas.jpg" width="1000" height="1000" align="left"/>
# +
#Define lat and long limits (hard-coded bounding box around the Strip)
lat_low = 36.092239
lat_up = 36.159071
long_left = -115.234375
long_right = -115.136185
#Mask the dataframe for such lat and long area (inclusive on all four edges)
mask = (df['latitude']>=lat_low) & (df['latitude']<=lat_up) & (df['longitude']>=long_left) & (df['longitude']<=long_right)
df = df[mask]
#Display the shape
df.shape
# -
# As we can see, in this particular instance the number of businesses to analyze is 8,527. We will now standardize the data to be able to run it through the data architecture process and the CNN.
# +
#Standardize the numeric features (zero mean / unit variance)
ssFeat = ['stars','review_count']
ss = StandardScaler()
df_ss = pd.DataFrame(ss.fit_transform(df[ssFeat]),columns = ssFeat)
#Re-attach the business_id index (fit_transform drops it)
df_ss = pd.concat([df_ss,pd.DataFrame(df.index)],axis=1).set_index('business_id')
#Merge the data: replace the raw columns with their scaled versions
df = pd.merge(df.drop(columns = ['stars','review_count']),
              df_ss,
              how='inner',
              left_index=True,
              right_index=True)
#Inspect the shape
df.shape
# -
#Inspect the head
df.head()
# Now we will pull in the features from the review dataframe, merge it with this dataframe.
#Read the df (review-level features pre-aggregated per business)
df_reviews = pd.read_csv('./csv_data/reviews_df.csv').set_index('business_id')
#Visualize the head
df_reviews.head()
#Inspect the shape
df_reviews.shape
# +
#Merge both dataframes on business_id; inner join keeps only businesses
#present in both tables
df= pd.merge(df,
             df_reviews,
             how='inner',
             left_index=True,
             right_index=True)
#Derive the binary target: 1 = business closed (is_open == 0), 0 = open
df['is_closed'] = df['is_open'].apply(lambda x: 1 if x==0 else 0)
#Check the shape
df.shape
# -
#Save as CSV
df.to_csv('./csv_data/df_2d_class.csv')
#Inspect the head
df.head()
# ### Construction of distance matrix<a id="dist"></a>
#
# In this section the code to create the distance matrix is detailed.
# +
#Create columns that convert the lat and long to UTM in order to compute Euclidean distance
#utm.from_latlon(lat, lon) returns (easting, northing, zone, letter), so
#UTM1 holds eastings and UTM2 holds northings.
values = [utm.from_latlon(x,y)[0] for (x,y) in zip(df['latitude'],df['longitude'])]
df['UTM1'] = values
values = [utm.from_latlon(x,y)[1] for (x,y) in zip(df['latitude'],df['longitude'])]
df['UTM2'] = values
#Convert the columns to numpy arrays
#NOTE(review): the names are misleading - `lats` holds eastings (UTM1) and
#`lons` holds northings (UTM2). The sign matrix below therefore orients on
#the EAST-WEST axis, not north-south as the comment claims. Verify intent.
lats = df['UTM1'].values
lons = df['UTM2'].values
#Calculate the absolute difference between all the elements of the vector
#(broadcasting builds the full n x n pairwise-difference matrices)
dlats = np.abs(lats[:, None] - lats[None, :])
dlongs = np.abs(lons[:, None] - lons[None, :])
#Compute Euclidean distance and store on a distance matrix where each row is a vector of the distance of a specific business
#to all the other businesses in the dataset
distances = np.sqrt((dlats)**2 + (dlongs)**2)
#Display the distances shape
distances.shape
# +
#Create a sign matrix to update the distance matrix based on north to south orientation
#(makes `distances` signed: positive one way along the axis, negative the other)
signs = np.sign(np.array(lats[:, None] - lats[None, :]))
distances = np.multiply(distances,signs)
# -
#Check again the shape
distances.shape
# ### Construction of category matrix<a id="cat"></a>
#
# In this section the code to create the category matrix is detailed. It displays if a given business pertains to a specific category, and then uses this output to create a category mask of the same dimensions as the distance matrix.
# +
#Expand the Categories column into a dummy field
#First obtain a list of all the categories that exist
#(type(x)==float catches NaN rows - NaN is a float - which have no categories)
categories = []
for x in df['categories']:
    if type(x)==float:
        continue
    else:
        categories.extend(x.split(','))
#Clean white spaces
categories = [x.strip() for x in categories]
#Create a list with unique items
categories = [str(x) for x in set(categories)]
#Create a dataframe: one 0/1 row per business, one column per category
#NOTE(review): the inner comprehension reuses `x` and shadows the row value;
#the bare except absorbs non-string rows (NaN) by emitting an all-zero row.
frame = []
for x in df['categories']:
    values=[]
    try:
        for cat in categories:
            if cat in [x.strip() for x in x.split(',')]:
                values.append(1)
            else:
                values.append(0)
    except:
        for cat in categories:
            values.append(0)
    frame.append(values)
#Store in a DataFrame
df_categories = pd.DataFrame(frame, columns = categories).fillna(0)
#Align the rows with the business ids of df
df_categories.index=df.index
df_categories = df_categories.astype(float)
# -
#Check a vector's head
df_categories['Restaurants'].head()
df_categories.columns
# In the following lines of code we will loop through the different distance and category bins to build the data architecture.
#In order to define the distance bins, we need to look at the distribution of the distance.
np.min(distances)
np.max(distances)
# 11km is the limit for north-to-south distance. Hence we will define bins that do not exceed this limit. We also need to define the similarity between the categories, in order to sort the categories from middle to outer based on similarity. For this task we will use the Word2Vec implementation in the gensim library, trained on the business-categories corpus built below.
# +
def strip_split(x):
    """Split a comma-separated category string into space-trimmed tokens.

    Only plain spaces are stripped (``strip(' ')``), matching the raw
    Yelp category formatting; an empty string yields ``['']``.
    """
    return [token.strip(' ') for token in x.split(',')]
#Replace missing category strings with '' so strip_split always gets a str
df['categories'] = df['categories'].fillna('')
#Build the Word2Vec training corpus: one list of category tokens per business
corpus = list(df['categories'].apply(lambda x: strip_split(x)).values)
# +
# Start timer.
t0 = time.time()
# Import word vectors into "model."
model = Word2Vec(corpus, # Corpus of data.
size=100, # Dimensions
window=5, # Context words
min_count=1, # Ignores words below this threshold.
sg=0, # SG = 0 uses CBOW (default).
workers=4) # (parallelizes process).
# Print results of timer.
print(time.time() - t0)
# -
#Display top 15 similarities
similarity = model.most_similar('Restaurants',topn = 15)
similarity
#Store top similarities on a list
top_c = [x for x,y in similarity]
top_c
# +
#Store 0ther categories in a list
categories = list(model.wv.vocab.keys())
low_c = []
for x in categories:
if x in top_c:
pass
else:
low_c.append(x)
#Shuffle list
random.shuffle(low_c)
random.shuffle(low_c)
random.shuffle(low_c)
#Pop the restaurants category
low_c.remove('Restaurants')
low_c.remove('')
#Split into two groups
top_c1 = top_c[:int(len(top_c)/2)]
top_c2 = top_c[int(len(top_c)/2):]
#Split into two groups
low_c1 = low_c[:int(len(low_c)/2)]
low_c2 = low_c[int(len(low_c)/2):]
# -
low_c1 + low_c2 == low_c
# +
#Create the category boolean for "other", low and up
df_categories['other_low'] = df_categories[low_c1].fillna(0).sum(axis=1).apply(lambda x: 1 if x>0 else 0)
df_categories['other_up'] = df_categories[low_c2].fillna(0).sum(axis=1).apply(lambda x: 1 if x>0 else 0)
#Create categories for top
df_categories['top'] = df_categories[top_c].fillna(0).sum(axis=1).apply(lambda x: 1 if x>0 else 0)
#Rewrite the other categories with zero if first category if the business is already captured in the "top"
df_categories['other_low'] = np.select([(df_categories['top'] == 1) & (df_categories['other_low'] == 1),
(df_categories['top'] == 1) & (df_categories['other_low'] == 0),
(df_categories['top'] == 0) & (df_categories['other_low'] == 1),
(df_categories['top'] == 0) & (df_categories['other_low'] == 0)],
[0,0,1,0],
default = 0)
df_categories['other_up'] = np.select([(df_categories['top'] == 1) & (df_categories['other_up'] == 1),
(df_categories['top'] == 1) & (df_categories['other_up'] == 0),
(df_categories['top'] == 0) & (df_categories['other_up'] == 1),
(df_categories['top'] == 0) & (df_categories['other_up'] == 0)],
[0,0,1,0],
default = 0)
df_categories['other_up'] = np.select([(df_categories['other_low'] == 1) & (df_categories['other_up'] == 1),
(df_categories['other_low'] == 1) & (df_categories['other_up'] == 0),
(df_categories['other_low'] == 0) & (df_categories['other_up'] == 1),
(df_categories['other_low'] == 0) & (df_categories['other_up'] == 0)],
[0,0,1,0],
default = 0)
#Convert everything to float
df_categories = df_categories.astype(float)
# -
#Display the list of bins to include in the model
['other_low'] + top_c1 + ['Restaurants'] + top_c2 +['other_up']
#Visualize the other low and other up vectors (there should be no overlap)
df_categories[['top','other_low','other_up']].tail(20)
df_categories['other_up'].value_counts()
df_categories['other_low'].value_counts()
# ### Script of the data architecture<a id="da"></a>
#
# In this section the code to create the data architecture for the CNN is detailed. The steps it follows are:
#
# - **(1) Define bins:** Define category and distance bins as lists to compose the "x" and "y" axis of each business's grid.
# - **(2) Select features:** Select the features to include in the "feature array".
# - **(3) Loop through bins to create a numpy array with the desired architecture** Loops through the criteria defined on steps (1) and (2) and stores a numpy array with the desired 4D architecture.
# +
#Define categories to loop on (main category 'Restaurants' at the center)
categories = ['other_low'] + top_c1 + ['Restaurants'] + top_c2 + ['other_up']
#Define distances to loop on - signed half-open bins [lower, upper) covering
#the whole signed-distance range, finer near zero
dist_lst = [
    [5000,int(np.max(distances))],
    [2500,5000],
    [1000,2500],
    [500,1000],
    [250,500],
    [100,250],
    [50,100],
    [0,50],
    [-50,0],
    [-100,-50],
    [-250,-100],
    [-500,-250],
    [-1000,-500],
    [-2500,-1000],
    [-5000,-2500],
    [int(np.min(distances)),-5000]
]
#Define features to loop on
features = ['stars','rev_stars','cool','funny','useful','positive_comments','negative_comments','age','polarity','subjectivity']
#Define empty arrays
All = np.empty((distances.shape[0],0)) #For inner loop on cat and distance
#Extract category boolean vector for each category
for cat in categories:
    category = np.array(df_categories[cat]) #Create numpy array
    category = np.tile(category,(distances.shape[0],1)) #Reshape as distances matrix
    #Loop through distance bins to create distance and category mask
    for dist in dist_lst:
        distance_mask = (distances >= dist[0]) & (distances < dist[1])
        mask = np.multiply(distance_mask,category)
        #Loop through features: masked mean of each feature over the
        #neighbours in this (category, distance) cell, minus the business's
        #own value. NOTE(review): zeros are treated as "masked out", so
        #neighbours whose feature value is exactly 0 are also dropped from
        #the mean - confirm this is intended.
        for feature in features:
            stars = np.tile(df[feature].values,(distances.shape[0],1))
            values = np.multiply(mask,stars)
            values[values==0]=np.nan
            means = np.nan_to_num(np.nanmean(values[:,:],axis=1))
            means = np.where(means!=0,means - df[feature].values,0)
            means = means.reshape(means.shape[0],1)
            #Assign to values unique array
            All = np.concatenate((All, means),axis=1)
#Reshape all the array into the right format for the CNN:
#(businesses, category rows, distance bins, feature channels)
All = All.reshape((distances.shape[0],
                   len(categories),
                   len(dist_lst),
                   len(features)))
# -
#Lets inspect the shape
All.shape
# +
#Save the dataset
np.save('./numpy_arrays/cnn_dataset_rest',All)
#Save the target variable
np.save('./numpy_arrays/target_rest',df['is_closed'].values)
#Save the observation's id
np.save('./numpy_arrays/ids_rest',np.array(df.index))
#Save the Main category id (Restaurants) - used for masking this specific business category in the modelling process
np.save('./numpy_arrays/rest_category',df_categories['Restaurants'].values)
# -
# **The script above loops over restaurants as the main category of interest**
# The scripts that follow loop through other categories of interest such as "Food", "Bars, and "Cafes".
# +
#Repeat of the Restaurants pipeline with 'Food' as the main category.
#Create category similarity
similarity = model.most_similar('Food',topn = 15)
top_c = [x for x,y in similarity]
#Store the other (non-top-similar) categories in a list
categories = list(model.wv.vocab.keys())
low_c = []
for x in categories:
    if x in top_c:
        pass
    else:
        low_c.append(x)
#Shuffle list
random.shuffle(low_c)
random.shuffle(low_c)
random.shuffle(low_c)
#Pop the main ('Food') category and the empty-string token
low_c.remove('Food')
low_c.remove('')
#Split into two groups
top_c1 = top_c[:int(len(top_c)/2)]
top_c2 = top_c[int(len(top_c)/2):]
#Split into two groups
low_c1 = low_c[:int(len(low_c)/2)]
low_c2 = low_c[int(len(low_c)/2):]
#Create the category boolean for "other", low and up
df_categories['other_low'] = df_categories[low_c1].fillna(0).sum(axis=1).apply(lambda x: 1 if x>0 else 0)
df_categories['other_up'] = df_categories[low_c2].fillna(0).sum(axis=1).apply(lambda x: 1 if x>0 else 0)
#Create categories for top
df_categories['top'] = df_categories[top_c].fillna(0).sum(axis=1).apply(lambda x: 1 if x>0 else 0)
#Zero the "other" flags for businesses already captured in "top"
df_categories['other_low'] = np.select([(df_categories['top'] == 1) & (df_categories['other_low'] == 1),
                                        (df_categories['top'] == 1) & (df_categories['other_low'] == 0),
                                        (df_categories['top'] == 0) & (df_categories['other_low'] == 1),
                                        (df_categories['top'] == 0) & (df_categories['other_low'] == 0)],
                                       [0,0,1,0],
                                       default = 0)
df_categories['other_up'] = np.select([(df_categories['top'] == 1) & (df_categories['other_up'] == 1),
                                       (df_categories['top'] == 1) & (df_categories['other_up'] == 0),
                                       (df_categories['top'] == 0) & (df_categories['other_up'] == 1),
                                       (df_categories['top'] == 0) & (df_categories['other_up'] == 0)],
                                      [0,0,1,0],
                                      default = 0)
df_categories['other_up'] = np.select([(df_categories['other_low'] == 1) & (df_categories['other_up'] == 1),
                                       (df_categories['other_low'] == 1) & (df_categories['other_up'] == 0),
                                       (df_categories['other_low'] == 0) & (df_categories['other_up'] == 1),
                                       (df_categories['other_low'] == 0) & (df_categories['other_up'] == 0)],
                                      [0,0,1,0],
                                      default = 0)
#Convert everything to float
df_categories = df_categories.astype(float)
#Define categories to loop on
categories = ['other_low'] + top_c1 + ['Food'] + top_c2 + ['other_up']
#Define distances to loop on (same signed bins as the Restaurants run)
dist_lst = [
    [5000,int(np.max(distances))],
    [2500,5000],
    [1000,2500],
    [500,1000],
    [250,500],
    [100,250],
    [50,100],
    [0,50],
    [-50,0],
    [-100,-50],
    [-250,-100],
    [-500,-250],
    [-1000,-500],
    [-2500,-1000],
    [-5000,-2500],
    [int(np.min(distances)),-5000]
]
#Define features to loop on
features = ['stars','rev_stars','cool','funny','useful','positive_comments','negative_comments','age','polarity','subjectivity']
#Define empty arrays
All = np.empty((distances.shape[0],0)) #For inner loop on cat and distance
#Extract category boolean vector for each category
for cat in categories:
    category = np.array(df_categories[cat]) #Create numpy array
    category = np.tile(category,(distances.shape[0],1)) #Reshape as distances matrix
    #Loop through distance bins to create distance and category mask
    for dist in dist_lst:
        distance_mask = (distances >= dist[0]) & (distances < dist[1])
        mask = np.multiply(distance_mask,category)
        #Loop through features to apply such calculation to the mask
        for feature in features:
            stars = np.tile(df[feature].values,(distances.shape[0],1))
            values = np.multiply(mask,stars)
            values[values==0]=np.nan
            means = np.nan_to_num(np.nanmean(values[:,:],axis=1))
            means = np.where(means!=0,means - df[feature].values,0)
            means = means.reshape(means.shape[0],1)
            #Assign to values unique array
            All = np.concatenate((All, means),axis=1)
#Reshape all the array into the right format for the CNN
All = All.reshape((distances.shape[0],
                   len(categories),
                   len(dist_lst),
                   len(features)))
#Save the dataset
np.save('./numpy_arrays/cnn_dataset_food',All)
#Save the target variable
np.save('./numpy_arrays/target_food',df['is_closed'].values)
#Save the observation's id
np.save('./numpy_arrays/ids_food',np.array(df.index))
#Save the Main category id (Food)
np.save('./numpy_arrays/food_category',df_categories['Food'].values)
# -
# Adding Bars ...
# +
#Repeat of the Restaurants pipeline with 'Bars' as the main category.
#Create category similarity
similarity = model.most_similar('Bars',topn = 15)
top_c = [x for x,y in similarity]
#Store the other (non-top-similar) categories in a list
categories = list(model.wv.vocab.keys())
low_c = []
for x in categories:
    if x in top_c:
        pass
    else:
        low_c.append(x)
#Shuffle list
random.shuffle(low_c)
random.shuffle(low_c)
random.shuffle(low_c)
#Pop the main ('Bars') category and the empty-string token
low_c.remove('Bars')
low_c.remove('')
#Split into two groups
top_c1 = top_c[:int(len(top_c)/2)]
top_c2 = top_c[int(len(top_c)/2):]
#Split into two groups
low_c1 = low_c[:int(len(low_c)/2)]
low_c2 = low_c[int(len(low_c)/2):]
#Create the category boolean for "other", low and up
df_categories['other_low'] = df_categories[low_c1].fillna(0).sum(axis=1).apply(lambda x: 1 if x>0 else 0)
df_categories['other_up'] = df_categories[low_c2].fillna(0).sum(axis=1).apply(lambda x: 1 if x>0 else 0)
#Create categories for top
df_categories['top'] = df_categories[top_c].fillna(0).sum(axis=1).apply(lambda x: 1 if x>0 else 0)
#Zero the "other" flags for businesses already captured in "top"
df_categories['other_low'] = np.select([(df_categories['top'] == 1) & (df_categories['other_low'] == 1),
                                        (df_categories['top'] == 1) & (df_categories['other_low'] == 0),
                                        (df_categories['top'] == 0) & (df_categories['other_low'] == 1),
                                        (df_categories['top'] == 0) & (df_categories['other_low'] == 0)],
                                       [0,0,1,0],
                                       default = 0)
df_categories['other_up'] = np.select([(df_categories['top'] == 1) & (df_categories['other_up'] == 1),
                                       (df_categories['top'] == 1) & (df_categories['other_up'] == 0),
                                       (df_categories['top'] == 0) & (df_categories['other_up'] == 1),
                                       (df_categories['top'] == 0) & (df_categories['other_up'] == 0)],
                                      [0,0,1,0],
                                      default = 0)
df_categories['other_up'] = np.select([(df_categories['other_low'] == 1) & (df_categories['other_up'] == 1),
                                       (df_categories['other_low'] == 1) & (df_categories['other_up'] == 0),
                                       (df_categories['other_low'] == 0) & (df_categories['other_up'] == 1),
                                       (df_categories['other_low'] == 0) & (df_categories['other_up'] == 0)],
                                      [0,0,1,0],
                                      default = 0)
#Convert everything to float
df_categories = df_categories.astype(float)
#Define categories to loop on
categories = ['other_low'] + top_c1 + ['Bars'] + top_c2 + ['other_up']
#Define distances to loop on (same signed bins as the Restaurants run)
dist_lst = [
    [5000,int(np.max(distances))],
    [2500,5000],
    [1000,2500],
    [500,1000],
    [250,500],
    [100,250],
    [50,100],
    [0,50],
    [-50,0],
    [-100,-50],
    [-250,-100],
    [-500,-250],
    [-1000,-500],
    [-2500,-1000],
    [-5000,-2500],
    [int(np.min(distances)),-5000]
]
#Define features to loop on
features = ['stars','rev_stars','cool','funny','useful','positive_comments','negative_comments','age','polarity','subjectivity']
#Define empty arrays
All = np.empty((distances.shape[0],0)) #For inner loop on cat and distance
#Extract category boolean vector for each category
for cat in categories:
    category = np.array(df_categories[cat]) #Create numpy array
    category = np.tile(category,(distances.shape[0],1)) #Reshape as distances matrix
    #Loop through distance bins to create distance and category mask
    for dist in dist_lst:
        distance_mask = (distances >= dist[0]) & (distances < dist[1])
        mask = np.multiply(distance_mask,category)
        #Loop through features to apply such calculation to the mask
        for feature in features:
            stars = np.tile(df[feature].values,(distances.shape[0],1))
            values = np.multiply(mask,stars)
            values[values==0]=np.nan
            means = np.nan_to_num(np.nanmean(values[:,:],axis=1))
            means = np.where(means!=0,means - df[feature].values,0)
            means = means.reshape(means.shape[0],1)
            #Assign to values unique array
            All = np.concatenate((All, means),axis=1)
#Reshape all the array into the right format for the CNN
All = All.reshape((distances.shape[0],
                   len(categories),
                   len(dist_lst),
                   len(features)))
#Save the dataset
np.save('./numpy_arrays/cnn_dataset_bars',All)
#Save the target variable
np.save('./numpy_arrays/target_bars',df['is_closed'].values)
#Save the observation's id
np.save('./numpy_arrays/ids_bars',np.array(df.index))
#Save the Main category id (Bars)
np.save('./numpy_arrays/bars_category',df_categories['Bars'].values)
# -
# Adding Cafes ...
# +
#Repeat of the Restaurants pipeline with 'Cafes' as the main category.
#Create category similarity
similarity = model.most_similar('Cafes',topn = 15)
top_c = [x for x,y in similarity]
#Store the other (non-top-similar) categories in a list
categories = list(model.wv.vocab.keys())
low_c = []
for x in categories:
    if x in top_c:
        pass
    else:
        low_c.append(x)
#Shuffle list
random.shuffle(low_c)
random.shuffle(low_c)
random.shuffle(low_c)
#Pop the main ('Cafes') category and the empty-string token
low_c.remove('Cafes')
low_c.remove('')
#Split into two groups
top_c1 = top_c[:int(len(top_c)/2)]
top_c2 = top_c[int(len(top_c)/2):]
#Split into two groups
low_c1 = low_c[:int(len(low_c)/2)]
low_c2 = low_c[int(len(low_c)/2):]
#Create the category boolean for "other", low and up
df_categories['other_low'] = df_categories[low_c1].fillna(0).sum(axis=1).apply(lambda x: 1 if x>0 else 0)
df_categories['other_up'] = df_categories[low_c2].fillna(0).sum(axis=1).apply(lambda x: 1 if x>0 else 0)
#Create categories for top
df_categories['top'] = df_categories[top_c].fillna(0).sum(axis=1).apply(lambda x: 1 if x>0 else 0)
#Zero the "other" flags for businesses already captured in "top"
df_categories['other_low'] = np.select([(df_categories['top'] == 1) & (df_categories['other_low'] == 1),
                                        (df_categories['top'] == 1) & (df_categories['other_low'] == 0),
                                        (df_categories['top'] == 0) & (df_categories['other_low'] == 1),
                                        (df_categories['top'] == 0) & (df_categories['other_low'] == 0)],
                                       [0,0,1,0],
                                       default = 0)
df_categories['other_up'] = np.select([(df_categories['top'] == 1) & (df_categories['other_up'] == 1),
                                       (df_categories['top'] == 1) & (df_categories['other_up'] == 0),
                                       (df_categories['top'] == 0) & (df_categories['other_up'] == 1),
                                       (df_categories['top'] == 0) & (df_categories['other_up'] == 0)],
                                      [0,0,1,0],
                                      default = 0)
df_categories['other_up'] = np.select([(df_categories['other_low'] == 1) & (df_categories['other_up'] == 1),
                                       (df_categories['other_low'] == 1) & (df_categories['other_up'] == 0),
                                       (df_categories['other_low'] == 0) & (df_categories['other_up'] == 1),
                                       (df_categories['other_low'] == 0) & (df_categories['other_up'] == 0)],
                                      [0,0,1,0],
                                      default = 0)
#Convert everything to float
df_categories = df_categories.astype(float)
#Define categories to loop on - this section builds the Cafes tensor, so the
#center of the grid must be 'Cafes'. The original used 'Shopping' here, a
#copy/paste slip: it disagreed with low_c.remove('Cafes') above and with the
#df_categories['Cafes'] mask saved below.
categories = ['other_low'] + top_c1 + ['Cafes'] + top_c2 + ['other_up']
#Define distances to loop on
dist_lst = [
[5000,int(np.max(distances))],
[2500,5000],
[1000,2500],
[500,1000],
[250,500],
[100,250],
[50,100],
[0,50],
[-50,0],
[-100,-50],
[-250,-100],
[-500,-250],
[-1000,-500],
[-2500,-1000],
[-5000,-2500],
[int(np.min(distances)),-5000]
]
#Define features to loop on
features = ['stars','rev_stars','cool','funny','useful','positive_comments','negative_comments','age','polarity','subjectivity']
#Define empty arrays
All = np.empty((distances.shape[0],0)) #For inner loop on cat and distance
#Extract category boolean vector for each category
for cat in categories:
category = np.array(df_categories[cat]) #Create numpy array
category = np.tile(category,(distances.shape[0],1)) #Reshape as distances martix
#Loop through distance bins to create distance and category mask
for dist in dist_lst:
distance_mask = (distances >= dist[0]) & (distances < dist[1])
mask = np.multiply(distance_mask,category)
#Loop through features to apply such calculation to the mask
for feature in features:
stars = np.tile(df[feature].values,(distances.shape[0],1))
values = np.multiply(mask,stars)
values[values==0]=np.nan
means = np.nan_to_num(np.nanmean(values[:,:],axis=1))
means = np.where(means!=0,means - df[feature].values,0)
means = means.reshape(means.shape[0],1)
#Assign to values unique array
All = np.concatenate((All, means),axis=1)
#Reshape all the array into the right format for the CNN
All = All.reshape((distances.shape[0],
len(categories),
len(dist_lst),
len(features)))
#Save the dataset
np.save('./numpy_arrays/cnn_dataset_cafes',All)
#Save the target variable
np.save('./numpy_arrays/target_cafes',df['is_closed'].values)
#Save the observation's id
np.save('./numpy_arrays/ids_cafes',np.array(df.index))
#Save the Main category id (Restaurants)
np.save('./numpy_arrays/cafes_category',df_categories['Cafes'].values)
|
Data_architecture.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/dggasque/DS-Unit-2-Linear-Models/blob/master/module2-regression-2/LS_DS_212_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="P0J410ttJljI" colab_type="text"
# Lambda School Data Science
#
# *Unit 2, Sprint 1, Module 2*
#
# ---
# + [markdown] colab_type="text" id="7IXUfiQ2UKj6"
# # Regression 2
#
# ## Assignment
#
# You'll continue to **predict how much it costs to rent an apartment in NYC,** using the dataset from renthop.com.
#
# - [ ] Do train/test split. Use data from April & May 2016 to train. Use data from June 2016 to test.
# - [ ] Engineer at least two new features. (See below for explanation & ideas.)
# - [ ] Fit a linear regression model with at least two features.
# - [ ] Get the model's coefficients and intercept.
# - [ ] Get regression metrics RMSE, MAE, and $R^2$, for both the train and test data.
# - [ ] What's the best test MAE you can get? Share your score and features used with your cohort on Slack!
# - [ ] As always, commit your notebook to your fork of the GitHub repo.
#
#
# #### [Feature Engineering](https://en.wikipedia.org/wiki/Feature_engineering)
#
# > "Some machine learning projects succeed and some fail. What makes the difference? Easily the most important factor is the features used." — Pedro Domingos, ["A Few Useful Things to Know about Machine Learning"](https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf)
#
# > "Coming up with features is difficult, time-consuming, requires expert knowledge. 'Applied machine learning' is basically feature engineering." — Andrew Ng, [Machine Learning and AI via Brain simulations](https://forum.stanford.edu/events/2011/2011slides/plenary/2011plenaryNg.pdf)
#
# > Feature engineering is the process of using domain knowledge of the data to create features that make machine learning algorithms work.
#
# #### Feature Ideas
# - Does the apartment have a description?
# - How long is the description?
# - How many total perks does each apartment have?
# - Are cats _or_ dogs allowed?
# - Are cats _and_ dogs allowed?
# - Total number of rooms (beds + baths)
# - Ratio of beds to baths
# - What's the neighborhood, based on address or latitude & longitude?
#
# ## Stretch Goals
# - [ ] If you want more math, skim [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapter 3.1, Simple Linear Regression, & Chapter 3.2, Multiple Linear Regression
# - [ ] If you want more introduction, watch [Brandon Foltz, Statistics 101: Simple Linear Regression](https://www.youtube.com/watch?v=ZkjP5RJLQF4)
# (20 minutes, over 1 million views)
# - [ ] Add your own stretch goal(s) !
# + colab_type="code" id="o9eSnDYhUGD7" colab={}
# %%capture
import sys
# Resolve the data directory: on Colab, read straight from the raw-GitHub URL
# and install the extra dependency; locally, use the repo-relative path.
# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
    # !pip install category_encoders==2.*
# If you're working locally:
else:
    DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
# + colab_type="code" id="cvrw-T3bZOuW" colab={}
import numpy as np
import pandas as pd
# Read New York City apartment rental listing data
df = pd.read_csv(DATA_PATH+'apartments/renthop-nyc.csv')
assert df.shape == (49352, 34)
# Remove the most extreme 1% prices (0.5% from each tail),
# the most extreme .1% latitudes, &
# the most extreme .1% longitudes
# NOTE(review): the upper latitude bound uses a strict '<' while every other
# bound is inclusive — presumably a typo; behavior left unchanged.
df = df[(df['price'] >= np.percentile(df['price'], 0.5)) &
(df['price'] <= np.percentile(df['price'], 99.5)) &
(df['latitude'] >= np.percentile(df['latitude'], 0.05)) &
(df['latitude'] < np.percentile(df['latitude'], 99.95)) &
(df['longitude'] >= np.percentile(df['longitude'], 0.05)) &
(df['longitude'] <= np.percentile(df['longitude'], 99.95))]
# + id="G1kjR8DgBcXP" colab_type="code" colab={}
# Encode interest_level ordinally: low=1, medium=2, high=3
# (values outside this set would map to NaN)
df['interest_numerical'] = df['interest_level'].map({'low':1, 'medium':2, 'high':3})
# + id="7FqNLmAnGnyX" colab_type="code" colab={}
# Create a feature with the character length of the listing description.
# Bug fix: the original used len(str(x)), which turns a missing description
# (NaN) into the string "nan" and records a length of 3; missing
# descriptions now correctly count as length 0.
df['description_length'] = df['description'].fillna('').astype(str).str.len()
# + id="yw2aNNI7JoMt" colab_type="code" colab={}
# Create allows cats AND dogs column: 1 only when both cats_allowed and
# dogs_allowed are 1 (their sum equals 2), else 0
df['cats_and_dogs_allowed'] = df['cats_allowed'] + df['dogs_allowed']
df['cats_and_dogs_allowed'] = df['cats_and_dogs_allowed'].map({2:1, 1:0, 0:0})
# + id="2jX34uOdIg_0" colab_type="code" outputId="67880a8f-cc4d-4ba8-86fe-a5334f972c71" colab={"base_uri": "https://localhost:8080/", "height": 512}
df.head()
# + id="vict_YVwukYs" colab_type="code" colab={}
# Convert created feature from object to datetime so .dt accessors work below
df['created'] = pd.to_datetime(df['created'])
# + id="3c01dj-VvZLN" colab_type="code" colab={}
# Split data into train and test datasets by month:
# train = before June (April & May 2016), test = June 2016 onward
train = df[df['created'].dt.month < 6]
test = df[df['created'].dt.month >= 6]
# + id="4J9fXbOkvjnR" colab_type="code" outputId="5f2ae531-1092-4dd8-fa59-1a01fb68c902" colab={"base_uri": "https://localhost:8080/", "height": 35}
train.shape
# + id="hEPTT0JH_IDQ" colab_type="code" outputId="312bc5f9-fdc1-4d8d-fbac-fb677bce4d83" colab={"base_uri": "https://localhost:8080/", "height": 35}
test.shape
# + id="-hjg5Of2_KZf" colab_type="code" outputId="843ae291-477a-41c5-bd7b-8ae4e46bba81" colab={"base_uri": "https://localhost:8080/", "height": 512}
train.head()
# + id="xPawIqbQKx4P" colab_type="code" colab={}
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
# + id="PdFnqrAtKqkB" colab_type="code" outputId="c7d87f9c-76bf-40a3-eb07-cbb98c7dc685" colab={"base_uri": "https://localhost:8080/", "height": 72}
# Mean baseline: predict the mean train price for every listing and report
# the mean absolute error on both splits — the score any model must beat.
target = 'price'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
# (fixed: the original used print(f'...', mae) — an f-string with no
# placeholder plus a positional argument; interpolate the value instead)
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae}')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae}')
# + id="3lRwqrXnaVuq" colab_type="code" colab={}
# Instantiate the linear regression model once; it is re-fit with different
# feature sets in the cells below.
model = LinearRegression()
# + id="glk9ffEULBN-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 126} outputId="51e12eee-272e-426a-cda2-ee970089b88b"
# First modeling iteration: fit a linear regression on a small feature set
# and report RMSE / MAE / R^2 on both the train and the test split.
# Arrange X feature matrices and y target vectors
features = ['bathrooms', 'bedrooms', 'interest_numerical',
            'elevator', 'doorman', 'garden_patio', 'cats_and_dogs_allowed']
target = 'price'
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]
# Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mse = mean_squared_error(y_train, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_train, y_pred)
r2 = r2_score(y_train, y_pred)
# Fixed: the original wrote print(f'...', value) — an f-string with no
# placeholder plus a positional argument; interpolate the value instead.
# Labels normalized to 'Mean Absolute Error' for consistency with later cells.
print(f'Train Root Mean Squared Error: {rmse}')
print(f'Train Mean Absolute Error: {mae}')
print(f'Train R^2 Score: {r2}')
# Apply the model to test data
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(f'Test Root Mean Squared Error: {rmse}')
print(f'Test Mean Absolute Error: {mae}')
print(f'Test R^2 Score: {r2}')
# + id="wIiqHERJQxDh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="6af12316-07a3-446f-dcca-311af99e0559"
# Inspect the fitted coefficients (one per entry of `features`, same order)
model.coef_
# + id="aOKzsHIvQ0Ro" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="a5bcb320-8694-40fb-d08a-f7e44a2376b9"
model.intercept_
# + [markdown] id="_KD8TeYPQmnJ" colab_type="text"
# Change Features to see if we can improve model.
# + id="a70fyGp0Q8TR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 126} outputId="c9a94089-feba-4822-b299-e5213c9feb8f"
# Iteration 2: add dishwasher, hardwood_floors, laundry_in_unit.
# Arrange X feature matrices and y target vectors
features = ['bathrooms', 'bedrooms',
'elevator', 'doorman',
'garden_patio', 'dishwasher',
'cats_and_dogs_allowed',
'hardwood_floors', 'laundry_in_unit',
'interest_numerical']
target = 'price'
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]
# Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mse = mean_squared_error(y_train, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_train, y_pred)
r2 = r2_score(y_train, y_pred)
# NOTE(review): print(f'...', value) is an f-string with no placeholder plus
# a positional argument; it works, but f'... {rmse}' is the intended idiom.
print(f'Train Root Mean Squared Error: ', rmse)
print(f'Train Absolute Error: ', mae)
print(f'Train R^2 Score: ', r2)
# Apply the model to test data
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print('Test Root Mean Squared Error: ', rmse)
print('Test Absolute Error: ', mae)
print('Test R^2 Score: ', r2)
# + colab_type="code" outputId="81a50a10-8b83-4532-a461-1c7d54208e9c" id="sFcWTfIASaMN" colab={"base_uri": "https://localhost:8080/", "height": 126}
# Iteration 3: add fitness_center, high_speed_internet, balcony.
# Arrange X feature matrices and y target vectors
features = ['bathrooms', 'bedrooms',
'elevator', 'doorman',
'garden_patio', 'dishwasher',
'cats_and_dogs_allowed',
'hardwood_floors', 'laundry_in_unit',
'interest_numerical', 'fitness_center',
'high_speed_internet', 'balcony']
target = 'price'
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]
# Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mse = mean_squared_error(y_train, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_train, y_pred)
r2 = r2_score(y_train, y_pred)
print(f'Train Root Mean Squared Error: ', rmse)
print(f'Train Absolute Error: ', mae)
print(f'Train R^2 Score: ', r2)
# Apply the model to test data
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print('Test Root Mean Squared Error: ', rmse)
print('Test Absolute Error: ', mae)
print('Test R^2 Score: ', r2)
# + colab_type="code" outputId="bd807adb-18d1-4f8f-d4ef-2071faf342bd" id="xl0EG_w0TJ11" colab={"base_uri": "https://localhost:8080/", "height": 126}
# Iteration 4: add exclusive, new_construction, pre-war, longitude and
# outdoor-space features.
# Arrange X feature matrices and y target vectors
features = ['bathrooms', 'bedrooms',
'elevator', 'doorman',
'garden_patio', 'dishwasher',
'cats_and_dogs_allowed',
'hardwood_floors', 'laundry_in_unit',
'interest_numerical', 'fitness_center',
'high_speed_internet', 'balcony',
'exclusive', 'new_construction',
'pre-war', 'longitude',
'outdoor_space', 'common_outdoor_space']
target = 'price'
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]
# Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mse = mean_squared_error(y_train, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_train, y_pred)
r2 = r2_score(y_train, y_pred)
print(f'Train Root Mean Squared Error: ', rmse)
print(f'Train Absolute Error: ', mae)
print(f'Train R^2 Score: ', r2)
# Apply the model to test data
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print('Test Root Mean Squared Error: ', rmse)
print('Test Absolute Error: ', mae)
print('Test R^2 Score: ', r2)
# + colab_type="code" outputId="492f6905-dedd-4ad1-b5bb-8f62a4efde05" id="qT5mYMWIW95R" colab={"base_uri": "https://localhost:8080/", "height": 126}
# Iteration 5: add laundry_in_building, wheelchair_access.
# Arrange X feature matrices and y target vectors
features = ['bathrooms', 'bedrooms',
'elevator', 'doorman',
'garden_patio', 'dishwasher',
'cats_and_dogs_allowed',
'hardwood_floors', 'laundry_in_unit',
'interest_numerical', 'fitness_center',
'high_speed_internet', 'balcony',
'exclusive', 'new_construction',
'pre-war', 'longitude',
'outdoor_space', 'common_outdoor_space',
'laundry_in_building', 'wheelchair_access']
target = 'price'
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]
# Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mse = mean_squared_error(y_train, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_train, y_pred)
r2 = r2_score(y_train, y_pred)
print(f'Train Root Mean Squared Error: ', rmse)
print(f'Train Mean Absolute Error: ', mae)
print(f'Train R^2 Score: ', r2)
# Apply the model to test data
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print('Test Root Mean Squared Error: ', rmse)
print('Test Mean Absolute Error: ', mae)
print('Test R^2 Score: ', r2)
# + colab_type="code" outputId="9c731cee-706c-492d-b6ff-8e28ca9674b9" id="cXQzFIZAYBNX" colab={"base_uri": "https://localhost:8080/", "height": 126}
# Iteration 6: add roof_deck.
# Arrange X feature matrices and y target vectors
features = ['bathrooms', 'bedrooms',
'elevator', 'doorman',
'garden_patio', 'dishwasher',
'cats_and_dogs_allowed',
'hardwood_floors', 'laundry_in_unit',
'interest_numerical', 'fitness_center',
'high_speed_internet', 'balcony',
'exclusive', 'new_construction',
'pre-war', 'longitude',
'outdoor_space', 'common_outdoor_space',
'laundry_in_building', 'wheelchair_access',
'roof_deck']
target = 'price'
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]
# Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mse = mean_squared_error(y_train, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_train, y_pred)
r2 = r2_score(y_train, y_pred)
print(f'Train Root Mean Squared Error: ', rmse)
print(f'Train Mean Absolute Error: ', mae)
print(f'Train R^2 Score: ', r2)
# Apply the model to test data
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print('Test Root Mean Squared Error: ', rmse)
print('Test Mean Absolute Error: ', mae)
print('Test R^2 Score: ', r2)
# + id="Gi8ox2RCc0af" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 126} outputId="9ee9e8fa-cf71-4de9-c32b-f90329791f22"
model.coef_
# + id="S0T-xW7Uc26X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="f6298c26-7bad-46f1-bec4-7cf6b7f0372a"
model.intercept_
# + colab_type="code" outputId="71f7db35-6363-4167-b003-6df5732aa316" id="HHTWa96Pdwfd" colab={"base_uri": "https://localhost:8080/", "height": 126}
# Final iteration: replace the combined cats_and_dogs_allowed flag with the
# separate cats_allowed and dogs_allowed columns.
# Arrange X feature matrices and y target vectors
features = ['bathrooms', 'bedrooms',
'elevator', 'doorman',
'garden_patio', 'dishwasher',
'cats_allowed', 'dogs_allowed',
'hardwood_floors', 'laundry_in_unit',
'interest_numerical', 'fitness_center',
'high_speed_internet', 'balcony',
'exclusive', 'new_construction',
'pre-war', 'longitude',
'outdoor_space', 'common_outdoor_space',
'laundry_in_building', 'wheelchair_access',
'roof_deck']
target = 'price'
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]
# Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mse = mean_squared_error(y_train, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_train, y_pred)
r2 = r2_score(y_train, y_pred)
print(f'Train Root Mean Squared Error: ', rmse)
print(f'Train Mean Absolute Error: ', mae)
print(f'Train R^2 Score: ', r2)
# Apply the model to test data
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print('Test Root Mean Squared Error: ', rmse)
print('Test Mean Absolute Error: ', mae)
print('Test R^2 Score: ', r2)
# + id="GvJJE2Qbkal3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 126} outputId="a2303cd5-29e7-4a0f-ee64-05c5edb06b16"
model.coef_
# + id="7cfeV1pPkgw0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="05b7e04f-0efd-41a4-8c97-bef2f5260aea"
model.intercept_
# + [markdown] id="ELbL3vE7bcuC" colab_type="text"
# Best model has test error of $674.35
#
# features = ['bathrooms', 'bedrooms', 'elevator', 'doorman', 'garden_patio', 'dishwasher', 'cats_allowed', 'dogs_allowed', 'hardwood_floors', 'laundry_in_unit', 'interest_numerical', 'fitness_center', 'high_speed_internet', 'balcony', 'exclusive', 'new_construction', 'pre-war', 'longitude', 'outdoor_space', 'common_outdoor_space', 'laundry_in_building', 'wheelchair_access', 'roof_deck']
|
module2-regression-2/LS_DS_212_assignment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense,LSTM
from keras.layers import Dropout
from sklearn.preprocessing import MinMaxScaler
import math
# -
# Load BANKNIFTY futures price data (absolute Windows path — machine specific)
df=pd.read_csv("C:\\Users\\LEGION\\Downloads\\Telegram Desktop\\FuturePricePrediction\\BANKNIFTY_FUTURE.csv")
df.head()
# Work only with the closing-price column
data = df.filter(['close'])
dataset = data.values
# 80% of rows earmarked for training; used below to slice the test window
training_data_len = math.ceil( len(dataset) *.8)
training_data_len
data
# Scale closes to [0, 1] for the LSTM.
# NOTE(review): the scaler is fit on the FULL dataset, including the later
# test rows — this leaks the test value range into training; confirm intended.
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
# Build supervised sequences: 40 past scaled closes -> next scaled close.
# NOTE(review): train_data takes ALL rows, not just the first
# training_data_len, so training windows overlap the test period.
train_data = scaled_data[0:, : ]
x_train=[]
y_train = []
for i in range(40,len(train_data)):
    x_train.append(train_data[i-40:i,0])
    y_train.append(train_data[i,0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train.shape
# LSTM expects 3-D input: (samples, timesteps, features)
x_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1))
x_train.shape
# Stacked-LSTM regression head: 300 -> 200 LSTM units, then 100 -> 50 -> 1 dense
model = Sequential()
model.add(LSTM(units=300, return_sequences=True,input_shape=(x_train.shape[1],1)))
model.add(LSTM(units=200))
model.add(Dense(units=100))
model.add(Dense(units=50))
model.add(Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x_train, y_train, batch_size=7, epochs=15)
# Build the test set from the held-out tail of the scaled series.
# NOTE(review): test windows use a 30-step lookback while training used a
# 40-step window (input_shape was built from 40 timesteps) — confirm this
# mismatch is intentional.
test_data = scaled_data[training_data_len - 30: , : ]
#Creating the x_test and y_test data sets
x_test = []
y_test = dataset[training_data_len : , : ] #Unscaled closes for the held-out rows
for i in range(30,len(test_data)):
    x_test.append(test_data[i-30:i,0])
x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0],x_test.shape[1],1))
predictions = model.predict(x_test)
predictions = scaler.inverse_transform(predictions)#Undo scaling
predictions
# Root-mean-squared error of the unscaled predictions vs the actual closes
rmse=np.sqrt(np.mean(((predictions- y_test)**2)))
rmse
train = data[:training_data_len]
valid = data[training_data_len:]
# NOTE(review): assigning into a slice of `data` triggers
# SettingWithCopyWarning; `valid = valid.copy()` first would silence it.
valid['Predictions'] = predictions
plt.figure(figsize=(16,8))
plt.title('Model')
plt.xlabel('Date', fontsize=18)
plt.ylabel('Close Price', fontsize=18)
plt.plot(train['close'])
plt.plot(valid[['close', 'Predictions']])
plt.legend(['Train', 'Val', 'Predictions'], loc='lower right')
plt.show()
valid.head()
# +
# linear regression prediction with prediction interval
from numpy.random import randn
from numpy.random import seed
from numpy import power
from numpy import sqrt
from numpy import mean
from numpy import std
from numpy import sum as arraysum
from scipy.stats import linregress
from matplotlib import pyplot
# seed random number generator
seed(1)
# -
# Regress predicted closes on actual closes over the validation window.
# linregress returns (slope, intercept, ...), so b1 is the slope and b0 the
# intercept.
y=np.array(valid['Predictions'])
x=np.array(valid['close'])
b1, b0, r_value, p_value, std_err = linregress(x, y)
# make predictions
yhat = b0 + b1 * x
# define new input, expected value and prediction
x_in = x[0]
y_out = y[0]
yhat_out = yhat[0]
# estimate stdev of yhat: residual standard error with n-2 degrees of freedom
sum_errs = arraysum((y - yhat)**2)
stdev = sqrt(1/(len(y)-2) * sum_errs)
# calculate a ~95% prediction interval (1.96 sigma; the leverage term is
# ignored, so this is an approximation)
interval = 1.96 * stdev
print('Prediction Interval: %.3f' % interval)
lower, upper = yhat_out - interval, yhat_out + interval
print('95%% likelihood that the true value is between %.3f and %.3f' % (lower, upper))
print('True value: %.3f' % y_out)
# plot dataset and prediction with interval
pyplot.scatter(x, y)
pyplot.plot(x, yhat, color='red')
pyplot.errorbar(x_in, yhat_out, yerr=interval, color='black', fmt='o')
pyplot.show()
|
BankNifty_Future_Final_LSTM.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import euclidean
import itertools
# Euclidean distance between two 3-D points.
# Fixed: the original called euclidean(a, b) with `b` undefined (NameError)
# and with `a` holding both points as a 2x3 nested list, which
# scipy.spatial.distance.euclidean rejects; unpack the two points instead.
a, b = [0, 0, 0], [1, 1, 1]
euclidean(a, b)  # sqrt(3) ~= 1.7320508
# Monotone non-decreasing grid used to test "nearest element above" lookups.
a = [0.02929688, 0.05859377, 0.08789065, 0.11718754, 0.14307873, 0.16896992,
0.19145541, 0.21280568, 0.23415594, 0.25550621, 0.27685648, 0.29820674,
0.31955701, 0.34020823, 0.36085945, 0.38151067, 0.40216189, 0.42281311,
0.44346433, 0.46411555, 0.48476677, 0.50541799, 0.52606921, 0.54672044,
0.56737166, 0.58802288, 0.6086741, 0.62932532, 0.64997654, 0.67062776,
0.69127898, 0.7119302, 0.73258142, 0.75323264, 0.77388386, 0.79271443,
0.811545, 0.83037557, 0.84920614, 0.86803671, 0.88686728, 0.90322054,
0.91957379, 0.93592705, 0.95228031, 0.96532316, 0.97759157, 0.98959713,
1., 1. ]
target = 0.979127490958217

def find_nearest_above(my_array, value):
    """Return the index of the smallest element of `my_array` that is
    strictly greater than `value`, or None if no element qualifies.

    Fixed: the original called this helper without ever defining it
    (NameError); this is the standard masked-array implementation.
    """
    diff = my_array - value
    mask = np.ma.less_equal(diff, 0)  # mask out elements <= value
    if np.all(mask):
        return None  # every element is <= value
    masked_diff = np.ma.masked_array(diff, mask)
    return masked_diff.argmin()

find_nearest_above(np.array(a), target)
# Equivalent lookup for a sorted sequence: first insertion point to the right
np.searchsorted(a,[target,],side='right')[0]
# Enumerate all r-element combinations of a small list.
a_list = [1, 2, 3]
r = 2
[combo for combo in itertools.combinations(a_list, r)]
|
.ipynb_checkpoints/fiddle-checkpoint-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:aind]
# language: python
# name: conda-env-aind-py
# ---
# # Sparse graph based networks - some experiments for network type 1
# +
# %load_ext autoreload
# %autoreload 2
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import graph_utils as graph_utils
import graph_neural_networks as graph_nn
import data_preparation_utils as data_prep
from iterative_updaters import VanillaGradientDescent, MomentumGradientDescent, NesterovMomentumGradientDescent, RMSPropGradientDescent, AdamGradientDescent
import training_and_evaluation as train_eval
import graph_nn_experiments as experiments
# -
# Small 3x3 demo matrix for experimenting with column permutations.
demo_rows = [[1.2, 2.3, 3.4], [5.0, -1.2, -0.9], [9.0, -0.8, 0.75]]
m = np.array(demo_rows)
m
# Fancy indexing on the column axis: swap the first two columns, keep the third.
m[:, [1, 0, 2]]
# Experiment with the above functions and an artificial adjacency matrix:
# (4-node path graph with self-loops; graph_nn helpers are project-local)
artificial_adj_matrix = np.array([[1,1,0,0],[1,1,1,0],[0,1,1,1],[0,0,1,1]])
print(artificial_adj_matrix)
artificial_edges = graph_nn.extract_undirected_edges(artificial_adj_matrix)
# Per-edge transfer matrix + bias (no weight sharing, not transposed)
artificial_edges_matrix, artificial_edges_bias = graph_nn.get_transfer_matrix_plus_bias_vector(artificial_edges, False)
# TF1-style session: initialize variables, materialize the tensors, close.
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
m = sess.run(artificial_edges_matrix)
v = sess.run(artificial_edges_bias)
sess.close()
print(m)
print(v)
# Shared-weights variant of the same transfer matrix
artificial_edges_matrix_shared, artificial_edges_bias_shared = graph_nn.get_shared_transfer_matrix_plus_bias_vector(artificial_edges)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
m = sess.run(artificial_edges_matrix_shared)
v = sess.run(artificial_edges_bias_shared)
sess.close()
print(m)
print(v)
# Transposed variant (second argument True)
artificial_edges_matrix_transposed, artificial_edges_bias_transposed = graph_nn.get_transfer_matrix_plus_bias_vector(artificial_edges, True)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
m = sess.run(artificial_edges_matrix_transposed)
v = sess.run(artificial_edges_bias_transposed)
sess.close()
print(m)
print(v)
# Load adjacency matrix for crossings in Ochota (21 crossings, see shape=[None,21] below):
ochota_adj_matrix = np.genfromtxt("macierz_sasiedztwa.txt")
plt.imshow(ochota_adj_matrix, cmap='hot', interpolation='nearest')
plt.show()
# Now construct a neural network that uses several layers of transfer matrices.
# Try it out using some random parameters and input:
# +
# also testing the possibility to have more than a single channel in input
number_of_test_channels = 5
inp = tf.placeholder(dtype=tf.float32,shape=[None,21])
# Replicate the 21-dim input into (batch, 21, channels)
inp_mul = tf.reshape(tf.tile(inp,[1,number_of_test_channels]),[-1,21,number_of_test_channels])
nn = graph_nn.transfer_matrix_neural_net(inp_mul, 3, 2, tf.nn.tanh, ochota_adj_matrix, verbose=True)
print(nn.shape)
# -
# Smoke test: run the graph once on a random input vector
random_input = np.random.normal(loc=0.0,scale=1.0,size=[1,21])
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for i in range(1):  # single iteration; loop kept for easy repetition
    transfered = sess.run(nn, feed_dict={inp: random_input})
    print(transfered)
sess.close()
# It works!
# Now try the version with variable channel count:
nn = graph_nn.transfer_matrix_neural_net_var_channels(inp_mul, 3, [2,3,1], tf.nn.tanh, ochota_adj_matrix, verbose=True)
print(nn.shape)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for i in range(1):
    transfered = sess.run(nn, feed_dict={inp: random_input})
    print(transfered)
sess.close()
# Seems to work again
# **Now try type 1 architecture on 100k traffic lights dataset:**
# Load the data:
traffic_lights_data = pd.read_csv("100k.csv", header=None)
traffic_lights_data.head()
# Normalize with the project's scaler helper (returns scaled X/y plus the
# fitted scalers for inverse transforms later):
X, y, X_scaler, y_scaler = data_prep.scale_standard_traffic_light_data(traffic_lights_data)
X.head()
X_scaler.inverse_transform(X).head()
y_scaler.inverse_transform(y)
# Normalization seems to work ok.
# Train test split:
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=831191)
# -
# Now construct the neural net and define the loss:
# +
# also testing the possibility to have more than a single channel in input (turned off for the moment)
#number_of_test_channels = 2
nn_input = tf.placeholder(dtype=tf.float32, shape=[None, 21])
targets = tf.placeholder(dtype=tf.float32, shape=[None, 1])
# 3 transfer layers, 4 channels, tanh activations, no weight/bias sharing
nn_output = graph_nn.transfer_matrix_neural_net(nn_input, 3, 4, tf.nn.tanh, ochota_adj_matrix, verbose=True, share_weights_in_transfer_matrix=False, share_biases_in_transfer_matrix=False)
# also testing the possibility to have more than a single channel in input (turned off for the moment)
#nn_input_mul = tf.tile(tf.expand_dims(nn_input,-1),[1,1,number_of_test_channels])
#nn_output = graph_nn.transfer_matrix_neural_net(nn_input_mul, 3, 4, tf.nn.tanh, ochota_adj_matrix, verbose=True, share_weights_in_transfer_matrix=True, share_biases_in_transfer_matrix=True)
# -
# __NOTE__: transfer_matrix_neural_net(traffic_signal_settings, 3, 4, tf.nn.tanh, ochota_adj_matrix) seems to work ok (+ Adam using 0.01 learning rate)
# Now define the optimizer:
optimizer = tf.train.AdamOptimizer(0.005) #0.0035
# Define batch iterator (batch size 997):
batch_iterator = data_prep.BatchIterator(X_train, y_train, 997)
# Train for up to 100000 steps, checkpointing to test_model.ckpt every 1000:
test_and_batch_losses = train_eval.train_model(nn_output, nn_input, targets, optimizer, 100000, batch_iterator, X_test, y_test, "trained_networks/test_model.ckpt", 1000, verbose=True)
# Check relative accuracy on test set (uses the saved checkpoint, returns the
# average error plus (actual, predicted) pairs):
model_avg_error, actual_vs_predicted = train_eval.evaluate_model_on_a_dataset("trained_networks/test_model.ckpt", nn_output,nn_input, X_test, y_test, y_scaler)
print(model_avg_error)
y_test_sorted = [x[0] for x in actual_vs_predicted]
y_test_pred_sorted = [x[1] for x in actual_vs_predicted]
# Actual vs predicted, with the identity line for reference
plt.plot(y_test_pred_sorted, y_test_sorted)
plt.plot(y_test_pred_sorted, y_test_pred_sorted)
plt.title("Actual vs. predicted")
# Evaluation seems to work ok.
# Restore from checkpoint:
# +
# close session (if open)
# NOTE(review): bare except silently swallows every error; `except Exception`
# would be safer.
try:
    sess.close()
except:
    pass
# open new session
sess = tf.Session()
saver = tf.train.Saver()
saver.restore(sess, "trained_networks/test_model.ckpt")
# -
# Generate a gradient descent trajectory (1000 steps, report every 10):
# +
updater = MomentumGradientDescent()
trajectory = train_eval.generate_gradient_descent_trajectory(sess, nn_output, nn_input, X_scaler, y_scaler, updater, 1000, 10, verbose=True)
# -
# Seems to work fine.
# Generate several gradient optimization paths for comparison with simulation:
# +
no_of_trajectories = 1 #set this to a higher number to get multiple trajectories
updater = NesterovMomentumGradientDescent()
trajectories = train_eval.generate_and_join_multiple_gradient_descent_trajectories(sess, no_of_trajectories, nn_output, nn_input, X_scaler, y_scaler, updater, 1000, 100, verbose=True, trajectories_verbose=False)
# -
# Summary statistics of the model's predictions along the trajectories
predictions_from_trajectories = [x[1] for x in trajectories]
print("Min prediction: %f" % min(predictions_from_trajectories))
print("Mean prediction: %f" % np.mean(predictions_from_trajectories))
print("Median prediction: %f" % np.median(predictions_from_trajectories))
_ = plt.hist(predictions_from_trajectories, bins=100)
# ### Comparison with simulator outputs
# First test if the microservice works
simulator_microservice_url = "http://3.122.113.135:25041/" #this IP may change
import requests
import json
# NOTE(review/security): credentials are hard-coded in the notebook (the
# password is a redaction placeholder); move them to environment variables
# or a config file before sharing/running.
resp = requests.post(simulator_microservice_url, json={'user':'pgora', 'password':'<PASSWORD>', 'settings':"[26,31,11,18,60,15,112,34,37,10,21,52,13,114,15,76,117,18,39,20,81]"}, headers={'Content-Type': 'application/json'})
float(json.loads(resp.text)['score'])
# Now generate a test set based on trajectories and the corresponding simulation results:
# +
# set this equal to the number of threads in the simulator virtual machine
no_of_threads = 8
simulation_test_X, simulation_test_y = train_eval.generate_test_set_from_trajectory_points(trajectories, no_of_threads, simulator_microservice_url, verbose=True)
# simulation test set needs to be scaled before plugging it into a network
simulation_test_X = X_scaler.transform(simulation_test_X)
simulation_test_y = y_scaler.transform(simulation_test_y.reshape(-1,1)).reshape(-1)
# -
# Evaluate the net on the test set created above:
model_avg_simulation_error, actual_simulation_vs_predicted = train_eval.evaluate_model_on_a_dataset("trained_networks/test_model.ckpt", nn_output,nn_input, simulation_test_X, simulation_test_y, y_scaler)
print(model_avg_simulation_error)
y_simulation_test_sorted = [x[0] for x in actual_simulation_vs_predicted]
y_simulation_test_pred_sorted = [x[1] for x in actual_simulation_vs_predicted]
plt.plot(y_simulation_test_pred_sorted, y_simulation_test_sorted)
plt.plot(y_simulation_test_pred_sorted, y_simulation_test_pred_sorted)
plt.title("Actual vs. predicted")
# **NOTE:** Just a quick test - meaningful results obtained in the main experiment run outside this notebook
# ### Random graph topology experiments
# Testing random graph topologies:
# +
# Random 21-node adjacency matrix with 48 edges (matches Ochota's node count)
random_adj_matrix = graph_utils.generate_random_adjacency_matrix(21, 48)
plt.imshow(random_adj_matrix, cmap="hot")
plt.show()
#random_adj_matrix
# Sanity check: the symmetric-difference distance should be symmetric
print("Distance: %d, %d (should be equal)" %
(graph_utils.undirected_symmetric_difference(ochota_adj_matrix, random_adj_matrix),
graph_utils.undirected_symmetric_difference(random_adj_matrix, ochota_adj_matrix)))
# -
# Visualize random matrices at symmetric differences 0..4 from Ochota's
for i in range(5):
    random_adj_matrix = graph_utils.generate_random_adjacency_matrix_with_fixed_symmetric_difference(i, ochota_adj_matrix)
    plt.subplot(151+i)
    plt.imshow(random_adj_matrix, cmap="hot")
plt.show()
# Generate many random graph topologies, train for each of them:
# +
# 5 samples per symmetric-difference level 0, 3, 6, ..., 42
symmetric_diffs = [3 * i for i in range(15)]
no_of_samples = 5
symmetric_diffs *= no_of_samples
symmetric_diffs = symmetric_diffs[3:] # just for the moment
file_to_save_results = "random_topologies_3_2.csv"
with open(file_to_save_results,"a") as f:
for i in symmetric_diffs:
print("Constructing random adjacency matrix with symmetric diff %d" % i)
random_adj_matrix = graph_utils.generate_random_adjacency_matrix_with_fixed_symmetric_difference(i, ochota_adj_matrix)
plt.imshow(random_adj_matrix, cmap="hot")
plt.show()
tf.reset_default_graph()
nn_input = tf.placeholder(dtype=tf.float32, shape=[None, 21])
targets = tf.placeholder(dtype=tf.float32, shape=[None, 1])
print("Constructing graph neural net")
nn_output = graph_nn.transfer_matrix_neural_net(nn_input, 3, 4, tf.nn.tanh, random_adj_matrix, verbose=False, share_weights_in_transfer_matrix=False, share_biases_in_transfer_matrix=False)
optimizer = tf.train.AdamOptimizer(0.005)
batch_iterator = data_prep.BatchIterator(X_train, y_train, 997)
print("Training network with symmetric diff %d" % i)
test_and_batch_losses = train_eval.train_model(nn_output, nn_input, targets, optimizer, 30000, batch_iterator, X_test, y_test, "trained_networks/random_model_tmp.ckpt", 1000, verbose=True)
test_loss = test_and_batch_losses[-1][0]
model_avg_error, actual_vs_predicted = train_eval.evaluate_model_on_a_dataset("trained_networks/random_model_tmp.ckpt", nn_output,nn_input, X_test, y_test, y_scaler)
f.write("%d,%f,%f\n" % (i, model_avg_error, test_loss))
f.flush()
print((i, model_avg_error, test_loss))
f.close()
# -
# **NOTE**: Just a quick test, main results obtained outside this notebook and loaded and presented here
# Random graphs experiment results for type 1 graph neural networks:
random_topologies_results = np.genfromtxt("random_topologies_3_2.csv",delimiter=",")
rand_top_data_frame = pd.DataFrame(random_topologies_results, columns=["SymmetricDiff","RelativeError","MeanSquaredError"])
medians = rand_top_data_frame.groupby("SymmetricDiff").median()
lower_five_perc = rand_top_data_frame.groupby("SymmetricDiff").quantile(0.05)
upper_five_perc = rand_top_data_frame.groupby("SymmetricDiff").quantile(0.95)
medians
rand_top_data_frame_ = rand_top_data_frame.copy()
rand_top_data_frame_["Count"] = 1
rand_top_data_frame_ = rand_top_data_frame_[["SymmetricDiff","Count"]]
rand_top_data_frame_.groupby("SymmetricDiff").sum()
rel_error_y_err_max = pd.DataFrame()
rel_error_y_err_max["RelativeErrorMax"] = upper_five_perc.loc[:,"RelativeError"] - medians.loc[:,"RelativeError"]
rel_error_y_err_min = pd.DataFrame()
rel_error_y_err_min["RelativeErrorMin"] = medians.loc[:,"RelativeError"] - lower_five_perc.loc[:,"RelativeError"]
rel_error_y_err = pd.concat([rel_error_y_err_min,rel_error_y_err_max], axis=1).transpose().iloc[0:2,:]
plt.plot(rand_top_data_frame.iloc[:,0] * 2,rand_top_data_frame.iloc[:,1],"bo", alpha=0.1, zorder=-32)
plt.plot(medians.index.values * 2,medians.iloc[:,0], "ro",zorder=-32)
plt.errorbar(medians.index.values * 2,medians.iloc[:,0],yerr=rel_error_y_err.values,ecolor="r", barsabove=True)
plt.xlabel("Symmetric diff")
plt.ylabel("Relative test error")
plt.show()
ms_error_y_err_max = pd.DataFrame()
ms_error_y_err_max["MeanSquaredErrorMax"] = upper_five_perc.loc[:,"MeanSquaredError"] - medians.loc[:,"MeanSquaredError"]
ms_error_y_err_min = pd.DataFrame()
ms_error_y_err_min["MeanSquaredErrorMin"] = medians.loc[:,"MeanSquaredError"] - lower_five_perc.loc[:,"MeanSquaredError"]
ms_error_y_err = pd.concat([ms_error_y_err_min,ms_error_y_err_max], axis=1).transpose().iloc[0:2,:]
plt.plot(rand_top_data_frame.iloc[:,0] * 2,rand_top_data_frame.iloc[:,2],"bo", alpha=0.1, zorder=-32)
plt.plot(medians.index.values * 2,medians.iloc[:,1], "ro", zorder=-32)
plt.errorbar(medians.index.values * 2,medians.iloc[:,1],yerr=ms_error_y_err.values,ecolor="r", barsabove=True)
plt.xlabel("Symmetric diff")
plt.ylabel("Mean squared error")
plt.show()
# ### Permuted topologies experiments summary
# Each CSV row: number of transpositions, symmetric difference, relative error, MSE.
permuted_topologies_results = np.genfromtxt("permuted_topologies_0.csv",delimiter=",")
perm_top_data_frame = pd.DataFrame(permuted_topologies_results, columns=["NoOfTranspositions","SymmetricDiff","RelativeError","MeanSquaredError"])
# Per-symmetric-diff median and 5th/95th percentiles.
medians = perm_top_data_frame.groupby("SymmetricDiff").median()
lower_five_perc = perm_top_data_frame.groupby("SymmetricDiff").quantile(0.05)
upper_five_perc = perm_top_data_frame.groupby("SymmetricDiff").quantile(0.95)
medians
# Count how many samples were collected per symmetric-diff value.
perm_top_data_frame_ = perm_top_data_frame.copy()
perm_top_data_frame_["Count"] = 1
perm_top_data_frame_ = perm_top_data_frame_[["SymmetricDiff","Count"]]
perm_top_data_frame_.groupby("SymmetricDiff").sum()
rel_error_y_err_max = pd.DataFrame()
rel_error_y_err_max["RelativeErrorMax"] = upper_five_perc.loc[:,"RelativeError"] - medians.loc[:,"RelativeError"]
rel_error_y_err_min = pd.DataFrame()
rel_error_y_err_min["RelativeErrorMin"] = medians.loc[:,"RelativeError"] - lower_five_perc.loc[:,"RelativeError"]
# NOTE(review): only the lower (min) error bar is used; rel_error_y_err_max is computed
# but unused -- presumably intentional together with uplims=True below, but verify.
rel_error_y_err = rel_error_y_err_min
plt.plot(perm_top_data_frame.iloc[:,1],perm_top_data_frame.iloc[:,2],"bo",alpha=0.1)
plt.plot(medians.index.values,medians.iloc[:,1], "ro")
plt.errorbar(medians.index.values,medians.iloc[:,1],yerr=rel_error_y_err.values,lolims=False,uplims=True,ecolor="r", barsabove=True)
plt.xlabel("Symmetric diff") # <-- not divided by two here
plt.ylabel("Relative test error")
plt.show()
# Same plot for the mean-squared-error column.
ms_error_y_err_max = pd.DataFrame()
ms_error_y_err_max["MeanSquaredErrorMax"] = upper_five_perc.loc[:,"MeanSquaredError"] - medians.loc[:,"MeanSquaredError"]
ms_error_y_err_min = pd.DataFrame()
ms_error_y_err_min["MeanSquaredErrorMin"] = medians.loc[:,"MeanSquaredError"] - lower_five_perc.loc[:,"MeanSquaredError"]
ms_error_y_err = ms_error_y_err_min
plt.plot(perm_top_data_frame.iloc[:,1],perm_top_data_frame.iloc[:,3],"bo", alpha=0.1)
plt.plot(medians.index.values,medians.iloc[:,2], "ro")
plt.errorbar(medians.index.values,medians.iloc[:,2],yerr=ms_error_y_err.values,lolims=False,uplims=True,ecolor="r", barsabove=True)
plt.xlabel("Symmetric diff") # <-- not divided by two here
plt.ylabel("Mean squared error")
plt.show()
# A quick check of fit and eval experiments results - more details in a separate notebook
df = pd.read_csv("100k_fit_and_evaluate_experiments/fit_eval_results.csv",header=None)
# Sort by column 5 -- presumably the evaluation metric; confirm with the experiment script.
df.sort_values([5])
# ### Experiments with intermediate layers - visualization in mind, not included in the paper
# Define a network and get its layer list:
nn_input = tf.placeholder(dtype=tf.float32, shape=[None, 21])
targets = tf.placeholder(dtype=tf.float32, shape=[None, 1])
# The *_with_layer_output variant also returns the per-layer tensors for inspection.
nn_output, nn_layers = graph_nn.transfer_matrix_neural_net_with_layer_output(nn_input, 3, 4, tf.nn.tanh, ochota_adj_matrix, verbose=True, share_weights_in_transfer_matrix=False, share_biases_in_transfer_matrix=False)
nn_layers
# Load a trained network:
# +
# close session (if open)
try:
    sess.close()
except:
    pass
# open new session
sess = tf.Session()
saver = tf.train.Saver()
saver.restore(sess, "100k_fit_and_evaluate_experiments/model_3_4_tanh.ckpt")
# -
model_avg_error, actual_vs_predicted = train_eval.evaluate_model_on_a_dataset("100k_fit_and_evaluate_experiments/model_3_4_tanh.ckpt", nn_output,nn_input, X_test, y_test, y_scaler)
print(model_avg_error)
y_test_sorted = [x[0] for x in actual_vs_predicted]
y_test_pred_sorted = [x[1] for x in actual_vs_predicted]
plt.plot(y_test_pred_sorted, y_test_sorted)
# Identity line for reference.
plt.plot(y_test_pred_sorted, y_test_pred_sorted)
plt.title("Actual vs. predicted")
# Pick the test example with the smallest target value as the example input.
X_test = X_test.reset_index(drop=True)
y_test = y_test.reset_index(drop=True)
example_X = X_test.iloc[y_test.idxmin(),:].values.reshape([1,-1])
# Plot layer activations for the exemplary input:
import seaborn as sns
for l in nn_layers:
    l_out = sess.run(l, feed_dict={nn_input: example_X})
    # Reshape the layer output to 2-D for the heatmap.
    if len(l_out.shape) > 2:
        l_out_ = np.squeeze(l_out)
    else:
        l_out_ = np.transpose(l_out)
    sns.heatmap(l_out_, linewidth=0.5, center=0.0)
    plt.show()
# Now for gradients:
import seaborn as sns
for l in nn_layers:
    l_out = sess.run(l, feed_dict={nn_input: example_X})
    # d(output)/d(layer): sensitivity of the prediction to each activation.
    gradient_op = tf.gradients(nn_output, l)
    gradient = sess.run(gradient_op, feed_dict={nn_input: example_X})
    gradient = gradient[0].reshape(l_out.shape)
    if len(l_out.shape) > 2:
        l_out_ = np.squeeze(l_out)
        gradient_ = np.squeeze(gradient)
    else:
        l_out_ = np.transpose(l_out)
        gradient_ = np.transpose(gradient)
    sns.heatmap(gradient_, linewidth=0.5, center=0.0)
    plt.show()
# The same thing for Kipf's type network:
nn_input = tf.placeholder(dtype=tf.float32, shape=[None, 21])
targets = tf.placeholder(dtype=tf.float32, shape=[None, 1])
nn_output, nn_layers = graph_nn.kipfs_transfer_matrix_neural_net_with_layer_output(nn_input, 5, 4, tf.nn.tanh, ochota_adj_matrix, verbose=True)
# NOTE(review): this optimizer is never used below -- a trained checkpoint is only restored.
optimizer = tf.train.AdamOptimizer(0.005)
nn_layers
# +
# close session (if open)
try:
    sess.close()
except:
    pass
# open new session
sess = tf.Session()
saver = tf.train.Saver()
saver.restore(sess, "trained_networks/kipfs_5_4_tanh_model.ckpt")
# -
# Gradient heatmaps for the same example input as above.
import seaborn as sns
for l in nn_layers:
    l_out = sess.run(l, feed_dict={nn_input: example_X})
    # Gradient of the network output w.r.t. this layer's activations.
    gradient_op = tf.gradients(nn_output, l)
    gradient = sess.run(gradient_op, feed_dict={nn_input: example_X})
    gradient = gradient[0].reshape(l_out.shape)
    # Reshape to 2-D for the heatmap.
    if len(l_out.shape) > 2:
        l_out_ = np.squeeze(l_out)
        gradient_ = np.squeeze(gradient)
    else:
        l_out_ = np.transpose(l_out)
        gradient_ = np.transpose(gradient)
    sns.heatmap(gradient_, linewidth=0.5, center=0.0)
    plt.show()
# +
# Generate a gradient-descent trajectory in input space (see train_eval for semantics).
updater = MomentumGradientDescent()
trajectory = train_eval.generate_gradient_descent_trajectory(sess, nn_output, nn_input, X_scaler, y_scaler, updater, 3000, 30, verbose=True)
# -
# Scale the trajectory end point the same way as the training inputs.
min_X_example = X_scaler.transform(np.array(trajectory[-1][0])).reshape(1,21)
import seaborn as sns
for l in nn_layers:
    l_out = sess.run(l, feed_dict={nn_input: min_X_example})
    gradient_op = tf.gradients(nn_output, l)
    gradient = sess.run(gradient_op, feed_dict={nn_input: min_X_example})
    gradient = gradient[0].reshape(l_out.shape)
    if len(l_out.shape) > 2:
        l_out_ = np.squeeze(l_out)
        gradient_ = np.squeeze(gradient)
    else:
        l_out_ = np.transpose(l_out)
        gradient_ = np.transpose(gradient)
    sns.heatmap(gradient_, linewidth=0.5, center=0.0)
    plt.show()
|
graph_nn_type_1_tests.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This example notebook shows how we can train an image classification model, as described [here](https://github.com/tensorflow/docs/blob/master/site/en/tutorials/quickstart/beginner.ipynb),
# with the use of TileDB support for Tensorflow Data API for dense TileDB arrays. We will firstly ingest our MNIST dataset in two dense TileDB arrays, i.e, x and y,
# and then proceed to train a classification model with Keras. First, let's import what we need.
# + pycharm={"is_executing": true, "name": "#%%\n"}
import tensorflow as tf
import tiledb
import numpy as np
import matplotlib.pyplot as plt
from tiledb.ml.readers.tensorflow import TensorflowTileDBDenseDataset
# -
# Load the MNIST dataset from Keras datasets and scale it.
# + pycharm={"is_executing": true, "name": "#%%\n"}
mnist = tf.keras.datasets.mnist
# Keep only the training split; scale pixel values from [0, 255] to [0, 1].
(images, labels), _ = mnist.load_data()
images = images / 255.0
print(images.shape)
print()
print(labels.shape)
# Then we proceed with ingesting images and labels into dense TileDB arrays. Here, we should point out that besides the
# flexibility of TileDB in defining a schema, i.e., multiple dimensions, multiple attributes, compression etc,
# we choose to define a simple schema. So, for a numpy array of D number of dimensions we create a dense TileDB array,
# with the same number of dimensions, and a single attribute of data type numpy float32. Moreover, the
# tile extent of the first dimension should always be equal to the batch size, in order to achieve optimal reads while
# training. Let's define an ingestion function.
# + pycharm={"is_executing": true, "name": "#%%\n"}
def ingest_in_tiledb(data: np.array, batch_size: int, uri: str):
    """Write *data* into a new dense TileDB array at *uri*.

    The array gets one integer dimension per numpy axis and a single
    "features" attribute of the input's dtype.  The tile extent of the
    first dimension equals *batch_size* so that batched reads during
    training line up with tiles.
    """
    # One TileDB dimension per numpy axis; only axis 0 is tiled by batch size.
    dims = []
    for axis in range(data.ndim):
        extent = batch_size if axis == 0 else data.shape[axis]
        dims.append(
            tiledb.Dim(
                name="dim_" + str(axis),
                domain=(0, data.shape[axis] - 1),
                tile=extent,
                dtype=np.int32,
            )
        )
    # Dense schema with a single attribute holding the raw values.
    schema = tiledb.ArraySchema(
        domain=tiledb.Domain(*dims),
        sparse=False,
        attrs=[tiledb.Attr(name="features", dtype=data.dtype)],
    )
    # Create the array on disk, then write everything in one shot.
    tiledb.Array.create(uri, schema)
    with tiledb.open(uri, "w") as tiledb_array:
        tiledb_array[:] = {"features": data}
# + [markdown] pycharm={"name": "#%% md\n"}
# Now we proceed with ingestion.
# + pycharm={"is_executing": true, "name": "#%%\n"}
# Ingest images
ingest_in_tiledb(data=images, batch_size=64, uri='training_images')
# Ingest labels
ingest_in_tiledb(data=labels, batch_size=64, uri='training_labels')
# -
# We can now explore our TileDB arrays and check their structure.
# + pycharm={"is_executing": true, "name": "#%%\n"}
images_array = tiledb.open('training_images')
labels_array = tiledb.open('training_labels')
print(images_array.schema)
print(labels_array.schema)
# -
# We can easily now slice our data and create some plots. We can either slice an image or a part of
# an image. Because we use only one attribute, we always slice with attribute with index equal to 0.
# Some examples below.
# + pycharm={"is_executing": true, "name": "#%%\n"}
# Plot an image (slices are keyed by attribute name, here the single "features" attribute)
plt.subplot(1, 2, 1)
plt.imshow(images_array[0][images_array.schema.attr(0).name], cmap="gray")
# Plot part of the same image
plt.subplot(1, 2, 2)
plt.imshow(images_array[0, 5:20, 5:20][images_array.schema.attr(0).name], cmap="gray")
# -
# We can then define a function that creates a basic digit classifier for the MNIST dataset.
# + pycharm={"is_executing": true, "name": "#%%\n"}
def create_model():
    """Build and compile a simple MNIST digit classifier emitting raw logits."""
    # Flatten 28x28 images, one hidden layer with dropout, 10 output logits.
    layer_stack = [
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10),
    ]
    model = tf.keras.models.Sequential(layer_stack)
    # from_logits=True because the final layer applies no softmax.
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])
    return model
# + [markdown] pycharm={"name": "#%% md\n"}
# Now we move on with creating a model, opening the arrays, define a Tensorflow TileDB dataset and train the model.
# + pycharm={"is_executing": true, "name": "#%%\n"}
model = create_model()
# Both x and y expose the single "features" attribute; batch_size matches the
# tile extent chosen at ingestion time (64).
with tiledb.open('training_images') as x, tiledb.open('training_labels') as y:
    tiledb_dataset = TensorflowTileDBDenseDataset(
        x_array=x, y_array=y, x_attribute_names=['features'], y_attribute_names=['features'], batch_size=64,
        buffer_size = 300*64, batch_shuffle=True, within_batch_shuffle=False,
    )
    model.fit(tiledb_dataset, epochs=5)
# + pycharm={"is_executing": true, "name": "#%%\n"}
model.summary()
# + pycharm={"is_executing": true}
|
examples/readers/tensorflow_data_api_tiledb_dense.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h2 align='center' style='color:blue'>Exercise: Outlier Detection and Removal Using IQR</h2>
# You are given height_weight.csv file which contains heights and weights of 1000 people. Dataset is taken from here,
# https://www.kaggle.com/mustafaali96/weight-height
#
# You need to do this,
#
# (1) Load this csv in pandas dataframe and first plot histograms for height and weight parameters
#
# (2) Using IQR detect weight outliers and print them
#
# (3) Using IQR, detect height outliers and print them
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
# %matplotlib inline
matplotlib.rcParams['figure.figsize'] = (8,4)
df = pd.read_csv("height_weight.csv")
df.head(5)
# **Histogram for weights**
plt.hist(df.weight, bins=20, rwidth=0.8)
plt.xlabel('Weight')
plt.ylabel('Count')
plt.show()
# **Histogram for heights**
plt.hist(df.height, bins=20, rwidth=0.8)
plt.xlabel('Height')
plt.ylabel('Count')
plt.show()
# **Detect outliers based on weight**
# First and third quartiles of the weight column.
Q1 = df.weight.quantile(0.25)
Q3 = df.weight.quantile(0.75)
Q1, Q3
IQR = Q3 - Q1
IQR
# Standard Tukey fences: points outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] count as outliers.
lower_limit = Q1 - 1.5*IQR
upper_limit = Q3 + 1.5*IQR
lower_limit, upper_limit
df.weight.describe()
# Rows whose weight falls outside the fences.
df[(df.weight<lower_limit)|(df.weight>upper_limit)]
# **Detect outliers based on height**
Q1 = df.height.quantile(0.25)
Q3 = df.height.quantile(0.75)
Q1, Q3
IQR = Q3 - Q1
IQR
lower_limit = Q1 - 1.5*IQR
upper_limit = Q3 + 1.5*IQR
lower_limit, upper_limit
# Rows whose height falls outside the fences.
df[(df.height<lower_limit)|(df.height>upper_limit)]
|
Program's_Contributed_By_Contributors/AI-Summer-Course/py-master/ML/FeatureEngineering/3_outlier_IQR/Exercise/3_outlier_iqr_exercise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# 
# # Reinforcement Learning in Azure Machine Learning - Cartpole Problem on Single Compute
#
# Reinforcement Learning in Azure Machine Learning is a managed service for running reinforcement learning training and simulation. With Reinforcement Learning in Azure Machine Learning, data scientists can start developing reinforcement learning systems on one machine, and scale to compute targets with 100’s of nodes if needed.
#
# This example shows how to use Reinforcement Learning in Azure Machine Learning to train a Cartpole playing agent on a single compute.
# ### Cartpole problem
#
# Cartpole, also known as [Inverted Pendulum](https://en.wikipedia.org/wiki/Inverted_pendulum), is a pendulum with a center of mass above its pivot point. This formation is essentially unstable and will easily fall over but can be kept balanced by applying appropriate horizontal forces to the pivot point.
#
# <table style="width:50%">
# <tr>
# <th>
# <img src="./images/cartpole.png" alt="Cartpole image" />
# </th>
# </tr>
# <tr>
# <th><p>Fig 1. Cartpole problem schematic description (from <a href="https://towardsdatascience.com/cartpole-introduction-to-reinforcement-learning-ed0eb5b58288">towardsdatascience.com</a>).</p></th>
# </tr>
# </table>
#
# The goal here is to train an agent to keep the cartpole balanced by applying appropriate forces to the pivot point.
#
# See [this video](https://www.youtube.com/watch?v=XiigTGKZfks) for a real-world demonstration of cartpole problem.
# ### Prerequisite
# The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription ID, a resource group, and an Azure Machine Learning workspace. All datastores and datasets you use should be associated with your workspace.
# ## Set up Development Environment
# The following subsections show typical steps to setup your development environment. Setup includes:
#
# * Connecting to a workspace to enable communication between your local machine and remote resources
# * Creating an experiment to track all your runs
# * Creating a remote compute target to use for training
# ### Azure Machine Learning SDK
# Display the Azure Machine Learning SDK version.
# +
import azureml.core
print("Azure Machine Learning SDK Version:", azureml.core.VERSION)
# -
# ### Get Azure Machine Learning workspace
# Get a reference to an existing Azure Machine Learning workspace.
# +
from azureml.core import Workspace
# Reads connection details from the local workspace config file.
ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, sep = ' | ')
# -
# ### Create a new compute resource or attach an existing one
#
# A compute target is a designated compute resource where you run your training and simulation scripts. This location may be your local machine or a cloud-based compute resource. The code below shows how to create a cloud-based compute target. For more information see [What are compute targets in Azure Machine Learning?](https://docs.microsoft.com/en-us/azure/machine-learning/concept-compute-target)
#
# **Note: Creation of a compute resource can take several minutes**. Please make sure to change `STANDARD_D2_V2` to a [size available in your region](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines).
# +
from azureml.core.compute import AmlCompute, ComputeTarget
import os
# Choose a name and maximum size for your cluster
compute_name = "cpu-cluster-d2"
compute_min_nodes = 0
compute_max_nodes = 4
vm_size = "STANDARD_D2_V2"
# Reuse an existing cluster with this name if one is attached to the workspace.
if compute_name in ws.compute_targets:
    print("Found an existing compute target of name: " + compute_name)
    compute_target = ws.compute_targets[compute_name]
    # Note: you may want to make sure compute_target is of type AmlCompute
else:
    print("Creating new compute target...")
    provisioning_config = AmlCompute.provisioning_configuration(
        vm_size=vm_size,
        min_nodes=compute_min_nodes,
        max_nodes=compute_max_nodes)
    # Create the cluster
    compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)
    compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)
print(compute_target.get_status().serialize())
# -
# ### Create Azure Machine Learning experiment
# Create an experiment to track the runs in your workspace.
# +
from azureml.core.experiment import Experiment
experiment_name = 'CartPole-v0-SC'
exp = Experiment(workspace=ws, name=experiment_name)
# -
# ## Train Cartpole Agent
# To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct reinforcement learning run configurations for the underlying reinforcement learning framework. Reinforcement Learning in Azure Machine Learning supports the open source [Ray framework](https://ray.io/) and its highly customizable [RLlib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLlib framework to train a cartpole playing agent.
# ### Create reinforcement learning estimator
#
# The code below creates an instance of *ReinforcementLearningEstimator*, `training_estimator`, which then will be used to submit a job to Azure Machine Learning to start the Ray experiment run.
#
# Note that this example is purposely simplified to the minimum. Here is a short description of the parameters we are passing into the constructor:
#
# - `source_directory`, local directory containing your training script(s) and helper modules,
# - `entry_script`, path to your entry script relative to the source directory,
# - `script_params`, constant parameters to be passed to each run of training script,
# - `compute_target`, reference to the compute target in which the trainer and worker(s) jobs will be executed,
# - `rl_framework`, the reinforcement learning framework to be used (currently must be Ray).
#
# We use the `script_params` parameter to pass in general and algorithm-specific parameters to the training script.
#
# +
from azureml.contrib.train.rl import ReinforcementLearningEstimator, Ray
from azureml.core.environment import Environment
training_algorithm = "PPO"
rl_environment = "CartPole-v0"
# Set to True to record mp4 videos of training episodes (requires the xvfb docker env below).
video_capture = True
if video_capture:
    algorithm_config = '\'{"num_gpus": 0, "num_workers": 1, "monitor": true}\''
else:
    algorithm_config = '\'{"num_gpus": 0, "num_workers": 1, "monitor": false}\''
script_params = {
    # Training algorithm
    "--run": training_algorithm,
    # Training environment
    "--env": rl_environment,
    # Algorithm-specific parameters
    "--config": algorithm_config,
    # Stop conditions
    "--stop": '\'{"episode_reward_mean": 200, "time_total_s": 300}\'',
    # Frequency of taking checkpoints
    "--checkpoint-freq": 2,
    # If a checkpoint should be taken at the end - optional argument with no value
    "--checkpoint-at-end": "",
    # Log directory
    "--local-dir": './logs'
}
xvfb_env = None
if video_capture:
    # Ray's video capture support requires to run everything under a headless display driver called (xvfb).
    # There are two parts to this:
    # 1. Use a custom docker file with proper instructions to install xvfb, ffmpeg, python-opengl
    #    and other dependencies.
    with open("files/docker/Dockerfile", "r") as f:
        dockerfile=f.read()
    xvfb_env = Environment(name='xvfb-vdisplay')
    xvfb_env.docker.base_image = None
    xvfb_env.docker.base_dockerfile = dockerfile
    # 2. Execute the Python process via the xvfb-run command to set up the headless display driver.
    xvfb_env.python.user_managed_dependencies = True
    xvfb_env.python.interpreter_path = "xvfb-run -s '-screen 0 640x480x16 -ac +extension GLX +render' python"
training_estimator = ReinforcementLearningEstimator(
    # Location of source files
    source_directory='files',
    # Python script file
    entry_script='cartpole_training.py',
    # A dictionary of arguments to pass to the training script specified in ``entry_script``
    script_params=script_params,
    # The Azure Machine Learning compute target set up for Ray head nodes
    compute_target=compute_target,
    # Reinforcement learning framework. Currently must be Ray.
    rl_framework=Ray(),
    # Custom environment for Xvfb
    environment=xvfb_env
)
# -
# ### Training script
#
# As recommended in RLlib documentations, we use Ray Tune API to run the training algorithm. All the RLlib built-in trainers are compatible with the Tune API. Here we use `tune.run()` to execute a built-in training algorithm. For convenience, down below you can see part of the entry script where we make this call.
#
# This is the list of parameters we are passing into `tune.run()` via the `script_params` parameter:
#
# - `run_or_experiment`: name of the [built-in algorithm](https://ray.readthedocs.io/en/latest/rllib-algorithms.html#rllib-algorithms), 'PPO' in our example,
# - `config`: Algorithm-specific configuration. This includes specifying the environment, `env`, which in our example is the gym **[CartPole-v0](https://gym.openai.com/envs/CartPole-v0/)** environment,
# - `stop`: stopping conditions, which could be any of the metrics returned by the trainer. Here we use "mean of episode reward", and "total training time in seconds" as stop conditions, and
# - `checkpoint_freq` and `checkpoint_at_end`: Frequency of taking checkpoints (number of training iterations between checkpoints), and if a checkpoint should be taken at the end.
#
# We also specify the `local_dir`, the directory in which the training logs, checkpoints and other training artifacts will be recorded.
#
# See [RLlib Training APIs](https://ray.readthedocs.io/en/latest/rllib-training.html#rllib-training-apis) for more details, and also [Training (tune.run, tune.Experiment)](https://ray.readthedocs.io/en/latest/tune/api_docs/execution.html#training-tune-run-tune-experiment) for the complete list of parameters.
#
# ```python
# import ray
# import ray.tune as tune
#
# if __name__ == "__main__":
#
# # parse arguments ...
#
# # Initialize ray
# ray.init(address=args.ray_address)
#
# # Run training task using tune.run
# tune.run(
# run_or_experiment=args.run,
# config=dict(args.config, env=args.env),
# stop=args.stop,
# checkpoint_freq=args.checkpoint_freq,
# checkpoint_at_end=args.checkpoint_at_end,
# local_dir=args.local_dir
# )
# ```
# ### Submit the estimator to start experiment
# Now we use the *training_estimator* to submit a run.
# Submitting returns immediately; training proceeds on the compute target
# (see wait_for_completion below).
training_run = exp.submit(training_estimator)
# ### Monitor experiment
#
# Azure Machine Learning provides a Jupyter widget to show the status of an experiment run. You could use this widget to monitor the status of the runs.
#
# Note that _ReinforcementLearningEstimator_ creates at least two runs: (a) A parent run, i.e. the run returned above, and (b) a collection of child runs. The number of the child runs depends on the configuration of the reinforcement learning estimator. In our simple scenario, configured above, only one child run will be created.
#
# The widget will show a list of the child runs as well. You can click on the link under **Status** to see the details of a child run. It will also show the metrics being logged.
# +
from azureml.widgets import RunDetails
RunDetails(training_run).show()
# -
# ### Stop the run
# To stop the run, call `training_run.cancel()`.
# +
# Uncomment line below to cancel the run
#training_run.cancel()
# -
# ### Wait for completion
# Wait for the run to complete before proceeding.
#
# **Note: The length of the run depends on the provisioning time of the compute target and it may take several minutes to complete.**
training_run.wait_for_completion()
# ### Get a handle to the child run
# You can obtain a handle to the child run as follows. In our scenario, there is only one child run, we have it called `child_run_0`.
# +
import time
child_run_0 = None
# Poll for up to ~30 seconds for the platform to register the child run.
timeout = 30
while timeout > 0 and not child_run_0:
    child_runs = list(training_run.get_children())
    print('Number of child runs:', len(child_runs))
    if len(child_runs) > 0:
        child_run_0 = child_runs[0]
        break
    time.sleep(2) # Wait for 2 seconds
    timeout -= 2
# NOTE(review): if no child run appears within the timeout, child_run_0 stays None
# and the Run(...) lookup below will fail.
print('Child run info:')
print(child_run_0)
# -
# ### Get access to training artifacts
# We can simply use run id to get a handle to an in-progress or a previously concluded run.
# +
from azureml.core import Run
run_id = child_run_0.id # Or set to run id of a completed run (e.g. 'rl-cartpole-v0_1587572312_06e04ace_head')
child_run_0 = Run(exp, run_id=run_id)
# -
# Now we can use the Run API to download policy training artifacts (saved model and checkpoints) to local compute.
# +
from os import path
from distutils import dir_util
# NOTE(review): distutils is deprecated since Python 3.10 -- consider shutil/os.makedirs.
path_prefix = path.join("logs", training_algorithm)
print("Path prefix:", path_prefix)
# Clear any artifacts left over from a previous download.
if path.exists(path_prefix):
    dir_util.remove_tree(path_prefix)
# Uncomment line below to download run artifacts to local compute
#child_run_0.download_files(path_prefix)
# -
# ### Create a dataset of training artifacts
# To evaluate a trained policy (a checkpoint) we need to make the checkpoint accessible to the rollout script. All the training artifacts are stored in workspace default datastore under **azureml/<run_id>** directory.
#
# Here we create a file dataset from the stored artifacts, and then use this dataset to feed these data to rollout estimator.
# +
from azureml.core import Dataset
run_id = child_run_0.id # Or set to run id of a completed run (e.g. 'rl-cartpole-v0_1587572312_06e04ace_head')
run_artifacts_path = os.path.join('azureml', run_id)
print("Run artifacts path:", run_artifacts_path)
# Create a file dataset object from the files stored on default datastore
datastore = ws.get_default_datastore()
# The '**' glob includes everything under the run's artifact directory.
training_artifacts_ds = Dataset.File.from_files(datastore.path(os.path.join(run_artifacts_path, '**')))
# -
# To verify, we can print out the number (and paths) of all the files in the dataset, as follows.
# +
artifacts_paths = training_artifacts_ds.to_path()
print("Number of files in dataset:", len(artifacts_paths))
# Uncomment line below to print all file paths
#print("Artifacts dataset file paths: ", artifacts_paths)
# -
# ### Display movies of selected training episodes
#
# Ray creates video output of selected training episodes in mp4 format. Here we will display two of these, i.e. the first and the last recorded videos, so you could see the improvement of the agent after training.
#
# First we introduce a few helper functions: a function to download the movies from our dataset, another one to find mp4 movies in a local directory, and one more to display a downloaded movie.
# +
import shutil

# A helper function to download (copy) movies from a dataset to local directory
def download_movies(artifacts_ds, movies, destination):
    """Copy the given movie files from a file dataset into a local directory.

    Parameters
    ----------
    artifacts_ds : AzureML File dataset holding the run artifacts.
    movies : iterable of dataset-relative paths. May contain None entries
        (e.g. when fewer movies were recorded than requested); they are skipped.
    destination : local directory to copy into; it is recreated from scratch.
    """
    # Drop missing entries so callers may pass e.g. [first, last] even when
    # fewer than two movies were recorded (previously this crashed on None).
    movies = [movie for movie in movies if movie]
    # Create the local destination directory
    if path.exists(destination):
        dir_util.remove_tree(destination)
    dir_util.mkpath(destination)
    try:
        print("Trying mounting dataset and copying movies.")
        # Note: dataset paths start with '/', which we strip before joining.
        mount_context = artifacts_ds.mount()
        mount_context.start()
        try:
            for movie in movies:
                print('Copying {} ...'.format(movie))
                shutil.copy2(path.join(mount_context.mount_point, movie[1:]), destination)
        finally:
            # Always unmount, even if a copy fails (previously the mount leaked).
            mount_context.stop()
    except OSError as e:
        print("Mounting failed with error '{0}'. Going with dataset download.".format(e))
        for i, artifact in enumerate(artifacts_ds.to_path()):
            if artifact in movies:
                print('Downloading {} ...'.format(artifact))
                artifacts_ds.skip(i).take(1).download(target_path=destination, overwrite=True)
    print('Downloading movies completed!')
# A helper function to find movies in a directory
def find_movies(movie_path):
    """Return the paths of all .mp4 files found under *movie_path* (recursive)."""
    print("Looking in path:", movie_path)
    mp4_movies = [
        path.join(root, name)
        for root, _, files in os.walk(movie_path)
        for name in files
        if name.endswith('.mp4')
    ]
    print('Found {} movies'.format(len(mp4_movies)))
    return mp4_movies
# A helper function to display a movie
from IPython.core.display import display, HTML

def display_movie(movie_file):
    """Render *movie_file* inline as an autoplaying, looping HTML5 <video> element.

    The backslashes inside the string literal are line continuations, so the
    markup is emitted as a single line.
    """
    display(
        HTML('\
        <video alt="cannot display video" autoplay loop> \
        <source src="{}" type="video/mp4"> \
        </video>'.format(movie_file)
        )
    )
# -
# Now let's find the first and the last recorded videos in training artifacts dataset and download them to a local directory.
# +
# Find first and last movie
mp4_files = [file for file in training_artifacts_ds.to_path() if file.endswith('.mp4')]
mp4_files.sort()
first_movie = mp4_files[0] if len(mp4_files) > 0 else None
# Require at least two movies here so "last" is distinct from "first".
last_movie = mp4_files[-1] if len(mp4_files) > 1 else None
print("First movie:", first_movie)
print("Last movie:", last_movie)

# Download movies
training_movies_path = path.join("training", "videos")
download_movies(training_artifacts_ds, [first_movie, last_movie], training_movies_path)
# -
# Look for the downloaded movies in the local directory and sort them.
mp4_files = find_movies(training_movies_path)
mp4_files.sort()
# Display a movie of the first training episode. This is how the agent performs with no training.
# +
first_movie = mp4_files[0] if len(mp4_files) > 0 else None
print("First movie:", first_movie)
display_movie(first_movie)
# -
# Display a movie of the last training episode. This is how a fully-trained agent performs.
# +
last_movie = mp4_files[-1] if len(mp4_files) > 0 else None
print("Last movie:", last_movie)
display_movie(last_movie)
# -
# ## Evaluate Trained Agent and See Results
#
# We can evaluate a previously trained policy using the `rollout.py` helper script provided by RLlib (see [Evaluating Trained Policies](https://ray.readthedocs.io/en/latest/rllib-training.html#evaluating-trained-policies) for more details). Here we use an adaptation of this script to reconstruct a policy from a checkpoint taken and saved during training. We took these checkpoints by setting `checkpoint-freq` and `checkpoint-at-end` parameters above.
# In this section we show how to use these checkpoints to evaluate the trained policy.
# ### Evaluate a trained policy
# We need to configure another reinforcement learning estimator, `rollout_estimator`, and then use it to submit another run. Note that the entry script for this estimator now points to `cartpole-rollout.py` script.
# Also note how we pass the checkpoints dataset to this script using `inputs` parameter of the _ReinforcementLearningEstimator_.
#
# We are using script parameters to pass in the same algorithm and the same environment used during training. We also specify the checkpoint number of the checkpoint we wish to evaluate, `checkpoint-number`, and number of the steps we shall run the rollout, `steps`.
#
# The training artifacts dataset will be accessible to the rollout script as a mounted folder. The mounted folder and the checkpoint number, passed in via `checkpoint-number`, will be used to create a path to the checkpoint we are going to evaluate. The created checkpoint path then will be passed into RLlib rollout script for evaluation.
#
# Let's find the checkpoints and the last checkpoint number first.
# +
# Find checkpoints and last checkpoint number
# Checkpoint files are named 'checkpoint-<n>'; skip the '.tune_metadata' companions.
checkpoint_files = [
    os.path.basename(file) for file in training_artifacts_ds.to_path() \
    if os.path.basename(file).startswith('checkpoint-') and \
    not os.path.basename(file).endswith('tune_metadata')
]

checkpoint_numbers = []
for file in checkpoint_files:
    # Extract <n> from 'checkpoint-<n>'.
    checkpoint_numbers.append(int(file.split('-')[1]))
print("Checkpoints:", checkpoint_numbers)

last_checkpoint_number = max(checkpoint_numbers)
print("Last checkpoint number:", last_checkpoint_number)
# -
# Now let's configure rollout estimator. Note that we use the last checkpoint for evaluation. The assumption is that the last checkpoint points to our best trained agent. You may change this to any of the checkpoint numbers printed above and observe the effect.
# +
script_params = {
    # Checkpoint number of the checkpoint from which to roll out
    "--checkpoint-number": last_checkpoint_number,
    # Training algorithm
    "--run": training_algorithm,
    # Training environment
    "--env": rl_environment,
    # Algorithm-specific parameters
    "--config": '{}',
    # Number of rollout steps
    "--steps": 2000,
    # Whether to suppress rendering of the environment
    "--no-render": "",
    # The place where recorded videos will be stored
    "--video-dir": "./logs/video"
}

# Rendering and video capture are mutually exclusive: keep exactly one option.
if video_capture:
    script_params.pop("--no-render")
else:
    script_params.pop("--video-dir")

# Ray's video capture support requires to run everything under a headless display driver called (xvfb).
# There are two parts to this:
# 1. Use a custom docker file with proper instructions to install xvfb, ffmpeg, python-opengl
#    and other dependencies.
#    Note: Even when the rendering is off python-opengl is needed.
with open("files/docker/Dockerfile", "r") as f:
    dockerfile=f.read()

xvfb_env = Environment(name='xvfb-vdisplay')
xvfb_env.docker.base_image = None
xvfb_env.docker.base_dockerfile = dockerfile

# 2. Execute the Python process via the xvfb-run command to set up the headless display driver.
xvfb_env.python.user_managed_dependencies = True
if video_capture:
    xvfb_env.python.interpreter_path = "xvfb-run -s '-screen 0 640x480x16 -ac +extension GLX +render' python"

rollout_estimator = ReinforcementLearningEstimator(
    # Location of source files
    source_directory='files',
    # Python script file
    entry_script='cartpole_rollout.py',
    # A dictionary of arguments to pass to the rollout script specified in ``entry_script``
    script_params = script_params,
    # Data inputs
    inputs=[
        training_artifacts_ds.as_named_input('artifacts_dataset'),
        training_artifacts_ds.as_named_input('artifacts_path').as_mount()],
    # The Azure Machine Learning compute target set up for Ray head nodes
    compute_target=compute_target,
    # Reinforcement learning framework. Currently must be Ray.
    rl_framework=Ray(),
    # Custom environment for Xvfb
    environment=xvfb_env)
# -
# Same as before, we use the *rollout_estimator* to submit a run.
rollout_run = exp.submit(rollout_estimator)
# And then, similar to the training section, we can monitor the real-time progress of the rollout run and its child as follows. If you browse logs of the child run you can see the evaluation results recorded in driver_log.txt file. Note that you may need to wait several minutes before these results become available.
RunDetails(rollout_run).show()
# Wait for completion of the rollout run before moving to the next section, or you may cancel the run.
# Uncomment line below to cancel the run
#rollout_run.cancel()
rollout_run.wait_for_completion()
# ### Display movies of selected rollout episodes
#
# To display recorded movies first we download recorded videos to local machine. Here again we create a dataset of rollout artifacts and use the helper functions introduced above to download and display rollout videos.
# +
# Get a handle to child run
child_runs = list(rollout_run.get_children())
print('Number of child runs:', len(child_runs))
child_run_0 = child_runs[0]

run_id = child_run_0.id  # Or set to run id of a completed run (e.g. 'rl-cartpole-v0_1587572312_06e04ace_head')
run_artifacts_path = os.path.join('azureml', run_id)
print("Run artifacts path:", run_artifacts_path)

# Create a file dataset object from the files stored on default datastore
datastore = ws.get_default_datastore()
rollout_artifacts_ds = Dataset.File.from_files(datastore.path(os.path.join(run_artifacts_path, '**')))

artifacts_paths = rollout_artifacts_ds.to_path()
print("Number of files in dataset:", len(artifacts_paths))
# -
# Now, similar to the training section, we look for the last video.
# +
# Find last movie
mp4_files = [file for file in rollout_artifacts_ds.to_path() if file.endswith('.mp4')]
mp4_files.sort()
# Fix: use > 0 here. The training cell required > 1 only so that "last" would
# be distinct from "first"; for the rollout we only need the last movie, so a
# single recorded movie is enough.
last_movie = mp4_files[-1] if len(mp4_files) > 0 else None
print("Last movie:", last_movie)

# Download last movie
rollout_movies_path = path.join("rollout", "videos")
download_movies(rollout_artifacts_ds, [last_movie], rollout_movies_path)

# Look for the downloaded movie in local directory
mp4_files = find_movies(rollout_movies_path)
mp4_files.sort()
# -
# Display last video recorded during the rollout.
# +
last_movie = mp4_files[-1] if len(mp4_files) > 0 else None
print("Last movie:", last_movie)
display_movie(last_movie)
# -
# -
# ## Cleaning up
# For your convenience, below you can find code snippets to clean up any resources created as part of this tutorial that you don't wish to retain.
# +
# To archive the created experiment:
#exp.archive()
# To delete the compute target:
#compute_target.delete()
# To delete downloaded training artifacts
#if os.path.exists(path_prefix):
# dir_util.remove_tree(path_prefix)
# To delete downloaded training videos
#if path.exists(training_movies_path):
# dir_util.remove_tree(training_movies_path)
# To delete downloaded rollout videos
#if path.exists(rollout_movies_path):
# dir_util.remove_tree(rollout_movies_path)
# -
# ## Next
# This example was about running Reinforcement Learning in Azure Machine Learning (Ray/RLlib Framework) on a single compute. Please see [Pong Problem](../atari-on-distributed-compute/pong_rllib.ipynb)
# example which uses Ray RLlib to train a Pong playing agent on a multi-node cluster.
|
how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import math
import random
import pandas as pd
import os
import matplotlib.pyplot as plt
import cv2
import glob
import gc
from utils import *
from tqdm import tqdm
import pickle
from keras.optimizers import *
from keras.models import Model
from keras.layers import *
from keras.layers.core import *
from keras.layers.convolutional import *
from keras import backend as K
import tensorflow as tf
# -
# # Initialize the setting
# Pin GPU enumeration to PCI bus order and restrict TensorFlow to GPU #2 only.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="2"
# TF1-style session; log_device_placement prints which device each op runs on.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# +
random.seed(1234)

class Config():
    """Hyper-parameters and data locations for the JHMDB DD-Net experiments."""

    def __init__(self):
        # Scalar hyper-parameters, identical to the original hand-written
        # assignments but declared in one place.
        scalar_defaults = {
            "frame_l": 32,   # the length of frames
            "joint_n": 25,   # the number of joints
            "joint_d": 2,    # the dimension of joints
            "clc_num": 21,   # the number of classes
            "feat_d": 300,   # per-frame feature vector size
            "filters": 64,   # base convolution width
        }
        for attr_name, attr_value in scalar_defaults.items():
            setattr(self, attr_name, attr_value)
        # Root directory of the preprocessed OpenPose JHMDB data.
        self.data_dir = os.path.join(os.path.abspath(''), '..', 'data', 'openpose_all_jhmdb_hybrid')

C = Config()
# -
# -
def data_generator(T, C, le):
    """Build the network inputs from a raw sample dict.

    Parameters
    ----------
    T : dict with keys 'pose' (list of pose sequences) and 'label'.
    C : Config instance (frame_l, joint_n, joint_d, clc_num).
    le : fitted sklearn LabelEncoder for the labels.

    Returns
    -------
    X_0 : stacked per-sample feature matrices from get_CG.
    X_1 : stacked zoomed pose sequences.
    Y : stacked one-hot label vectors of length C.clc_num.
    """
    X_0 = []
    X_1 = []
    Y = []
    # Fix: encode all labels once up front; the original called
    # le.transform(T['label']) inside the loop, re-encoding the whole
    # label list on every iteration (O(n^2) overall).
    encoded_labels = le.transform(T['label'])
    for i in tqdm(range(len(T['pose']))):
        p = np.copy(T['pose'][i])
        # zoom/get_CG come from utils (not shown here).
        p = zoom(p, target_l=C.frame_l, joints_num=C.joint_n, joints_dim=C.joint_d)
        label = np.zeros(C.clc_num)
        # NOTE(review): LabelEncoder yields codes 0..clc_num-1, so code 0 maps
        # to index -1 (the last slot) here -- confirm the labels are 1-based.
        label[encoded_labels[i] - 1] = 1
        M = get_CG(p, C)
        X_0.append(M)
        X_1.append(p)
        Y.append(label)
    X_0 = np.stack(X_0)
    X_1 = np.stack(X_1)
    Y = np.stack(Y)
    return X_0, X_1, Y
# # Building the model
# +
def poses_diff(x):
    """Temporal difference of a 4-D pose tensor, resized back to its original length.

    Computes x[:, t+1] - x[:, t] (one fewer frame) and nearest-neighbour
    resizes along axes 1 and 2 back to the input (frames, joints) size so the
    output shape matches the input.  TF1 API: .value unwraps Dimension objects.
    """
    H, W = x.get_shape()[1],x.get_shape()[2]
    x = tf.subtract(x[:,1:,...],x[:,:-1,...])
    # align_corners must remain False here (original note: "should not alignment here").
    x = tf.image.resize_nearest_neighbor(x,size=[H.value,W.value],align_corners=False)
    return x
def pose_motion(P,frame_l):
    """Build slow and fast motion streams from the pose input P.

    Slow stream: frame-to-frame differences over all frame_l frames.
    Fast stream: differences over every 2nd frame (length frame_l/2).
    Both are flattened to (time, joints*dims) with Reshape.
    """
    P_diff_slow = Lambda(lambda x: poses_diff(x))(P)
    P_diff_slow = Reshape((frame_l,-1))(P_diff_slow)
    # Subsample every other frame for the fast stream.
    P_fast = Lambda(lambda x: x[:,::2,...])(P)
    P_diff_fast = Lambda(lambda x: poses_diff(x))(P_fast)
    P_diff_fast = Reshape((int(frame_l/2),-1))(P_diff_fast)
    return P_diff_slow,P_diff_fast
def c1D(x, filters, kernel):
    """Conv1D ('same' padding, no bias) -> BatchNorm -> LeakyReLU(0.2)."""
    conv = Conv1D(filters, kernel_size=kernel, padding='same', use_bias=False)
    out = conv(x)
    out = BatchNormalization()(out)
    return LeakyReLU(alpha=0.2)(out)
def block(x, filters):
    """Two stacked width-3 c1D convolution blocks."""
    for _ in range(2):
        x = c1D(x, filters, 3)
    return x
def d1D(x, filters):
    """Dense (no bias) -> BatchNorm -> LeakyReLU(0.2)."""
    hidden = Dense(filters, use_bias=False)(x)
    hidden = BatchNormalization()(hidden)
    return LeakyReLU(alpha=0.2)(hidden)
def build_FM(frame_l=32,joint_n=22,joint_d=2,feat_d=231,filters=16):
    """Feature model: fuses the feature stream M with slow/fast pose-motion streams.

    Inputs
    ------
    M : (frame_l, feat_d) per-frame feature vectors.
    P : (frame_l, joint_n, joint_d) raw joint coordinates.

    Returns a Model mapping [M, P] to a pooled temporal feature sequence.
    """
    M = Input(shape=(frame_l,feat_d))
    P = Input(shape=(frame_l,joint_n,joint_d))
    diff_slow,diff_fast = pose_motion(P,frame_l)

    # M branch: 1-3-1 convolutions, then halve the time axis.
    x = c1D(M,filters*2,1)
    x = SpatialDropout1D(0.1)(x)
    x = c1D(x,filters,3)
    x = SpatialDropout1D(0.1)(x)
    x = c1D(x,filters,1)
    x = MaxPooling1D(2)(x)
    x = SpatialDropout1D(0.1)(x)

    # Slow-motion branch: same 1-3-1 pattern, also pooled by 2.
    x_d_slow = c1D(diff_slow,filters*2,1)
    x_d_slow = SpatialDropout1D(0.1)(x_d_slow)
    x_d_slow = c1D(x_d_slow,filters,3)
    x_d_slow = SpatialDropout1D(0.1)(x_d_slow)
    x_d_slow = c1D(x_d_slow,filters,1)
    x_d_slow = MaxPool1D(2)(x_d_slow)
    x_d_slow = SpatialDropout1D(0.1)(x_d_slow)

    # Fast-motion branch: NOT pooled -- it is already frame_l/2 long because
    # pose_motion built it from every 2nd frame.
    x_d_fast = c1D(diff_fast,filters*2,1)
    x_d_fast = SpatialDropout1D(0.1)(x_d_fast)
    x_d_fast = c1D(x_d_fast,filters,3)
    x_d_fast = SpatialDropout1D(0.1)(x_d_fast)
    x_d_fast = c1D(x_d_fast,filters,1)
    x_d_fast = SpatialDropout1D(0.1)(x_d_fast)

    # All three branches now have the same temporal length; fuse channel-wise
    # and deepen with widening conv blocks.
    x = concatenate([x,x_d_slow,x_d_fast])
    x = block(x,filters*2)
    x = MaxPool1D(2)(x)
    x = SpatialDropout1D(0.1)(x)

    x = block(x,filters*4)
    x = MaxPool1D(2)(x)
    x = SpatialDropout1D(0.1)(x)

    x = block(x,filters*8)
    x = SpatialDropout1D(0.1)(x)

    return Model(inputs=[M,P],outputs=x)
def build_DD_Net(C):
    """Assemble the full DD-Net classifier from config C.

    Wraps the feature model (build_FM), global-max-pools over time, and adds a
    two-layer dense head with dropout ending in a softmax over C.clc_num classes.
    """
    M = Input(name='M', shape=(C.frame_l,C.feat_d))
    P = Input(name='P', shape=(C.frame_l,C.joint_n,C.joint_d))

    FM = build_FM(C.frame_l,C.joint_n,C.joint_d,C.feat_d,C.filters)
    x = FM([M,P])

    # Collapse the temporal dimension.
    x = GlobalMaxPool1D()(x)

    x = d1D(x,128)
    x = Dropout(0.5)(x)
    x = d1D(x,128)
    x = Dropout(0.5)(x)
    x = Dense(C.clc_num, activation='softmax')(x)

    ######################Self-supervised part
    model = Model(inputs=[M,P],outputs=x)
    return model
# -
DD_Net = build_DD_Net(C)
DD_Net.summary()
# ## Train and test on GT_split 1
# +
Train = pickle.load(open(os.path.join(C.data_dir, "GT_train_1.pkl"), "rb"))
Test = pickle.load(open(os.path.join(C.data_dir, "GT_test_1.pkl"), "rb"))

from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(Train['label'])

X_0,X_1,Y = data_generator(Train,C,le)
X_test_0,X_test_1,Y_test = data_generator(Test,C,le)
# -
import keras
# Stage 1: higher learning rate with plateau-based decay.
# NOTE(review): lowercase `adam` comes from `from keras.optimizers import *`
# (old-Keras alias of Adam).
lr = 1e-3
DD_Net.compile(loss="categorical_crossentropy",optimizer=adam(lr),metrics=['accuracy'])
lrScheduler = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, cooldown=5, min_lr=5e-6)
# Full-batch training: batch_size equals the whole training set.
history = DD_Net.fit([X_0,X_1],Y,
                     batch_size=len(Y),
                     epochs=600,
                     verbose=True,
                     shuffle=True,
                     callbacks=[lrScheduler],
                     validation_data=([X_test_0,X_test_1],Y_test)
                     )
# Stage 2: continue training at a lower learning rate.
lr = 1e-4
DD_Net.compile(loss="categorical_crossentropy",optimizer=adam(lr),metrics=['accuracy'])
lrScheduler = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, cooldown=5, min_lr=5e-6)
history = DD_Net.fit([X_0,X_1],Y,
                     batch_size=len(Y),
                     epochs=600,
                     verbose=True,
                     shuffle=True,
                     callbacks=[lrScheduler],
                     validation_data=([X_test_0,X_test_1],Y_test)
                     )
# Plot training & validation accuracy values
# NOTE(review): these keys are 'accuracy'/'val_accuracy' here but 'acc'/'val_acc'
# in the later split cells; which one exists depends on the Keras version.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Ad-hoc evaluation on an external pose pickle.
pose_label = pickle.load(open('/home/ubuntu/roger_actions_output/pose_label.pkl', 'rb'))
x_0, x_1, y = data_generator(pose_label,C,le)
for result in y:
    print(np.argmax(result))
# NOTE(review): this predicts on the training tensors X_0/X_1, not on the
# freshly generated x_0/x_1 above -- confirm which was intended.
output = DD_Net.predict([X_0,X_1])
for i, label in zip(range(len(X_0)), Train['label']):
    print(np.argmax(output[i]), label)
le.inverse_transform([20])
le.transform(['clap'])
y[2].shape
# ## Train and test on GT_split 2
# +
# Fix: C.data_dir is built with os.path.join and has no trailing separator, so
# plain concatenation produced '...openpose_all_jhmdb_hybridGT_train_2.pkl'.
# Join the path components instead, exactly as the GT_split 1 cell does.
Train = pickle.load(open(os.path.join(C.data_dir, "GT_train_2.pkl"), "rb"))
Test = pickle.load(open(os.path.join(C.data_dir, "GT_test_2.pkl"), "rb"))

from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(Train['label'])

X_0,X_1,Y = data_generator(Train,C,le)
X_test_0,X_test_1,Y_test = data_generator(Test,C,le)
# -
# Re-initialize weights, since training and testing data switch
DD_Net = build_DD_Net(C)
import keras
lr = 1e-3
DD_Net.compile(loss="categorical_crossentropy",optimizer=adam(lr),metrics=['accuracy'])
# NOTE(review): lrScheduler is created but the callback is commented out in the
# fit calls below, so it has no effect in this split.
lrScheduler = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, cooldown=5, min_lr=5e-6)
history = DD_Net.fit([X_0,X_1],Y,
                     batch_size=len(Y),
                     epochs=600,
                     verbose=True,
                     shuffle=True,
                     #callbacks=[lrScheduler],
                     validation_data=([X_test_0,X_test_1],Y_test)
                     )
# Second stage at a lower learning rate.
lr = 1e-4
DD_Net.compile(loss="categorical_crossentropy",optimizer=adam(lr),metrics=['accuracy'])
lrScheduler = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, cooldown=5, min_lr=5e-6)
history = DD_Net.fit([X_0,X_1],Y,
                     batch_size=len(Y),
                     epochs=600,
                     verbose=True,
                     shuffle=True,
                     #callbacks=[lrScheduler],
                     validation_data=([X_test_0,X_test_1],Y_test)
                     )
# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# ## Train and test on GT_split 3
# +
# Fix: same missing-path-separator bug as split 2; use os.path.join so the
# pickle files are actually found inside C.data_dir.
Train = pickle.load(open(os.path.join(C.data_dir, "GT_train_3.pkl"), "rb"))
Test = pickle.load(open(os.path.join(C.data_dir, "GT_test_3.pkl"), "rb"))

from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(Train['label'])

X_0,X_1,Y = data_generator(Train,C,le)
X_test_0,X_test_1,Y_test = data_generator(Test,C,le)
# -
# Re-initialize weights, since training and testing data switch
DD_Net = build_DD_Net(C)
# +
import keras
# Two-stage training schedule (1e-3 then 1e-4), as in the earlier splits.
lr = 1e-3
DD_Net.compile(loss="categorical_crossentropy",optimizer=adam(lr),metrics=['accuracy'])
lrScheduler = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, cooldown=5, min_lr=5e-6)
history = DD_Net.fit([X_0,X_1],Y,
                     batch_size=len(Y),
                     epochs=600,
                     verbose=True,
                     shuffle=True,
                     callbacks=[lrScheduler],
                     validation_data=([X_test_0,X_test_1],Y_test)
                     )
lr = 1e-4
DD_Net.compile(loss="categorical_crossentropy",optimizer=adam(lr),metrics=['accuracy'])
lrScheduler = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, cooldown=5, min_lr=5e-6)
history = DD_Net.fit([X_0,X_1],Y,
                     batch_size=len(Y),
                     epochs=600,
                     verbose=True,
                     shuffle=True,
                     callbacks=[lrScheduler],
                     validation_data=([X_test_0,X_test_1],Y_test)
                     )
# -
# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# ## Calculate average
# Mean of the best accuracies observed on the three GT splits (hand-copied numbers).
(0.76 + 0.80 + 0.755)/3
|
JHMDB/jhmdb_1D_heavy.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SageMath 9.0
# language: sage
# name: sagemath
# ---
# This challenge implements a KMOV cryptosystem based on twisted Edwards curve (see [[1]](#1)), the vulnerablity comes from the generation of the public exponent $e$ parameter, here $e$ satisfies the equation:
#
# $$ex+(p + 1)(q + 1)y = z$$
#
# where $x$, $y$ and $z$ satisfy:
#
# $$xy < \frac{\sqrt{2N}}{12} \quad {\rm and} \quad \left|z \right| < \frac{(p - q)N^{0.21}y}{3(p + q)}$$
#
# In this case, we can use Diophantine approximations to find $x$ and $y$ among the convergents of the continued fraction expansion of $\frac{e}{N}$ (see [[2]](#2), Theorem 1, 2), after finding $x$ and $y$, we can get an approximation $\tilde{p}$ of $p$ satisfying $|p - \tilde{p}| < N^{0.21}$, which leads to the factorization of $N$ by using Coppersmith's method for finding small roots of modular polynomial equations (see [[2]](#2), Theorem 4).
#
# After that, we can get the private key of this challenge, the only thing left is to figure out that the encryption process is just computing a scalar multiplication on a twisted Edwards curve, since we have already got the private key, we can just use it to decrypt the ciphertext, however, the method used here to compute scalar multiplication is through repeated addition, this is a fully exponential approach and it runs too slow, so that we can simply use the double-and-add algorithm to speed up this process.
#
# Here is my final solver:
# +
#!/usr/bin/env sage
def on_curve(C, P):
    """Return True iff P = (x, y) satisfies a*x^2 + y^2 - d*x^2*y^2 == 1 (mod p),
    i.e. P lies on the twisted Edwards curve C = (a, d, p)."""
    a, d, p = C
    x, y = P
    lhs = (a * x * x + y * y - d * x * x * y * y) % p
    return lhs == 1
def point_add(C, P, Q):
    """Add two points on the twisted Edwards curve C = (a, d, p).

    Standard unified Edwards addition law:
        x3 = (x1*y2 + y1*x2) / (1 + d*x1*x2*y1*y2)  (mod p)
        y3 = (y1*y2 - a*x1*x2) / (1 - d*x1*x2*y1*y2)  (mod p)
    NOTE: inverse_mod is a Sage builtin (this is a SageMath notebook).
    """
    a, d, p = C
    x1, y1 = P
    x2, y2 = Q
    # Sanity check: both operands must lie on the curve.
    assert on_curve(C, P) and on_curve(C, Q)
    x3 = (x1 * y2 + y1 * x2) * inverse_mod(1 + d * x1 * x2 * y1 * y2, p) % p
    y3 = (y1 * y2 - a * x1 * x2) * inverse_mod(1 - d * x1 * x2 * y1 * y2, p) % p
    return (int(x3), int(y3))
def point_mul(C, P, s):
    """Scalar multiplication s*P on curve C via binary double-and-add."""
    acc = (0, 1)  # neutral element of the Edwards addition law
    while s:
        s, bit = divmod(s, 2)
        if bit:
            acc = point_add(C, acc, P)
        P = point_add(C, P, P)
    return acc
bits = 1024
'''
f = open('output.txt', 'rb').read()
data = f.replace(b'(', b'').replace(b')', b'').split(b'\n')
e, N = tuple(map(int, data[0].split(b', ')))
ct = tuple(map(int, data[1].split(b', ')))
'''
e, N = (288969517294013178236187423377607850772706067194956328319540958788120421760563745859661120809993097599452236235703456953461446476016483100948287481999230043898368061651387268308645842547879026821842863879967704742559469599469159759360184157244907772315674219466971226019794131421405331578417729612598931842872757269134756215101980595515566901217084629217607502582265295755863799167702741408881294579819035951888562668951997777236828957162036234849207438819692480197365737237130918496390340939168630111890207700776894851839829623749822549994705192645373973493114436603297829506747411555800330860323339168875710029679, 6321130275268755691320586594611921079666212146561948694592313061609721619539590734495630218941969050343046016393977582794839173726817429324685098585960482266998399162720208269336303520478867387042992449850962809825380612709067651432344409349798118550026702892042869238047094344883994914342037831757447770321791092478847580639207346027164495372017699282907858775577530313354865815011726710796887715414931577176850854690237886239119894136091932619828539390021389626283175740389396541552356118540397518601098858527880603493380691706649684470530670258670128352699647582718206243920566184954440517665446820063779925391893)
ct = (5899152272551058285195694254667877221970753694584926104666866605696215068207480540407327508300257676391022109169902014292744666257465490629821382573289737174334198164333033128913955350103258256280828114875165476209826215601196920761915628274301746678705023551051091500407363159529055081261677043206130866838451325794109635288399010815200512702451748093168790121961904783034526572263126354004237323724559882241164587153748688219172626902108911587291552030335170336301818195688699255375043513696525422124055880380071075595317183172843771015029292369558240259547938684717895057447152729328016698107789678823563841271755, 253027286530960212859400305369275200777004645361154014614791278682230897619117833798134983197915876185668102195590667437488411251835330785944874517235915807926715611143830896296709467978143690346677123639363900536537534596995622179904587739684155397043547262126131676366948937690378306959846311626889534352806134472610026603322329394769864728875293696851590640974817297099985799243285824842399573006841275494668451690794643886677303573329060084436896592291515021246248961538322485059619863786362159459122242131918702862396595818404578595841492379025543989260901540257216728185425462070297720884398220421012139424567)
# Candidate (x, y) pairs from the convergents of e/N (Diophantine approximation step).
res = [(i.denom(), i.numer()) for i in continued_fraction(e / N).convergents()]
# Sage preparser syntax: polynomial ring over Z/NZ in the variable pp.
P.<pp> = PolynomialRing(Zmod(N))
for x, y in res:
    # Only consider convergents whose numerator and denominator sizes are
    # plausible for the attack (both slightly under half the modulus size).
    if Integer(y).nbits() in range(bits // 2 - 8, bits // 2) and Integer(x).nbits() in range(bits // 2 - 8, bits // 2):
        U = (e * x // y) - N - 1
        V = int(sqrt(abs(U**2 - 4 * N)))
        # p_0 approximates p as a root of t^2 - U*t + N (Vieta).
        p_0 = (U+V) // 2
        # NOTE(review): (p_0 << 576) >> 576 is a no-op for non-negative p_0,
        # so f is simply p_0 + pp; Coppersmith's small_roots then finds the
        # correction term bounded by 2^(bits - 576).
        f = ((p_0 << 576) >> 576) + pp
        r = f.small_roots(X = 2**(bits - 576), beta = 0.4)
        if r != []:
            p = int(p_0 + r[0])
            if (N % p == 0) and is_prime(p):
                break
q = N // p
# Private exponent: inverse of e modulo (p + 1)(q + 1).
k = inverse_mod(e, (p + 1) * (q + 1))
# Recover the curve parameter d from the ciphertext point (x, y): with a = -d,
# -d*x^2 + y^2 - d*x^2*y^2 = 1  =>  d = (y^2 - 1) / (x^2 * (y^2 + 1)) mod N.
d = (((ct[1])**2 - 1) * inverse_mod(((ct[1])**2 + 1) * (ct[0])**2, N)) % N
pt = point_mul((-d, d, N), ct, k)
flag = pt[0].to_bytes(32, 'big') + pt[1].to_bytes(32, 'big')
print(flag)
# -
# **P.S.**
#
# * According to ([[2]](#2), Theorem 3), suppose we know an approximation $\tilde{p}$ of $p$ with $|p - \tilde{p}| < N^{0.25}$, then $N$ can be factored in polynomial time, this means for a 2048-bit $N$, given 512 high order bits of $p$ is enough to factorize $N$, but in practice, if you want to use `.small_roots()` method in SageMath to get the result, usually you need 576 bits known to get the result quickly, you can also reduce the number of bits required by reducing the value of the `epsilon` parameter in `.small_roots()` (e.g., 540 bits known with `epsilon = 0.02` and 530 bits with `epsilon = 0.01` worked well), but it will cost more time.
#
# * Also, some new improved attacks on the KMOV cryptosystem have been proposed, these attacks work when the private key is suitably small and the new results improve the former attacks on the KMOV cryptosystem, see ([[3]](#3), Section 3, 4).
#
# * The content of the FLAG is a quote from movie *Scent of a Woman* "You're so wrapped up in sugar, you've forgotten the taste of real honey!"
#
# **References**
#
# <a id="1" href="https://eprint.iacr.org/2019/1051.pdf">[1] <NAME>., <NAME>. A new public key cryptosystem based on Edwards curves. J. Appl. Math. Comput. 61, 431–450 (2019).</a>
#
# <a id="2" href="https://eprint.iacr.org/2011/427.pdf">[2] <NAME>, A new attack on the KMOV cryptosystem, Bulletin of the Korean Mathematical Society 51 (5), 1347–1356, 2014.</a>
#
# <a id="3" href="https://eprint.iacr.org/2019/1052.pdf">[3] Nitaj, Abderrahmane, <NAME>, and <NAME>. "Improved Cryptanalysis of the KMOV Elliptic Curve Cryptosystem." International Conference on Provable Security. Springer, Cham, 2019.</a>
|
Crypto/weird/weird_writeup.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building ANCOR Dataset
# +
from french_crs.ancor2dataset import dataset_builder

# Do-able with two nested loops
# for method in ["balanced", "representative", "window"]:
#     for subcorpus in [["INDIRECTE"],["DIRECTE"],["ANAPHORE"]]:

dataset_params = {
    # There are other three possible strategies: (1)"balanced", (2)"representative" and (3)"window"
    # Fix: the original '"balanced,' was missing its closing quote (SyntaxError).
    "strategy": "balanced",
    "ancor_corpus_path": "../DISTRIB_ANCOR/",
    # All possibilities are: ["corpus_OTG","corpus_UBS","corpus_ESLO" ,"corpus_ESLO_CO2"]
    "sub_corpus_filter": ["corpus_OTG", "corpus_UBS", "corpus_ESLO", "corpus_ESLO_CO2"],
    # All possibilities are: ["DIRECTE", "INDIRECTE", "ANAPHORE"]
    "coreference_type_filter": ["DIRECTE"],
    "dataset_output_folder": "../datasets/",
    # This will be used only in window strategy
    "window_size": 5,
}

dataset = dataset_builder(**dataset_params)
dataset.build_dataset(file_analysis_alert=True)
dataset.merge_dataset(delete_original_after_merge=False)
# +
import glob

json_relations_lined_up = {}
counter = 0
# Build positive and negative coreference instances from every file of the
# four ANCOR sub-corpora.
for sub_corpus in ["corpus_OTG","corpus_UBS","corpus_ESLO_CO2","corpus_ESLO"]:
    ancor_subcorpus_path = "../DISTRIB_ANCOR_EN_CHAINE_Medium/"+sub_corpus+'/'
    # Strip the directory and the 3-char '.aa' extension to get bare file names.
    corpus_files = [file[file.rfind(
        "/")+1:-3] for file in glob.glob(ancor_subcorpus_path+"aa_fichiers/*.aa")]
    list_dataframes = []
    for subfile in corpus_files:
        print("starting",subfile,"at",sub_corpus)
        # NOTE(review): set_file_config / generate_json_* / generate_coref_* are
        # defined in other cells or modules not shown here.
        root_xml,root_aa,data_source=set_file_config("../DISTRIB_ANCOR_EN_CHAINE_Medium/",sub_corpus, subfile)
        print("starting 1")
        json_mentions=generate_json_mentions(root_xml,root_aa)
        print("starting 2")
        json_mentions=remove_empty_json_mentions(json_mentions)
        print("starting 3")
        json_relations=generate_json_relations()
        print("starting 4")
        # json_text=generate_json_text_with_mentions()
        if json_relations == {}:
            continue
        json_coreference_chains=generate_json_chains()
        print("starting 5")
        for pair_id in json_relations:
            if json_relations[pair_id]["TYPE"] in ["DIRECTE", "INDIRECTE", "ANAPHORE"]:
                # Positive (coreferent) instance.
                counter+=1
                coref_features=generate_coref_relations_json_features(pair_id,counter,json_relations,
                                                                      json_mentions,data_source,
                                                                      is_positive_instance=True)
                json_relations_lined_up[counter]=coref_features
                list_mentions_sorted_seperated=[]
                # Sort all mentions of the file by their start offset.
                df_mentions = pd.DataFrame.from_dict(json_mentions, orient='index')
                df_mentions = df_mentions.sort_values(by=['START_ID'])
                list_mentions_sorted = list(df_mentions.index)
                relation_left=json_relations[pair_id]["LEFT_UNIT"]["ID"]
                relation_right=json_relations[pair_id]["RIGHT_UNIT"]["ID"]
                # Normalise orientation: 'left' is the mention that appears first.
                if json_mentions[relation_left]["START_ID"] > json_mentions[relation_right]["START_ID"]:
                    relation_left,relation_right=relation_right,relation_left
                start_index=list_mentions_sorted.index(relation_left)+1
                end_index=list_mentions_sorted.index(relation_right)
                mentions_in_between=list_mentions_sorted[start_index:end_index]
                result="Nothing"
                # Walk backwards from the right mention looking for a nearby
                # mention not in the same chain -> negative example.
                # NOTE(review): the condition fires on the FIRST chain that does
                # not contain both mentions, so almost any in-between mention
                # qualifies -- verify this matches the intended sampling rule.
                for ment in reversed(mentions_in_between):
                    for index, chain in json_coreference_chains.items():
                        if not (ment in json_coreference_chains[index] and relation_right in json_coreference_chains[index]):
                            result=(ment,relation_right)
                            break
                    else:
                        # Inner loop did not break: try the next mention.
                        continue
                    # Inner loop broke: stop searching.
                    break
                if result!="Nothing":
                    counter+=1
                    coref_features=generate_coref_relations_json_features(-1,counter,json_relations,
                                                                          json_mentions,data_source,
                                                                          is_positive_instance=False,
                                                                          pair_left_id=result[0],
                                                                          pair_right_id=result[1])
                    json_relations_lined_up[counter]=coref_features
            if json_relations[pair_id]["TYPE"] in ["ASSOC", "ASSOC_PRONOM"]:
                # Associative (bridging) relation: passed is_positive_instance=True
                # but flagged is_assoc=True; the feature builder flips the flag.
                counter+=1
                coref_features=generate_coref_relations_json_features(pair_id,counter,json_relations,
                                                                      json_mentions,data_source,
                                                                      is_positive_instance=True,
                                                                      is_assoc=True)
                json_relations_lined_up[counter]=coref_features

df_coref_relations = pd.DataFrame.from_dict(json_relations_lined_up, orient='index')
df_coref_relations.to_excel("Dataset_ANCOR_XXXXX.xlsx")
df_coref_relations
# -
def generate_coref_relations_json_features(pair_id,counter_id,json_relations,json_mentions,
                                           data_source,is_positive_instance=True,
                                           pair_left_id=None,pair_right_id=None,is_assoc=False):
    """Build one flat feature row (dict) describing a mention pair.

    pair_id              -- key into json_relations for annotated pairs (-1 for
                            generated negative pairs)
    counter_id           -- running row id written to the output table
    json_relations       -- relations keyed by ANCOR relation id
    json_mentions        -- mentions keyed by ANCOR unit id
    data_source          -- raw .ac text, used to measure word/char distances
    is_positive_instance -- True for annotated relations; False for sampled
                            negative pairs given via pair_left_id/pair_right_id
    is_assoc             -- associative relations are labelled as negatives

    NOTE(review): reads the globals `sub_corpus` and `subfile` set by the
    calling notebook cell -- confirm they are in scope wherever this is used.
    """
    if is_positive_instance:
        relation_type = json_relations[pair_id]["TYPE"]
        relation_left = json_relations[pair_id]["LEFT_UNIT"]
        relation_right = json_relations[pair_id]["RIGHT_UNIT"]
        # Normalise so that "left" is always the earlier mention in the text.
        if relation_left["START_ID"] > relation_right["START_ID"]:
            relation_left, relation_right = relation_right, relation_left
        MENTION_LEFT_ID = relation_left["ID"]
        MENTION_RIGHT_ID = relation_right["ID"]
        if is_assoc:
            # Associative (bridging) pairs count as non-coreferent labels.
            is_positive_instance = False
    else:
        relation_type = "NON_COREFERENCE"
        relation_left = json_mentions[pair_left_id]
        relation_right = json_mentions[pair_right_id]
        MENTION_LEFT_ID, MENTION_RIGHT_ID = pair_left_id, pair_right_id
        if relation_left["START_ID"] > relation_right["START_ID"]:
            relation_left, relation_right = relation_right, relation_left
            # Fix: keep the exported ids consistent with the swapped mentions
            # (the original left the ids in their pre-swap order).
            MENTION_LEFT_ID, MENTION_RIGHT_ID = MENTION_RIGHT_ID, MENTION_LEFT_ID
    # Text between the two mentions, with XML/SGML tags stripped.
    string_text = data_source[relation_left["END_ID"]:relation_right["START_ID"]]
    string_text = re.sub("<[a-zA-Z0-9\s=\",/.]+>", " ", string_text).strip().split()
    # "NUM" is the mention's positional index in the document (see the anchor
    # parsing), so this counts the mentions strictly between the pair.
    DISTANCE_MENTION = int(relation_right["NUM"]) - int(relation_left["NUM"]) - 1
    DISTANCE_WORD = len(string_text)
    DISTANCE_CHAR = len("".join(string_text))
    tf2yn = {True: "YES", False: "NO"}
    left_content = str(relation_left["CONTENT"])
    right_content = str(relation_right["CONTENT"])
    left_content_list = left_content.lower().split()
    right_content_list = right_content.lower().split()
    len_of_words_min = min(len(left_content_list), len(right_content_list))
    len_of_words_max = max(len(left_content_list), len(right_content_list))
    len_intersection = len(
        [value for value in left_content_list if value in right_content_list])
    ID_FORM = tf2yn[relation_left["CONTENT"] == relation_right["CONTENT"]]
    ID_SUBFORM = tf2yn[len_intersection > 0]
    # Robustness fix: guard against empty CONTENT strings -- the original
    # raised ZeroDivisionError when a mention content split to zero words.
    INCL_RATE = len_intersection / len_of_words_min if len_of_words_min else 0.0
    COM_RATE = len_intersection / len_of_words_max if len_of_words_max else 0.0

    def _agreement(key, unknown="UNK"):
        # YES/NO on feature equality, or UNK when either side is unannotated.
        # (Equivalent to the original compute-then-override pattern.)
        if relation_left[key] == unknown or relation_right[key] == unknown:
            return "UNK"
        return tf2yn[relation_left[key] == relation_right[key]]

    ID_DEF = _agreement("DEF")
    ID_GP = _agreement("GP")
    ID_TYPE = _agreement("TYPE")
    ID_EN = _agreement("EN")
    ID_GENDER = _agreement("GENRE")
    # Bug fix: grammatical number is stored under "NB" (exported below as
    # LEFT_NB/RIGHT_NB).  The original compared "NUM" -- the positional index
    # used above for DISTANCE_MENTION -- so ID_NUMBER was effectively always
    # "NO" and its "UNK" branch was unreachable.
    ID_NUMBER = _agreement("NB")
    EMBEDDED = tf2yn[(left_content in right_content) or (right_content in left_content)]
    # "^" is the sentinel used when no previous/next token exists.
    ID_PREVIOUS = _agreement("PREVIOUS", unknown="^")
    ID_NEXT = _agreement("NEXT", unknown="^")
    coref_relation_json_feature = {
        "SUB_CORPUS": sub_corpus,
        "FILE_NAME": subfile,
        "COREF_TABLE_ID": counter_id,
        "COREF_TYPE": relation_type,
        "COREF_ANCOR_ID": pair_id,
        "MENTION_LEFT_ID": MENTION_LEFT_ID,
        "MENTION_RIGHT_ID": MENTION_RIGHT_ID,
        "LEFT_CONTENT": relation_left["CONTENT"],
        "RIGHT_CONTENT": relation_right["CONTENT"],
        "LEFT_TYPE": relation_left["TYPE"],
        "RIGHT_TYPE": relation_right["TYPE"],
        "LEFT_DEF": relation_left["DEF"],
        "RIGHT_DEF": relation_right["DEF"],
        "LEFT_GP": relation_left["GP"],
        "RIGHT_GP": relation_right["GP"],
        "LEFT_GENRE": relation_left["GENRE"],
        "RIGHT_GENRE": relation_right["GENRE"],
        "LEFT_NB": relation_left["NB"],
        "RIGHT_NB": relation_right["NB"],
        "LEFT_EN": relation_left["EN"],
        "RIGHT_EN": relation_right["EN"],
        "ID_FORM": ID_FORM,
        "ID_SUBFORM": ID_SUBFORM,
        "INCL_RATE": INCL_RATE,
        "COM_RATE": COM_RATE,
        "ID_DEF": ID_DEF,
        "ID_GP": ID_GP,
        "ID_TYPE": ID_TYPE,
        "ID_EN": ID_EN,
        "ID_GENDER": ID_GENDER,
        "ID_NUMBER": ID_NUMBER,
        "DISTANCE_MENTION": DISTANCE_MENTION,
        "DISTANCE_WORD": DISTANCE_WORD,
        "DISTANCE_CHAR": DISTANCE_CHAR,
        "EMBEDDED": EMBEDDED,
        "ID_PREVIOUS": ID_PREVIOUS,
        "ID_NEXT": ID_NEXT,
        "IS_CO_REF": int(is_positive_instance)
    }
    return coref_relation_json_feature
# +
import xml.etree.ElementTree as ET
import json
import pandas as pd
import re
import random
import glob
import networkx as nx
import os
def set_file_config(path_to_ancor, sub_corpus, file):
    """Load the three ANCOR files for one document.

    path_to_ancor -- corpus root (must end with a path separator expected by
                     the caller, which appends sub_corpus + '/')
    sub_corpus    -- sub-corpus directory name
    file          -- document base name (without extension)

    Returns (root_xml, root_aa, data_source): the parsed annotation XML root,
    the parsed .aa anchoring root, and the raw .ac text.
    """
    path_file = path_to_ancor + sub_corpus + '/'
    tree_xml = ET.parse(
        path_file + 'annotation_integree/' + file + '.xml')
    tree_aa = ET.parse(
        path_file + 'aa_fichiers/' + file + '.aa')
    # Fix: the original leaked the open file handle; read within a context
    # manager.  Encoding stays at the platform default to preserve behaviour
    # (NOTE(review): the corpus is French -- confirm whether utf-8 should be
    # forced here).
    with open(path_file + 'ac_fichiers/' + file + '.ac', "r") as source_handle:
        data_source = source_handle.read()
    return (tree_xml.getroot(), tree_aa.getroot(), data_source)
# return mentions in json format
def generate_json_mentions(root_xml, root_aa):
    """Collect all <unit> mentions into {unit_id: feature_dict}.

    Per mention the dict carries: TYPE, the positional anchor index "NUM"
    (when an <anchor> with the same id exists), every <feature> of the
    characterisation, and START_ID/END_ID character offsets taken from the
    matching <unit> of the .aa tree (when present).

    Perf fix: the original rescanned every <anchor> and every .aa <unit> for
    each mention (O(n^2)); the id->value lookups are now built once.
    """
    # For duplicate ids the last occurrence wins, matching the original loops.
    anchor_num_by_id = {anchor.attrib["id"]: anchor.attrib["num"]
                        for anchor in root_xml.iter("anchor")}
    aa_unit_by_id = {unit_aa.attrib["id"]: unit_aa
                     for unit_aa in root_aa.iter("unit")}
    json_mentions = {}
    for unit in root_xml.iter("unit"):
        data_character = {}
        data_character["TYPE"] = unit.find("./characterisation/type").text
        unit_id = unit.attrib["id"]
        if unit_id in anchor_num_by_id:
            data_character["NUM"] = anchor_num_by_id[unit_id]
        for feat in unit.findall("./characterisation/featureSet/feature"):
            data_character[feat.attrib["name"]] = feat.text
        if unit_id in aa_unit_by_id:
            unit_aa = aa_unit_by_id[unit_id]
            data_character["START_ID"] = int(unit_aa.find(
                "./positioning/start/singlePosition").attrib["index"])
            data_character["END_ID"] = int(unit_aa.find(
                "./positioning/end/singlePosition").attrib["index"])
        json_mentions[unit_id] = data_character
    return json_mentions
# return mentions in json format and eliminates all the features that are not presented.
def remove_empty_json_mentions(json_mentions, features_to_check=["CONTENT", "PREVIOUS"]):
    """Return a copy of *json_mentions* keeping only mentions that carry every
    feature listed in *features_to_check*; the input dict is left untouched."""
    return {
        mention_id: features
        for mention_id, features in json_mentions.items()
        if all(required in features for required in features_to_check)
    }
# return relations in json format based on the json mentions
def generate_json_relations():
    """Collect <relation> annotations from the global `root_aa`, keeping only
    binary relations whose two terms are both known mentions.

    Returns {relation_id: {"TYPE": ..., <features>..., "LEFT_UNIT": {...},
    "RIGHT_UNIT": {...}}}.  NOTE: reads the globals `root_aa` and
    `json_mentions` set by earlier notebook cells.
    """
    json_relations = {}
    for relation in root_aa.iter("relation"):
        data_character = {}
        data_character["TYPE"] = relation.find("./characterisation/type").text
        for feat in relation.findall("./characterisation/featureSet/feature"):
            data_character[feat.attrib["name"]] = feat.text
        unit_ids = [term.attrib["id"] for term in relation.findall("./positioning/term")]
        # Fix: check the arity BEFORE indexing -- the original read unit_ids[1]
        # first and crashed with IndexError on relations with a single term.
        if len(unit_ids) == 2 and all(uid in json_mentions for uid in unit_ids):
            data_character["LEFT_UNIT"] = {
                "ID": unit_ids[0], **json_mentions[unit_ids[0]]}
            data_character["RIGHT_UNIT"] = {
                "ID": unit_ids[1], **json_mentions[unit_ids[1]]}
            json_relations[relation.attrib["id"]] = data_character
    return json_relations
# return chains in json format based on the json mentions and relations.
def generate_json_chains(coreference_type=("DIRECTE", "INDIRECTE", "ANAPHORE")):
    """Group coreferent mentions into chains.

    Builds an undirected graph with one edge per relation whose TYPE is in
    *coreference_type* (read from the global `json_relations`); each connected
    component becomes one chain.  Returns {chain_id: [mention_id, ...]}.

    The default is a tuple (the original used a mutable list default); lists
    passed by callers keep working since only membership is tested.
    """
    G = nx.Graph()
    for chain in json_relations:
        if json_relations[chain]["TYPE"].strip() in coreference_type:
            left_id = json_relations[chain]["LEFT_UNIT"]["ID"]
            right_id = json_relations[chain]["RIGHT_UNIT"]["ID"]
            G.add_edge(left_id, right_id,
                       coref_type=json_relations[chain]["TYPE"].strip())
    return {chain_id: list(component)
            for chain_id, component in enumerate(nx.connected_components(G))}
# -
# Inline (debug) version of generate_json_mentions with a progress printout.
# Reads the globals `root_xml` and `root_aa` produced by set_file_config.
json_mentions = {}
counter = 0
# Perf fix: the original recomputed len(list(root_xml.iter("unit"))) on every
# iteration and rescanned all <anchor>/<unit> nodes per mention (O(n^2)).
total_units = len(list(root_xml.iter("unit")))
anchor_num_by_id = {anchor.attrib["id"]: anchor.attrib["num"]
                    for anchor in root_xml.iter("anchor")}
aa_unit_by_id = {unit_aa.attrib["id"]: unit_aa
                 for unit_aa in root_aa.iter("unit")}
for unit in root_xml.iter("unit"):
    data_character = {}
    data_character["TYPE"] = unit.find("./characterisation/type").text
    counter += 1
    print(counter, total_units)  # progress indicator
    unit_id = unit.attrib["id"]
    if unit_id in anchor_num_by_id:
        data_character["NUM"] = anchor_num_by_id[unit_id]
    for feat in unit.findall("./characterisation/featureSet/feature"):
        data_character[feat.attrib["name"]] = feat.text
    if unit_id in aa_unit_by_id:
        unit_aa = aa_unit_by_id[unit_id]
        data_character["START_ID"] = int(unit_aa.find(
            "./positioning/start/singlePosition").attrib["index"])
        data_character["END_ID"] = int(unit_aa.find(
            "./positioning/end/singlePosition").attrib["index"])
    json_mentions[unit.attrib["id"]] = data_character
# Notebook cell outputs: echo the current sub-corpus / file being processed.
sub_corpus
subfile
# # Dataset Splitting
# +
# Two ways of Training: (1) commented codes down with parameter.
# (2) uncommented with mentioned explicity the config file.
from french_crs.fast_model_training import dataset_splitter
# ds1=dataset_splitter("./Dataset_ANCOR_Balanced.xlsx",
#                     "./Dataset_ANCOR_Balanced_Train.xlsx",
#                     "./Dataset_ANCOR_Balanced_Test.xlsx",
#                     split_config_json="./split_config.json")
# ds1.dataset_splitter_by_file(lower_rate=0.20,upper_rate=0.50, files_num=68)
# Split the pair dataset into train/test Excel files according to the JSON
# split configuration.
# NOTE(review): the input name "balanced_Full_Soon.xlsx.xlsx" carries a double
# extension -- confirm the file on disk is really named this way.
ds2=dataset_splitter("./balanced_Full_Soon.xlsx.xlsx",
                    "./balanced_Full_Soon.xlsx_Train.xlsx",
                    "./balanced_Full_Soon.xlsx_Test.xlsx",
                    split_config_json="./default_split_config.json")
dict_files=ds2.dataset_splitter_by_json_config()
# -
# # Model Training
# +
from french_crs.fast_model_training import model_trainer
"""
12 possibilities for "train" and "test" variables
balanced_ANAPHORE
balanced_DIRECTE
balanced_Full
balanced_INDIRECTE
representative_ANAPHORE
representative_DIRECTE
representative_Full
representative_INDIRECTE
window_ANAPHORE
window_DIRECTE
window_Full
window_INDIRECTE
"""
# Train and evaluate on the same configuration.
train="balanced_Full_Soon"
test="balanced_Full_Soon"
model=model_trainer( "../datasets/"+ train+"/"+train+"_Train.xlsx",
                     "../datasets/"+ test+"/"+test+"_Test.xlsx",
                     "../datasets/"+ train+"/"+train+"_Test_Pred.xlsx",
                     "IS_CO_REF",
                     "IS_CO_REF"
                   )
# Identifier / raw-text columns excluded from the feature matrix (they would
# leak labels or are not numeric features).
model.columns_drop_list = ["SUB_CORPUS",
                           "FILE_NAME",
                           "COREF_TABLE_ID",
                           "COREF_TYPE",
                           "COREF_ANCOR_ID",
                           "MENTION_LEFT_ID",
                           "MENTION_RIGHT_ID",
                           "LEFT_CONTENT",
                           "RIGHT_CONTENT",
                           "LEFT_DEF",
                           "RIGHT_DEF",
                           "ID_DEF"]
#                            "DISTANCE_MENTION",
#                            "DISTANCE_WORD",
#                            "DISTANCE_CHAR"]
model.convert_columns_to_numeric()
print("Model_Name :","Model_ANCOR_"+train+".model\n")
print("Train Dataset :",train+"_Train.xlsx")
print("Test Dataset :",test+"_Test.xlsx")
# Fit a random forest on the pair features and persist the model to disk.
performance=model.train_model_random_forest(model_name="../pre-trained language models/Model_ANCOR_"+train+".model",max_depth=10, random_state=0,n_estimators=250)
performance
# -
train="balanced_Full_Soon"
test="balanced_Full_Soon"
# # SCORCH Chains Building
# +
from french_crs.fast_pairs2chains import chains_builder
# Build coreference chains from the predicted pair scores and write the
# gold/model JSON files consumed by the external scorch scorer.
model_chains=chains_builder(path_gold_file="../datasets/"+ train+"/"+train+"_Test_Pred.xlsx",
                            path_model_file="../datasets/"+ test+"/"+test+"_Test_Pred.xlsx",
                            gold_column="IS_CO_REF",
                            model_column="Prediction",
                            scorch_output_path="../",
                            threshold=0.5)
model_chains.generate_gold_model_json_output(mode="train")
# -
# # SCORCH Outcome
import os
# Run the external scorch scorer; its report is captured via shell redirection.
bashCommand = "scorch ./coref_chains_gold_b.json ./coref_chains_pred_r.json > ./mm.txt"
os.system(bashCommand)
# Fix: the original opened mm.txt and never closed it; use a context manager.
with open("./mm.txt", 'r') as f:
    message = f.read()
print(message)
# # Putting All Chains Together
# +
import docx
import os
from french_crs.model_training import model_trainer
from french_crs.pairs2chains import chains_builder
# One Word document collects, per configuration: model name, datasets, the
# pairwise classification report and the scorch chain-level scores.
mydoc = docx.Document()
style = mydoc.styles['Normal']
font = style.font
font.name = 'MS Gothic'
font.size = docx.shared.Pt(10)
train_test_list=[
    "balanced_ANAPHORE",
    "balanced_DIRECTE",
    "balanced_Full",
    "balanced_INDIRECTE",
    "representative_ANAPHORE",
    "representative_DIRECTE",
    "representative_Full",
    "representative_INDIRECTE",
    "window_ANAPHORE",
    "window_DIRECTE",
    "window_Full",
    "window_INDIRECTE"
]
# train_test_list=[
# ["balanced_ANAPHORE","window_ANAPHORE"],
# ["balanced_DIRECTE","window_DIRECTE"],
# ["balanced_Full","window_Full"],
# ["balanced_INDIRECTE","window_INDIRECTE"],
# ["representative_ANAPHORE","window_ANAPHORE"],
# ["representative_DIRECTE","window_DIRECTE"],
# ["representative_Full","window_Full"],
# ["representative_INDIRECTE","window_INDIRECTE"],
# ["window_ANAPHORE","window_ANAPHORE"],
# ["window_DIRECTE","window_DIRECTE"],
# ["window_Full","window_Full"],
# ["window_INDIRECTE","window_INDIRECTE"]
# ]
counter=0
# for train in train_test_list:
#     for test in train_test_list:
for train in train_test_list:
    # Evaluate each configuration on its own test split.
    test=train
    counter+=1
    model=model_trainer( "../datasets/"+ train+"/"+train+"_Train.xlsx",
                         "../datasets/"+ test+"/"+test+"_Test.xlsx",
                         "../datasets/"+ train+"/"+train+"_Test_Pred.xlsx",
                         "IS_CO_REF",
                         "IS_CO_REF"
                       )
    # Drop non-feature / leakage-prone columns before fitting.
    model.columns_drop_list = ['m1_DEF', 'm2_DEF', 'ID_DEF',
                               'DISTANCE_MENTION','DISTANCE_WORD',
                               'DISTANCE_CHAR']
    model.convert_columns_to_numeric()
    performance=model.train_model_random_forest(model_name="../pre-trained language models/Model_ANCOR_"+train+".model",max_depth=10, random_state=0,n_estimators=250)
    # Chain-level evaluation: build chains from the predictions, then score
    # them with the external scorch tool.
    model_chains=chains_builder(path_gold_file="../datasets/"+ train+"/"+train+"_Test_Pred.xlsx",
                                path_model_file="../datasets/"+ train+"/"+train+"_Test_Pred.xlsx",
                                gold_column="IS_CO_REF",
                                model_column="Prediction",
                                scorch_output_path="../",
                                threshold=0.5)
    model_chains.generate_gold_model_json_output(mode="train")
    bashCommand = "scorch ../coref_chains_gold.json ../coref_chains_pred.json > ../output.txt"
    os.system(bashCommand)
    # Idiom fix: context manager instead of open/read/close (the file is now
    # closed even if read() raises).
    with open("../output.txt", 'r') as f:
        message = f.read()
    mydoc.add_paragraph("Model_Name : "+"Model_ANCOR_"+train+".model")
    mydoc.add_paragraph("Train Dataset : "+train+"_Train.xlsx")
    mydoc.add_paragraph("Test Dataset : "+test+"_Test.xlsx")
    mydoc.add_paragraph("\n")
    mydoc.add_paragraph(str(performance))
    mydoc.add_paragraph("\n")
    mydoc.add_paragraph(message)
    mydoc.add_page_break()
    print(counter)
mydoc.save("../pre-trained language models/Performance Analysis.docx")
# -
# # Model Testing
# +
from model_training import model_tester
# Apply a previously trained model to a held-out pair dataset; predictions
# are written to `output_file` with probabilities thresholded at `threshold`.
model_parameter={
    "model_name" : "./Models/Random_Forest_(Normal)_OTG_Neg_90_Pos_10.model",
    "input_file" : "./Datasets/corpus_ALL_Window_30_Test.xlsx",
    "output_file" : "./Datasets/corpus_ALL_Window_30_Test_Called_Seperately.xlsx",
    "column_gold" : "IS_CO_REF",
    "column_outcome" : "Prediction",
    "threshold" : 0.5
}
model=model_tester(**model_parameter)
model.apply_model_to_dataset()
# -
|
demo/fast_Model_ANCOR_Training.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# TSG117 - App-Deploy Proxy Nginx Logs
# ====================================
#
# Steps
# -----
#
# ### Parameters
# + tags=["parameters"]
import re
tail_lines = 200
pod = None # All
container = "app-service-proxy"
log_files = [ "/var/log/nginx/error.log" ]
# Match nginx error-log lines: a timestamp prefix followed by the literal
# severity tag "[error]".  Bug fix: the brackets must be escaped -- unescaped,
# "[error]" is a character class matching a single 'e', 'r' or 'o'.
# NOTE(review): ".{23}" assumes a 23-character timestamp prefix; confirm
# against the actual error.log format of this nginx build.
expressions_to_analyze = [
    re.compile(r".{23}\[error\]")
]
# -
# ### Instantiate Kubernetes client
# + tags=["hide_input"]
# Instantiate the Python Kubernetes client into 'api' variable
import os
from IPython.display import Markdown
try:
    from kubernetes import client, config
    from kubernetes.stream import stream
    # In-cluster service env vars present -> running inside a pod; otherwise
    # fall back to the local kube config.
    if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ:
        config.load_incluster_config()
    else:
        try:
            config.load_kube_config()
        except:
            # Point the user at the config-repair notebook, then re-raise.
            display(Markdown(f'HINT: Use [TSG118 - Configure Kubernetes config](../repair/tsg118-configure-kube-config.ipynb) to resolve this issue.'))
            raise
    api = client.CoreV1Api()
    print('Kubernetes client instantiated')
except ImportError:
    # kubernetes module missing: point at the install notebook, then re-raise.
    display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.'))
    raise
# -
# ### Get the namespace for the big data cluster
#
# Get the namespace of the Big Data Cluster from the Kuberenetes API.
#
# **NOTE:**
#
# If there is more than one Big Data Cluster in the target Kubernetes
# cluster, then either:
#
# - set \[0\] to the correct value for the big data cluster.
# - set the environment variable AZDATA\_NAMESPACE, before starting
# Azure Data Studio.
# + tags=["hide_input"]
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
    namespace = os.environ["AZDATA_NAMESPACE"]
else:
    try:
        # The BDC namespace carries the MSSQL_CLUSTER label; [0] assumes a
        # single Big Data Cluster in the target Kubernetes cluster.
        namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name
    except IndexError:
        from IPython.display import Markdown
        display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
        display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
        display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
        raise
print('The kubernetes namespace for your big data cluster is: ' + namespace)
# -
# ### Get tail for log
# + tags=["hide_input"]
# Display the last 'tail_lines' of files in 'log_files' list
pods = api.list_namespaced_pod(namespace)
entries_for_analysis = []
for p in pods.items:
    # pod/container of None act as wildcards (tail every pod / container).
    if pod is None or p.metadata.name == pod:
        for c in p.spec.containers:
            if container is None or c.name == container:
                for log_file in log_files:
                    print (f"- LOGS: '{log_file}' for CONTAINER: '{c.name}' in POD: '{p.metadata.name}'")
                    try:
                        # Exec `tail` inside the container and capture its output.
                        output = stream(api.connect_get_namespaced_pod_exec, p.metadata.name, namespace, command=['/bin/sh', '-c', f'tail -n {tail_lines} {log_file}'], container=c.name, stderr=True, stdout=True)
                    except Exception:
                        print (f"FAILED to get LOGS for CONTAINER: {c.name} in POD: {p.metadata.name}")
                    else:
                        # Keep (and echo) only lines matching the analysis patterns.
                        for line in output.split('\n'):
                            for expression in expressions_to_analyze:
                                if expression.match(line):
                                    entries_for_analysis.append(line)
                                    print(line)
print("")
print(f"{len(entries_for_analysis)} log entries found for further analysis.")
# -
# ### Analyze log entries and suggest relevant Troubleshooting Guides
# + tags=["hide_input"]
# Analyze log entries and suggest further relevant troubleshooting guides
from IPython.display import Markdown
import os
import json
import requests
import ipykernel
import datetime
from urllib.parse import urljoin
from notebook import notebookapp
def get_notebook_name():
    """Return the full path of the jupyter notebook.   Some runtimes (e.g. ADS)
    have the kernel_id in the filename of the connection file.  If so, the
    notebook name at runtime can be determined using `list_running_servers`.
    Other runtimes (e.g. azdata) do not have the kernel_id in the filename of
    the connection file, therefore we are unable to establish the filename.

    Returns None (implicitly) when the notebook path cannot be determined.
    """
    connection_file = os.path.basename(ipykernel.get_connection_file())
    # If the runtime has the kernel_id in the connection filename, use it to
    # get the real notebook name at runtime, otherwise, use the notebook
    # filename from build time.
    try:
        kernel_id = connection_file.split('-', 1)[1].split('.')[0]
    except:
        # No kernel id embedded in the filename (e.g. azdata runtime).
        pass
    else:
        # Ask each running Jupyter server which notebook owns this kernel.
        for servers in list(notebookapp.list_running_servers()):
            try:
                response = requests.get(urljoin(servers['url'], 'api/sessions'), params={'token': servers.get('token', '')}, timeout=.01)
            except:
                # Server unreachable within the (very short) timeout; try next.
                pass
            else:
                for nn in json.loads(response.text):
                    if nn['kernel']['id'] == kernel_id:
                        return nn['path']
def load_json(filename):
    """Read *filename* as UTF-8 text and return the parsed JSON object."""
    with open(filename, encoding="utf8") as handle:
        return json.load(handle)
def get_notebook_rules():
    """Load the log-analyzer rules from this notebook's metadata (.ipynb file).

    Returns None when the notebook file cannot be located (e.g. running under
    azdata), [] when the metadata carries no rules, otherwise the rule list
    stored under metadata.azdata.expert.log_analyzer_rules.
    """
    file_name = get_notebook_name()
    # Idiom fix: identity comparison with None (PEP 8) instead of `== None`,
    # and guard clauses instead of nested if/else.
    if file_name is None:
        return None
    j = load_json(file_name)
    if "azdata" not in j["metadata"] or \
        "expert" not in j["metadata"]["azdata"] or \
        "log_analyzer_rules" not in j["metadata"]["azdata"]["expert"]:
        return []
    return j["metadata"]["azdata"]["expert"]["log_analyzer_rules"]
# Apply each [match_string, ?, title, link] rule to the collected log entries
# and surface troubleshooting-guide hints for matches.
rules = get_notebook_rules()
# NOTE(review): `== None` works here but `is None` is the idiomatic test.
if rules == None:
    print("")
    print(f"Log Analysis only available when run in Azure Data Studio.  Not available when run in azdata.")
else:
    print(f"Applying the following {len(rules)} rules to {len(entries_for_analysis)} log entries for analysis, looking for HINTs to further troubleshooting.")
    print(rules)
    hints = 0
    if len(rules) > 0:
        for entry in entries_for_analysis:
            for rule in rules:
                # rule[0] = substring to find; rule[2]/rule[3] = hint title/link.
                if entry.find(rule[0]) != -1:
                    print (entry)
                    display(Markdown(f'HINT: Use [{rule[2]}]({rule[3]}) to resolve this issue.'))
                    hints = hints + 1
    print("")
    print(f"{len(entries_for_analysis)} log entries analyzed (using {len(rules)} rules).  {hints} further troubleshooting hints made inline.")
# -
print('Notebook execution complete.')
|
Big-Data-Clusters/CU8/Public/content/log-analyzers/tsg117-get-approxy-nginx-logs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Python 2 notebook (see kernelspec): `ConfigParser` was renamed
# `configparser` in Python 3, and later cells use print statements.
import ConfigParser
import pandas as pd
# # Get Configurations
#
# All configs should be put in config.ini and placed within the same directory as this notebook.
#
# #### Template for config.ini
#
# [Oanda]
#
# accountnumber = ....
#
# apikey = ....
# Read the Oanda credentials from config.ini in the notebook directory.
config = ConfigParser.ConfigParser()
config.read('config.ini')
apikey = config.get('Oanda', 'apikey')
accountnumber = config.get('Oanda', 'accountnumber')
# # Import Oanda Python API Wrapper
#
# Please note that we are not using Oanda's REST API directly
#
# Documentation: http://oanda-api-v20.readthedocs.io/en/latest/installation.html
import oandapyV20
from oandapyV20 import API
import oandapyV20.endpoints.pricing as pricing
import oandapyV20.endpoints.accounts as accounts
import oandapyV20.endpoints.instruments as instruments
import oandapyV20.endpoints.orders as orders
import oandapyV20.endpoints.trades as trades
# # Initiate API
# Authenticate against Oanda's v20 REST API with the configured token.
api = API(access_token=apikey)
# # Test: Get the list of all tradeable instruments
r = accounts.AccountInstruments(accountID=accountnumber)
rv = api.request(r)
# Tabulate the instruments for inspection.
tradeableIns = pd.DataFrame(rv['instruments'])
tradeableIns.info()
# Well, the list is long. Let's just take a peek
tradeableIns.head(20)
# # Test: Get some pricing info
# Let's create a random list of instruments real quick
selectedIns = tradeableIns.sample(frac=0.2)
InsText = ','.join([name for name in selectedIns.name])
params ={'instruments': InsText}
selectedIns.count()
# Set up PricingInfo
r = pricing.PricingInfo(accountID=accountnumber, params=params)
# Get the response
rv = api.request(r)
# Cache the response into Pandas DataFrame
raw_pricing = pd.DataFrame(r.response['prices'])
# Clean up the mess.
# Fix: renamed the result `pricing` -> `pricing_df`; the original assignment
# shadowed the imported oandapyV20 `pricing` module used just above.
pricing_df = raw_pricing.copy()
# Flatten the nested ask/bid price dicts and the units-available structure.
pricing_df.asks = pricing_df.asks.apply(lambda x: x[0]['price'])
pricing_df.bids = pricing_df.bids.apply(lambda x: x[0]['price'])
pricing_df['Available Short Units'] = pricing_df.unitsAvailable.apply(lambda x: x['default']['short'])
pricing_df['Available Long Units'] = pricing_df.unitsAvailable.apply(lambda x: x['default']['long'])
pricing_df.drop(['quoteHomeConversionFactors', 'unitsAvailable'], axis=1)
# # Test: Get Some Historical Prices
#
# Singapore 30 CFD: SG30_SGD
#
# #### Firstly, 100 prices @ 30 seconds
# Request 20 EUR_USD candles at 30-second granularity ('S30').
params = {'count': 20,'granularity': 'S30'}
r = instruments.InstrumentsCandles(instrument="EUR_USD",params=params)
res = api.request(r)
# +
# <NAME>, we need to parse some info
# Let's write a little helper
def parseOHLC(df):
    """Flatten the 'mid' dict column of a candles frame into open/high/low/
    close columns and reorder to OHLC + volume/time/complete.

    Returns a new DataFrame; *df* itself is not modified.
    """
    parsed = df.copy()
    for column, key in (("open", "o"), ("high", "h"), ("low", "l"), ("close", "c")):
        parsed[column] = df.mid.apply(lambda candle, k=key: candle[k])
    parsed.drop('mid', axis=1, inplace=True)
    return parsed.reindex(columns=['open', 'high', 'low', 'close', 'volume', 'time', 'complete'])
parseOHLC(pd.DataFrame(res['candles']))
# -
# The same request repeated at coarser Oanda granularity codes
# (M5 = 5 min, H1 = 1 hour, D = day, W = week, M = month).
# #### 5 minutes
params = {'count': 20,'granularity': 'M5'}
r = instruments.InstrumentsCandles(instrument="EUR_USD",params=params)
res = api.request(r)
parseOHLC(pd.DataFrame(res['candles']))
# #### 1 hour
params = {'count': 20,'granularity': 'H1'}
r = instruments.InstrumentsCandles(instrument="EUR_USD",params=params)
res = api.request(r)
parseOHLC(pd.DataFrame(res['candles']))
# #### 1 Day
params = {'count': 20,'granularity': 'D'}
r = instruments.InstrumentsCandles(instrument="EUR_USD",params=params)
res = api.request(r)
parseOHLC(pd.DataFrame(res['candles']))
# #### 1 Week
params = {'count': 20,'granularity': 'W'}
r = instruments.InstrumentsCandles(instrument="EUR_USD",params=params)
res = api.request(r)
parseOHLC(pd.DataFrame(res['candles']))
# #### 1 Month
params = {'count': 20,'granularity': 'M'}
r = instruments.InstrumentsCandles(instrument="EUR_USD",params=params)
res = api.request(r)
parseOHLC(pd.DataFrame(res['candles']))
# # Open/Pending/Cancel orders
# +
# Long EUR_USD
# This one should go through
data = {
    "order": {
        "price": "1.18",
        "instrument": "EUR_USD",
        "units": "100",
        "type": "LIMIT",
        "positionFill": "DEFAULT"
    }
}
r = orders.OrderCreate(accountnumber, data=data)
api.request(r)
pd.Series(r.response['orderFillTransaction'])
# +
# Long EUR_USD @ low price
# This one should NOT go through
data = {
    "order": {
        "price": "1.00",
        "instrument": "EUR_USD",
        "units": "100",
        "type": "LIMIT",
        "positionFill": "DEFAULT"
    }
}
r = orders.OrderCreate(accountnumber, data=data)
api.request(r)
pd.Series(r.response)
# -
# ## Check pending orders
r = orders.OrdersPending(accountnumber)
pending = pd.DataFrame(api.request(r)['orders'])
pending
# ## Cancel all pending orders
# (Python 2 print statements, matching the notebook's python2 kernelspec.)
for order_id in pending.id:
    r = orders.OrderCancel(accountID= accountnumber, orderID=int(order_id))
    api.request(r)
    print r.response
# Re-query to confirm the pending list is now empty.
r = orders.OrdersPending(accountnumber)
pending = pd.DataFrame(api.request(r)['orders'])
pending
# # List all open trades
r = trades.OpenTrades(accountID=accountnumber)
api.request(r)
openTrades = pd.DataFrame(r.response['trades'])
openTrades
# # Close all positions
for trade_id in openTrades.id:
    r = trades.TradeClose(accountID=accountnumber, tradeID=int(trade_id))
    api.request(r)
    print r.response
# # Check Portfolio
r = accounts.AccountSummary(accountnumber)
api.request(r)
pd.Series(r.response['account'])
|
Oanda Testdrive.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Run this cell before the lab !
import numpy as np
import os
import os.path as op
import zipfile
try:
from urllib.request import urlretrieve
except ImportError: # Python 2 compat
from urllib import urlretrieve
# BBC dataset
BBC_DATASET_URL = "http://mlg.ucd.ie/files/datasets/bbc-fulltext.zip"
zip_filename = BBC_DATASET_URL.rsplit('/', 1)[1]
BBC_DATASET_FOLDER = 'bbc'
if not op.exists(zip_filename):
print("Downloading %s to %s..." % (BBC_DATASET_URL, zip_filename))
urlretrieve(BBC_DATASET_URL, zip_filename)
if not op.exists(BBC_DATASET_FOLDER):
with zipfile.ZipFile(zip_filename, 'r') as f:
print("Extracting contents of %s..." % zip_filename)
f.extractall('.')
# Get pretrained Glove Word2Vec
URL_REPRESENTATIONS = "https://github.com/m2dsupsdlclass/lectures-labs/releases/download/0.3/glove100k.100d.zip"
ZIP_REPRESENTATIONS = "glove100k.100d.zip"
FILE_REPRESENTATIONS = "glove100K.100d.txt"
if not op.exists(ZIP_REPRESENTATIONS):
print('Downloading from %s to %s...' % (URL_REPRESENTATIONS, ZIP_REPRESENTATIONS))
urlretrieve(URL_REPRESENTATIONS, './' + ZIP_REPRESENTATIONS)
if not op.exists(FILE_REPRESENTATIONS):
print("extracting %s..." % ZIP_REPRESENTATIONS)
myzip = zipfile.ZipFile(ZIP_REPRESENTATIONS)
myzip.extractall()
# Get the Nietzche dataset
from tensorflow.keras.utils import get_file
URL = "https://s3.amazonaws.com/text-datasets/nietzsche.txt"
corpus_path = get_file('nietzsche.txt', origin=URL)
# -
|
labs/06_deep_nlp/data_download.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Bag of words model
# +
# load all necessary libraries
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
pd.set_option('max_colwidth', 100)
# -
# #### Let's build a basic bag of words model on three sample documents
# Three toy documents used to demonstrate the bag-of-words pipeline below.
documents = ["Gangs of Wasseypur is a great movie.", "The success of a movie depends on the performance of the actors.", "There are no new movies releasing this week."]
print(documents)
# +
def preprocess(document):
    """Lower-case *document*, drop English stopwords, and return the
    whitespace-rejoined text."""
    # change sentence to lower case
    document = document.lower()
    # tokenize into words
    words = word_tokenize(document)
    # Perf fix: build the stopword set once per call.  The original evaluated
    # stopwords.words("english") inside the comprehension condition -- i.e.
    # once per token -- and list membership is O(m), quadratic work overall.
    stop_words = set(stopwords.words("english"))
    words = [word for word in words if word not in stop_words]
    # join words to make sentence
    return " ".join(words)

documents = [preprocess(document) for document in documents]
print(documents)
# -
# #### Creating bag of words model using count vectorizer function
vectorizer = CountVectorizer()
bow_model = vectorizer.fit_transform(documents)
print(bow_model) # sparse view: (row, column) coordinates of non-zero cells with their counts
# print the full sparse matrix
print(bow_model.toarray())
print(bow_model.shape)
# NOTE(review): get_feature_names() is deprecated in newer scikit-learn in
# favour of get_feature_names_out() -- confirm the pinned sklearn version.
print(vectorizer.get_feature_names())
# ### Let's create a bag of words model on the spam dataset.
# load data
spam = pd.read_csv("SMSSpamCollection.txt", sep = "\t", names=["label", "message"])
spam.head()
# ##### Let's take a subset of data (first 50 rows only) and create bag of word model on that.
# NOTE(review): the slice below keeps 100 rows, not the 50 mentioned above.
spam = spam.iloc[0:100,:]
print(spam)
# extract the messages from the dataframe
messages = spam.message
print(messages)
# convert messages into list
messages = [message for message in messages]
print(messages)
# preprocess messages using the preprocess function
messages = [preprocess(message) for message in messages]
print(messages)
# bag of words model
vectorizer = CountVectorizer()
bow_model = vectorizer.fit_transform(messages)
bow_model.sum()
# look at the dataframe
df = pd.DataFrame(bow_model.toarray(), columns = vectorizer.get_feature_names())
print(vectorizer.get_feature_names())
# * A lot of duplicate tokens such as 'win'and 'winner'; 'reply' and 'replying'; 'want' and 'wanted' etc.
df.shape
|
bag+of+words.ipynb
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#default_exp phenotypes.Field
# -
# config=Config()
# data_dict = config.data_dict
# pheno_df = pheno_df
#export
import pandas as pd
import dask.dataframe as dd
import logging
import numpy as np
from combinatorial_gwas.data_catalog import get_catalog, get_parameters
#export
# Load the project parameters from the data catalog once at import time;
# the bare `parameters` echoes the dict in the notebook for inspection.
parameters= get_parameters()
parameters
# +
#@delegate_as(dd.core.DataFrame, to='dd_cls')
#export
# Load the phenotype data dictionary (CSV) and the phenotype table (parquet,
# lazily via dask), then index the table by the configured ID column.
try:
    data_dict = pd.read_csv(parameters["data_dict_file"])
    data_dict.file = parameters["data_dict_file"]
    pheno_df = dd.read_parquet(parameters["pheno_file"])
    if parameters["id_col"]:
        if parameters["id_col"] not in pheno_df.columns:
            # Fix: the message formerly interpolated the column name twice;
            # it now names the file that is missing the column.
            raise KeyError(f"Cannot find ID column {parameters['id_col']} in file {parameters['pheno_file']}")
        logging.warning("Found ID column, setting index. This might take a bit long. Please be patient.")
        pheno_df[parameters["id_col"]] = pheno_df[parameters["id_col"]].astype(int)
        pheno_df = pheno_df.set_index(parameters["id_col"])
    coding_file_path_template = parameters["coding_file_path_template"]
except AttributeError:
    raise AttributeError("Could not find file names from the settings module, please set `data_dict_file` and `pheno_file` attribute")
# +
#export
class DelegatedAttribute:
    """Data descriptor that forwards attribute access on the owner instance to
    one of its attributes (the "delegate").

    `Owner.attr = DelegatedAttribute('inner', 'x')` makes `owner.attr`
    read/write/delete `owner.inner.x`.
    """

    def __init__(self, delegate_name, attr_name):
        # attr_name      -- attribute looked up on the delegate object
        # delegate_name  -- attribute of the owner instance holding the delegate
        self.attr_name = attr_name
        self.delegate_name = delegate_name

    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        # Fix: removed the leftover debug print statements that fired on
        # every delegated read.
        return getattr(self.delegate(instance), self.attr_name)

    def __set__(self, instance, value):
        setattr(self.delegate(instance), self.attr_name, value)

    def __delete__(self, instance):
        delattr(self.delegate(instance), self.attr_name)

    def delegate(self, instance):
        """Return the delegate object held by *instance*."""
        return getattr(instance, self.delegate_name)

    def __str__(self):
        # Kept from the original.  NOTE(review): an empty __str__ is unusual;
        # confirm nothing relies on it before giving it a real repr.
        return ""
# def delegate_as(delegate_cls):
# # gather the attributes of the delegate class to add to the decorated class
# attributes = delegate_cls.__dict__.keys()
# def inner(cls):
# # create property for storing the delegate
# setattr(cls, 'delegate', delegate_cls)
# # set all the attributes
# for attr in attributes:
# setattr(cls, attr, DelegatedAttribute(to, attr))
# return cls
# return inner
def delegate_as(delegate_cls, to='delegate'):
    """Class decorator forwarding the attributes of *delegate_cls*.

    Every attribute defined on *delegate_cls* (and not already defined on the
    decorated class) is replaced by a DelegatedAttribute descriptor that
    redirects access to ``instance.<to>``.

    Fix: removed leftover debug print() calls that polluted stdout at class
    decoration time.
    """
    attributes = set(delegate_cls.__dict__.keys())

    def inner(cls):
        # Don't bother adding attributes that the class already has.
        for attr in attributes - set(cls.__dict__.keys()):
            setattr(cls, attr, DelegatedAttribute(to, attr))
        return cls

    return inner
# +
#export
#@delegate_as(dd.core.DataFrame, to="df")
#@delegate_df_cls(dd.core.DataFrame)
#@add_numerics(dd.core.DataFrame, dunder_delegate_attr="col", other_delegate_attr="df", dunder_list = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod', 'pow', 'radd', 'rsub', 'rmul', 'rtruediv', 'rfloordiv', 'rmod', 'rpow', 'lt', 'gt', 'le', 'ge', 'ne', 'eq'])
class Field():
    """One UK-Biobank-style data field.

    Looks the field up in the module-level ``data_dict``, selects the matching
    ``f.<FieldID>.<instance>.<array>`` columns from the module-level dask
    ``pheno_df``, and exposes the data-dictionary metadata (FieldID, Field,
    ValueType, Coding, ...) as attributes on the instance.
    """

    def __init__(self, pheno, *, name="Unamed_Field", instances=None, arrays=None):
        """Look up `pheno` (FieldID int or Field name str) and load its columns.

        instances/arrays optionally restrict which measurement instances and
        array slots are selected.
        """
        # Allow passing in both the FieldID int and Field str.
        if isinstance(pheno, int):
            dict_line_df = data_dict.query(f"FieldID == '{pheno}'")
        else:
            dict_line_df = data_dict.query(f"Field == '{pheno}'")
        if dict_line_df.empty:
            raise ValueError(f"Cannot find Field/FieldID '{pheno}' in data dict file '{data_dict.file}' ")
        self.dict_line = DictLine(dict_line_df)
        # Make dict_line attributes accessible from the Field object.
        for col in self.dict_line.df.columns:
            setattr(self, col, getattr(self.dict_line, f"_{col}"))
        # Reminder of what attributes are available from self.dict_line.
        self.help = self.dict_line.df.columns.tolist()
        self.pheno_str = self.Field
        self.pheno_cols = self.dict_line.get_pheno_cols(pheno_df.columns, instances=instances, arrays=arrays)
        try:
            self.df = pheno_df.loc[:, self.pheno_cols].fillna(np.nan)
        except KeyError:
            raise KeyError(f"Cannot find phenotype '{self.pheno_str}'', ID: {self.FieldID} in the phenotype file. Please make sure your phenotype file contains all following columns: {self.pheno_cols} ")
        # Assigning triggers the `name` setter, which renames self.df's columns.
        self.name = name
        # Some fields don't have coding.
        if self.Coding:
            # Replace the coding int with a more versatile Coding object.
            self.Coding = self.get_coding(coding_file_path_template)
            self.get_codes = self.Coding.get_codes
        else:
            print(f"Data field {self} has no Coding. The Coding attribute will be 'None'")

    def copy(self):
        """Return a copy sharing metadata but with an independent dask frame."""
        obj = type(self).__new__(self.__class__)
        obj.__dict__.update(self.__dict__)
        obj.df = self.df.copy()
        return obj

    @classmethod
    def init_multi_type(cls, data, name):
        """Build a Field from an int/str/kwargs-dict, or rename a copy of a Field."""
        accepted_types = (str, int)
        # Create new instance(s) of class Field.
        ## Handling dict.
        if isinstance(data, dict):
            field = data["pheno"]
            if isinstance(field, accepted_types):
                updated_kwargs = data
                updated_kwargs["name"] = name
                return cls(**data)
            # NOTE(review): a dict whose "pheno" is not str/int falls through
            # every branch and returns None — confirm intended.
        elif isinstance(data, accepted_types):
            return cls(data, name=name)
        # Create copy of instance with a different name.
        elif isinstance(data, cls):
            new_field = data.copy()
            new_field.name = name
            return new_field
        else:
            # NOTE(review): `field` is only bound in the dict branch, so this
            # f-string can raise NameError for other input types — confirm.
            raise TypeError(f"Invalid pheno data type {type(field)}, can only accept input of type {accepted_types} ")

    @classmethod
    def make_fields_dict(cls, data: dict) -> dict:
        """Map {name: spec} to {name: Field} via init_multi_type."""
        if isinstance(data, dict):
            iter_obj = data.items()
            return {name: cls.init_multi_type(dict_or_obj, name) for name, dict_or_obj in iter_obj}
        raise TypeError("Can only accept dictionary ")

    @property
    def name(self, ):
        # Public display name; setting it also renames self.df's columns.
        return self._name

    @name.setter
    def name(self, new_name):
        def make_full_name(orig_name, new_name=None):
            # Preserve the instance/array suffix while swapping in new_name.
            if new_name is None:
                new_name = "Unamed_field"
            # First-time parsing from source ("f.<id>.<instance>.<array>").
            if "." in orig_name:
                name_list = orig_name.split(".")
                instance = name_list[2]
                array = name_list[3]
            else:
                # Already renamed once: "<name>_<instance>_<array>".
                name_list = orig_name.split("_")
                instance = name_list[-2]
                array = name_list[-1]
            return f"{new_name}_{instance}_{array}"
        # If only one instance and array, then we simplify the column name.
        if self.Array == self.Instances == 1:
            self.df.columns = [new_name for col in self.df.columns]
        else:
            self.df.columns = [make_full_name(col, new_name=new_name) for col in self.df.columns]
        self._name = new_name
        return self

    def rename(self, new_name):
        """Fluent alias for assigning `name`; returns self."""
        self.name = new_name
        return self

    @property
    def pheno_str_no_space(self):
        # Field name with spaces replaced so it can be used as a label.
        return self.pheno_str.replace(" ", "_")

    def __repr__(self):
        return f"Field(Name:{self.name}, Pheno: `{self.pheno_str}`, ID: {self.FieldID}, Original Column(s): {self.pheno_cols}, Named Column(s): {self.all_cols_df.columns})"

    def __str__(self):
        return self.__repr__()

    def get_coding(self, coding_file_path_template):
        """Resolve this field's coding file and wrap it in a Coding object."""
        coding_file_name = coding_file_path_template.replace("*", str(self.Coding))
        return(Coding(coding_file_name, self.Coding))

    # Need to call .compute() before you can perform masking.
    def get_attr_childs(self, attr, *, input_field, output_field):
        """Return all codes classified under `attr` in this field's coding tree."""
        all_related_fields = self.Coding.get_codes(attr, input_field, output_field)
        print(f"All the related fields that are classified as '{attr}' in coding file \n '{self.Coding.coding_file}' \n are {all_related_fields}")
        return all_related_fields  # (self.df[self.pheno_str_no_space]).isin(all_related_fields)

    @property
    def value_counts(self):
        # NOTE(review): `self.col` is not defined on Field; this relies on the
        # delegation decorator that is currently commented out above — confirm.
        return self.col.value_counts()

    @property
    def all_cols_df(self):
        """All selected columns, cast to float for continuous fields."""
        col = self.df.loc[:, self.df.columns]
        if (self.ValueType).lower() == "continuous":
            return col.astype("float")
        return col
class DictLine():
    """One row of the data dictionary, exposing its columns as `_<col>` properties."""

    def __init__(self, dict_line_df):
        self.df = dict_line_df
        # Turn columns into private attributes _{col}.
        # NOTE(review): properties must live on the class, so every new
        # DictLine re-binds these class-level properties. The property body
        # reads `self.df`, so per-instance values stay correct, but the set of
        # available `_<col>` names is shared across instances — confirm intended.
        for col in self.df.columns:
            setattr(self.__class__, f"_{col}", self.get_dict_field(col, self.df))

    def get_dict_field(self, dict_field, dict_line):
        """Build a property that reads `dict_field` from this row's DataFrame."""
        @property
        def func(self):
            # Catch multi-row dataframes: Series.item() raises ValueError then.
            try:
                field = self.df[dict_field].item()
            except ValueError:
                return None
            if dict_field == "Coding":
                coding_num = field
                # Some fields do not have coding (NaN in the dictionary).
                if np.isnan(coding_num):
                    return None
                else:
                    return int(coding_num)
            return field
        return func

    def get_pheno_cols(self, col_list, instances=None, arrays=None):
        """Select `f.<FieldID>.<instance>.<array>` columns, optionally filtered."""
        all_cols = [col for col in col_list if col.startswith(f"f.{self._FieldID}.")]
        if instances:
            all_cols = [col for col in all_cols if (int(col.split(".")[2]) in instances)]
        if arrays:
            all_cols = [col for col in all_cols if (int(col.split(".")[3]) in arrays)]
        return all_cols
class Coding():
    """Lookup table mapping a field's coded values to their meanings.

    Backed by a tab-separated coding file whose rows form a tree via
    `node_id`/`parent_id` columns.
    """

    def __init__(self, coding_file, coding_num):
        self.coding_num = coding_num
        self.coding_file = coding_file
        self.df = pd.read_csv(coding_file, sep="\t")

    def __repr__(self):
        return f"{self.__class__}. Coding num: {self.coding_num}, coding file: {self.coding_file}"

    def get_codes(self, selection, input_field, output_field):
        """Return `output_field` values for `selection` and all its descendants.

        Raises ValueError when a field name is invalid or `selection` is absent.
        """
        valid_coding_fields = ["coding", "meaning", "node_id"]
        for i_o_field in [input_field, output_field]:
            if (i_o_field not in valid_coding_fields):
                raise ValueError(f"Could not find field {i_o_field} field in coding file {self.coding_file}, choose from the following options: {valid_coding_fields}")
        if selection not in self.df[input_field].values:
            raise ValueError(f"Value not found in field {input_field} of coding file")
        code_line = DictLine(self.df[self.df[input_field] == selection])
        codes = [getattr(code_line, f"_{output_field}")]
        # Children are rows whose parent_id equals this row's node_id.
        df = self.df.query(f"parent_id=={code_line._node_id}")
        child_nodes = df[input_field].values.tolist()
        # Recurse on the child nodes to get codings of levels below the selection.
        for child in child_nodes:
            codes.extend(self.get_codes(child, input_field, output_field))
        return codes
# -
# Smoke test: continuous field looked up by name.
test = Field("Monocyte count", name= "monocyte_count")
test
test.df.compute()
# Categorical ICD10 field looked up by FieldID.
# NOTE(review): .compute()/.join/.notnull on a Field rely on the delegation
# decorator that is commented out above, and icd10_secondary_df is never
# defined in this notebook — confirm these cells still run.
icd10_df = Field(41202, name= "ICD10_primary").compute()
icd10_df
icd10_df.join(icd10_secondary_df).to_csv("ICD10_pheno_matrix.tsv", sep = "\t", index = True)
Field(41202, name= "ICD10")
Field(41202, name= "ICD10").notnull().sum().compute()
|
notebooks/package/Field.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Map
# map(function, iterables)
def square(a):
    """Return the square of *a* (i.e. a multiplied by itself)."""
    result = a * a
    return result
# map returns a lazy iterator; it is consumed exactly once by list() below.
x = map(square,[2,4,6,8])
print(type(x)) # A map object
print(list(x))
"""Short Hand"""
y = list(map(lambda x:x*x,[2,4,6,8]))
print("Short hand with lambda")
print(y)
"""OR"""
# Equivalent list comprehension.
print([x**2 for x in [2,4,6,8]])
# # Filter
# + tags=[]
# filter(condition, iterables)
arr = [1,23,56,7,3,89,23,4,6,2,46,-33,5,-61,66]
# filter keeps only elements for which the predicate is truthy.
print(list(filter(lambda x: x<5,arr))) #filter object
# -
# # Reduce
#reduce(function, iterables)
from functools import reduce
arr = [1,23,56,-7]
# reduce folds left-to-right: ((1*23)*56)*-7.
print(reduce(lambda x,y:x*y,arr))
# # filter() with map()
print(list(map(lambda x:x**3,filter(lambda x: x>=4,[1,2,3,4,5]))))
"""OR List comprehension"""
print([x**3 for x in [1,2,3,4,5] if x>=4])
# # map() filter() & reduce()
# + tags=[]
"""filters value in arr<4 and take cubes of through map() and sum it all up using the reduce function()"""
from functools import reduce
arr =[1,2,3,4,5]
print(reduce(lambda x,y:x+y,map(lambda x:x**3,filter(lambda x: x>=4,arr))))
# -
# # lambda - Anonymous functions
# lambdas are usually used in a nameless manner within another higher-order function (map, filter, reduce).
# lambda arguments: expression
cube = lambda x: x**3
cube(9)
"""OR"""
print(list(map(lambda x:x**3, [1,2,3,4,5,6,7])))
|
007/map_filter_reduce_lambda.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] state="normal"
# # Lab 02: N-grams
#
# ## Overview
#
# An *n-gram* -- in the context of parsing natural languages such as English -- is a sequence of *n* consecutive *tokens* (which we might define as characters separated by whitespace) from some passage of text. Based on the following passage:
#
# > I really really like cake.
#
# We have the following 2-grams:
#
# [('I', 'really'), ('really', 'really'), ('really', 'like'), ('like', 'cake.')]
#
# And the following 3-grams:
#
# [('I', 'really', 'really'),
# ('really', 'really', 'like'),
# ('really', 'like', 'cake.')]
#
# (I omit a 1-gram listing because it would merely be a list of all tokens in the original text.)
#
# Among other things, n-grams are useful for describing the vocabulary of and statistical correlation between tokens in a sample body of text (e.g., as taken from a book). We can use an n-gram model to determine the likelihood of finding
# a particular sequence of words after another. This information, in turn, can be used to generate passages of text that statistically mimic the sample.
#
# We can convert the above 3-gram list into the following lookup structure (i.e., a dictionary mapping strings to lists of 2-tuples), where the first token of each n-gram maps to all sequences that follow it in the text:
#
# {'I': [('really', 'really')],
# 'really': [('really', 'like'), ('like', 'cake.')]}
#
# We can now generate passages of text using the following method:
#
# 1. Select a random key and use it as the start token of the passage. It will also serve as the current token for the next step.
# 2. Select a random tuple from the list associated with the current token and append the sequence to the passage. The last token of the selected sequence will be the new current token.
# 3. If the current token is a key in the dictionary then simply repeat step 2, otherwise select another random key from the dictionary as the current token and append it to the passage before repeating step 2.
#
# E.g., we might start by selecting `'I'` in step (1), which gives us `('really', 'really')` as our only choice in (2). The second `'really'` in that tuple is the new current token (which is a valid key), which takes us back to (2) and gives us a choice between two tuples. If we choose `('like', 'cake.')`, then we have `'cake.'` as our new current token --- it is not a key in the map, however, so we'd have to choose a new random key if we wanted to generate a longer passage. Either way, the passage we've generated thus far is `'I really really like cake.'` (which also happens to be the original passage).
#
# Here's a lengthier passage that could be generated from the 3-gram dictionary above -- note that for clarity I've added `*`'s every time a new random key is selected (i.e., when the previous token isn't a key in the dictionary):
#
# > \* really like cake. \* I really really really like \* really like cake. \* I really really really like \* really
#
# This gets more interesting when we build n-gram dictionaries from lengthier bodies of text. For instance, the following text was generated (with a little programmed embellishment for prettier capitalization and punctuation) from a 3-gram dictionary extracted from Romeo's famous balcony monologue:
#
# > Lamp her eyes were there they in their spheres till they in her eyes in all the fairest stars in all the heaven having some business do wear it is my love! O it is envious her cheek would through the heaven having some business do entreat her eyes were there they in their spheres till they in her eyes to.
#
# For reference, here is the dictionary entry for the token `'her'` used to generate the above:
#
# 'her': [('maid', 'art'),
# ('maid', 'since'),
# ('vestal', 'livery'),
# ('eyes', 'to'),
# ('eyes', 'were'),
# ('head?', 'The'),
# ('cheek', 'would'),
# ('eyes', 'in'),
# ('cheek', 'upon'),
# ('hand!', 'O')],
#
# If you haven't already guessed it, your assignment is to implement a function that constructs an n-gram dictionary from a list of strings (tokens), and another that returns a passage of text generated from a given n-gram dictionary.
# + [markdown] state="normal"
# ## Implementation Details
#
# Before you start working on the aforementioned functions, it's important to consider how we'll be parsing passages for tokens.
#
# Here's the body of Romeo's balcony soliloquy:
# + deletable=false editable=false id="romeo" nbgrader={"grade": false, "grade_id": "romeo", "locked": true, "schema_version": 1, "solution": false} state="read_only"
ROMEO_SOLILOQUY = """
But, soft! what light through yonder window breaks?
It is the east, and Juliet is the sun.
Arise, fair sun, and kill the envious moon,
who is already sick and pale with grief,
That thou her maid art far more fair than she:
be not her maid, since she is envious;
her vestal livery is but sick and green
and none but fools do wear it; cast it off.
It is my lady, O, it is my love!
O, that she knew she were!
She speaks yet she says nothing: what of that?
Her eye discourses; I will answer it.
I am too bold, 'tis not to me she speaks:
two of the fairest stars in all the heaven,
having some business, do entreat her eyes
to twinkle in their spheres till they return.
What if her eyes were there, they in her head?
The brightness of her cheek would shame those stars,
as daylight doth a lamp; her eyes in heaven
would through the airy region stream so bright
that birds would sing and think it were not night.
See, how she leans her cheek upon her hand!
O, that I were a glove upon that hand,
that I might touch that cheek!"""
# + [markdown] state="normal"
# Using the string's built-in `split` method --- previously mentioned in class --- along with `lower`, we can derive from the passage a list of tokens.
# + state="normal"
toks = [t.lower() for t in ROMEO_SOLILOQUY.split()]
toks[:8]
# + [markdown] state="normal"
# We could do more interesting things (such as separating out punctuation), but we'll keep our parser simple. For the sake of consistency, we'll rely on this fairly straightforward approach to parsing. Onwards!
#
# ### `compute_ngrams`
#
# Your first task is to write `compute_ngrams`, which will take a list of tokens, a value `n` indicating the n-gram length (e.g., 3 for 3-grams), and return an n-gram dictionary. The keys in the returned dictionary should all be strings, whose values will be lists of one or more tuples. Note that even in the case of `n`=2 (which would be the minimum value) the dictionary should map strings to lists of 1-tuples (i.e., instead of to lists of individual tokens).
# + deletable=false id="compute_ngrams" nbgrader={"grade": false, "grade_id": "compute_ngrams", "locked": false, "schema_version": 1, "solution": true} starter_code="def compute_ngrams(toks, n=2):\n \"\"\"Returns an n-gram dictionary based on the provided list of tokens.\"\"\"\n " state="graded"
def compute_ngrams(toks, n=2):
    """Returns an n-gram dictionary based on the provided list of tokens.

    Maps each token to the list, in order of appearance, of the
    (n-1)-tuples of tokens that follow it. Tokens whose continuation would
    run past the end of `toks` are not used as keys.
    """
    ngrams_dict = {}
    for i in range(len(toks) - (n - 1)):
        # setdefault avoids the `key not in dict.keys()` membership test and
        # the double lookup of the original if/else branches.
        ngrams_dict.setdefault(toks[i], []).append(tuple(toks[i + 1:i + n]))
    return ngrams_dict
# + [markdown] state="normal"
# And now for some simple tests:
# + deletable=false editable=false id="test_compute_ngrams" nbgrader={"grade": true, "grade_id": "test_compute_ngrams", "locked": true, "points": 5, "schema_version": 1, "solution": false} state="read_only"
# (5 points)
from unittest import TestCase
tc = TestCase()
# Tiny fixture from the lab write-up.
simple_toks = [t.lower() for t in 'I really really like cake.'.split()]
compute_ngrams(simple_toks)
tc.assertEqual(compute_ngrams(simple_toks),
               {'i': [('really',)], 'like': [('cake.',)], 'really': [('really',), ('like',)]})
tc.assertEqual(compute_ngrams(simple_toks, n=3),
               {'i': [('really', 'really')],
                'really': [('really', 'like'), ('like', 'cake.')]})
# Larger fixture: the soliloquy defined above.
romeo_toks = [t.lower() for t in ROMEO_SOLILOQUY.split()]
dct = compute_ngrams(romeo_toks, n=4)
tc.assertEqual(dct['but'], [('sick', 'and', 'green'), ('fools', 'do', 'wear')])
tc.assertEqual(dct['it'],
               [('is', 'the', 'east,'),
                ('off.', 'it', 'is'),
                ('is', 'my', 'lady,'),
                ('is', 'my', 'love!'),
                ('were', 'not', 'night.')])
# + [markdown] state="normal"
# I've also placed the entire text of *Peter Pan* (courtesy of [Project Gutenberg][]) on the server, to be used to stress test your function just a bit. Evaluate the following cell to read the text of the book into `peter_pan_text`.
#
# If you're not on the course server, you can uncomment the line to read the text directly from the Project Gutenberg website and comment out the lines which access the file for testing.
#
# [Project Gutenberg]: http://gutenberg.org
# + deletable=false editable=false id="load_peter_pan" state="read_only"
import urllib.request
PETER_PAN_URL = 'https://moss.cs.iit.edu/cs331/data/peterpan.txt'
# NOTE: fetches the whole book over the network on every run; no caching.
peter_pan_text = urllib.request.urlopen(PETER_PAN_URL).read().decode()
# Skip the Gutenberg preamble: chapter 1 starts at "All children".
chapt1_start = peter_pan_text.index('All children')
print(peter_pan_text[chapt1_start:chapt1_start+1000])
# + [markdown] state="normal"
# Time for some larger test cases!
# + deletable=false editable=false id="test_compute_ngrams_2" nbgrader={"grade": true, "grade_id": "test_compute_ngrams_2", "locked": true, "points": 5, "schema_version": 1, "solution": false} state="read_only"
# (5 points)
from unittest import TestCase
tc = TestCase()
# Stress test against the full Peter Pan text fetched above.
pp_toks = [t.lower() for t in peter_pan_text.split()]
dct = compute_ngrams(pp_toks, n=3)
tc.assertEqual(dct['crocodile'],
               [('passes,', 'but'),
                ('that', 'happened'),
                ('would', 'have'),
                ('was', 'in'),
                ('passed', 'him,'),
                ('is', 'about'),
                ('climbing', 'it.'),
                ('that', 'was'),
                ('pass', 'by'),
                ('and', 'let'),
                ('was', 'among'),
                ('was', 'waiting')])
tc.assertEqual(len(dct['wendy']), 202)
tc.assertEqual(len(dct['peter']), 243)
# + [markdown] state="normal"
# ### Random selection
# + [markdown] state="normal"
# One more thing before you start work on generating passages from an n-gram dictionary: we need a way to choose a random item from a sequence.
#
# The [`random.choice` function](https://docs.python.org/3/library/random.html#random.choice) provides just this functionality. Consider (and feel free to play with) the following examples --- you should, at the very least, evaluate the cell a few separate times to see the results:
# + state="normal"
import random
# random.choice picks uniformly from any non-empty sequence.
print(random.choice(['lions', 'tigers', 'bears']))
print(random.choice(range(100)))
print(random.choice([('really', 'like'), ('like', 'cake')]))
# + [markdown] state="normal"
# Note that a separate tutorial on random number generators (and other [`random` module](https://docs.python.org/3/library/random.html) APIs) will be posted separately, but for now just understanding how to use `random.choice` should be sufficient for this assignment.
# + [markdown] state="normal"
# ### `gen_passage`
#
# Finally, you're ready to implement `gen_passage`, which will take an n-gram dictionary and a length for the passage to generate (as a token count).
#
# As described earlier, it will work as follows:
#
# 1. Select a random key from the dictionary and use it as the start token of the passage. It will also serve as the current token for the next step.
# 2. Select a random tuple from the list associated with the current token and append the sequence to the passage. The last token of the selected sequence will be the new current token.
# 3. If the current token is a key in the dictionary then simply repeat step 2, otherwise select another random key from the map as the current token and append it to the passage before repeating step 2.
#
# You will use `random.choice` whenever a random selection needs to be made. In order for your results to be reproduceable, be sure to sort the dictionary's keys (which, recall, are in no discernible order) before selecting a random one, like this (assuming `ngram_dict` is the dictionary):
#
# random.choice(sorted(ngram_dict.keys()))
# + deletable=false id="gen_passage" nbgrader={"grade": true, "grade_id": "gen_passage", "locked": false, "points": 5, "schema_version": 1, "solution": true} starter_code="import random\n\ndef gen_passage(ngram_dict, length=100):\n " state="graded"
import random
def gen_passage(ngram_dict, length=100):
    """Generate a passage of roughly `length` tokens from an n-gram dict.

    Starts from a random key, then repeatedly appends a random continuation
    tuple for the current (last) token, falling back to a fresh random key
    whenever the current token is not in the dictionary. The passage may
    slightly exceed `length`, since whole tuples are appended.

    The sequence of random.choice calls is identical to the original
    implementation, so results are reproducible under a fixed seed.
    """
    # Keys are sorted so random.choice is reproducible for a given seed.
    tokens = [random.choice(sorted(ngram_dict.keys()))]
    while len(tokens) < length:
        current = tokens[-1]
        if current in ngram_dict:
            # Fix: the original re-flattened the entire accumulated list of
            # lists on every iteration (O(n^2) overall); extending one flat
            # list keeps generation linear in the passage length.
            tokens.extend(random.choice(ngram_dict[current]))
        else:
            tokens.append(random.choice(sorted(ngram_dict.keys())))
    return ' '.join(tokens)
# + [markdown] state="normal"
# For the following test cases to work, it is *critical* that you do not invoke `random.choice` more than is absolutely necessary, and only as prescribed in the steps described above!
#
# Note that in addition to the automated test cases, we'll also be manually grading your code above.
# + deletable=false editable=false id="gen_passage_test" nbgrader={"grade": true, "grade_id": "gen_passage_test", "locked": true, "points": 5, "schema_version": 1, "solution": false} state="read_only"
# (10 points)
from unittest import TestCase
tc = TestCase()
# A fixed seed makes the random.choice sequence reproducible.
random.seed(1234)
simple_toks = [t.lower() for t in 'I really really like cake.'.split()]
tc.assertEqual(gen_passage(compute_ngrams(simple_toks), 10),
               'like cake. i really really really really like cake. i')
random.seed(1234)
romeo_toks = [t.lower() for t in ROMEO_SOLILOQUY.split()]
tc.assertEqual(gen_passage(compute_ngrams(romeo_toks), 10),
               'too bold, \'tis not night. see, how she leans her')
# -
|
cs-master/cs401/.ipynb_checkpoints/02-ngrams-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tracts
# language: python
# name: python3
# ---
# +
# default_exp connectomes
# -
# # Module connectomes
#
# > Pipeline making use of `postprocessing` nodes to generate a connectivity matrix and distance matrix.
#hide
from nbdev.showdoc import *
# +
#exporti
import os
import bids
bids.config.set_option('extension_initial_dot', True)
from nipype.pipeline import Node, Workflow
import pipetography.nodes as nodes
import pipetography.core as ppt
# +
#export
class connectome:
    """
    Create a pipeline that produces connectomes based on input atlases and
    streamlines; the pipeline creates sub-graphs based on the input BIDS
    directory's subject & session combinations.

    Inputs:
      - BIDS_dir (str): base BIDS directory path
      - atlas_list (List of strings): names of atlases: aal, brainnectome,
        desikan-killiany, default is set to brainnectome for now.
      - SIFT_mask (bool): Uses 5ttgen tissue segmentation during SIFT2.
        Defaults to False. If in pipeline `gmwmi = False`, this should also
        be false.
      - skip_tuples (list of (subject, session) tuples): combinations to skip.
      - debug (bool): Default = False; if True, saves node outputs and log files.
    """

    def __init__(self, BIDS_dir, atlas_list, SIFT_mask=False, skip_tuples=[()], debug=False):
        """
        Initialize workflow inputs and the SelectFiles path templates.
        """
        self.bids_dir = BIDS_dir
        self.atlas_list = atlas_list
        self.sub_list, self.ses_list, self.layout = ppt.get_subs(BIDS_dir)
        self.SIFT_mask = SIFT_mask
        self.skip_combos = skip_tuples
        self.debug_mode = debug
        # Per-subject/session file templates consumed by the SelectFiles node.
        self.subject_template = {
            'tck': os.path.join(self.bids_dir, 'derivatives', 'streamlines', 'sub-{subject_id}', 'ses-{session_id}', 'sub-{subject_id}_ses-{session_id}_gmwmi2wm.tck'),
            'brain': os.path.join(self.bids_dir, 'derivatives', 'pipetography', 'sub-{subject_id}', 'ses-{session_id}', 'preprocessed', 'dwi_space-acpc_res-1mm_seg-brain.nii.gz'),
            'dwi_mif': os.path.join(self.bids_dir, 'derivatives', 'pipetography', 'sub-{subject_id}', 'ses-{session_id}', 'preprocessed', 'dwi_space-acpc_res-1mm.mif'),
            'T1A': os.path.join(self.bids_dir, 'derivatives', 'pipetography', 'sub-{subject_id}', 'ses-{session_id}', 'preprocessed', 'T1w_space-acpc.nii.gz'),
            'mask': os.path.join(self.bids_dir, 'derivatives', 'pipetography', 'sub-{subject_id}', 'ses-{session_id}', 'preprocessed', 'dwi_space-acpc_res-1mm_seg-brain_mask.nii.gz'),
        }
        if self.SIFT_mask:
            # The 5tt segmentation is only needed when SIFT2 uses an ACT mask.
            self.subject_template['mrtrix5tt'] = os.path.join(self.bids_dir, 'derivatives', 'pipetography', 'sub-{subject_id}', 'ses-{session_id}', 'preprocessed', 'T1w_space-acpc_seg-5tt.mif')

    def create_nodes(self):
        """
        Create postprocessing nodes, and make output path substitutions so
        outputs are BIDS compliant.
        """
        self.PostProcNodes = nodes.PostProcNodes(
            BIDS_dir=self.bids_dir,
            subj_template = self.subject_template,
            sub_list = self.sub_list,
            ses_list = self.ses_list,
            skip_tuples = self.skip_combos)
        # Fan the registration node out over every requested atlas.
        self.PostProcNodes.linear_reg.iterables = [('moving_image', self.atlas_list)]
        self.workflow = None

    def connect_nodes(self, wf_name="connectomes"):
        """
        Connect postprocessing nodes into a nipype Workflow.
        """
        self.workflow = Workflow(name=wf_name, base_dir=os.path.join(self.bids_dir, 'derivatives'))
        self.workflow.connect(
            [
                (self.PostProcNodes.subject_source, self.PostProcNodes.select_files, [('subject_id', 'subject_id'),
                                                                                      ('session_id', 'session_id')]),
                (self.PostProcNodes.select_files, self.PostProcNodes.linear_reg, [('brain', 'fixed_image')]),
                (self.PostProcNodes.linear_reg, self.PostProcNodes.nonlinear_reg, [('warped_image', 'moving_image')]),
                (self.PostProcNodes.select_files, self.PostProcNodes.nonlinear_reg, [('brain', 'fixed_image')]),
                (self.PostProcNodes.nonlinear_reg, self.PostProcNodes.connectome, [('warped_image', 'in_parc')]),
                (self.PostProcNodes.nonlinear_reg, self.PostProcNodes.distance, [('warped_image', 'in_parc')]),
                (self.PostProcNodes.select_files, self.PostProcNodes.response, [('dwi_mif', 'in_file')]),
                (self.PostProcNodes.select_files, self.PostProcNodes.fod, [('dwi_mif', 'in_file')]),
                (self.PostProcNodes.select_files, self.PostProcNodes.fod, [('mask', 'mask_file')]),
                (self.PostProcNodes.response, self.PostProcNodes.fod, [('wm_file', 'wm_txt')]),
                (self.PostProcNodes.response, self.PostProcNodes.fod, [('gm_file', 'gm_txt')]),
                (self.PostProcNodes.response, self.PostProcNodes.fod, [('csf_file', 'csf_txt')]),
                (self.PostProcNodes.select_files, self.PostProcNodes.sift2, [('tck', 'in_file')]),
                (self.PostProcNodes.fod, self.PostProcNodes.sift2, [('wm_odf', 'in_fod')]),
                (self.PostProcNodes.sift2, self.PostProcNodes.connectome, [('out_file', 'in_weights')]),
                (self.PostProcNodes.select_files, self.PostProcNodes.connectome, [('tck', 'in_file')]),
                (self.PostProcNodes.select_files, self.PostProcNodes.distance, [('tck', 'in_file')]),
                (self.PostProcNodes.connectome, self.PostProcNodes.datasink, [('out_file', 'connectomes.@connectome')]),
                (self.PostProcNodes.distance, self.PostProcNodes.datasink, [('out_file', 'connectomes.@distance')])
            ])
        if self.SIFT_mask:
            # Feed the 5tt image into SIFT2 as an anatomically constrained mask.
            self.workflow.connect(
                [
                    (self.PostProcNodes.select_files, self.PostProcNodes.sift2, [('mrtrix5tt', 'act')])
                ])
            self.PostProcNodes.sift2.inputs.fd_scale_gm=True
        if self.debug_mode:
            self.workflow.config["execution"] = {
                "use_relative_paths": "True",
                "hash_method": "content",
                "stop_on_first_crash": "True",
            }
        else:
            # Non-debug runs also clean up node working directories.
            self.workflow.config["execution"] = {
                "use_relative_paths": "True",
                "hash_method": "content",
                "stop_on_first_crash": "True",
                "remove_node_directories": "True",
            }

    def draw_pipeline(self, graph_type='orig'):
        """
        Visualize the workflow as a dot/png graph under derivatives/pipetography/graph.
        """
        self.workflow.write_graph(
            graph2use=graph_type,
            dotfilename = os.path.join(
                self.bids_dir, 'derivatives', 'pipetography', 'graph', 'postprocessing.dot'
            ),
        )

    def run_pipeline(self, parallel=None):
        """
        Run the nipype workflow.

        parallel (int | None): number of MultiProc worker processes;
        None runs the workflow serially.
        """
        # Fix: isinstance() instead of `type(parallel) == int`, so int
        # subclasses are accepted as well.
        if isinstance(parallel, int):
            print("Running workflow with {} parallel processes".format(parallel))
            self.workflow.run('MultiProc', plugin_args = {'n_procs': parallel})
        elif parallel is None:
            print("Parallel processing disabled, running workflow serially")
            self.workflow.run()
# +
#hide
#test
test_wf = connectome(BIDS_dir='./testing/BIDS_dir',
                     atlas_list=['./testing/Atlases/DK_Atlas_86_2mm.nii.gz',
                                 '/testing/Atlases/BN_Atlas_246_1mm.nii.gz'])
# NOTE(review): the second atlas path starts at '/testing' (absolute), unlike
# the './testing' prefix used everywhere else — likely a missing dot; confirm.
test_wf.create_nodes()
test_wf.connect_nodes()
test_wf.draw_pipeline()
# assert drawn workflow is present as png and dot:
assert os.path.exists('./testing/BIDS_dir/derivatives/pipetography/graph/postprocessing.dot')
assert os.path.exists('./testing/BIDS_dir/derivatives/pipetography/graph/postprocessing.png')
# assert output is compliant with BIDS
assert os.path.abspath(test_wf.PostProcNodes.datasink.inputs.base_directory) == os.path.abspath(os.path.join('./testing/BIDS_dir', 'derivatives', 'pipetography'))
# -
# ## Example
#
# Workflow creating a connectivity matrix and a distance adjacency matrix for brainnectome and desikan-killiany atlas.
#example
# Build the example workflow, skipping one (subject, session) combination.
post_wf = connectome(BIDS_dir='./testing/BIDS_dir',
                     atlas_list=['./testing/Atlases/BN_Atlas_246_1mm.nii.gz',
                                 './testing/Atlases/DK_Atlas_86_2mm.nii.gz'],
                     skip_tuples=[('11045', '02')])
# Take a look at the post-processing workflow:
# +
#example
from IPython.display import Image
post_wf.create_nodes()
post_wf.connect_nodes(wf_name='connectomes')
post_wf.draw_pipeline(graph_type='flat')
Image('./testing/BIDS_dir/derivatives/pipetography/graph/postprocessing.png')
# -
#example
# Run with two MultiProc worker processes.
post_wf.run_pipeline(parallel=2)
# View outputs:
# !tree ./testing/BIDS_dir/derivatives/pipetography
# +
#usage
#example
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the connectivity and distance matrices produced by the workflow above.
CM = pd.read_csv('./testing/BIDS_dir/derivatives/pipetography/sub-01/ses-002/connectomes/BN_Atlas_246_1mm/connectome.csv', header = None)
DM = pd.read_csv('./testing/BIDS_dir/derivatives/pipetography/sub-01/ses-002/connectomes/BN_Atlas_246_1mm/distances.csv', header = None)
# Visualize both matrices side by side:
fig, (ax1, ax2) = plt.subplots(1,2, sharey = True, figsize = (12,12))
# log1p computes log(1+x), keeping zero-count edges finite on the color scale.
cdk=ax1.imshow(np.log1p(CM), cmap = plt.get_cmap('inferno'), interpolation = 'nearest')
ax1.set_title('BNA246 Connectivity Matrix')
cbar=fig.colorbar(cdk, ax=ax1, shrink=0.4)
cbar.set_label('Log Scale Streamline Counts')
ddk=ax2.imshow(DM, interpolation = 'nearest')
ax2.set_title('BNA246 Distance Adjacency Matrix')
dbar=fig.colorbar(ddk, ax=ax2, shrink=0.4)
dbar.set_label('Mean Streamline Length (mm)')
|
03_connectomes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# ## Test 3 assumptions in methods
# Imports
# +
# %pylab inline
pylab.rcParams['figure.figsize'] = (10, 6)
# #%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import numpy.random as rand
import pandas as pd
import seaborn as sns
from scipy.stats import ttest_1samp
# -
# ## Assumption 1
#
# Assumption 1 states that $P(R|H)$ is independent of network weights $W$.
#
# We demonstrate that is the case here by plotting a heatmap of reward densities as a function of one of the network weights.
# +
c = 0.5
nsims = 1
N = 19
#fn_in = './sweeps/param_w_N_%d_nsims_%d_c_%f_deltaT_simulations.npz'%(N, nsims, c)
fn_in = './sweeps/param_w_N_%d_nsims_%d_c_%f_deltaT_counterfactual_simulations.npz'%(N, nsims, c)
sims = np.load(fn_in)
hs = sims['hs']
vs = sims['vs']
us = sims['us']
wvals = sims['wvals']
# +
# Simulation / filter parameters
DeltaT = 50        # bin width (timesteps)
tau_s = 0.2        # synaptic filter time constant
p = 0.2
wmax = 20
wmin = 2
# Exponential synaptic filter, normalized to unit area
t_filter = np.linspace(0, 1, 2000)
exp_filter = np.exp(-t_filter/tau_s)
exp_filter = exp_filter/np.sum(exp_filter)
ds = exp_filter[0]
# Cost-surface parameters
B1 = 1
B2 = 2
x = .01
y = 0.1
z = 0
# Reward (cost) as a function of the two filtered spike trains s1, s2
cost2_fun = lambda s1, s2: (B1*s1-x)**2 + (z+B2*s2 - B2*(B1*s1-y)**2)**2
cost2_w1 = np.zeros((N,N,hs.shape[4]))
hs2_w1 = np.zeros((N,N,2,hs.shape[4]))
for i in range(N):
    print("W[0] = %d"%i)
    for j in range(N):
        for k in range(nsims):
            # Filter each unit's spike train and evaluate the cost per timestep.
            # NOTE(review): each iteration over k overwrites the same [i,j]
            # slice, so only the last simulation is kept; harmless here
            # because nsims == 1 — verify before raising nsims.
            s1 = np.convolve(hs[i,j,k,0,:], exp_filter)[0:hs.shape[4]]
            s2 = np.convolve(hs[i,j,k,1,:], exp_filter)[0:hs.shape[4]]
            cost2_w1[i,j,:] = cost2_fun(s1, s2)
            hs2_w1[i,j,:,:] = hs[i,j,k,:,:]
# +
#Take last of cost2 in each bin
cost_r = cost2_w1.reshape((N, N, -1, DeltaT))
cost = np.squeeze(cost_r[:,:,:,-1])
cost.shape
hs_r = hs2_w1.reshape((N, N, 2, -1, DeltaT))
hs_bin = np.max(hs_r,4)
hs_bin.shape
#Total spikes
hs_r = hs2_w1.reshape((N, N, 2, -1, DeltaT))
hs_sum = np.sum(hs_r,4)
#Find bins for each firing pattern (0,0), (0,1), (1,0), (1,1)
h0_0_h1_0 = np.multiply(hs_bin[:,:,0,:] == 0, hs_bin[:,:,1,:] == 0)
h0_0_h1_1 = np.multiply(hs_bin[:,:,0,:] == 0, hs_bin[:,:,1,:] == 1)
h0_1_h1_0 = np.multiply(hs_bin[:,:,0,:] == 1, hs_bin[:,:,1,:] == 0)
h0_1_h1_1 = np.multiply(hs_bin[:,:,0,:] == 1, hs_bin[:,:,1,:] == 1)
# -
hs_sum.shape
# +
#Make histogram for each weight
M = 30
hist00 = np.zeros((N, N, M))
hist01 = np.zeros((N, N, M))
hist10 = np.zeros((N, N, M))
hist11 = np.zeros((N, N, M))
rng = (0, 1e-4)
for i in range(N):
for j in range(N):
hist00[i,j,:] = np.histogram(cost[i,j,h0_0_h1_0[i,j,:]], M, rng, density = True)[0]
hist01[i,j,:] = np.histogram(cost[i,j,h0_0_h1_1[i,j,:]], M, rng, density = True)[0]
hist10[i,j,:] = np.histogram(cost[i,j,h0_1_h1_0[i,j,:]], M, rng, density = True)[0]
hist11[i,j,:] = np.histogram(cost[i,j,h0_1_h1_1[i,j,:]], M, rng, density = True)[0]
# -
fig, axes = plt.subplots(2,2,sharex = True, sharey=True)
axes[0,0].imshow(hist00.T, interpolation = 'none', origin = 'lower')
axes[0,1].imshow(hist01.T, interpolation = 'none', origin = 'lower')
axes[1,0].imshow(hist10.T, interpolation = 'none', origin = 'lower')
axes[1,0].set_xlabel('W')
axes[1,1].imshow(hist11.T, interpolation = 'none', origin = 'lower')
axes[1,1].set_xlabel('W')
hist11.shape
# +
#Take the mean of each of these...
mean00 = np.zeros((N,N))
mean01 = np.zeros((N,N))
mean10 = np.zeros((N,N))
mean11 = np.zeros((N,N))
for i in range(N):
for j in range(N):
mean00[i,j] = np.mean(cost[i,j,h0_0_h1_0[i,j,:]])
mean01[i,j] = np.mean(cost[i,j,h0_0_h1_1[i,j,:]])
mean10[i,j] = np.mean(cost[i,j,h0_1_h1_0[i,j,:]])
mean11[i,j] = np.mean(cost[i,j,h0_1_h1_1[i,j,:]])
grad00 = np.gradient(mean00)
grad01 = np.gradient(mean01)
grad10 = np.gradient(mean10)
grad11 = np.gradient(mean11)
absgrad00 = np.sqrt(grad00[0]**2 + grad00[1]**2)#/mean00
absgrad01 = np.sqrt(grad01[0]**2 + grad01[1]**2)#/mean01
absgrad10 = np.sqrt(grad10[0]**2 + grad10[1]**2)#/mean10
absgrad11 = np.sqrt(grad11[0]**2 + grad11[1]**2)#/mean11
# -
absgrad00.shape
fig, axes = plt.subplots(2,2,figsize=(8,8), sharex = True)
a1 = axes[0,0].imshow(absgrad00, interpolation = 'none', origin = 'lower')
a2 = axes[0,1].imshow(absgrad01, interpolation = 'none', origin = 'lower')
a3 = axes[1,0].imshow(absgrad10, interpolation = 'none', origin = 'lower')
a4 = axes[1,1].imshow(absgrad11, interpolation = 'none', origin = 'lower')
plt.colorbar(a1, ax = axes[0,0])
plt.colorbar(a2, ax = axes[0,1])
plt.colorbar(a3, ax = axes[1,0])
plt.colorbar(a4, ax = axes[1,1])
plt.xlabel('W_1')
plt.ylabel('E(R|H)')
plt.savefig('S1_assumption_1a.eps')
#Plot relative gradients as a function of firing rate
hs_sum.shape
mean_hs = np.mean(hs_sum, 3)
mean_hs_comb = np.sqrt(mean_hs[:,:,0]**2 + mean_hs[:,:,1]**2)
#Convert to spikes/sec
mean_hs_comb = mean_hs_comb/(0.002*DeltaT)
#plt.imshow(mean_hs_comb, interpolation = 'none', origin = 'lower')
fig, axes = plt.subplots(2,2, figsize = (6,6), sharex = True)
axes[0,0].plot(mean_hs_comb.reshape(-1,1), absgrad00.reshape(-1, 1), '.')
axes[0,1].plot(mean_hs_comb.reshape(-1,1), absgrad01.reshape(-1, 1), '.')
axes[1,0].plot(mean_hs_comb.reshape(-1,1), absgrad10.reshape(-1, 1), '.')
axes[1,1].plot(mean_hs_comb.reshape(-1,1), absgrad11.reshape(-1, 1), '.')
axes[1,1].set_xlabel('Firing rate (spikes/sec)')
axes[1,0].set_xlabel('Firing rate (spikes/sec)')
axes[1,0].set_ylabel('E(R|H)')
axes[0,0].set_ylabel('E(R|H)')
axes[0,0].set_title('H_0 = 0, H_1 = 0')
axes[0,1].set_title('H_0 = 0, H_1 = 1')
axes[1,0].set_title('H_0 = 1, H_1 = 0')
axes[1,1].set_title('H_0 = 1, H_1 = 1')
#sns.despine(trim = True)
plt.savefig('S1_assumption_1b.eps')
# Looks like the assumption doesn't hold for the density in general. In fact........ if it did, we would expect these to be flat as a function of $W$
#
# However, perhaps an argument can be made that these assumptions _do_ hold at bins where one of the units is near threshold.
#
# Further, the assumption does appear to hold for the expected values. Which is actually all we need. It holds, at least, for lower firing rate. This also, is as we would expect. Good to have a visual of when the assumption breaks down...
# ## Assumption 2
# Here we test the assumption that $\frac{\partial}{\partial w}E(H_0|H_1)$ is independent of $H_1$. That is, we check if the gradient of each $\frac{\partial}{\partial w}E(H_0|H_1=i)$ are roughly equal, for $i = 0,1$.
#
# We just make a heatmap of each to see...
# +
DeltaT = 50
tau_s = 0.2
p = 0.2
wmax = 20
wmin = 2
t_filter = np.linspace(0, 1, 2000)
exp_filter = np.exp(-t_filter/tau_s)
exp_filter = exp_filter/np.sum(exp_filter)
ds = exp_filter[0]
B1 = 1
B2 = 2
x = .01
y = 0.1
z = 0
cost2_fun = lambda s1, s2: (B1*s1-x)**2 + (z+B2*s2 - B2*(B1*s1-y)**2)**2
cost2_w12 = np.zeros((N,N,hs.shape[4]))
hs12 = np.zeros((N,N,2,hs.shape[4]))
for i in range(N):
print("W[0] = %d"%i)
for j in range(N):
for k in range(nsims):
s1 = np.convolve(hs[i,j,k,0,:], exp_filter)[0:hs.shape[4]]
s2 = np.convolve(hs[i,j,k,1,:], exp_filter)[0:hs.shape[4]]
cost2_w12[i,j,:] = cost2_fun(s1, s2)
hs12[i,j,:,:] = hs[i,j,k,:,:]
# +
#Total spikes
hs_r = hs12.reshape((N, N, 2, -1, DeltaT))
hs_sum = np.sum(hs_r,4)
h0_h1_0 = hs_sum[:,:,1,:] == 0
h0_h1_1 = hs_sum[:,:,1,:] == 1
# -
hs_sum[i,j,0,h0_h1_0[i,j,:]].shape
mean0 = np.zeros((N,N))
mean1 = np.zeros((N,N))
for i in range(N):
for j in range(N):
mean0[i,j] = np.mean(hs_sum[i,j,0,h0_h1_0[i,j,:]])
mean1[i,j] = np.mean(hs_sum[i,j,0,h0_h1_1[i,j,:]])
fig, axes = plt.subplots(1,2, sharey = True)
im1 = axes[0].imshow(mean0, interpolation = 'none', origin = 'lower')
im2 = axes[1].imshow(mean1, interpolation = 'none', origin = 'lower')
plt.colorbar(im1, ax = axes[0])
plt.colorbar(im2)
# Thus assumption 2 is easily satisfied.
grad_h0h10 = np.gradient(mean0)
grad_h0h11 = np.gradient(mean1)
# +
fig, axes = plt.subplots(2,2)
a1 = axes[0,0].imshow(grad_h0h10[1], interpolation = 'none', origin = 'lower')
a2 = axes[0,1].imshow(grad_h0h11[1], interpolation = 'none', origin = 'lower')
#a3 = axes[1,0].imshow(grad_h0h10[0], interpolation = 'none', origin = 'lower')
#a4 = axes[1,1].imshow(grad_h0h11[0], interpolation = 'none', origin = 'lower')
#a3 = axes[1,0].imshow((grad_h0h11[1]-grad_h0h10[1])/grad_h0h11[1], interpolation = 'none', origin = 'lower')
#a4 = axes[1,1].imshow((grad_h0h11[1]-grad_h0h10[1]), interpolation = 'none', origin = 'lower')
a3 = axes[1,0].imshow((grad_h0h11[0]-grad_h0h10[0])/grad_h0h11[0], interpolation = 'none', origin = 'lower')
a4 = axes[1,1].imshow((grad_h0h11[0]-grad_h0h10[0]), interpolation = 'none', origin = 'lower')
#plt.imshow(grad_h0h10[1], interpolation = 'none', origin = 'lower')
plt.colorbar(a1, ax = axes[0,0])
plt.colorbar(a2, ax = axes[0,1])
plt.colorbar(a3, ax = axes[1,0])
plt.colorbar(a4, ax = axes[1,1])
# -
fig, ax = plt.subplots(1,1, figsize = (4,4))
#a4 = ax[0].imshow((grad_h0h11[0]-grad_h0h10[0]), interpolation = 'none', origin = 'lower')
#ax[0].set_xlabel('W_0')
#ax[0].set_ylabel('W_1')
#plt.colorbar(a4, ax = ax[0])
ax.plot(grad_h0h11[0].reshape(-1,1),grad_h0h10[0].reshape(-1,1), '.')
ax.plot([-0.0001, 0.0002],[-0.0001, 0.0002])
ax.set_xlabel('H_0')
sns.despine(trim = True)
ax.set_ylabel('H_1')
plt.savefig('./S1_assumption_2.eps')
fig, axes = plt.subplots(1,1, figsize = (4,4))
# NOTE(review): `absdiffgrad` and `absgrad1` are never defined anywhere in
# this notebook — this cell raises NameError as written. They presumably
# should be computed from grad_h0h10 / grad_h0h11 above; define them before
# running this cell.
axes.hist((absdiffgrad/absgrad1).reshape(-1,1))
plt.xlabel('|grad_0 - grad_1|/mean')
plt.ylabel('density')
#plt.savefig('./S1_assumption_2.eps')
# ## Assumption 3
#
# Test that nodes $H_1$ satisfies backdoor criterion with respect to $H_0 \to R$.
#
# Not sure what easiest way to do this is, yet.
#
# Take uncorrelated data as representing the interventional distribution. Compare
#
# $$
# E(R|H_i = j)
# $$
#
# in the $c=0$ case to
#
# $$
# EE(R|H_i = j, H_k)
# $$
# in the correlated case. These should be equal for all values of $H_i$.
# +
## Load c = 0 data and compute matrix
M = 1000
R_uncorr = np.zeros((N, N, 2, 2, M))
ER_uncorr = np.zeros((N, N, 2, 2))
ER_corr = np.zeros((N, N, 2, 2))
# -
# ### Load uncorrelated data
# +
c = 0.01
nsims = 1
N = 19
#fn_in = './sweeps/param_w_N_%d_nsims_%d_c_%f_deltaT_simulations.npz'%(N, nsims, c)
fn_in = './sweeps/param_w_N_%d_nsims_%d_c_%f_deltaT_counterfactual_simulations.npz'%(N, nsims, c)
sims = np.load(fn_in)
hs = sims['hs']
vs = sims['vs']
us = sims['us']
wvals = sims['wvals']
# +
DeltaT = 50
tau_s = 0.2
p = 0.2
wmax = 20
wmin = 2
t_filter = np.linspace(0, 1, 2000)
exp_filter = np.exp(-t_filter/tau_s)
exp_filter = exp_filter/np.sum(exp_filter)
ds = exp_filter[0]
B1 = 1
B2 = 2
x = .01
y = 0.1
z = 0
cost2_fun = lambda s1, s2: (B1*s1-x)**2 + (z+B2*s2 - B2*(B1*s1-y)**2)**2
cost2_w1 = np.zeros((N,N,hs.shape[4]))
hs2_w1 = np.zeros((N,N,2,hs.shape[4]))
for i in range(N):
print("W[0] = %d"%i)
for j in range(N):
for k in range(nsims):
s1 = np.convolve(hs[i,j,k,0,:], exp_filter)[0:hs.shape[4]]
s2 = np.convolve(hs[i,j,k,1,:], exp_filter)[0:hs.shape[4]]
cost2_w1[i,j,:] = cost2_fun(s1, s2)
hs2_w1[i,j,:,:] = hs[i,j,k,:,:]
# -
#Take last of cost2 in each bin
cost_r = cost2_w1.reshape((N, N, -1, DeltaT))
cost = np.squeeze(cost_r[:,:,:,-1])
cost.shape
# +
hs_r = hs2_w1.reshape((N, N, 2, -1, DeltaT))
hs_bin = np.max(hs_r,4)
hs_bin.shape
h0_0 = hs_bin[:,:,0,:] == 0
h0_1 = hs_bin[:,:,0,:] == 1
h1_0 = hs_bin[:,:,1,:] == 0
h1_1 = hs_bin[:,:,1,:] == 1
for i in range(N):
for j in range(N):
#ER_uncorr = np.zeros((N, N, 2, 2, M))
R_uncorr[i,j,0,0,:] = np.random.choice(cost[i,j,h0_0[i,j,:]], M)
R_uncorr[i,j,0,1,:] = np.random.choice(cost[i,j,h0_1[i,j,:]], M)
R_uncorr[i,j,1,0,:] = np.random.choice(cost[i,j,h1_0[i,j,:]], M)
R_uncorr[i,j,1,1,:] = np.random.choice(cost[i,j,h1_1[i,j,:]], M)
ER_uncorr[i,j,0,0] = np.mean(cost[i,j,h0_0[i,j,:]])
ER_uncorr[i,j,0,1] = np.mean(cost[i,j,h0_1[i,j,:]])
ER_uncorr[i,j,1,0] = np.mean(cost[i,j,h1_0[i,j,:]])
ER_uncorr[i,j,1,1] = np.mean(cost[i,j,h1_1[i,j,:]])
# -
# ### Load correlated data
# +
c = 0.5
nsims = 1
N = 19
#fn_in = './sweeps/param_w_N_%d_nsims_%d_c_%f_deltaT_simulations.npz'%(N, nsims, c)
fn_in = './sweeps/param_w_N_%d_nsims_%d_c_%f_deltaT_counterfactual_simulations.npz'%(N, nsims, c)
sims = np.load(fn_in)
hs = sims['hs']
vs = sims['vs']
us = sims['us']
wvals = sims['wvals']
# +
DeltaT = 50
tau_s = 0.2
p = 0.2
wmax = 20
wmin = 2
t_filter = np.linspace(0, 1, 2000)
exp_filter = np.exp(-t_filter/tau_s)
exp_filter = exp_filter/np.sum(exp_filter)
ds = exp_filter[0]
B1 = 1
B2 = 2
x = .01
y = 0.1
z = 0
cost2_fun = lambda s1, s2: (B1*s1-x)**2 + (z+B2*s2 - B2*(B1*s1-y)**2)**2
cost2_w1 = np.zeros((N,N,hs.shape[4]))
hs2_w1 = np.zeros((N,N,2,hs.shape[4]))
for i in range(N):
print("W[0] = %d"%i)
for j in range(N):
for k in range(nsims):
s1 = np.convolve(hs[i,j,k,0,:], exp_filter)[0:hs.shape[4]]
s2 = np.convolve(hs[i,j,k,1,:], exp_filter)[0:hs.shape[4]]
cost2_w1[i,j,:] = cost2_fun(s1, s2)
hs2_w1[i,j,:,:] = hs[i,j,k,:,:]
# -
#Take last of cost2 in each bin
cost_r = cost2_w1.reshape((N, N, -1, DeltaT))
cost = np.squeeze(cost_r[:,:,:,-1])
cost.shape
# +
hs_r = hs2_w1.reshape((N, N, 2, -1, DeltaT))
hs_bin = np.max(hs_r,4)
hs_bin.shape
#Find bins for each firing pattern (0,0), (0,1), (1,0), (1,1)
h0_0_h1_0 = np.multiply(hs_bin[:,:,0,:] == 0, hs_bin[:,:,1,:] == 0)
h0_0_h1_1 = np.multiply(hs_bin[:,:,0,:] == 0, hs_bin[:,:,1,:] == 1)
h0_1_h1_0 = np.multiply(hs_bin[:,:,0,:] == 1, hs_bin[:,:,1,:] == 0)
h0_1_h1_1 = np.multiply(hs_bin[:,:,0,:] == 1, hs_bin[:,:,1,:] == 1)
# Estimate E(R | H_i = j) in the correlated case via backdoor adjustment:
# marginalize the conditional means over the *other* unit's firing
# probability, i.e. EE(R | H_i = j, H_k).
for i in range(N):
    for j in range(N):
        # Marginal firing probabilities of each unit over all bins
        ph0_0 = (np.sum(h0_0_h1_0[i,j,:]) + np.sum(h0_0_h1_1[i,j,:]))/float(hs_bin.shape[3])
        ph0_1 = (np.sum(h0_1_h1_0[i,j,:]) + np.sum(h0_1_h1_1[i,j,:]))/float(hs_bin.shape[3])
        ph1_0 = (np.sum(h0_0_h1_0[i,j,:]) + np.sum(h0_1_h1_0[i,j,:]))/float(hs_bin.shape[3])
        ph1_1 = (np.sum(h0_0_h1_1[i,j,:]) + np.sum(h0_1_h1_1[i,j,:]))/float(hs_bin.shape[3])
        # h0 = 0: average over h1 states, weighted by P(H1)
        ER_corr[i,j,0,0] = ph1_0*np.mean(cost[i,j,h0_0_h1_0[i,j,:]]) + ph1_1*np.mean(cost[i,j,h0_0_h1_1[i,j,:]])
        # h0 = 1
        ER_corr[i,j,0,1] = ph1_0*np.mean(cost[i,j,h0_1_h1_0[i,j,:]]) + ph1_1*np.mean(cost[i,j,h0_1_h1_1[i,j,:]])
        # h1 = 0: average over h0 states, weighted by P(H0)
        ER_corr[i,j,1,0] = ph0_0*np.mean(cost[i,j,h0_0_h1_0[i,j,:]]) + ph0_1*np.mean(cost[i,j,h0_1_h1_0[i,j,:]])
        # h1 = 1 — BUG FIX: the second weight was ph1_1; to mirror the
        # h1 = 0 case above it must be the h0 marginal, ph0_1.
        ER_corr[i,j,1,1] = ph0_0*np.mean(cost[i,j,h0_0_h1_1[i,j,:]]) + ph0_1*np.mean(cost[i,j,h0_1_h1_1[i,j,:]])
# -
# ### Compute expected reward conditional distributions
# +
# Test assumption 3: compare samples of R drawn in the (nearly) uncorrelated
# case against the backdoor-adjusted expectation from the correlated case.
pvals = np.zeros((N,N,2,2))
alpha = 0.001
# One-sample t-test of R_uncorr against the correlated-case expectation.
# (Note: scipy's ttest_1samp is two-sided, despite the original comment.)
for i in range(N):
    for j in range(N):
        t,p00 = ttest_1samp(R_uncorr[i,j,0,0,:], ER_corr[i,j,0,0])
        t,p01 = ttest_1samp(R_uncorr[i,j,0,1,:], ER_corr[i,j,0,1])
        t,p10 = ttest_1samp(R_uncorr[i,j,1,0,:], ER_corr[i,j,1,0])
        t,p11 = ttest_1samp(R_uncorr[i,j,1,1,:], ER_corr[i,j,1,1])
        pvals[i,j,0,0] = p00
        pvals[i,j,0,1] = p01
        pvals[i,j,1,0] = p10
        pvals[i,j,1,1] = p11
# Bonferroni-corrected significance threshold over all N*N*2*2 comparisons
sig = pvals < (alpha/N/N/2/2)
# Fraction of comparisons that reject equality
print(np.sum(sig)/float(N)/N/2/2)
# +
#Then do statistical tests on the similarity of these distrbutions
fig, axes = plt.subplots(4,3,figsize = (12,12))
a00 = axes[0,0].imshow(ER_uncorr[:,:,0,0], origin = 'lower', interpolation = 'nearest')
a01 = axes[0,1].imshow(ER_corr[:,:,0,0], origin = 'lower', interpolation = 'nearest')
a02 = axes[0,2].imshow(sig[:,:,0,0], origin = 'lower', interpolation = 'nearest')
plt.colorbar(a00, ax = axes[0,0])
plt.colorbar(a01, ax = axes[0,1])
a10 = axes[1,0].imshow(ER_uncorr[:,:,0,1], origin = 'lower', interpolation = 'nearest')
a11 = axes[1,1].imshow(ER_corr[:,:,0,1], origin = 'lower', interpolation = 'nearest')
a12 = axes[1,2].imshow(sig[:,:,0,1], origin = 'lower', interpolation = 'nearest')
plt.colorbar(a10, ax = axes[1,0])
plt.colorbar(a11, ax = axes[1,1])
a20 = axes[2,0].imshow(ER_uncorr[:,:,1,0], origin = 'lower', interpolation = 'nearest')
a21 = axes[2,1].imshow(ER_corr[:,:,1,0], origin = 'lower', interpolation = 'nearest')
a22 = axes[2,2].imshow(sig[:,:,1,0], origin = 'lower', interpolation = 'nearest')
plt.colorbar(a20, ax = axes[2,0])
plt.colorbar(a21, ax = axes[2,1])
a30 = axes[3,0].imshow(ER_uncorr[:,:,1,1], origin = 'lower', interpolation = 'nearest')
a31 = axes[3,1].imshow(ER_corr[:,:,1,1], origin = 'lower', interpolation = 'nearest')
a32 = axes[3,2].imshow(sig[:,:,1,1], origin = 'lower', interpolation = 'nearest')
plt.colorbar(a30, ax = axes[3,0])
plt.colorbar(a31, ax = axes[3,1])
# +
fig, axes = plt.subplots(2,3,figsize = (12,6))
a00 = axes[0,0].imshow(ER_uncorr[:,:,0,0], origin = 'lower', interpolation = 'nearest')
a01 = axes[0,1].imshow(ER_corr[:,:,0,0], origin = 'lower', interpolation = 'nearest')
a02 = axes[0,2].imshow(sig[:,:,0,0], origin = 'lower', interpolation = 'nearest')
plt.colorbar(a00, ax = axes[0,0])
plt.colorbar(a01, ax = axes[0,1])
a10 = axes[1,0].imshow(ER_uncorr[:,:,0,1], origin = 'lower', interpolation = 'nearest')
a11 = axes[1,1].imshow(ER_corr[:,:,0,1], origin = 'lower', interpolation = 'nearest')
a12 = axes[1,2].imshow(sig[:,:,0,1], origin = 'lower', interpolation = 'nearest')
plt.colorbar(a10, ax = axes[1,0])
plt.colorbar(a11, ax = axes[1,1])
plt.savefig('./S1_assumption_3.eps')
# -
|
test_methods_assumptions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Creating the Finite-Difference Table for Centered First and Second Derivatives, from 2nd through 10th-Order Accuracy
#
# ## *Courtesy <NAME>*
# +
print("Installing astropy, needed for creating the output table. Please wait a few seconds...")
# !pip install -U pip astropy > /dev/null
print("astropy installed.")
# Step 0: Import needed modules
import numpy as np
import finite_difference as fin
from astropy.table import Table
# Step 1: Set the maximum finite-difference accuracy order computed in the table
max_fdorder = 10
# Step 2: Set up table parameters
# One column for deriv order, one for deriv accuracy, and max_fdorder+1
# columns for the stencil points -(max_fdorder/2) .. +(max_fdorder/2)
numcols = 2 + max_fdorder + 1
# max_fdorder/2 accuracy orders per derivative order, times 2 derivative
# orders (first & second derivative) — 10 rows for max_fdorder = 10.
# (The original comment said "8 rows", which does not match the formula.)
numrows = int(max_fdorder/2 * 2)
# Center column index of table will be at 2 + max_fdorder/2 (zero-offset indexing)
column_corresponding_to_zero_fd_point = 2 + int(max_fdorder/2)
# The table is initialized as a matrix of zeroes in numpy...
numpy_matrix = np.zeros((numrows, numcols), dtype=object)
# Then we replace all elements with the empty string to match the Wikipedia article.
for row in range(numrows):
    for col in range(numcols):
        numpy_matrix[row,col] = ""
# Step 3: Construct the first-order derivative finite difference coefficients
rowcount = 0
for fdorder in range(2, max_fdorder+1, 2): # loop runs from 2 to max_fdorder inclusive, skipping odd orders.
    numpy_matrix[rowcount, 0] = "1st"
    numpy_matrix[rowcount, 1] = fdorder
    fdcoeffs, fdstencl = fin.compute_fdcoeffs_fdstencl("D0", fdorder)
    # range(fdorder) here vs range(fdorder+1) in Step 4 — presumably the
    # centered first derivative's zero center coefficient is omitted by
    # compute_fdcoeffs_fdstencl; confirm against its implementation.
    for i in range(fdorder):
        numpy_matrix[rowcount, column_corresponding_to_zero_fd_point + fdstencl[i][0]] = fdcoeffs[i]
    rowcount += 1
# Step 4: Construct the second-order derivative finite difference coefficients
for fdorder in range(2, max_fdorder+1, 2): # loop runs from 2 to max_fdorder inclusive, skipping odd orders.
    numpy_matrix[rowcount, 0] = "2nd"
    numpy_matrix[rowcount, 1] = fdorder
    fdcoeffs, fdstencl = fin.compute_fdcoeffs_fdstencl("DD00", fdorder)
    for i in range(fdorder+1):
        numpy_matrix[rowcount, column_corresponding_to_zero_fd_point + fdstencl[i][0]] = fdcoeffs[i]
    rowcount += 1
# Step 5: Construct an astropy table from the numpy matrix with the following header info, and then print it:
colnames = ['Derivative','Accuracy']
for i in range(-int(max_fdorder/2),int(max_fdorder/2)+1):
    colnames.append(str(i))
table = Table(numpy_matrix, names=colnames)
table.pprint(max_width=-1)
|
Tutorial-Finite_Difference_Derivatives-FDtable_soln.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="boC50Y4UGuMi" colab_type="text"
# # IEEE Coders Week
# ### Final Project : Pneumonia Lungs Detection
# #### By : <NAME> (<EMAIL>)
# ---
# **Disclaimer**
#
# All of the work below is my own, based on the resources that were provided. Please contact me if you have any suggestions.
#
# + [markdown] id="AlyVkf3zN5nx" colab_type="text"
# ### Import Module
# + id="5lFwH_uzHfcT" colab_type="code" colab={}
import tensorflow as tf
import cv2
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from mlxtend.plotting import plot_confusion_matrix
from sklearn.metrics import confusion_matrix
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense, Conv2D, Flatten, Dropout, MaxPooling2D, ZeroPadding2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from keras import backend as K
# + [markdown] id="GhFdUqRRN8bR" colab_type="text"
# ### Download Dataset
# + id="IWBxL5qdHl-A" colab_type="code" colab={}
# SECURITY NOTE(review): hard-coded Kaggle credentials are committed here —
# this API key is publicly exposed and should be revoked, then supplied at
# runtime via kaggle.json or an environment variable instead of the source.
os.environ['KAGGLE_USERNAME'] = 'fahmijabbar'
os.environ['KAGGLE_KEY'] = '48f31e76b8b36ce3270c80c36306c84c'
# + id="DYQeTIhtncW4" colab_type="code" outputId="cc81cae3-824e-45b6-91f0-db594b19832c" colab={"base_uri": "https://localhost:8080/", "height": 68}
# !kaggle datasets download -d paultimothymooney/chest-xray-pneumonia
# + [markdown] id="SpB-B89l3DBD" colab_type="text"
# ### Extract Dataset
# + id="B5GC3mEwncyc" colab_type="code" colab={}
import zipfile as extract
zip_file = '/content/chest-xray-pneumonia.zip'
do_extract = extract.ZipFile(zip_file, 'r')
do_extract.extractall('/content/')
do_extract.close()
# + [markdown] id="FEoSbWqcOTRL" colab_type="text"
# Deleting unused folder
# + id="K5LwQIk2Bmry" colab_type="code" colab={}
# !rm -rf /content/chest_xray/__MACOSX/
# !rm -rf /content/chest_xray/chest_xray/
# + [markdown] id="Ar_xJKbE3FpQ" colab_type="text"
# Switching validation & test set
# + id="UWh6YYTp42bS" colab_type="code" colab={}
# !mv /content/chest_xray/val /content/chest_xray/vals
# !mv /content/chest_xray/test /content/chest_xray/val
# !mv /content/chest_xray/vals /content/chest_xray/test
# + [markdown] id="JWDobMkM3KPP" colab_type="text"
# Fixing imbalance dataset with deleting files
# + id="tl19s7bxyZqH" colab_type="code" colab={}
# !find /content/chest_xray/train/PNEUMONIA -type f -print0 | sort -zR | tail -zn +1342| xargs -0 rm
# !find /content/chest_xray/val/PNEUMONIA -type f -print0 | sort -zR | tail -zn +235 | xargs -0 rm
# + [markdown] id="Erntrr1v3TpV" colab_type="text"
# ### Dataset Information
# + id="0Eog2SQY5oTA" colab_type="code" outputId="0b651b0b-c727-46b1-e78d-39d0c811dff9" colab={"base_uri": "https://localhost:8080/", "height": 420}
train_n = len(os.listdir('/content/chest_xray/train/NORMAL'))
train_p = len(os.listdir('/content/chest_xray/train/PNEUMONIA'))
val_n = len(os.listdir('/content/chest_xray/val/NORMAL'))
val_p = len(os.listdir('/content/chest_xray/val/PNEUMONIA'))
test_n = len(os.listdir('/content/chest_xray/test/NORMAL'))
test_p = len(os.listdir('/content/chest_xray/test/PNEUMONIA'))
print("Dataset Information")
print("===================")
index = np.arange(2)
label = ['Normal', 'Pneumonia']
train = np.array([train_n, train_p])
val = np.array([val_n, val_p])
test = np.array([test_n, test_p])
fig, ax = plt.subplots(figsize=(5,5))
rects1 = ax.bar(index - 0.1, train, 0.1, color='r', label='Training Set', align="center")
rects2 = ax.bar(index, val, 0.1, color='g', label='Validation Set', align="center")
rects3 = ax.bar(index + 0.1, test, 0.1, color='b', label='Test Set', align="center")
def autolabel(rects, xpos='center'):
ha = {'center': 'center', 'right': 'left', 'left': 'right'}
offset = {'center': 0, 'right': 1, 'left': -1}
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(offset[xpos]*3, 3),
textcoords="offset points",
ha=ha[xpos], va='bottom')
autolabel(rects1, "center")
autolabel(rects2, "center")
autolabel(rects3, "center")
plt.xlabel('Data')
plt.ylabel('Total')
plt.xticks(index, ("Normal", "Pneumonia"))
plt.legend()
plt.tight_layout()
plt.show()
print("Total Dataset = ", train[0] + train[1] + val[0] + val[1] + test[0] + test[1])
# + [markdown] id="rw88JkWLOYQ0" colab_type="text"
# ## Data Preparation
# + id="0ZVXIpTTuEKW" colab_type="code" colab={}
train_dir = "/content/chest_xray/train/"
val_dir = "/content/chest_xray/val/"
if K.image_data_format() == 'channels_first':
input_shape = (3, 150, 150)
else:
input_shape = (150, 150, 3)
# + id="iwKtKBy2dYv7" colab_type="code" colab={}
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
validation_datagen = ImageDataGenerator(rescale=1. / 255)
# + id="6DoIGGv7uRL6" colab_type="code" outputId="768f504c-5700-475d-8829-cb5509da026e" colab={"base_uri": "https://localhost:8080/", "height": 51}
train_generator = train_datagen.flow_from_directory(
directory = train_dir,
target_size=(150, 150),
class_mode='binary',
batch_size=149,
shuffle=True)
validation_generator = validation_datagen.flow_from_directory(
directory = val_dir,
target_size=(150, 150),
class_mode='binary',
batch_size=26,
shuffle=True)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# + [markdown] id="iFimjVQ5OeU7" colab_type="text"
# ## Preview Data
# + id="1GK-pRMmKEP0" colab_type="code" outputId="25714357-6653-4632-f49b-85e90639e7b9" colab={"base_uri": "https://localhost:8080/", "height": 268}
sample_training_image, _ = next(train_generator)
def plotImages(images_arr):
fig, axes = plt.subplots(1, 5, figsize=(20,20))
axes = axes.flatten()
for img, ax in zip(images_arr, axes):
ax.imshow(img)
ax.axis("off")
plt.tight_layout()
plt.show()
plotImages(sample_training_image[:5])
# + [markdown] id="S-a3iub03jVh" colab_type="text"
# ## Model Training
# + id="RHhHIuZoEG9W" colab_type="code" outputId="045bb888-2b9a-4797-ba27-84e89639921d" colab={"base_uri": "https://localhost:8080/", "height": 663}
model.summary()
# BUG FIX: the network's final layer applies a sigmoid activation, so the
# loss receives probabilities, not raw logits. from_logits=True on top of a
# sigmoid output silently miscomputes the loss, so it must be False here.
model.compile(
    loss = tf.keras.losses.BinaryCrossentropy(from_logits=False),
    optimizer= 'adam',
    metrics=['accuracy'])
# + id="Vk_Kd3VZuguX" colab_type="code" outputId="15d1000d-abe5-4109-fe31-2142bb3cff8d" colab={"base_uri": "https://localhost:8080/", "height": 357}
# Step counts must be integers; ceil so the final partial batch is included.
history = model.fit(
    train_generator,
    epochs=10,
    steps_per_epoch= int(np.ceil(train_generator.samples / train_generator.batch_size)),
    validation_data = validation_generator,
    verbose = 1,
    validation_steps= int(np.ceil(validation_generator.samples / validation_generator.batch_size)))
# + [markdown] id="OlOWgp4ZCoCH" colab_type="text"
# ## Result & Graph
# + id="3fAPQyP96TvY" colab_type="code" outputId="febbaf4b-58ee-461d-b218-bede521da5bc" colab={"base_uri": "https://localhost:8080/", "height": 545}
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# + [markdown] id="QrYuxfWHO1Gu" colab_type="text"
# A much better model than before
# + id="OMjItz8flw3W" colab_type="code" outputId="c491a433-a504-4ac4-db95-b89f4065819b" colab={"base_uri": "https://localhost:8080/", "height": 238}
path = os.listdir('/content/chest_xray/test/PNEUMONIA/')
cn = 0
cp = 0
for gambar in path:
url = '/content/chest_xray/test/PNEUMONIA/' + gambar
img = image.load_img(url, target_size=(150, 150))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
prediction = model.predict(images, batch_size=10)
if (prediction[0][0] <= 0.5):
print("Normal Lungs (Prediction confidence ", (1 - prediction[0][0])*100 ,"%)")
cn += 1
else:
print("Pneumonia Lungs (Prediction confidence ", prediction[0][0]*100,"%)")
cp += 1
print("\nDetecting Pneumonia")
print("=====================")
print("Total Normal (Wrong) = ", cn)
print("Total Pneumonia (Correct) = ", cp)
# + id="zVeOYe6IxqsG" colab_type="code" outputId="5b026288-9164-40a1-d789-0ac2f20aab10" colab={"base_uri": "https://localhost:8080/", "height": 238}
path = os.listdir('/content/chest_xray/test/NORMAL/')
cn = 0
cp = 0
for gambar in path:
url = '/content/chest_xray/test/NORMAL/' + gambar
img = image.load_img(url, target_size=(150, 150))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
prediction = model.predict(images, batch_size=10)
if (prediction[0][0] <= 0.5):
print("Normal Lungs, Confidence ", (1 - prediction[0][0])*100 ,"%)")
cn += 1
else:
print("Pneumonia Lungs, Confidence ", prediction[0][0]*100,"%)")
cp += 1
print("\nDetecting Normal")
print("===================")
print("Total Normal (Correct) = ", cn)
print("Total Pneumonia (Wrong) = ", cp)
# + id="CFOREAn5h_pN" colab_type="code" colab={}
test_datagen = ImageDataGenerator(rescale=1. / 255)
# + id="cCxU7kG9iAtb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d0aba65b-6c58-42d3-ce7e-3dafb64280f3"
test_generator = test_datagen.flow_from_directory(
directory='/content/chest_xray/test/',
target_size=(150, 150),
batch_size=1,
class_mode='binary')
# + id="xdTXoaV3h4wJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="c0525aee-fae4-4aa9-b729-81a27e98ec14"
scores = model.evaluate_generator(test_generator)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# + [markdown] id="IQwki1XPPXd3" colab_type="text"
# Not bad at all, right?
#
# But I'm working on an even better model
# + [markdown] id="odRElnAr3_eh" colab_type="text"
# ## Saving Model
# + id="i88vegnC3vPm" colab_type="code" colab={}
model.save('ieee-cw-fahmi.h5')
# + id="IT60cFG7ULEC" colab_type="code" outputId="cd67971d-a432-436d-d41b-3979a1a9ee9a" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !pip install tensorflowjs
# + id="7hNCq67cUT7T" colab_type="code" colab={}
# !tensorflowjs_converter --input_format=keras /content/ieee-cw-fahmi.h5 ./js
# + id="yKoq_GCy5tcZ" colab_type="code" outputId="42ccd5d7-4c39-4955-dd1a-3e4b65376ccc" colab={"base_uri": "https://localhost:8080/", "height": 34}
from google.colab import files
listfile = os.listdir('/content/js')
cc = 0
while cc < len(listfile):
paths = '/content/js/' + listfile[cc]
files.download(paths)
cc += 1
else:
print("Task Completed")
# + id="_HARkoGLueSA" colab_type="code" outputId="d39c9449-d405-4574-e99e-d7044cd24872" colab={"base_uri": "https://localhost:8080/", "height": 34}
from google.colab import files
listfile = os.listdir('/content/chest_xray/test/PNEUMONIA/')
cc = 0
while cc < len(listfile):
paths = '/content/chest_xray/test/PNEUMONIA/' + listfile[cc]
files.download(paths)
cc += 1
else:
print("Task Completed")
# + id="cQOFOSNj9kcf" colab_type="code" colab={}
# This cell is for my checkpoint
# !mv /content/chest-xray-pneumonia.zip /tmp/chest-xray-pneumonia.zip
# !rm -rf /content/*
# !mv /tmp/chest-xray-pneumonia.zip /content/chest-xray-pneumonia.zip
# + [markdown] id="FjvPg9F2OrPw" colab_type="text"
# After converting keras model to json, i will deploy the model at https://fahmij8.github.io/CodersWeek-ML/
|
Project/Coders_week_Final_Project2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Version history
# * V1 - Inference with basic model trained 1000 iterations
# * V2 - Same training procedure but for more iterations. Validation score .267, LB score .286
# * V3 - Same model weights as previously, but score thresholds set for each class individually. LB score .293 *
# * V4 - Another small inference time improvement - throw away predictions with area smaller than a threshold (set per cell type). LB score .294
#
# \* *Initally that version had score .287 but that was due to a bug in the code*
# ## Inference and submission
# After [part one](https://www.kaggle.com/slawekbiel/positive-score-with-detectron-1-3-input-data/) and [part two](https://www.kaggle.com/slawekbiel/positive-score-with-detectron-2-3-training) we have a trained model. I'm attaching it to this notebook through a dataset. Now all that's left is to run all the test files through it.
#
# There are two minor details we need to handle:
# - The submission notebooks don't have access to the internet, in order to install detectron2 I needed to download dependecies with `pip download`, put them into a dataset and attach it to the notebook: https://www.kaggle.com/slawekbiel/detectron-05
# - The masks we submit can't overlap, see [the discussion](https://www.kaggle.com/c/sartorius-cell-instance-segmentation/discussion/279790#1550666). So I'm manually clipping the output returned from the model) I'm processing the masks ordereded by score, so in the case of conflict the more confident one remaines whole and the other one gets clipped.
# + _kg_hide-input=true _kg_hide-output=true
# !pip install ../input/detectron-05/whls/pycocotools-2.0.2/dist/pycocotools-2.0.2.tar --no-index --find-links ../input/detectron-05/whls
# !pip install ../input/detectron-05/whls/fvcore-0.1.5.post20211019/fvcore-0.1.5.post20211019 --no-index --find-links ../input/detectron-05/whls
# !pip install ../input/detectron-05/whls/antlr4-python3-runtime-4.8/antlr4-python3-runtime-4.8 --no-index --find-links ../input/detectron-05/whls
# !pip install ../input/detectron-05/whls/detectron2-0.5/detectron2 --no-index --find-links ../input/detectron-05/whls
# -
import detectron2
import torch
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from PIL import Image
import cv2
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from fastcore.all import *
dataDir=Path('/kaggle/input/sartorius-cell-instance-segmentation')
# +
# From https://www.kaggle.com/stainsby/fast-tested-rle
def rle_decode(mask_rle, shape=(520, 704)):
    """Decode a 1-based run-length-encoded string into a binary uint8 mask.

    mask_rle is a space-separated "start length start length ..." string;
    the returned array has the given shape with 1s inside the runs.
    """
    tokens = np.asarray(mask_rle.split(), dtype=int)
    run_starts = tokens[0::2] - 1          # RLE positions are 1-based
    run_ends = run_starts + tokens[1::2]
    flat = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for begin, end in zip(run_starts, run_ends):
        flat[begin:end] = 1
    return flat.reshape(shape)  # Needed to align to RLE direction
def rle_encode(img):
    """Run-length encode a binary mask into the 1-based competition format."""
    padded = np.concatenate(([0], img.flatten(), [0]))  # sentinels close open runs
    change_points = np.flatnonzero(padded[1:] != padded[:-1]) + 1
    change_points[1::2] -= change_points[::2]           # end positions -> run lengths
    return ' '.join(map(str, change_points))
def get_masks(fn, predictor):
    """Run the detector on one image and return non-overlapping RLE masks.

    The most frequent predicted class in the image selects which score
    threshold and minimum-area filter apply (module-level THRESHOLDS /
    MIN_PIXELS, indexed by cell type). Masks are processed in the order
    the model returns them (ordered by score, per the notebook text above);
    pixels already claimed by an earlier mask are clipped from later ones
    so the submitted masks never overlap.
    """
    im = cv2.imread(str(fn))
    pred = predictor(im)
    # Majority class over all detected instances in this image.
    pred_class = torch.mode(pred['instances'].pred_classes)[0]
    take = pred['instances'].scores >= THRESHOLDS[pred_class]
    pred_masks = pred['instances'].pred_masks[take]
    pred_masks = pred_masks.cpu().numpy()
    res = []
    used = np.zeros(im.shape[:2], dtype=int)  # pixels already assigned
    for mask in pred_masks:
        mask = mask * (1-used)  # clip overlap with earlier (kept) masks
        if mask.sum() >= MIN_PIXELS[pred_class]: # skip predictions with small area
            used += mask
            res.append(rle_encode(mask))
    return res
# -
# Accumulators for the submission rows (image id, encoded mask).
ids, masks=[],[]
test_names = (dataDir/'test').ls()
# ### Initiate a Predictor from our trained model
# +
# plt.imread("/kaggle/input/sartorius-cell-instance-segmentation/train/0030fd0e6378.png").shape
# -
# Start from the Mask R-CNN R50-FPN COCO config and override for this task.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.INPUT.MASK_FORMAT='bitmask'
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3  # three cell types in this competition
cfg.MODEL.WEIGHTS = os.path.join('/kaggle/input/somu-detectron2-mrcnn-train/output/model_final.pth')
cfg.TEST.DETECTIONS_PER_IMAGE = 1000  # cells are numerous; raise the default cap
predictor = DefaultPredictor(cfg)
# Per-class score thresholds and minimum mask areas (indexed by cell type).
THRESHOLDS = [.15, .35, .55]
MIN_PIXELS = [75, 150, 75]
# ### Look at the outputs on a sample test file to sanity check
# I'm encoding here in the competition format and decoding back to bit mask just to make sure everything is fine
# +
# Sanity check on one test image: encode predictions to RLE, decode back,
# and display the reconstructed masks next to the raw image.
encoded_masks = get_masks(test_names[0], predictor)
_, axs = plt.subplots(1,2, figsize=(40,15))
axs[1].imshow(cv2.imread(str(test_names[0])))
mask_decode = np.zeros((520,704)).astype(np.int64)
for enc in encoded_masks:
    dec = rle_decode(enc)
    # masked_where hides background (0) pixels when accumulating/plotting
    mask_decode += np.ma.masked_where(dec==0, dec)
    axs[0].imshow(np.ma.masked_where(dec==0, dec))
# -
plt.imshow(mask_decode)
# +
# def rle_encode(img):
# pixels = img.flatten()
# pixels = np.concatenate([[0], pixels, [0]])
# runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
# runs[1::2] -= runs[::2]
# return ' '.join(str(x) for x in runs)
# mask = rle_encode(encoded_masks[0])
# # print(np.unique(mask))
# # plt.imshow(mask)
# type(mask)
# -
# ### Looks good, so lets generate masks for all the files and create a submission
# Generate masks for every test image and write the submission file:
# one row per (image id, RLE-encoded instance mask).
for fn in test_names:
    encoded_masks = get_masks(fn, predictor)
    for enc in encoded_masks:
        ids.append(fn.stem)
        masks.append(enc)
pd.DataFrame({'id':ids, 'predicted':masks}).to_csv('submission.csv', index=False)
pd.read_csv('submission.csv').head()
# # Local CV:
# + jupyter={"source_hidden": true}
def compute_iou(labels, y_pred):
    """Pairwise IoU between every ground-truth and predicted object.

    Objects are identified by distinct integer values in the two label
    images; label 0 (background) is dropped from the result, so the
    returned matrix is (n_true - 1) x (n_pred - 1).
    """
    n_true = len(np.unique(labels))
    n_pred = len(np.unique(y_pred))
    # Joint histogram counts the pixel overlap of every (true, pred) pair.
    intersection = np.histogram2d(
        labels.flatten(), y_pred.flatten(), bins=(n_true, n_pred)
    )[0]
    # Per-object pixel areas, broadcast into a column and a row.
    area_true = np.histogram(labels, bins=n_true)[0][:, None]
    area_pred = np.histogram(y_pred, bins=n_pred)[0][None, :]
    union = area_true + area_pred - intersection
    return (intersection / union)[1:, 1:]  # exclude background
def precision_at(threshold, iou):
    """Count TP/FP/FN object matches at one IoU threshold.

    A ground-truth object (row) with at least one prediction above the
    threshold is a true positive; an unmatched row is a false negative;
    an unmatched prediction (column) is a false positive.
    Returns (tp, fp, fn).
    """
    hits = iou > threshold
    matched_truth = hits.any(axis=1)   # each row: was this object found?
    matched_pred = hits.any(axis=0)    # each column: does this prediction match?
    tp = np.sum(matched_truth)
    fn = np.sum(~matched_truth)
    fp = np.sum(~matched_pred)
    return tp, fp, fn
def iou_map(truths, preds, verbose=0):
    """Competition metric: mean precision over IoU thresholds 0.50..0.95.

    truths/preds are parallel sequences of labelled mask images; TP/FP/FN
    are pooled over all images at each threshold before computing the
    precision tp / (tp + fp + fn).
    """
    iou_tables = [compute_iou(t, p) for t, p in zip(truths, preds)]
    if verbose:
        print("Thresh\tTP\tFP\tFN\tPrec.")
    precisions = []
    for thresh in np.arange(0.5, 1.0, 0.05):
        total_tp = total_fp = total_fn = 0
        for table in iou_tables:
            tp, fp, fn = precision_at(thresh, table)
            total_tp += tp
            total_fp += fp
            total_fn += fn
        precision = total_tp / (total_tp + total_fp + total_fn)
        precisions.append(precision)
        if verbose:
            print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(thresh, total_tp, total_fp, total_fn, precision))
    if verbose:
        print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(precisions)))
    return np.mean(precisions)
# +
import json
# Load the validation split produced by the dataset-creation notebook and
# rebuild absolute image paths from the stored relative file names.
with open("/kaggle/input/sartorius-coco-dataset-notebook/val_dataset.json") as f:
    val_data = json.load(f)
val_file_name = []
for i in range(len(val_data['images'])):
    # NOTE(review): [3:-1] + 'g' strips a '../' prefix and rewrites the
    # last character of the extension — presumably reconstructing '.png';
    # verify against the actual JSON contents.
    val_file_name.append("/kaggle/" + val_data['images'][i]['file_name'][3:-1]+'g')
# val_file_name[0].split('/')[-1]
# +
# # ! ls -GFlash --color /kaggle/input/sartorious-nb-1-data-preprocessing-visualization/masks/img
# -
def get_val_mask(val_file_name):
    """Predict masks for each validation image and merge them into one
    labelled (520, 704) array per file.

    Runs get_masks() (module-level predictor) on each path, decodes each
    RLE back to a binary mask, and accumulates them. np.ma.masked_where
    masks out background (0) pixels before the in-place addition — this
    appears intended to leave background positions untouched while
    instance pixels accumulate; confirm the masked-add semantics if the
    output values matter beyond zero/non-zero.
    """
    val_mask = []
    for i in val_file_name:
        encoded_masks = get_masks(i, predictor)
        mask_decode = np.zeros((520,704)).astype(np.int64)
        for enc in encoded_masks:
            dec = rle_decode(enc)
            mask_decode += np.ma.masked_where(dec==0, dec)
        val_mask.append(mask_decode)
    return val_mask
pred_masks = get_val_mask(val_file_name)
plt.imshow(pred_masks[30])
# +
train_csv = "../input/sartorius-cell-instance-segmentation/train.csv"
df = pd.read_csv(train_csv)
# +
def rle_decode1(mask_rle, shape, color=1):
    """Decode a 1-based RLE string into an (H, W, C) float32 mask,
    painting each run with *color* (scalar or length-C array)."""
    nums = mask_rle.split()
    run_starts = np.asarray(nums[0::2], dtype=int) - 1  # 1-based -> 0-based
    run_lens = np.asarray(nums[1::2], dtype=int)
    canvas = np.zeros((shape[0] * shape[1], shape[2]), dtype=np.float32)
    for begin, length in zip(run_starts, run_lens):
        canvas[begin:begin + length] = color
    return canvas.reshape(shape)
def build_masks(image_id,input_shape=(520,704), colors=False):
    """Combine all RLE annotations for *image_id* (looked up in the
    module-level df of train.csv) into one mask.

    With colors=True each instance gets a random RGB colour; otherwise a
    single-channel binary mask is returned. Values are clipped to [0, 1].
    """
    height, width = input_shape
    annotations = df[df["id"] == image_id]["annotation"].tolist()
    if colors:
        mask = np.zeros((height, width, 3))
        for rle in annotations:
            mask += rle_decode1(rle, shape=(height,width , 3), color=np.random.rand(3))
    else:
        mask = np.zeros((height, width, 1))
        for rle in annotations:
            mask += rle_decode1(rle, shape=(height, width, 1))
    return mask.clip(0, 1)
# -
# +
# Build ground-truth masks for every validation image from train.csv
# annotations, then score the predictions with the competition mAP metric.
gt_mask = []
for fn in val_file_name:
    mask_id = fn.split('/')[-1].split('.')[0]  # image id = filename stem
    mask_from_df = build_masks(mask_id)
    gt_mask.append(mask_from_df)
plt.imshow(gt_mask[30])
# +
# plt.imshow(gt_mask[22])
# +
all_map = []
for i in range(len(pred_masks)):
    # NOTE(review): iou_map zips its two arguments, so passing a single
    # (gt, pred) pair per call iterates over the ROWS of each mask rather
    # than over whole images — confirm this is the intended usage.
    all_map.append(iou_map(gt_mask[i],pred_masks[i],verbose=1))
print(np.mean(all_map))
# -
|
Inference/somu-detectron2-mrcnn-inference.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Metaprogramming - Application 2
# There's another pattern we can implement using metaprogramming - Singletons.
# If you read online, you'll see that singleton objects are controversial in Python.
#
# I'm not going to get into a debate on this, other than to say I do not use singleton objects, not because I have deep thoughts about it (or even shallow ones for that matter), but rather because I have never had a need for them.
# However, the question often comes up, so here it is - the metaclass way of implementing the singleton pattern.
#
# Whether you think you should use it or not, is entirely up to you!
# We have seen singleton objects - objects such as `None`, `True` or `False` for example.
# No matter where we create them in our code, they always refer to the **same** object.
# We can recover the type used to create `None` objects:
NoneType = type(None)
# And now we can create multiple instances of that type:
n1 = NoneType()
n2 = NoneType()
id(n1), id(n2)
# As you can see, any instance of `NoneType` is actually the **same** object.
# The same holds true for booleans:
b1 = bool([])
b2 = bool("")
id(b1), id(b2)
# These are all examples of singleton objects. Now matter how we create them, we always end up with a reference to the same instance.
# There is no built-in mechanism to Python for singleton objects, so we have to do it ourselves.
# The basic idea is this:
#
# When an instance of the class is being created (but **before** the instance is actually created), check if an instance has already been created, in which case return that instance, otherwise, create a new instance and store that instance reference somewhere so we can recover it the next time an instance is requested.
# We could do it entirely in the class itself, without any metaclasses, using the `__new__` method.
#
# We can start with this:
class Hundred:
    """Plain class whose instances get name/value assigned inside __new__.

    Not a singleton yet: every call produces a fresh instance.
    """
    def __new__(cls):
        instance = super().__new__(cls)
        instance.name = 'hundred'
        instance.value = 100
        return instance
h1 = Hundred()
vars(h1)
# But of course, this is not a singleton object.
h2 = Hundred()
h1 is h2
# So, let's fix this to make it a singleton:
class Hundred:
    """Singleton via __new__: the single instance is cached on the class."""
    _existing_instance = None  # a class attribute!
    def __new__(cls):
        if cls._existing_instance:
            print('instance exists already, using that one...')
        else:
            print('creating new instance...')
            instance = super().__new__(cls)
            instance.name = 'hundred'
            instance.value = 100
            cls._existing_instance = instance
        return cls._existing_instance
h1 = Hundred()
h2 = Hundred()
h1 is h2
# And there you are, we have a singleton object.
# So this works, but if you need to have multiple of these singleton objects, the code will just become repetitive.
# Metaclasses to the rescue!
# Remember what we are trying to do:
#
# If we create two instances of our class `Hundred` we expect the same instance back.
# But how do we create an instance of a class - we **call** it, so `Hundred()`.
# Which `__call__` method is that? It is not the one in the `Hundred` class, that would make **instances** of `Hundred` callable, it is the `__call__` method in the **metaclass**.
# So, we need to override the `__call__` in our metaclass.
class Singleton(type):
    """Metaclass that logs each instantiation request, then delegates
    to type.__call__ as usual (no caching at this stage)."""
    def __call__(cls, *args, **kwargs):
        print(f'Request received to create an instance of class: {cls}...')
        instance = super().__call__(*args, **kwargs)
        return instance
class Hundred(metaclass=Singleton):
    # Instantiating Hundred routes through Singleton.__call__, which at
    # this stage only logs the request — no caching yet.
    value = 100
h = Hundred()
h.value
# OK, that works, but now we need to make it into a singleton instance.
# We have to be careful here. Initially we had used the class itself (`Hundred`) to store, as a class variable, whether an instance had already been created.
#
# And here we could try to do the same thing.
#
# We could store the instance as a class variable in the class of the instance being created
#
# That's actually quite simple, since the class is received as the first argument of the `__call__` method.
class Singleton(type):
    """Metaclass caching one instance per class, stored as the class
    attribute 'existing_instance' on the class being instantiated."""
    def __call__(cls, *args, **kwargs):
        print(f'Request received to create an instance of class: {cls}...')
        if getattr(cls, 'existing_instance', None) is not None:
            print('Using existing instance...')
        else:
            print('Creating instance for the first time...')
            setattr(cls, 'existing_instance', super().__call__(*args, **kwargs))
        return getattr(cls, 'existing_instance')
class Hundred(metaclass=Singleton):
value = 100
h1 = Hundred()
h2 = Hundred()
h1 is h2, h1.value, h2.value
# So that seems to work just fine. Let's create another singleton class and see if things still work.
class Thousand(metaclass=Singleton):
value = 1000
t1 = Thousand()
t2 = Thousand()
h1 is h2, h1.value, h2.value
t1 is t2, t1.value, t2.value
h1 is t1, h2 is t2
# So far so good.
# Finally let's make sure everything works with **inheritance** too - if we inherit from a Singleton class, that subclass should also be a singleton.
class HundredFold(Hundred):
value = 100 * 100
hf1 = HundredFold()
# Whaaat? Using existing instance? But this is the first time we created it!!
# The problem is this: How are we checking if an instance has already been created?
# We did this:
# ```if getattr(cls, 'existing_instance')```
# But since `HundredFold` inherits from `Hundred`, it also inherited the class attribute `existing_instance`.
# This means we have to be a bit more careful in our metaclass, we need to see if we have an instance of the **specific** class already created - and we cannot rely on storing a class attribute in the classes themselves since that breaks the pattern when subclassing.
# So, instead, we are going to store the class, and the instance of that class, in a dictionary **in the metaclass** itself, and use that dictionary to lookup the existing instance (if any) for a specific class.
class Singleton(type):
    """Metaclass keeping a {class: instance} registry on the metaclass
    itself, so a subclass of a singleton class gets its own instance
    instead of inheriting the parent's cached one."""
    instances = {}
    def __call__(cls, *args, **kwargs):
        print(f'Request received to create an instance of class: {cls}...')
        cached = Singleton.instances.get(cls, None)
        if cached is None:
            print('Creating instance for the first time...')
            cached = super().__call__(*args, **kwargs)
            Singleton.instances[cls] = cached
        else:
            print('Using existing instance...')
        return cached
# +
class Hundred(metaclass=Singleton):
    value = 100
class Thousand(metaclass=Singleton):
    value = 1000
# Subclass of a singleton class: with the dict-based registry it gets its
# own singleton instance rather than reusing Hundred's.
class HundredFold(Hundred):
    value = 100 * 100
# -
h1 = Hundred()
h2 = Hundred()
t1 = Thousand()
t2 = Thousand()
hf1 = HundredFold()
hf2 = HundredFold()
# Each class is a singleton with respect to itself...
h1 is h2, t1 is t2, hf1 is hf2
h1.value, h2.value, t1.value, t2.value, hf1.value, hf2.value
# And just to make sure :-)
h1 is hf1
|
dd_1/Part 4/Section 14 - Metaprogramming/12 - Metaprogramming - Application 2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <b>Calcule a integral dada</b>
# $\int tln(t^2)dt$
# $\int tln(t^2)dt = ln(t^2)\frac{t^2}{2} - \int \frac{t^2}{2}\cdot\frac{2}{t}\,dt$
# $\int tln(t^2)dt = ln(t^2)\frac{t^2}{2} - \int t\,dt $
# $\int tln(t^2)dt = ln(t^2)\frac{t^2}{2} - \frac{t^2}{2} + C $
# $\int tln(t^2)dt = \frac{t^2}{2}(ln(t^2) - 1) + C $
|
Problemas 6.1/06.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.optimizers import RMSprop
from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, Conv2D, MaxPooling2D
from keras.callbacks import CSVLogger
#from livelossplot.keras import PlotLossesCallback
#import efficientnet.keras as efn
from tensorflow.keras.models import Model
from keras.models import Sequential
from keras import models
from keras import layers
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
import numpy
import tensorflow as tf
from keras.models import Model
from keras.applications import MobileNetV2, ResNet50, InceptionV3 # try to use them and see which is better
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.utils import get_file
from keras.preprocessing.image import ImageDataGenerator
import os
import pathlib
import numpy as np
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.optimizers import RMSprop
from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, Conv2D, MaxPooling2D
from keras.callbacks import CSVLogger
#from livelossplot.keras import PlotLossesCallback
#import efficientnet.keras as efn
#from keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
#from keras import backend as K
#from tensorflow.keras import backend as K
#import keras
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import backend
from sklearn.model_selection import train_test_split
import pandas as pd
#from tensorflow.python.estimator.keras import Layer
from tensorflow.keras.layers import Embedding, Dense, Input, Dropout, LSTM, Activation, Conv2D, Reshape, Average, Bidirectional
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras import backend as K
#from sklearn.metrics import confusion_matrix
from sklearn import metrics
#from tensorflow.keras.models import models
#from tensorflow. keras.utils import plot_mode
# -
import keras
keras.__version__
# +
from keras.applications import VGG16
# example of loading the inception v3 model
from keras.applications.inception_v3 import InceptionV3
conv_base = InceptionV3(weights='imagenet',#,
include_top=False,#, bese true
input_shape=(150, 150, 3))
#)
conv_base.summary()
# -
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
base_dir1 = 'D:\\MARIJA na FEIT\\Diplomska\\Diatoms'
entire_data = os.path.join(base_dir1)
# +
from tensorflow.keras.models import Model
from keras.models import Sequential
from keras import models
from keras import layers
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
import numpy
import tensorflow as tf
from keras.models import Model
from keras.applications import MobileNetV2, ResNet50, InceptionV3 # try to use them and see which is better
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.utils import get_file
from keras.preprocessing.image import ImageDataGenerator
import os
import pathlib
import numpy as np
# +
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 10
def extract_features(directory, sample_count):
    """Run images through the frozen InceptionV3 base and collect features.

    Returns (features, labels) where features has shape
    (sample_count, 3, 3, 2048) — the conv_base output for 150x150 inputs.
    NOTE(review): assumes sample_count is a multiple of batch_size; a
    partial final batch would break the slice assignment — confirm before
    reusing with other counts.
    """
    features = np.zeros(shape=(sample_count,3,3,2048))
    labels = np.zeros(shape=(sample_count))
    generator = datagen.flow_from_directory(
        directory,
        target_size=(150, 150),
        batch_size=batch_size,
        class_mode='binary')
    i = 0
    for inputs_batch, labels_batch in generator:
        features_batch = conv_base.predict(inputs_batch)
        features[i * batch_size : (i + 1) * batch_size] = features_batch
        labels[i * batch_size : (i + 1) * batch_size] = labels_batch
        i += 1
        if i * batch_size >= sample_count:
            # Note that since generators yield data indefinitely in a loop,
            # we must `break` after every image has been seen once.
            break
    return features, labels
X, Y = extract_features(entire_data, 1100)
# -
print(X.shape)
print(X.shape)
print(Y.shape)
# +
#enkodiranje na outputite
# import keras.utils as np_utils
from tensorflow.keras.utils import to_categorical
Y = to_categorical(Y)
print(Y.shape)
# -
X = np.reshape(X, (X.shape[0],3*3* 2048))#1000)) #8 * 8 * 2048))
print(X.shape)
# +
# dodadeno (added): fine-tuning setup.
# Collapse the original chain of 14 identical `if layer.name == ...` checks
# into one set-membership test. Semantics are preserved exactly: the flag
# latches to True at the FIRST matching layer and is never reset, so that
# layer and every layer after it become trainable (later names in the set
# are then redundant, matching the original behaviour).
UNFREEZE_FROM_NAMES = {
    'conv2d_180', 'conv2d_182', 'conv2d_183', 'conv2d_186', 'conv2d_187',
    'conv2d_188', 'conv2d_283', 'conv2d_284', 'conv2d_285', 'conv2d_286',
    'conv2d_287', 'conv2d_289', 'conv2d_291', 'conv2d_292',
}
conv_base.trainable = True
set_trainable = False
for layer in conv_base.layers:
    if layer.name in UNFREEZE_FROM_NAMES:
        set_trainable = True
    layer.trainable = set_trainable
# +
from keras import models
from keras import layers
from keras import optimizers
model = models.Sequential()
#model.add(layers.Flatten()) #dodadeno
model.add(layers.Dense(256, activation='relu', input_dim=3 * 3 * 2048))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(55, activation='softmax'))
#model.add(layers.Dense(55, activation='softmax'))
model.compile(optimizer = 'rmsprop',
loss = 'categorical_crossentropy',
metrics = ['acc']) #bese accuracy
# +
# Plot the training and validation loss + accuracy
def plot_training(history):
    """Plot training vs. validation accuracy curves for a Keras History.

    Expects history.history to contain 'acc' and 'val_acc' (the metric
    name used when this model was compiled).

    Bug fix: plt.savefig() was previously called AFTER plt.show(); in
    non-interactive matplotlib backends show() clears the current figure,
    so the saved 'acc_vs_epochs.png' was blank. The figure is now saved
    before it is displayed.
    """
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    epochs = range(len(acc))
    plt.plot(epochs, acc, 'r.')     # training accuracy (dots)
    plt.plot(epochs, val_acc, 'r')  # validation accuracy (line)
    plt.title('Training and validation accuracy')
    plt.savefig('acc_vs_epochs.png')  # must precede show(), which clears the figure
    plt.show()
# +
# evaluate a model using k-fold cross-validation
from sklearn.model_selection import KFold
from sklearn import metrics
def evaluate_model(model,all_features, all_labels, n_folds=10):
    """K-fold cross-validation of a compiled Keras model.

    Fits the model on each train split, plots accuracy curves, and prints
    the fold accuracy, confusion matrix, classification report and
    weighted F1. Returns (scores, histories), one entry per fold.

    NOTE(review): the same compiled model object is reused across folds
    without re-initialising its weights, so each fold continues training
    from the previous folds' weights (leaking earlier folds' test data
    into later folds' training) — confirm whether this is intended.
    """
    scores, histories = list(), list()
    # prepare cross validation
    kfold = KFold(n_folds, shuffle=True, random_state=1)
    # enumerate splits
    for train_ix, test_ix in kfold.split(all_features):
        # select rows for train and test
        trainX, trainY, testX, testY = all_features[train_ix], all_labels[train_ix],all_features[test_ix], all_labels[test_ix]
        # fit model
        print(trainX.shape)
        print(testX.shape)
        history = model.fit(trainX, trainY, epochs=100, batch_size=10, validation_data=(testX, testY), verbose=0)
        plot_training(history)
        # evaluate model
        _, acc = model.evaluate(testX, testY, verbose=0)
        print('> %.3f' % (acc * 100.0))
        predictions = model.predict(testX)
        # Metrics on argmax of one-hot labels vs. softmax outputs.
        cm = metrics.confusion_matrix(np.argmax(testY, axis=1),np.argmax(predictions, axis=1))
        print(cm)
        print(metrics.classification_report(np.argmax(testY, axis=1),np.argmax(predictions, axis=1)))
        print(metrics.f1_score(np.argmax(testY, axis=1),np.argmax(predictions, axis=1), average='weighted'))
        # stores scores
        scores.append(acc)
        histories.append(history)
    return scores, histories
# -
# +
scores, histories = evaluate_model(model, X,Y)
# -
|
TransferLearningClassification_Cross-validation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# ### Entropy weighting KMeans
library(wskm)
# Import data for training models
df <- read.csv("~/mandy/imputed_17_columns.csv")
df <- df[!(names(df) %in% c("X", "CEN_bg_pctConstructionIndustry"))]
sum(is.na(df))
names(df)
# "score_C4", # Auto Score
# "EstMarketValue_C1",
# "finscr_C4", # Credit Score
# "CEN_tr_pctOwnOccSecondMort",
# "CEN_bg_pctSeasonalHousingUnits",
# "CEN_bg_pctHHincomeLT15K",
# "CEN_tr_pctHHInvestIncome",
# "CEN_tr_pctHHSocialSecurityIncome",
# "CEN_bg_pctLiveAloneHH",
# "CEN_tr_pctHSGrad",
# "iat89_C4", # Highest delinquency on a trade
# "imt01_C4", # Number of mortgages
# "IssAgeALB", # Issue Age
# "HealthScore_C5", # Health Score
# "Length.of.Residence_num",
# "Target.Narrow.Band.Income_num"
# Scaling is needed for entropy weighting KMeans
df_scale <- scale(df)
# Define function to train ewkm model and store clustering result
cluster_df = data.frame()
# Fit an entropy-weighted k-means model (wskm::ewkm) and print diagnostics:
# iteration counts, restarts, cluster sizes, and the per-cluster feature
# weights. Returns the fitted model object.
fit_ewkm <- function(k, lambda, data, maxiteration){
    ewkm_model <- ewkm(data, k, lambda=lambda, maxiter=maxiteration)
    print(paste0("K = ", k))
    print(paste0("iterations: ", ewkm_model$iterations))
    print(paste0("total.iterations: ", ewkm_model$total.iterations))
    print(paste0("restarts: ", ewkm_model$restarts))
    print(table(ewkm_model$cluster))
    # Show only features whose weight is non-negligible in some cluster.
    weight=round(ewkm_model$weight, 2)
    print(weight[, colSums(abs(weight)) > 0.1])
    return(ewkm_model)
}
# Sweep the whole (k, lambda) grid in one pair of loops instead of the
# 28 copy-pasted four-line cells the notebook originally contained.
# Each fit's cluster assignment is stored in a column named
# ewkm_k<k>_l<lambda>, exactly as before, and fit_ewkm() still prints its
# per-fit diagnostics. (Only the occasional intermediate dim(df) echoes
# differ from the original cells; the final data frame is identical.)
for (k in 4:10) {
    for (lambda in 1:4) {
        ewkm_model <- fit_ewkm(k=k, lambda=lambda, data=df_scale, maxiteration=100)
        df[[paste0("ewkm_k", k, "_l", lambda)]] <- ewkm_model$cluster
    }
}
dim(df)
write.csv(df, file = "ewkm_result.csv")
# +
# score_C4 EstMarketValue_C1 CEN_bg_pctHHincomeLT15K CEN_tr_pctHSGrad
# 1 0 0 0 0
# 2 0 1 0 0
# 3 1 0 0 0
# 4 0 1 0 0
# 5 0 0 1 0
# 6 0 1 0 0
# 7 0 0 0 1
# HealthScore_C5
# 1 1
# 2 0
# 3 0
# 4 0
# 5 0
# 6 0
# 7 0
# 1 2 3 4 5 6 7
# 2697532 501439 281938 571279 322217 216788 141505
df_score_c4 <- boxplot(score_C4~ewkm_k7_l3, df, plot = FALSE)$stats
df_EstMarketValue_C1 <- boxplot(EstMarketValue_C1~ewkm_k7_l3, df, plot = FALSE)$stats
df_CEN_bg_pctHHincomeLT15K <- boxplot(CEN_bg_pctHHincomeLT15K~ewkm_k7_l3, df, plot = FALSE)$stats
df_CEN_tr_pctHSGrad <- boxplot(CEN_tr_pctHSGrad~ewkm_k7_l3, df, plot = FALSE)$stats
df_HealthScore_C5 <- boxplot(HealthScore_C5~ewkm_k7_l3, df, plot = FALSE)$stats
# -
df_score_c4
df_EstMarketValue_C1
df_CEN_bg_pctHHincomeLT15K
df_CEN_tr_pctHSGrad
df_HealthScore_C5
save(df_HealthScore_C5, df_CEN_tr_pctHSGrad, df_CEN_bg_pctHHincomeLT15K, df_EstMarketValue_C1, df_score_c4, file = "ewkm_k7_l3.RData")
|
finalModel/ewkm.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Storm Events
# Exploratory data analysis of storm events from 2010 to 2019. The datasets are from Storm Events Database of National Centers for Environmental Information (NCEI).
# #### Get data from Storm Events Database of NCEI
# +
# !wget https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2010_c20191116.csv.gz
# !mv StormEvents_details-ftp_v1.0_d2010_c20191116.csv.gz StormEvents_2010.csv.gz
# !wget https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2011_c20180718.csv.gz
# !mv StormEvents_details-ftp_v1.0_d2011_c20180718.csv.gz StormEvents_2011.csv.gz
# !wget https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2012_c20200317.csv.gz
# !mv StormEvents_details-ftp_v1.0_d2012_c20200317.csv.gz StormEvents_2012.csv.gz
# !wget https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2013_c20170519.csv.gz
# !mv StormEvents_details-ftp_v1.0_d2013_c20170519.csv.gz StormEvents_2013.csv.gz
# !wget https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2014_c20191116.csv.gz
# !mv StormEvents_details-ftp_v1.0_d2014_c20191116.csv.gz StormEvents_2014.csv.gz
# !wget https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2015_c20191116.csv.gz
# !mv StormEvents_details-ftp_v1.0_d2015_c20191116.csv.gz StormEvents_2015.csv.gz
# !wget https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2016_c20190817.csv.gz
# !mv StormEvents_details-ftp_v1.0_d2016_c20190817.csv.gz StormEvents_2016.csv.gz
# !wget https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2017_c20200121.csv.gz
# !mv StormEvents_details-ftp_v1.0_d2017_c20200121.csv.gz StormEvents_2017.csv.gz
# !wget https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2018_c20200317.csv.gz
# !mv StormEvents_details-ftp_v1.0_d2018_c20200317.csv.gz StormEvents_2018.csv.gz
# !wget https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2019_c20200416.csv.gz
# !mv StormEvents_details-ftp_v1.0_d2019_c20200416.csv.gz StormEvents_2019.csv.gz
# +
# import necessary libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
# %matplotlib inline
# -
# ## Initial data exploration
df_2010 = pd.read_csv('StormEvents_2010.csv.gz', compression='gzip')
# returns the first five rows
df_2010.head()
# check for number of rows and columns
df_2010.shape
# check the names of the columns (fields)
df_2010.columns
# +
# select columns of interest from the yearly Storm Events datasets
selected_columns = ['EVENT_ID', 'STATE', 'YEAR', 'MONTH_NAME',
'EVENT_TYPE', 'DEATHS_DIRECT', 'DEATHS_INDIRECT', 'DAMAGE_PROPERTY']
# +
# read and concatenate yearly Storm Events datasets into a single data frame
# (start from 2010, then append each later year below)
df = pd.read_csv('StormEvents_2010.csv.gz',
                 compression='gzip')[selected_columns]
# offsets 1..9 from 2010 -> appends the 2011 through 2019 files
for i in range(1, 10):
    df_temp = pd.read_csv('StormEvents_' + str(2010 + i)
                          + '.csv.gz', compression='gzip')
    # keep only the columns of interest; rows are stacked, index may repeat
    df = pd.concat([df, df_temp[selected_columns]])
# -
# check for number of rows and columns
df.shape
# number of distinct event types
num_types = df['EVENT_TYPE'].nunique()
print("There are {} storm event types.".format(num_types))
# ## Event type
# #### Which storm events occurred most frequently?
# plot the 10 most frequent storm event types
# (fixed typo in the chart title: "Strom" -> "Storm")
df['EVENT_TYPE'].value_counts()[:10]\
    .plot(kind='bar', title='Top 10 Storm Events from 2010 to 2019')
# show the same top-10 counts numerically
df['EVENT_TYPE'].value_counts()[:10]
# # Fatalities
# check if there are any null values for fatalities
df[['DEATHS_DIRECT', 'DEATHS_INDIRECT']].isnull().any()
def fatality(col_name):
    """Aggregate storm fatalities grouped by ``col_name``.

    Groups the module-level storm-events frame ``df`` by the given column
    and returns a DataFrame with DEATHS_DIRECT, DEATHS_INDIRECT and their
    sum in a new DEATHS_TOTAL column.
    """
    grouped = df.groupby(col_name).sum()
    result = grouped[['DEATHS_DIRECT', 'DEATHS_INDIRECT']]
    result['DEATHS_TOTAL'] = result['DEATHS_DIRECT'] + result['DEATHS_INDIRECT']
    return result
# ## Direct fatalities by event type
#
# #### Which storm events caused the most direct fatalities?
# +
# fatality dataframe grouped by storm type, and sorted by direct fatality
df_fatalityD_event = fatality('EVENT_TYPE').sort_values(
'DEATHS_DIRECT', ascending=False)
# add cumulative percentage for direct fatality
df_fatalityD_event['cumpercentage'] = df_fatalityD_event['DEATHS_DIRECT']\
.cumsum() / df_fatalityD_event['DEATHS_DIRECT'].sum()*100
# -
# top 5 storm types for direct fatality
df_fatalityD_event.head()
# total fatalities
df_total_deaths = df_fatalityD_event.sum()
print(df_total_deaths)
print('\n')
print("The total direct fatality is {}.".format(df_total_deaths[0]))
# +
# create and save direct_fatalities bar plot
fig, axs = plt.subplots(figsize=(15, 10))
# plot direct fatality
df_fatalityD_event['DEATHS_DIRECT'][:33].plot(kind='bar')
axs.set_title('Direct Fatality', fontsize=24)
axs.set_xlabel('', fontsize=16)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tight_layout()
# create twin axes
axs2 = axs.twinx()
axs2.yaxis.set_major_formatter(PercentFormatter())
# plot cumulative percentage
axs2.plot(df_fatalityD_event.index, df_fatalityD_event['cumpercentage'],
color="C1", marker="D", ms=7)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tight_layout()
fig.savefig('direct_fatalities.png')
# -
# top 5 event types for direct fatality
top5_events = ['Tornado', 'Flash Flood',
'Excessive Heat', 'Heat', 'Rip Current']
# ## Top 5 direct fatalities by year
#
# #### What are the yearly direct fatalities caused by the top five storm event types?
# +
# create yearly direct fatalities by top 5 storm event types dataframe
df_yearly_deaths = df[df['EVENT_TYPE'] == 'Tornado'].groupby('YEAR').sum()[
'DEATHS_DIRECT']
for event in top5_events[1:]:
df_yearly_temp = df[df['EVENT_TYPE'] == event].groupby('YEAR').sum()[
'DEATHS_DIRECT']
# concatenate the storm event types
df_yearly_deaths = pd.concat([df_yearly_deaths, df_yearly_temp], axis=1)
# rename the columns
df_yearly_deaths.columns = top5_events
# -
df_yearly_deaths
# yearly total direct fatalities by top 5 storm types
df_yearly_deaths.sum(axis=1)
# total direct fatality by top 5 storm types grouped by types
df_yearly_deaths.sum(axis=0)
# total direct fatality by top 5 storm types
total_direct_deaths = df_yearly_deaths.sum().sum()
print("The total direct fatality by the top 5 storm events is {}."
.format(total_direct_deaths))
# share of all direct fatalities attributable to the top 5 event types
# (3275 top-5 direct deaths / 5568 total direct deaths, from the cells above);
# compute the percentage instead of hard-coding it, and fix "strom" -> "storm"
top5_direct_share = 3275 / 5568
print("{:.1%} of direct fatality was caused by the top 5 storm events."
      .format(top5_direct_share))
# statistics for the yearly direct fatalities by the top five storm types
df_yearly_deaths.describe().transpose()
# create and save the yearly direct fatalities by the top five storm types box plot
fig, axs = plt.subplots(figsize=(8, 5))
fig = sns.boxplot(data=df_yearly_deaths)
axs.set_title('Direct Fatality', fontsize=16)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
fig.figure.savefig('direct_boxplt.png')
# create and save the yearly direct fatalities by the top five storm types stacked bar plot
fig, axs = plt.subplots(figsize=(10, 5))
df_yearly_deaths.plot(kind='bar', stacked=True, ax=axs)
axs.set_title('Direct fatality by top 5 storm types', fontsize=16)
axs.set_xlabel('')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
fig.savefig('top5_direct_fatalities.png')
# ## Total fatalities by event type
#
# #### Which storm event types caused the most total fatalities?
# dataframe for fatality grouped by storm type and sorted by total fatality
df_fatality_event = fatality('EVENT_TYPE')\
.sort_values('DEATHS_TOTAL', ascending=False)
# add cumulative percentage for total fatality
df_fatality_event['cumpercentage'] = df_fatality_event['DEATHS_TOTAL']\
.cumsum()/df_fatality_event['DEATHS_TOTAL'].sum() * 100
df_fatality_event
# fatalities
df_fatality_event.sum()
# +
# create and save total_fatalities bar plot
# plot total fatality
fig, axs = plt.subplots(figsize=(15, 10))
df_fatality_event['DEATHS_TOTAL'][:41].plot(kind='bar')
axs.set_title('Total Fatality', fontsize=24)
axs.set_xlabel('', fontsize=16)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tight_layout()
# create twin axes
axs2 = axs.twinx()
axs2.yaxis.set_major_formatter(PercentFormatter())
# plot cumulative percentage
axs2.plot(df_fatality_event.index, df_fatality_event['cumpercentage'],
color="C1", marker="D", ms=7)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tight_layout()
fig.savefig('total_fatalities.png')
# -
# top 5 total fatalities
df_fatality_event['DEATHS_TOTAL'][:5]
# total fatality by top 5 storm types
df_fatality_event['DEATHS_TOTAL'][:5].sum()
# share of all fatalities (direct + indirect) attributable to the top 5 types
# (3730 top-5 total deaths / 7551 total deaths, from the cells above);
# compute the percentage instead of hard-coding it, and fix "strom" -> "storm"
top5_total_share = 3730 / 7551
print("{:.1%} of total fatality was caused by the top 5 storm events."
      .format(top5_total_share))
# ### Total fatalities by state
df_fatality_state = fatality('STATE').sort_values(
'DEATHS_TOTAL', ascending=False)
df_fatality_state.head()
fig, axs = plt.subplots(figsize=(15, 7))
df_fatality_state['DEATHS_TOTAL'].plot(kind='bar')
axs.set_title('Total Fatalities')
plt.tight_layout()
# ### Total fatalities by year
df_fatality_year = fatality('YEAR').sort_values(
'DEATHS_TOTAL', ascending=False)
df_fatality_year.head()
df_fatality_year['DEATHS_TOTAL'] \
.plot(kind='bar', title='Total Fatalities')
# # Property Damage
df_damage = df[['EVENT_ID', 'STATE', 'EVENT_TYPE',
'YEAR', 'MONTH_NAME', 'DAMAGE_PROPERTY']]
df_damage.count()
# percentage of missing values for DAMAGE_PROPERTY
(626677-513677) / 626777 * 100
# check the number of unique EVENT_ID
df_damage['EVENT_ID'].nunique()
# check for yearly missing values for DAMAGE_PROPERTY
df_damage.groupby('YEAR').count()
# percentage of yearly missing values
percent_missing = (1 - (df_damage.groupby('YEAR').count()
['DAMAGE_PROPERTY'] / df_damage.groupby('YEAR').count()['STATE'])) * 100
percent_missing
percent_missing.describe()
def convert_billion(damage):
    """Convert a raw DAMAGE_PROPERTY value to billions of dollars (float).

    The raw field is a number with an optional magnitude suffix:
    'K' (thousands), 'M' (millions) or 'B' (billions), e.g. '10.00K'.
    The suffix is now matched case-insensitively, so '10k' equals '10K'.
    Values without a recognized suffix (including NaN floats) are simply
    converted with float() and passed through.
    """
    # multiplier that rescales each magnitude to billions
    multipliers = {'K': 0.000001, 'M': 0.001, 'B': 1}
    text = str(damage)
    suffix = text[-1:].upper()  # '' for empty input, so the lookup just misses
    if suffix in multipliers:
        return float(text[:-1]) * multipliers[suffix]
    return float(damage)
df_damage_1 = df_damage.drop(['EVENT_ID'], axis=1)
df_damage_1['DAMAGE_PROPERTY_billion'] = df_damage_1['DAMAGE_PROPERTY'].apply(
convert_billion)
df_damage_1.head(20)
# check statistics of damage property for flash flood
df_damage_1[df_damage_1['EVENT_TYPE'] ==
'Flash Flood']['DAMAGE_PROPERTY_billion'].describe()
# +
# find median DAMAGE_PROPERTY_billion for each storm type
# returns a dictionary of medians
# NOTE: Series.median() ignores NaN, so medians reflect only reported damages.
# Equivalent one-liner:
#   df_damage_1.groupby('EVENT_TYPE')['DAMAGE_PROPERTY_billion'].median().to_dict()
storm_types = df_damage_1['EVENT_TYPE'].unique()
medians = []
for event in storm_types:
    medians.append(df_damage_1[df_damage_1['EVENT_TYPE'] == event]
                   ['DAMAGE_PROPERTY_billion'].median())
# event type -> median damage (in $ billions); used by impute_damage below
dict_medians = dict(zip(storm_types, medians))
# -
def impute_damage(cols):
    """Fill a missing property-damage value with its storm type's median.

    Arg:
        cols = ['DAMAGE_PROPERTY_billion', 'EVENT_TYPE']
    Return:
        the original damage when it is present, otherwise the per-type
        median looked up in the module-level ``dict_medians``.
    """
    damage, event_type = cols[0], cols[1]
    if not pd.isnull(damage):
        return damage
    return dict_medians[event_type]
# apply imputation to DAMAGE_PROPERTY_billiion
df_damage_1['DAMAGE_PROPERTY_billion'] = df_damage_1[[
'DAMAGE_PROPERTY_billion', 'EVENT_TYPE']].apply(impute_damage, axis=1)
df_damage_1.head(20)
# total property damage
df_damage_1['DAMAGE_PROPERTY_billion'].sum()
# property damage by flash flood
df_damage_1[df_damage_1['EVENT_TYPE'] ==
'Flash Flood']['DAMAGE_PROPERTY_billion'].sum()
# ## Billion-dollar storm events
df_damage_billion = df_damage_1[df_damage_1['DAMAGE_PROPERTY_billion'] >= 1]
df_damage_billion.sort_values('DAMAGE_PROPERTY_billion', ascending=False)
df_damage_billion['EVENT_TYPE'].value_counts()
fig, axs = plt.subplots(figsize=(12, 5))
sns.countplot(x='EVENT_TYPE', data=df_damage_billion)
axs.set_title('Billion-dollar storm events', fontsize=16)
axs.set_xlabel('')
axs.set_ylabel('Count', fontsize=16)
plt.xticks(fontsize=12)
plt.yticks(fontsize=14)
plt.tight_layout()
fig.savefig('billion_dollar_storm.png')
print("There are {} storm events that caused 1 billion or more in property damage."
.format(df_damage_billion.shape[0]))
df_damage_billion['DAMAGE_PROPERTY_billion'].sum()
print("The total property damage by billion-dollar storm events is $115.75 billion.")
115.75/217.12280303000006 * 100
print("The billion-dollar storms caused 53% of property damage by all storm events")
df_damage_billion.groupby('EVENT_TYPE').sum(
)['DAMAGE_PROPERTY_billion'].sort_values(ascending=False).plot(kind='bar')
df_damage_billion.groupby('EVENT_TYPE').sum(
)['DAMAGE_PROPERTY_billion'].sort_values(ascending=False)
# ## Property damage by event type
#
# #### Which storm events caused the most property damage?
df_damage_event = pd.DataFrame(df_damage_1.groupby('EVENT_TYPE')
.sum()['DAMAGE_PROPERTY_billion']
.sort_values(ascending=False))
df_damage_event['cumpercentage'] = df_damage_event['DAMAGE_PROPERTY_billion'].cumsum(
) / df_damage_event['DAMAGE_PROPERTY_billion'].sum() * 100
df_damage_event[:10]
# +
# create and save total_fatalities bar plot
# plot damage_property
fig, axs = plt.subplots(figsize=(15, 10))
df_damage_event['DAMAGE_PROPERTY_billion'][:25].plot(kind='bar')
axs.set_title('Property damage in $ billions', fontsize=24)
axs.set_xlabel('', fontsize=16)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tight_layout()
# create twin axes
axs2 = axs.twinx()
axs2.yaxis.set_major_formatter(PercentFormatter())
# plot cumulative percentage
axs2.plot(df_damage_event.index, df_damage_event['cumpercentage'],
color="C1", marker="D", ms=7)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tight_layout()
fig.savefig('damage_property.png')
# -
# top 5 damage events
top5_damage = ['Flash Flood', 'Wildfire', 'Flood', 'Tornado', 'Coastal Flood']
# total property damage in millions
df_damage_event.sum()
# property damage by the top 5 events
df_damage_event[:5].sum()
# share of all property damage attributable to the top 5 event types
# (top-5 damage / total damage, both in $ billions, from the cells above);
# compute the percentage instead of hard-coding it, and fix "strom" -> "storm"
top5_damage_share = 163.77893544000102 / 217.12280303000188
print("{:.1%} of property damage was caused by top 5 storm events."
      .format(top5_damage_share))
# ## Top 5 property damage by year
#
# #### What are the yearly property damages caused by the top 5 storm event types?
# +
# yearly distribution of top 5 property damage
df_yearly_damage = df_damage_1[df_damage_1['EVENT_TYPE'] == 'Flash Flood']\
.groupby('YEAR')\
.sum()['DAMAGE_PROPERTY_billion']
for event in top5_damage[1:]:
df_yearly_temp = df_damage_1[df_damage_1['EVENT_TYPE'] == event]\
.groupby('YEAR')\
.sum()['DAMAGE_PROPERTY_billion']
df_yearly_damage = pd.concat([df_yearly_damage, df_yearly_temp], axis=1)
df_yearly_damage.columns = top5_damage # rename the columns
# -
df_yearly_damage
df_yearly_damage.describe().transpose()
fig, axs = plt.subplots(figsize=(8, 5))
fig = sns.boxplot(data=df_yearly_damage)
axs.set_title('Property damage in $billions', fontsize=16)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
fig.figure.savefig('property_boxplt.png')
# create and save the stacked bar plot of yearly property damage
# by the top five storm types (typo fixed in title: "strom" -> "storm")
fig, axs = plt.subplots(figsize=(10, 5))
df_yearly_damage.plot(kind='bar', stacked=True, ax=axs)
axs.set_title(
    'Property damage in $billions by top 5 storm types', fontsize=16)
axs.set_xlabel('')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
fig.savefig('top5_damage.png')
|
storm_events.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/satfail/AI-Reading-Materials/blob/master/BAJOGRADO_CheckPoint_Xception_Articulo_CAT.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="BG9VZWydWc5I"
# # Cargamos las imágenes el Drive e importamos Tensorflow
# + colab={"base_uri": "https://localhost:8080/"} id="zctkyGn6WUys" outputId="c5a43829-fb7c-49b7-8449-6daa09c47694"
from google.colab import drive
drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/"} id="LNbMdI4ZWema" outputId="20386b12-30ac-4f02-a406-15bda66acab3"
# !ls "/content/drive/MyDrive/CelulasArticulo"
#raiz
PATH = "/content/drive/My Drive"
#ipunt
INPATH = PATH + '/CelulasArticulo'
OUTPUT = PATH + '/CelulasFiltradas_Ascus'
#checkpoints
CPATH = PATH + '/checkpointsCelulas'
# !ls "/content/drive/MyDrive/checkpointsCelulas"
# + colab={"base_uri": "https://localhost:8080/"} id="JB9AI8JI2ddi" outputId="2a6d3171-be6f-455f-bbd2-2a5b45395388"
# !pip uninstall tensorflow -y
# !pip install tensorflow==2.4.1
# + colab={"base_uri": "https://localhost:8080/"} id="KrHkrl1bWrW0" outputId="19d2f8d4-9634-4b99-8db0-ac5c5bcf6bf1"
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import AveragePooling2D
print(tf.__version__)
# + [markdown] id="6d0bu-GMWxW7"
# # Cargamos el set de Datos y Preprocesado
# * Definimos el ImageDataGenerator para aumentar el dataset, con zoom, flip, rotación...
# * Hacemos plot para comprobar cómo quedan las imágenes generadas
# + id="3TlbyVyGZAiO" colab={"base_uri": "https://localhost:8080/"} outputId="7d19d6dd-a494-47a2-93e5-affcc6ceb72d"
import pathlib
data_train = pathlib.Path(INPATH + '/entrenamiento/')
count = len(list(data_train.glob('*/*.tiff')))
print('Entrenamiento : ' + str(count))
data_test = pathlib.Path(INPATH + '/test/')
count = len(list(data_test.glob('*/*.png')))
print('Test : ' + str(count))
data_train
# + id="7S1jsFGlWxeV" colab={"base_uri": "https://localhost:8080/"} outputId="53431dbd-9f72-4b88-f730-8e2fd23a861c"
#from keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
batch_size = 32
img_height = 224
img_width = 224
train_datagen = ImageDataGenerator(
rescale=1./255,
horizontal_flip=True,
vertical_flip=True,
validation_split=0.1) # set validation split
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
data_train,
target_size=(img_height, img_width),
batch_size=batch_size,
shuffle=True,
class_mode='categorical',
subset='training',
seed=25) # set as training data
validation_generator = train_datagen.flow_from_directory(
data_train, # same directory as training data
target_size=(img_height, img_width),
batch_size=batch_size,
shuffle=True,
class_mode='categorical',
subset='validation',
seed=25) # set as validation data
test_generator = test_datagen.flow_from_directory(INPATH + '/test',
target_size=(224, 224),
batch_size=1,
shuffle=False,
class_mode='categorical')
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="f6Ai1q6OwjI8" outputId="81f68de8-c111-4415-f8c3-6e73f5d1f313"
def plotImages(images_arr):
    """Show up to five images from `images_arr` side by side in a single row."""
    fig, axes = plt.subplots(1, 5, figsize=(20, 20))
    for ax, img in zip(axes.flatten(), images_arr):
        ax.imshow(img)
    plt.tight_layout()
    plt.show()
augmented_images = [train_generator[1][0][0] for i in range(5)]
plotImages(augmented_images)
# + [markdown] id="mVc5ILQgw2Q0"
# # Creamos el modelo
#
#
#
# * Modelo Secuencial, 32 filtros de entrada.
# * strides = número de pasos que se mueve el filtro y padding= añadimos 0 para generar salida igual a entrada
# * BatchNormalization() Normaliza capas de entrada
# * Dropout Eliminar sobreajuste, desactivamos algunas conjuntos de neuronas
# + id="24RfRSIwwznv"
from tensorflow.keras.layers import Dropout, BatchNormalization
#from keras.applications.xception import Xception
from tensorflow.python.keras.applications.xception import Xception
baseModel = Xception(weights="imagenet", include_top=False,
input_shape = (224, 224, 3) )
# + [markdown] id="rizM171aYvKc"
# ## Fine Tuning
#
# El ajuste está en la cabeza del modelo, donde especificamos las clases que vamos a tener que predecir
# + id="LDhu3cbtYXMp"
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(256, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(4, activation="softmax")(headModel)
# + id="SwACgeudZiWN"
#from keras.models import Model
from tensorflow.python.keras.models import Model
model = Model(inputs=baseModel.input, outputs=headModel)
# + id="lvXI47OSZ914" colab={"base_uri": "https://localhost:8080/"} outputId="7907256c-a27c-4bbf-d8b1-32ebe8c2f48b"
model.summary()
# + id="CJFFDGLIazID" colab={"base_uri": "https://localhost:8080/"} outputId="56149694-7496-4c45-f23c-fbea691fc5a3"
print("Number of layers in the base model: ", len(model.layers))
# + [markdown] id="zyPXq2l5bMT9"
# ## CUIDADO!
#
# En este ejemplo:
#
# ### Congelamos las 40 primeras capas, ahí estarían las formas más básicas en el modelo aprendidas, el resto de capas las entrenamos para que se ajuste al problema
#
# + id="XQ6hm9BWxLSZ" colab={"base_uri": "https://localhost:8080/"} outputId="689b35d9-a2bc-4c63-cdb4-bd753c4afdbf"
steps_per_epoch = train_generator.n // batch_size
validation_steps = validation_generator.n // batch_size
print(steps_per_epoch)
print(validation_steps)
# + id="7Yuk4Ot8xb3Z" colab={"base_uri": "https://localhost:8080/"} outputId="9fba43a0-6b12-4a7b-d33d-70c1f5de3aca"
from tensorflow.keras.callbacks import EarlyStopping
import os
earlystopping = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20)
optimizer = tf.keras.optimizers.Adam (lr=0.001)
model.compile(optimizer=optimizer,
loss='categorical_crossentropy',
metrics=['accuracy'])
reduce_lr = tf.keras.callbacks.LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x)
checkpoint_filepath = CPATH + '/testGanAltoGrado/'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor='val_accuracy',
save_freq = 15,
save_best_only=False)
# Cargar check
model.load_weights(checkpoint_filepath)
# + [markdown] id="kPrL_xkB8-El"
# ## OPCIONAL: ENTRENAR EL MODELO
# + colab={"base_uri": "https://localhost:8080/"} id="8R3PsAxN89eq" outputId="e37fa3e3-af5f-429e-addc-c8abbdd8bb66"
history = model.fit(train_generator,
steps_per_epoch=steps_per_epoch,
epochs=100,
validation_data=validation_generator,
validation_steps=validation_steps,
callbacks=[reduce_lr,model_checkpoint_callback,earlystopping])
# + colab={"base_uri": "https://localhost:8080/"} id="UK1NXQwKjohl" outputId="ed9531e0-cf92-4582-93fe-bc9183b76cd8"
model.evaluate(x=validation_generator,
steps=validation_steps)
# + colab={"base_uri": "https://localhost:8080/"} id="oBKhkA1uJcbG" outputId="4b7e0369-9380-49e9-bac9-b2a6385495bb"
test_predict = model.predict(test_generator, steps = test_generator.n // 1, verbose =1)
# + colab={"base_uri": "https://localhost:8080/"} id="mR1DbUjLJcLd" outputId="0fef0432-a3f8-4dfb-d474-8ba6117f7401"
test_predict.shape
# + colab={"base_uri": "https://localhost:8080/"} id="DQfhfI-ZJbcH" outputId="4a965cc6-f77e-4251-d6fd-ae05f8351ea8"
test_predict
# + id="S9nMq6kpM6WV"
predict = []
for i in test_predict:
predict.append(int(np.argmax(i)))
predict = np.asarray(predict)
# + colab={"base_uri": "https://localhost:8080/"} id="3txhoFJSM6xZ" outputId="89250a2c-df0a-4c13-832d-28b17af620c2"
np.asarray(predict)
# + colab={"base_uri": "https://localhost:8080/"} id="XTXdyAoXQJvD" outputId="8c1c691d-ef37-4718-9acb-2d482949ff70"
# Obtenemos la tasa de acierto del modelo
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(test_generator.classes, np.asarray(predict))
accuracy
# + colab={"base_uri": "https://localhost:8080/"} id="bBR5ZLMIM88g" outputId="34e3582c-7069-4592-a339-a0c11a14c3de"
test_generator.class_indices
# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="YuRJcLcMRNN3" outputId="6a806cb2-4caf-495f-cf78-973d39a76e44"
# Representamos la matriz de confusión
from sklearn.metrics import confusion_matrix
import seaborn as sns
cm = confusion_matrix(test_generator.classes, predict)
plt.figure(figsize = (7,7))
ax= plt.subplot()
sns.heatmap(cm, annot=True ,fmt="d",cmap='Blues')
# labels, title and ticks
ax.set_xlabel('Predicted labels');ax.set_ylabel('True labels');
ax.set_title('Confusion Matrix');
ax.xaxis.set_ticklabels(['Alto Grado', 'Ascus', 'Bajo Grado', 'Benigna']); ax.yaxis.set_ticklabels(['Alto Grado', 'Ascus', 'Bajo Grado', 'Benigna']);
# + colab={"base_uri": "https://localhost:8080/", "height": 624} id="ZS9M5Czqx_tF" outputId="69f25934-1df9-4315-b6d8-de555fc8d498"
acc = history.history['accuracy' ]
val_acc = history.history[ 'val_accuracy' ]
loss = history.history[ 'loss' ]
val_loss = history.history['val_loss' ]
epochs = range(1,len(acc)+1,1) # obtener número de epochs
plt.plot ( epochs, acc, 'r--', label='Training acc' )
plt.plot ( epochs, val_acc, 'b', label='Validation acc')
plt.title ('Training and validation accuracy')
plt.ylabel('acc')
plt.xlabel('epochs')
plt.legend()
plt.figure()
plt.plot ( epochs, loss, 'r--' )
plt.plot ( epochs, val_loss , 'b' )
plt.title ('Training and validation loss' )
plt.ylabel('loss')
plt.xlabel('epochs')
plt.legend()
plt.figure()
# + [markdown] id="KOjBMSQM7QnT"
# # Filtrado de Imágenes
# + colab={"base_uri": "https://localhost:8080/"} id="GQYAK39Q8zT7" outputId="f9884c4d-a0d4-4abe-a8cd-055c17f7a86e"
array_predict_labels = np.asarray(predict)
predict
# + colab={"base_uri": "https://localhost:8080/"} id="Zqn3hFHr7TjA" outputId="9fb60240-4895-42bb-8a3b-ee4a3e5b65eb"
import shutil, os
directory = INPATH + '/test/ascus (1)'
for label, filename in zip( predict, os.listdir(directory) ):
if label == 0:
print('{} AltoGrado, en ruta {}'.format(label,os.path.join(directory, filename)))
shutil.copyfile(os.path.join(directory, filename), os.path.join(OUTPUT + '/altogrado', filename))
elif label == 1:
print('{} Ascus, en ruta {}'.format(label,os.path.join(directory, filename)))
shutil.copyfile(os.path.join(directory, filename), os.path.join(OUTPUT + '/ascus', filename))
elif label == 2:
print('{} Bajo Grado, en ruta {}'.format(label,os.path.join(directory, filename)))
shutil.copyfile(os.path.join(directory, filename), os.path.join(OUTPUT + '/bajogrado', filename))
elif label == 3:
print('{} Benigna, en ruta {}'.format(label,os.path.join(directory, filename)))
shutil.copyfile(os.path.join(directory, filename), os.path.join(OUTPUT + '/benigna', filename))
else:
continue
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="LQYJEHAjBe29" outputId="847f26d1-3962-44b4-c9cb-5be09f808e0c"
|
BAJOGRADO_CheckPoint_Xception_Articulo_CAT.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="_69sRZ5ktmDp" colab_type="text"
# # 함수 연습문제
# ___
# + [markdown] id="oY4-5IZktmDq" colab_type="text"
# 1. 숫자를 parameter로 받아 짝수/홀수를 구분하는 odd_even 함수를 만드시오.
# >- 숫자를 입력으로 받는다.
# >- 짝수면 "짝수", 홀수면 "홀수"를 return 한다.
# + id="m6aBzbjItmDr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1592220164701, "user_tz": -540, "elapsed": 1671, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}} outputId="35cda5a0-b881-4428-97d3-abef48aa69bd"
def odd_even(x):
    """Return "짝수" (even) when x is even, "홀수" (odd) otherwise."""
    if x % 2 == 0:
        return "짝수"
    return "홀수"
odd_even(24)
# + [markdown] id="_GG71MzYtmDw" colab_type="text"
# 2. 숫자 2개를 입력으로 받아 그 숫자의 합과 차를 리턴하는 함수를 만드시오.
# >- 숫자를 2개 입력받는다.
# >- 그 2개 숫자의 합과 차를 구하여 tuple로 리턴한다.
# + id="jiHl-pCLtmDx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1592220225889, "user_tz": -540, "elapsed": 730, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}} outputId="ecdf1c57-7083-47de-f5fc-42e9f8d572f2"
def plus_minus(x, y):
    """Return the sum and difference of two numbers as a (sum, difference) tuple."""
    total = x + y
    difference = x - y
    return total, difference
plus_minus(29, 35)
# + [markdown] id="Hd0arJfgEZJu" colab_type="text"
# 3. 가변 인수를 입력 받아 양수만 리턴하는 함수를 생성하시오.
# + id="CGMAyErLEruq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1593049454393, "user_tz": -540, "elapsed": 1016, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}} outputId="bbd87221-4503-4bcf-d065-101cd0d01942"
def positive(*nums):
    """Return a list of the strictly positive values among the arguments."""
    kept = []
    for value in nums:
        if value > 0:
            kept.append(value)
    return kept
x = positive(10,0,-10,80,87,-34, 0)
print(x)
# + [markdown] id="yIT6glieJZLa" colab_type="text"
# 4. 성적 리스트를 입력 받아 평균을 리턴하는 print_score 함수를 정의하라.
# + id="oSe3S3mMJ4aF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1593049663632, "user_tz": -540, "elapsed": 1130, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}} outputId="c130a92e-17c6-4fcc-84a3-36c5dbee2d8f"
def avg(x):
    """Return the arithmetic mean of the scores in x."""
    total = 0
    for score in x:
        total += score
    return total / len(x)
print(avg([20, 89, 90, 100, 65]))
|
01Basic/04함수_연습_해답.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 315. Count of Smaller Numbers After Self
#
# [@stefanpochmann](https://leetcode.com/problems/count-of-smaller-numbers-after-self/discuss/76584/Mergesort-solution) The smaller numbers on the right of a number are exactly those that jump from its right to its left during a stable sort. So I do mergesort with added tracking of those right-to-left jumps.
#
# [315. Count of Smaller Numbers After Self](https://leetcode.com/problems/count-of-smaller-numbers-after-self/description/)
def countSmaller(nums):
    """For each element, count how many smaller elements appear to its right.

    Merge-sort based (O(n log n)): the numbers smaller than nums[i] that sit
    to its right are exactly those that jump from the right half to the left
    of nums[i] during a stable merge, so we count those jumps.
    """
    def mergesort(enum):
        # enum: list of (original_index, value) pairs, sorted in place by value
        half = len(enum) // 2
        if half:
            left, right = mergesort(enum[:half]), mergesort(enum[half:])
            # merge from the largest value down, filling enum back-to-front
            for i in reversed(range(len(enum))):
                if len(right) == 0 or len(left) > 0 and left[-1][1] > right[-1][1]:
                    # everything still in `right` is smaller than left[-1]
                    # and was originally on its right -> count all at once
                    ret[left[-1][0]] += len(right)
                    enum[i] = left.pop()
                else:
                    enum[i] = right.pop()
        return enum
    # ret[i] = number of smaller elements to the right of nums[i]
    ret = [0] * len(nums)
    mergesort(list(enumerate(nums)))
    return ret
nums = [5, 2, 6, 1]
print(countSmaller(nums))
# # 327. Count of Range Sum
#
# [@dietpepsi](https://leetcode.com/problems/count-of-range-sum/discuss/77990/Share-my-solution)
#
# Recall count smaller number after self where we encountered the problem
# ```
# count[i] = count of nums[j] - nums[i] < 0 with j > i
# ```
# Here, after we did the preprocess, we need to solve the problem
# ```
# count[i] = count of a <= S[j] - S[i] <= b with j > i
# ans = sum(count[:])
# ```
# Therefore the two problems are almost the same. We can use the same technique used in that problem to solve this problem. One solution is merge sort based; another one is Balanced BST based. The time complexity are both O(n log n).
#
# The merge sort based solution counts the answer while doing the merge. During the merge stage, we have already sorted the left half [start, mid) and right half [mid, end). We then iterate through the left half with index i. For each i, we need to find two indices k and j in the right half where
#
# * j is the first index satisfy sums[j] - sums[i] > upper and
# * k is the first index satisfy sums[k] - sums[i] >= lower.
#
# Then the number of sums in [lower, upper] is j-k. We also use another index t to copy the elements satisfy sums[t] < sums[i] to a cache in order to complete the merge sort.
#
# Despite the nested loops, the time complexity of the "merge & count" stage is still linear. Because the indices k, j, t will only increase but not decrease, each of them will only traversal the right half once at most. The total time complexity of this divide and conquer solution is then O(n log n).
#
# [327. Count of Range Sum](https://leetcode.com/problems/count-of-range-sum/description/)
def countRangeSum(nums, lower, upper):
    """Count index ranges whose sum lies in [lower, upper].

    Equivalently, count pairs (i, j) with i < j and
    lower <= S[j] - S[i] <= upper, where S is the prefix-sum array.
    Merge-sort based, O(n log n): while merging two sorted halves of the
    prefix-sum array, two monotone pointers find, for each left-half value,
    the right-half window whose differences fall inside [lower, upper].
    (Leftover debug prints removed.)
    """
    # prefix sums: sums[k] = nums[0] + ... + nums[k-1], with sums[0] = 0
    sums = [0] * (len(nums) + 1)
    for i, n in enumerate(nums):
        sums[i + 1] = sums[i] + n

    def mergesort(lo, hi):
        # counts qualifying pairs with both endpoints inside sums[lo:hi]
        # and leaves that slice sorted
        m = (lo + hi) // 2
        if m == lo:  # fewer than two elements -> no pairs
            return 0
        count = mergesort(lo, m) + mergesort(m, hi)
        i = j = m
        for left in sums[lo:m]:
            # i, j only move forward across all iterations, so this is linear:
            #   i = first index with sums[i] - left >= lower
            #   j = first index with sums[j] - left >  upper
            while i < hi and sums[i] - left < lower:
                i += 1
            while j < hi and sums[j] - left <= upper:
                j += 1
            count += j - i
        sums[lo:hi] = sorted(sums[lo:hi])  # complete the merge step
        return count

    return mergesort(0, len(sums))
nums = [-2,5,-1]
countRangeSum(nums, -2, 2)
# # 493. Reverse Pairs
#
# The same as trick as 327.Count of Range Sum
#
# * left part and right part are all **sorted**
# * *j* is the first index that **violoates** ```nums[i] > 2 * nums[j]```
#
# [493. Reverse Pairs](https://leetcode.com/problems/reverse-pairs/description/)
def reversePairs(nums):
    """Count "important reverse pairs": (i, j) with i < j and nums[i] > 2 * nums[j].

    Same merge-sort counting trick as countRangeSum: once both halves are
    sorted, a monotone pointer finds, for each left-half element, how many
    right-half elements it dominates. Sorts `nums` in place as a side
    effect. (Leftover debug prints removed.)

    :type nums: List[int]
    :rtype: int
    """
    def mergesort(lo, hi):
        m = (lo + hi) // 2
        if m == lo:  # fewer than two elements -> no pairs
            return 0
        count = mergesort(lo, m) + mergesort(m, hi)
        j = m
        for i in range(lo, m):
            # advance j to the first right-half index NOT dominated by nums[i];
            # j never moves backward, so the scan is linear overall
            while j < hi and nums[i] > 2 * nums[j]:
                j += 1
            count += j - m
        nums[lo:hi] = sorted(nums[lo:hi])  # complete the merge step
        return count

    return mergesort(0, len(nums))
nums = [1,3,2,3,1]
reversePairs(nums)
nums = [2,4,3,5,1]
reversePairs(nums)
|
MergeSort like algorithms.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Понижение размерности можно использовать для:
#
# * Сокращение ресурсоемкости алгоритмов
# * Ослабление влияния проклятия размерности и тем самым уменьшение переобучения
# * Переход к более информативным признакам
#
# На этом семинаре мы будем понижать размерность ориентируясь как раз на эти цели.
# Тогда этот процесс также можно называть и выделением признаков.
# ## Отбор признаков
#
# Самый простой способ выделения признаков - их отбор. Не будем заострять много внимания
# на этом методе, так как он очень простой, просто приведем пример, показывающий, что
# так можно примитивно сокращать ресурсоемкость алгоритмов.
#
# Отберем признаки на основе их корреляции с целевым признаком, и сравним результаты с исходными.
# +
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
from scipy.stats import pearsonr

# Load the Boston housing data and shuffle objects with a fixed seed so the
# experiments below are reproducible.
ds = load_boston()
X, y = ds.data, ds.target

indexes = np.arange(len(y))
np.random.seed(52342)
np.random.shuffle(indexes)
X = X[indexes, :]
y = y[indexes]

# Absolute Pearson correlation of every feature with the target.
features_ind = np.arange(X.shape[1])
corrs = np.abs([pearsonr(X[:, i], y)[0] for i in features_ind])
importances_sort = np.argsort(corrs)
# Bug fix: the bar labels must be reordered together with the bar values,
# otherwise every bar is annotated with the wrong feature name.
plt.barh(ds['feature_names'][importances_sort], corrs[importances_sort])
# Reorder columns so that the most correlated features come last.
X = X[:, importances_sort]
# +
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor

features_counts = np.arange(1, X.shape[1] + 1)

def scores_by_features_count(reg):
    """Mean cross-validation score of ``reg`` for every feature-prefix size.

    Columns of the global ``X`` are ordered by correlation with the target,
    so element k of the result is the score obtained with the first k
    columns only.
    """
    return [cross_val_score(reg, X[:, :count], y).mean()
            for count in features_counts]

plt.figure()

linreg_scores = scores_by_features_count(LinearRegression())
plt.plot(ds['feature_names'], linreg_scores, label='LinearRegression')

rf_scores = scores_by_features_count(RandomForestRegressor(n_estimators=100, max_depth=3))
plt.plot(ds['feature_names'], rf_scores, label='RandomForest')

plt.legend(loc='best')
# -
# В общем, если мы захотим немного сократить потребление ресурсов, пожертвовав частью качества,
# видно, что это можно сделать.
# ## Метод главных компонент (Principal Component Analysis, PCA)
#
# Выделение новых признаков путем их отбора часто дает плохие результаты, и
# в некоторых ситуациях такой подход практически бесполезен. Например, если
# мы работаем с изображениями, у которых признаками являются яркости пикселей,
# невозможно выбрать небольшой поднабор пикселей, который дает хорошую информацию о
# содержимом картинки.
#
# Поэтому признаки нужно как-то комбинировать. Рассмотрим метод главных компонент.
#
# Этот метод делает два важных упрощения задачи
#
# 1. Игнорируется целевая переменная
# 2. Строится линейная комбинация признаков
#
# П. 1 на первый взгляд кажется довольно странным, но на практике обычно не является
# таким уж плохим. Это связано с тем, что часто данные устроены так, что имеют какую-то
# внутреннюю структуру в пространстве меньшей размерности, которая никак не связана с
# целевой переменной. Поэтому и оптимальные признаки можно строить не глядя на ответ.
#
# П. 2 тоже сильно упрощает задачу, но далее мы научимся избавлятся от него.
# ### Теория
#
# Кратко вспомним, что делает этот метод (подробно см. в лекции).
#
# Обозначим $X$ - матрица объекты-признаки, с нулевым средним каждого признака,
# а $w$ - некоторый единичный вектор. Тогда
# $Xw$ задает величину проекций всех объектов на этот вектор. Далее ищется вектор,
# который дает наибольшую дисперсию полученных проекций (то есть наибольшую дисперсию
# вдоль этого направления):
#
# $$
# \max_{w: \|w\|=1} \| Xw \|^2 = \max_{w: \|w\|=1} w^T X^T X w
# $$
#
# Подходящий вектор тогда равен собственному вектору матрицы $X^T X$ с наибольшим собственным
# значением. После этого все пространство проецируется на ортогональное дополнение к вектору
# $w$ и процесс повторяется.
# ### PCA на плоскости
#
# Для начала посмотрим на метод PCA на плоскости для того, чтобы
# лучше понять, как он устроен.
#
# Попробуем специально сделать один из признаков более значимым и проверим, что PCA это обнаружит. Сгенерируем выборку из двухмерного гауссовского распределения. Обратите внимание, что выборка
# изначально выбирается центрированной.
# +
np.random.seed(314512)  # fixed seed -> reproducible sample

# 1000 points from a centred 2-D Gaussian whose first axis has the larger
# variance (4 vs 1), so PCA should recover the axes as principal directions.
data_synth_1 = np.random.multivariate_normal(
    mean=[0, 0],
    cov=[[4.0, 0.0],
         [0.0, 1.0]],
    size=1000,
)
# -
# Теперь изобразим точки выборки на плоскости и применим к ним PCA для нахождения главных компонент.
# В результате работы PCA из sklearn в `dec.components_` будут лежать главные направления (нормированные), а в `dec.explained_variance_` - дисперсия, которую объясняет каждая компонента. Изобразим на нашем графике эти направления, умножив их на дисперсию для наглядного отображения их
# значимости.
# +
from sklearn.decomposition import PCA

def PCA_show(dataset):
    """Scatter-plot a 2-D dataset and overlay its principal directions.

    Each principal component (a unit vector from ``dec.components_``) is
    drawn as an arrow from the fitted mean, scaled by the variance it
    explains, so longer arrows mark more important directions.
    """
    plt.scatter(*zip(*dataset), alpha=0.5)
    dec = PCA()
    dec.fit(dataset)
    ax = plt.gca()
    for comp_ind in range(dec.components_.shape[0]):
        component = dec.components_[comp_ind, :]
        var = dec.explained_variance_[comp_ind]
        # Arrow from the data mean along the component, length ~ variance.
        start, end = dec.mean_, component * var
        ax.arrow(start[0], start[1], end[0], end[1],
                 head_width=0.2, head_length=0.4, fc='k', ec='k')
    ax.set_aspect('equal', adjustable='box')

plt.figure(figsize=(7, 7))
PCA_show(data_synth_1)
# -
# Видим, что PCA все правильно нашел. Но это, конечно, можно было сделать и просто посчитав
# дисперсию каждого признака. Повернем наши данные на некоторый фиксированный угол и проверим,
# что для PCA это ничего не изменит.
# +
# Rotation matrix for a pi/6 (30 degree) counter-clockwise rotation.
angle = np.pi / 6
rotate = np.array([
    [np.cos(angle), - np.sin(angle)],
    [np.sin(angle), np.cos(angle)],
])

# Rotate every sample point; rows of data_synth_1 are (x, y) pairs.
data_synth_2 = rotate.dot(data_synth_1.T).T

plt.figure(figsize=(7, 7))
PCA_show(data_synth_2)
# -
# Ну вот, все нормально.
#
# Ниже пара примеров, где PCA отработал не так хорошо (в том смысле, что направления задают не очень хорошие признаки).
#
# **Упражнение.** Объясните, почему так произошло.
# +
from sklearn.datasets import make_circles, make_moons, make_blobs

# Datasets where the principal directions are NOT informative features:
# concentric circles, moons, several blobs, and a correlated Gaussian.
np.random.seed(54242)
data_synth_bad = [
    make_circles(n_samples=1000, factor=0.2, noise=0.1)[0]*2,
    make_moons(n_samples=1000, noise=0.1)[0]*2,
    make_blobs(n_samples=1000, n_features=2, centers=4)[0]/5,
    np.random.multivariate_normal(
        mean=[0, 1.5],
        cov=[[3, 1],
             [1, 1]],
        size=1000),
]

plt.figure(figsize=(10, 8))
rows, cols = 2, 2
for i, data in enumerate(data_synth_bad):
    plt.subplot(rows, cols, i + 1)
    PCA_show(data)
    plt.gca().set_aspect('equal', adjustable='box')
# -
# ### Лица людей
#
# Рассмотрим датасет с фотографиями лиц людей и применим к его признакам PCA.
#
# Ниже изображены примеры лиц из базы, и последняя картинка - это "среднее лицо".
# +
from sklearn.datasets import fetch_olivetti_faces

faces = fetch_olivetti_faces(shuffle=True, random_state=432542)
faces_images = faces.data    # one flattened 64x64 face image per row
faces_ids = faces.target     # person id for each image
image_shape = (64, 64)       # used to un-flatten rows for display

# Pixel-wise average over all faces (the "mean face").
mean_face = faces_images.mean(axis=0)

plt.figure(figsize=(10, 4))
rows, cols = 2, 4
n_samples = rows * cols
# First n_samples - 1 panels: example faces; last panel: the mean face.
for i in range(n_samples - 1):
    plt.subplot(rows, cols, i + 1)
    plt.imshow(faces_images[i, :].reshape(image_shape), interpolation='none',
               cmap='gray')
    plt.xticks(())
    plt.yticks(())

plt.subplot(rows, cols, n_samples)
plt.imshow(mean_face.reshape(image_shape), interpolation='none',
           cmap='gray')
plt.xticks(())
plt.yticks(())
# -
# Теперь найдем главные компоненты
# +
red = PCA()
# NOTE: centering happens in place -- every later cell that reads
# ``faces_images`` (classification, compression) sees the centered data.
faces_images -= mean_face
red.fit(faces_images)

plt.figure(figsize=(10, 4))
rows, cols = 2, 4
n_samples = rows * cols
# Show the first n_samples principal components ("eigenfaces").
for i in range(n_samples):
    plt.subplot(rows, cols, i + 1)
    plt.imshow(red.components_[i, :].reshape(image_shape), interpolation='none',
               cmap='gray')
    plt.xticks(())
    plt.yticks(())
# -
# Получилось жутковато, что уже неплохо, но есть ли от этого какая-то польза?
#
# Во-первых, новые признаки дают более высокое качество классификации.
# +
# %%time
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

# Same grid search on raw pixels vs. PCA-transformed features, to compare
# the classification quality of the two representations.
gscv_rf = GridSearchCV(RandomForestClassifier(),
                       {'n_estimators': [100, 200, 500, 800], 'max_depth': [2, 3, 4, 5]},
                       cv=5)

gscv_rf.fit(faces_images, faces_ids)
print(gscv_rf.best_score_)

gscv_rf.fit(red.transform(faces_images), faces_ids)
print(gscv_rf.best_score_)
# -
# Во-вторых, их можно использовать для компактного хранения данных. Для этого объекты трансформируются
# в новое пространство, и из него выкидываются самые незначимые признаки.
#
# Ниже приведены результаты сжатия в 10 раз.
# +
def compress_and_show(compress_ratio):
    """Compress the (centered) faces with PCA and display reconstructions.

    :param compress_ratio: fraction of the maximum possible number of
        principal components to keep (0 < compress_ratio <= 1).
    """
    # The number of PCA components is bounded by min(n_samples, n_features).
    # Bug fix: the original referenced an undefined ``base_size`` here,
    # which raised a NameError at call time; presumably the same bound was
    # meant to serve as the base for the ratio -- confirm against intent.
    base_size = min(faces_images.shape)
    red = PCA(n_components=min(base_size, int(base_size * compress_ratio)))
    red.fit(faces_images)

    # Project to the reduced space and back; add the mean face that was
    # subtracted during centering to obtain displayable images.
    faces_compressed = red.transform(faces_images)
    faces_restored = red.inverse_transform(faces_compressed) + mean_face

    plt.figure(figsize=(10, 4))
    rows, cols = 2, 4
    n_samples = rows * cols
    for i in range(n_samples):
        plt.subplot(rows, cols, i + 1)
        plt.imshow(faces_restored[i, :].reshape(image_shape), interpolation='none',
                   cmap='gray')
        plt.xticks(())
        plt.yticks(())

compress_and_show(0.8)
# -

# Even at 20x compression the faces remain recognizable.
compress_and_show(0.05)
# ### PCA с ядрами
#
# Так как PCA фактически работает не исходными признаками, а с матрицей их ковариаций, можно
# использовать для ее вычисления вместо скалярного произведения $\langle x_i, x_j \rangle$ произвольное
# ядро $K(x_i, x_j)$. Это будет соответствовать переходу в другое пространство, в котором
# наше предположение о линейности уже будет иметь смысл. Единственная проблема - непонятно, как
# подбирать ядро.
#
# Ниже приведены примеры объектов в исходном пространстве (похожие группы обозначены одним цветом
# для наглядности), и результат их трансформации в новые пространства (для разных ядер). Если результаты
# получаются линейно разделимыми - значит мы выбрали подходящее ядро.
# +
from sklearn.decomposition import KernelPCA

def KPCA_show(X, y):
    """Plot binary-labelled 2-D data and its kernel-PCA projections.

    Top-left panel: the original data (class 0 red, class 1 blue).
    Remaining panels: the same points projected onto the first two kernel
    principal components, one panel per entry of ``kernels_params``.
    """
    reds = y == 0
    blues = y == 1

    plt.figure(figsize=(8, 8))
    rows, cols = 2, 2
    plt.subplot(rows, cols, 1)
    plt.scatter(X[reds, 0], X[reds, 1], alpha=0.5, c='r')
    plt.scatter(X[blues, 0], X[blues, 1], alpha=0.5, c='b')
    ax = plt.gca()
    ax.set_aspect('equal', adjustable='box')

    # NOTE(review): ``gamma`` presumably only affects the rbf/poly kernels
    # and is ignored for 'cosine' -- verify against the KernelPCA docs.
    kernels_params = [
        dict(kernel='rbf', gamma=10),
        dict(kernel='poly', gamma=10),
        dict(kernel='cosine', gamma=10),
    ]

    for i, p in enumerate(kernels_params):
        dec = KernelPCA(**p)
        X_transformed = dec.fit_transform(X)

        plt.subplot(rows, cols, i + 2)
        plt.scatter(X_transformed[reds, 0], X_transformed[reds, 1], alpha=0.5, c='r')
        plt.scatter(X_transformed[blues, 0], X_transformed[blues, 1], alpha=0.5, c='b')
        ax = plt.gca()
        ax.set_aspect('equal', adjustable='box')

np.random.seed(54242)
KPCA_show(*make_circles(n_samples=1000, factor=0.2, noise=0.1))
# -

np.random.seed(54242)
KPCA_show(*make_moons(n_samples=1000, noise=0.1))
|
week11_dim_reduction/class-10_pca.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Systems Biology and Medicine Practical: Flux Balance Analysis in Python
# **Authors**: <NAME>, <NAME>, <NAME> & <NAME> <br/>
# University of Amsterdam <br/>2016-2019
#
# ## Important notes before we begin
#
# - These notebooks will remain online during the course (and after).
#
# - <span style="color:red">**Note:** ONCE YOU LOG OUT (or click away the tab in your browser), or if the system crashes, YOUR WORK IS NOT SAVED!</span>
#
# ### How to save your work
# Whenever you take a break, stop for the day, or every half hour or so save your notebook to your local computer storage:
# - "File -> Download as -> Notebook".
#
# When you open this tutorial again at a later date you can upload your saved notebooks in the "Tree view" (where you end up after clicking the Binder image on the initial website). Simply click the "Upload" button in the top-right corner in Tree view and upload your saved notebook.
#
# ### Table of Contents and Assignments
# At the bottom of this page you will find the table of contents linking to all the notebooks. **Return here when you finish each one**.
#
# Through the various notebooks we have set up some assignments for you to complete. These are highlighted in red, with a time estimate for how long you should roughly spend on this, as follows:
#
# <span style="color:red">**Assignment (3 min):**</span> Read the sections below.
#
# ### Golden rule
# **<span style="color:red">Golden rule:</span>** Ask one of the teaching assistants if anything is unclear!
#
# ## FBA with Python and CobraPy
# The aim of this tutorial is to bring your conceptual understanding of constraint-based modeling (flux balance analysis or "FBA") into practice and to apply it to the human metabolic reconstruction "Recon".
#
# There are various software packages that enable the user to perform COnstraint-Based Reconstruction and Analysis (COBRA). We will use: [Cobrapy](https://opencobra.github.io/cobrapy/).
#
# <span style="color:red">**Assignment (3 min):**</span> Visit the [COBRA website](https://opencobra.github.io/), scroll down and read the "What is COBRA?" section.
#
# To perform flux balance analysis with Cobrapy you need to understand at least the basics of the [Python](https://www.python.org/) programming language.
#
# As an introduction, the first two tutorials will cover the basics of the Jupyter notebook interface and Python. If you already have experience with Python feel free to skim (not skip) the first part. Even if you already have some experience there may be some tips and tricks in the first part that will come in handy later.
#
# After the Python introduction we will introduce Cobrapy and how to do computational analysis on the human metabolic reconstruction.
#
# Happy learning!
#
#
# ## Table of Contents
# The links below will take you to various parts of this tutorial.
#
# **Tutorial Day 1**
# - [Getting to know the Jupyter notebook interface](./FBA_tutorials/0_running_code_in_notebook.ipynb) ~ 30 min
# - [Python essentials](./FBA_tutorials/1_introduction_to_python.ipynb) ~ 45 min
# - [A crash course on flux balance analysis](./FBA_tutorials/2_introduction_to_FBA_FVA_RECON2.ipynb) ~ 90 min
#
# **Tutorial Day 2**
# - [Investigating phenylketonuria on the human metabolic map](./FBA_tutorials/3_Phenylketonuria_network_illustration.ipynb) ~ 60 min
#
# **Additional material / Project: biomarker prediction using flux variability analysis**
# - [The biomarker prediction method by Shlomi et al.](./FBA_tutorials/4_biomarker_prediction_Schlomi_example.ipynb) ~ 30 min
# - [Predicting biomarkers for phenylketonuria](./FBA_tutorials/5_biomarker_prediction_PKU.ipynb) ~ 60 min
#
#
# **Additional material / Project: Metabolism of e. coli and rhodospirillum rubrum**
# - [Playing with the e. coli core model](./FBA_tutorials/extra_exploring_ecoli_core.ipynb) **Note:** this may also be performed with a core model for rhodospirillum rubrum (ask your TA!)
#
|
VU_course_tutorial_hub.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Split list of files into a test and train sample
#
# 80% of the available images (800 out of the 1000) will be used for training. This script picks 800 random images from the Training_Images folder (assuming that the folder only contains images from the 100m - 120m altitude range), and then moves all the images not selected into the Validation_Images sibling folder
# +
from os import listdir
from os.path import isfile, join
import random
import numpy as np
import shutil

# Collect every plain file in the training folder (the folder is assumed to
# contain only images -- see the markdown cell above).
filenames = [f for f in listdir('../Training_Images') if isfile(join('../Training_Images', f))]

# Keep a random sample of 800 files for training; everything not selected
# is moved to the sibling validation folder.
# (Removed the unused ``frames = None`` left over in the original cell.)
required_count = 800
training_set = random.sample(filenames, required_count)
move_files = np.setdiff1d(filenames, training_set)

for f in move_files:
    source = '../Training_Images/' + f
    destination = '../Validation_Images/' + f
    shutil.move(source, destination)
|
Create_CNN/Split_Training_Testing_Images.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + hide_input=true inputHidden=true language="html"
# <span style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;">An Exception was encountered at 'In [7]'.</span>
# + papermill={"duration": 1.792161, "end_time": "2019-08-06T21:59:57.383017", "exception": false, "start_time": "2019-08-06T21:59:55.590856", "status": "completed"} tags=[]
import sys
import pickle
from pathlib import Path
gpu_implementation_path = '/home/mt/repos/research.mtne/gpu_implementation'
if gpu_implementation_path not in sys.path:
sys.path.append(gpu_implementation_path)
import re
import numpy as np
import pandas as pd
import seaborn as sns
# %matplotlib inline
from esmt import TrainingState
main_log_dir = Path(gpu_implementation_path) / "logs"
# + papermill={"duration": 0.018913, "end_time": "2019-08-06T21:59:57.410984", "exception": false, "start_time": "2019-08-06T21:59:57.392071", "status": "completed"} tags=["parameters"]
log_session = "mtes_20190630_114103"
# + papermill={"duration": 0.019178, "end_time": "2019-08-06T21:59:57.438968", "exception": false, "start_time": "2019-08-06T21:59:57.419790", "status": "completed"} tags=["injected-parameters"]
# Parameters
log_session = "mtes_20190526_160515"
# + papermill={"duration": 5.618612, "end_time": "2019-08-06T22:00:03.066542", "exception": false, "start_time": "2019-08-06T21:59:57.447930", "status": "completed"} tags=[]
# log_session = "salvatore-0to70"
logdir = main_log_dir / log_session

# Discover which iterations have been logged by scanning the elite pickles.
iterlogfiles = {}
iterations = []
for entry in logdir.iterdir():
    fname = entry.name
    if not fname.endswith('-game1_elite.pkl'):
        continue
    # File names look like "0042-game1_elite.pkl"; the zero-padded numeric
    # prefix is the iteration number. int() already copes with leading
    # zeros, so the original strip-zeros-then-special-case-'' dance
    # (re.sub + empty-string check) is unnecessary.
    iterations.append(int(fname.split('-')[0]))
iterations.sort()
last_iteration = iterations[-1]
def get_iter_log(iteration, pickle_file):
    """Unpickle one per-iteration log artefact from the session directory."""
    path = logdir / "{:04d}-{}.pkl".format(iteration, pickle_file)
    with open(str(path), 'rb') as handle:
        return pickle.load(handle)
def get_iter_logs(iteration):
    """Load every known log artefact for ``iteration`` into a dict."""
    loadfiles = ['state', 'offsprings',
                 'game0_elite', 'game0_rewards', 'game0_episode_lengths',
                 'game1_elite', 'game1_rewards', 'game1_episode_lengths'
                 ]
    return {name: get_iter_log(iteration, name) for name in loadfiles}
def unit_vector(vector):
    """ Returns the unit vector of the vector. """
    return vector / np.linalg.norm(vector)

def angle_between(v1, v2):
    """ Returns the angle in degrees between vectors 'v1' and 'v2'::

            >>> angle_between((1, 0, 0), (0, 1, 0))
            90.0
            >>> angle_between((1, 0, 0), (1, 0, 0))
            0.0
            >>> angle_between((1, 0, 0), (-1, 0, 0))
            180.0

    Doc fix: the original docstring and doctests claimed radians, but the
    code converts with np.rad2deg and therefore returns degrees.
    The cosine is clipped to [-1, 1] to guard against floating-point drift
    before taking the arccos.
    """
    v1_u = unit_vector(v1)
    v2_u = unit_vector(v2)
    return np.rad2deg(np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)))
def compute_ranks(x):
    """
    Returns ranks in [0, len(x))
    Note: This is different from scipy.stats.rankdata, which returns ranks in [1, len(x)].
    """
    assert x.ndim == 1
    order = x.argsort()
    ranks = np.empty(len(x), dtype=int)
    ranks[order] = np.arange(len(x))
    return ranks

def compute_centered_ranks(x):
    """Rank-transform ``x`` (any shape) and rescale the ranks to [-0.5, 0.5]."""
    flat_ranks = compute_ranks(x.ravel()).astype(np.float32)
    centered = flat_ranks / (x.size - 1) - .5
    return centered.reshape(x.shape)
# Mean population/elite reward per game for every logged iteration, one row each.
rewards_df = pd.DataFrame(columns=['game0_rewards', 'game1_rewards', 'game0_elite', 'game1_elite', 'iteration'])
for i in iterations:
    # print("Loading iteration {}".format(i))
    df = {
        'game0_rewards': [np.mean(get_iter_log(i, 'game0_rewards'))],
        'game1_rewards': [np.mean(get_iter_log(i, 'game1_rewards'))],
        'game0_elite': [np.mean(get_iter_log(i, 'game0_elite'))],
        'game1_elite': [np.mean(get_iter_log(i, 'game1_elite'))]
    }
    df['iteration'] = [i]
    rdf = pd.DataFrame.from_dict(df)
    # PERF NOTE: pd.concat inside the loop copies the accumulated frame on
    # every iteration (quadratic); collecting the per-iteration frames in a
    # list and concatenating once after the loop would be linear.
    rewards_df = pd.concat([rewards_df, rdf], sort=True)
print("Last iteration: {}".format(max(iterations)))
# + papermill={"duration": 0.038426, "end_time": "2019-08-06T22:00:03.114383", "exception": false, "start_time": "2019-08-06T22:00:03.075957", "status": "completed"} tags=[]
def get_config_body(logdir):
    """Return the configuration header of a session's ``log.txt``.

    The log file starts with the run configuration followed by a line
    containing " Logging to: "; everything before that marker is returned.
    Bug fix: if the marker is missing, str.find returns -1 and the original
    ``data[0:-1]`` silently dropped the last character -- now the whole
    file is returned instead.
    """
    with open(str(logdir / "log.txt"), "r") as f:
        data = f.read()
    marker = data.find(" Logging to: ")
    if marker == -1:
        return data
    return data[:marker]
print(get_config_body(logdir))
# + papermill={"duration": 0.47045, "end_time": "2019-08-06T22:00:03.594063", "exception": false, "start_time": "2019-08-06T22:00:03.123613", "status": "completed"} tags=[]
sns.set(rc={'figure.figsize':(20, 10)})
rewards_df.set_index('iteration').plot().set(ylabel='Game score')
# + papermill={"duration": 0.172664, "end_time": "2019-08-06T22:00:03.779538", "exception": true, "start_time": "2019-08-06T22:00:03.606874", "status": "failed"} tags=[]
# Per-episode lengths for both games, one row per episode, tagged by iteration.
eplens_df = pd.DataFrame(columns=[
    'game0_episode_lengths',
    'game1_episode_lengths',
    'iteration'
])
# NOTE(review): this iterates range(last_iteration), which assumes the
# iteration numbers parsed from the log files are contiguous from 0;
# ``iterations`` itself may be sparse -- confirm before relying on it.
for i in range(last_iteration):
    # print("Loading iteration {}".format(i))
    df = {
        'game0_episode_lengths': np.array([l for l in get_iter_log(i, 'game0_episode_lengths')]).flatten(),
        'game1_episode_lengths': np.array([l for l in get_iter_log(i, 'game1_episode_lengths')]).flatten(),
    }
    # Tag every episode row with its iteration number.
    df['iteration'] = [i] * df['game0_episode_lengths'].shape[0]
    edf = pd.DataFrame.from_dict(df)
    # PERF NOTE: pd.concat inside the loop is quadratic; a list of frames
    # concatenated once after the loop would be linear.
    eplens_df = pd.concat([eplens_df, edf], sort=True)
print("Last iteration: {}".format(max(iterations)))

# Long-form table for seaborn: one (iteration, variable, value) row per episode.
m = pd.melt(eplens_df, id_vars=['iteration'], value_vars=['game0_episode_lengths', 'game1_episode_lengths'])
m['value'] = m['value'].astype('float32')
m['variable'] = m['variable'].astype('category')
m = m.query('iteration >= 0')
sns.set(rc={'figure.figsize':(20, 20)})
sns.boxplot(x='iteration', y='value', hue='variable', data=m)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# Episode length vs. reward for every evaluation, both games, all iterations.
reward_eplen = pd.DataFrame(columns=['eplen', 'reward', 'iteration'])
for i in range(last_iteration):
    df = {
        'eplen': np.array(get_iter_log(i, 'game0_episode_lengths')).flatten(),
        'reward': np.array(get_iter_log(i, 'game0_rewards')).flatten()
    }
    edf0 = pd.DataFrame.from_dict(df)
    edf0['game'] = [0] * edf0['eplen'].shape[0]
    edf0['iteration'] = [i] * edf0['eplen'].shape[0]

    df = {
        'eplen': np.array(get_iter_log(i, 'game1_episode_lengths')).flatten(),
        'reward': np.array(get_iter_log(i, 'game1_rewards')).flatten()
    }
    edf1 = pd.DataFrame.from_dict(df)
    edf1['game'] = [1] * edf1['eplen'].shape[0]
    # NOTE(review): game 0 stores ``iteration`` as int while game 1 stores
    # float(i) -- looks unintentional; confirm before depending on dtype.
    edf1['iteration'] = [float(i)] * edf1['eplen'].shape[0]

    reward_eplen = pd.concat([reward_eplen, edf0, edf1], sort=True)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
sns.set(rc={'figure.figsize':(20, 10)})
sns.scatterplot(x='eplen', y='reward', hue='iteration', data=reward_eplen.query('game == 0')).set(title='Game 0 episode length vs reward')
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
sns.set(rc={'figure.figsize':(20, 10)})
sns.scatterplot(x='eplen', y='reward', hue='iteration', data=reward_eplen.query('game == 1')).set(title='Game 1 episode length vs reward')
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# thetas = pd.DataFrame(columns=['iteration', 'theta'])
# for iteration in iterations:
# print("Loading iteration {}".format(iteration))
# thetas = pd.concat(
# [thetas,
# pd.DataFrame.from_dict({
# 'iteration': [iteration] * 1008450,
# 'theta': np.array(get_iter_log(iteration, 'state').theta)
# })
# ]
# )
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# # %time
# theta_deltas = []
# for ti in iterations[1:]:
# print("Calculating {}".format(ti))
# tc = thetas[thetas['iteration'] == ti]['theta'].values
# tp = thetas[thetas['iteration'] == ti-1]['theta'].values
# td = np.linalg.norm(tc - tp)
# theta_deltas.append(td)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# pd.DataFrame(theta_deltas).plot()
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# # %time
# theta_deltas_from0 = []
# t0 = thetas[thetas['iteration'] == 0]['theta'].values
# for ti in iterations[1:-1]:
# print("Calculating {}".format(ti))
# tc = thetas[thetas['iteration'] == ti]['theta'].values
# td = np.linalg.norm(tc - t0)
# theta_deltas_from0.append(td)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# pd.DataFrame(theta_deltas).plot()
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# sns.set(rc={'figure.figsize':(20, 10)})
# pd.DataFrame(theta_deltas_from0).plot()
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# ms = pd.DataFrame(columns=['iteration', 'game0_mean', 'game0_std', 'game1_mean', 'game1_std'])
# for iteration in iterations:
# game0_rewards = np.array(get_iter_log(iteration, 'game0_rewards'))
# game1_rewards = np.array(get_iter_log(iteration, 'game1_rewards'))
# game0_centered_ranks = compute_centered_ranks(game0_rewards)
# game1_centered_ranks = compute_centered_ranks(game1_rewards)
# game0_centered = np.abs(game0_centered_ranks[:, 0] - game0_centered_ranks[:, 1])
# game1_centered = np.abs(game1_centered_ranks[:, 0] - game1_centered_ranks[:, 1])
# game0_centered_top = np.sort(game0_centered)[-10:]
# game1_centered_top = np.sort(game1_centered)[-10:]
# m0, s0 = game0_centered_top.mean(), game0_centered_top.std()
# m1, s1 = game1_centered_top.mean(), game1_centered_top.std()
# d = {'iteration': iteration, 'game0_mean': [m0], 'game0_std': [s0], 'game1_mean': [m1], 'game1_std': [s1]}
# ms = pd.concat([ms, pd.DataFrame.from_dict(d)], sort=True)
# sns.set(rc={'figure.figsize':(20, 10)})
# ms.set_index('iteration').plot()
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
|
gpu_implementation/analysis/mtes_20190526_160515.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''opt'': conda)'
# language: python
# name: python37664bitoptconda7682a877a7f444b3afc1b0ca847868c9
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from itertools import permutations, product, combinations
from scipy.stats import pearsonr, spearmanr
from sklearn.preprocessing import StandardScaler
# -
# # Settings
# Which model-extraction algorithm's reaction matrix to analyse; exactly one
# of the options below should be left uncommented.
#analysis = "Fastcore"
#analysis = "gimme"
#analysis = "iMAT"
#analysis = "init"
analysis = "tinit"
# ## Read the data
# Fastcore output was written with a different delimiter (and an index
# column) than the other algorithms' matrices.
# NOTE(review): the "data\\" separator makes these paths Windows-only.
if analysis == "Fastcore":
    df = pd.read_csv("data\\"+analysis+"_rxnMatrix.csv", sep=",")
    df = df.drop(columns='Unnamed: 0')
else:
    df = pd.read_csv("data\\"+analysis+"_rxnMatrix.csv", sep=";")

# Reaction x model matrix, transposed to models x reactions.
M = np.transpose(df.values[:,1:])
models = list(df.columns[1:])
# Column headers look like "<prefix>_<GSM id>.<ext>"; keep only the GSM id.
models = list(map(lambda x: x.split("_")[1].split(".")[0], models))
# ### Grouping by genotype
# WT
genotype0 = ["GSM1405493","GSM1405505","GSM1405517",
"GSM1405489","GSM1405501","GSM1405513",
"GSM1405485","GSM1405497","GSM1405509",
"GSM1405494","GSM1405506","GSM1405518",
"GSM1405490","GSM1405502","GSM1405514",
"GSM1405486","GSM1405498","GSM1405510"]
# KO
genotype1 = ["GSM1405495","GSM1405507","GSM1405519",
"GSM1405491","GSM1405503","GSM1405515",
"GSM1405487","GSM1405499","GSM1405511",
"GSM1405496","GSM1405508","GSM1405520",
"GSM1405492","GSM1405504","GSM1405516",
"GSM1405488","GSM1405500","GSM1405512"]
genotype = (genotype0, genotype1)
# ### Grouping by diet
# +
# LFnC
diet0 = ["GSM1405485","GSM1405497","GSM1405509","GSM1405487","GSM1405499","GSM1405511",
"GSM1405486","GSM1405498","GSM1405510","GSM1405488","GSM1405500","GSM1405512"]
# HFnC
diet1 = ["GSM1405489","GSM1405501","GSM1405513","GSM1405491","GSM1405503","GSM1405515",
"GSM1405490","GSM1405502","GSM1405514","GSM1405492","GSM1405504","GSM1405516"]
# HFC
diet2 = ["GSM1405493","GSM1405505","GSM1405517","GSM1405495","GSM1405507","GSM1405519",
"GSM1405494","GSM1405506","GSM1405518","GSM1405496","GSM1405508","GSM1405520"]
diet = (diet0, diet1, diet2)
# -
# ### Grouping by gender
# +
# F
gender0 = ["GSM1405493","GSM1405505","GSM1405517",
"GSM1405489","GSM1405501","GSM1405513",
"GSM1405485","GSM1405497","GSM1405509",
"GSM1405495","GSM1405507","GSM1405519",
"GSM1405491","GSM1405503","GSM1405515",
"GSM1405487","GSM1405499","GSM1405511"]
# M
gender1 = ["GSM1405494","GSM1405506","GSM1405518",
"GSM1405490","GSM1405502","GSM1405514",
"GSM1405486","GSM1405498","GSM1405510",
"GSM1405496","GSM1405508","GSM1405520",
"GSM1405492","GSM1405504","GSM1405516",
"GSM1405488","GSM1405500","GSM1405512"]
gender = (gender0, gender1)
# -
groups = {"genotype": genotype, "diet": diet, "gender": gender}
labels = {"genotype": ("WT","KO"), "diet": ("LFnC", "HFnC", "HFC"), "gender": ("F","M")}
# ## Generate data
# +
"""
models = ["M1", "M2", "M3", "M4", "M5", "M6"]
groups = {
"gender": (["M1", "M2", "M3"], ["M4", "M5", "M6"]),#, "M7", "M8"]),
"phenotype": (["M1", "M4"], ["M2", "M5"], ["M3", "M6"]),
"diet": (["M1", "M5"], ["M2", "M6"], ["M3", "M4"])}
"""
"""
# number of fictional reactions
n_R = 100
"""
#M = np.random.randint(2, size=(len(models), n_R))
"""
# Let's say that reactions are dependent only on gender
M[np.isin(models, groups['gender'][0]),:n_R//2] = 1
M[np.isin(models, groups['gender'][0]),n_R//2:] = 0
M[np.isin(models, groups['gender'][1]),:n_R//2] = 0
M[np.isin(models, groups['gender'][1]),n_R//2:] = 1
"""
#M[np.isin(models, groups['phenotype'][0]),:] = 1
#M[np.isin(models, groups['phenotype'][1]),:] = 0
# -
# ## Preprocessing
# Remove the reactions that are always 0 or always 1
# Drop reactions present in no model or in every model -- they carry no
# information for separating the models.
M = M[:,~np.all(M==0, axis=0)]
M = M[:,~np.all(M==1, axis=0)]
M = np.array(M, dtype=float)
# Zero centering the rows
# NOTE(review): StandardScaler presumably centers each column (reaction),
# not each row (model) -- verify the comment above against intent.
M = StandardScaler(with_mean=True, with_std=False).fit_transform(M) #centering only
# ## PCA
pca = PCA(n_components=3)
comps = pca.fit_transform(M)

# Scores of every model on the first three principal components.
df = pd.DataFrame(data = comps, columns = ['PC1', 'PC2', 'PC3'])
df['model'] = models
#df = df.set_index('model')

# Explained-variance summary table. Fix: DataFrame.append was deprecated
# and removed in pandas 2.0; build the frame with a single pd.concat.
df_pca = pd.concat([
    pd.DataFrame(pca.explained_variance_).T,
    pd.DataFrame(pca.explained_variance_ratio_).T,
])
df_pca.columns = ['PC1', 'PC2', 'PC3']
df_pca['label'] = ['explained variance', 'explained variance ratio']
df_pca = df_pca.set_index('label')
df_pca.to_csv("results_PCA\\"+analysis+"_explained_variance.csv")

pca_explained = pca.explained_variance_ratio_
# For every pair of the three principal components, draw one scatter plot per
# grouping factor (genotype / diet / gender): models are coloured by
# subgroup and labelled with their GSM id.
for c in combinations(range(3), 2):
    i1 = c[0]
    i2 = c[1]
    for group in groups:
        for subgroup, label in zip(groups[group], labels[group]):
            locs = np.isin(models, subgroup)
            plt.plot(comps[locs,i1], comps[locs,i2],"o", label=label)
        for model, x, y in zip(models, comps[:,i1], comps[:,i2]):
            #plt.text(x,y,model[6:])
            for subgroup in groups[group]:
                if model in subgroup:
                    # Slight offset so the label does not cover the marker.
                    plt.text(x+0.05,y+0.05,model)
        plt.title(group + " (" + analysis+")")
        # Axis labels include the share of variance each PC explains.
        plt.xlabel("PC"+str(i1+1) + " (" + str(round(100*pca_explained[i1],2))+"%)")
        plt.ylabel("PC"+str(i2+1) + " (" + str(round(100*pca_explained[i2],2))+"%)")
        plt.legend()
        plt.gcf().set_size_inches(20,10)
        plt.savefig("figures_PCA\\"+analysis+"_PC"+str(i1+1)+'_'+"PC"+str(i2+1)+'_'+group+".pdf", format="pdf", bbox_inches = 'tight')
        plt.savefig("figures_PCA\\"+analysis+"_PC"+str(i1+1)+'_'+"PC"+str(i2+1)+'_'+group+".png", format="png", bbox_inches = 'tight')
        plt.show()
groups
# # Sort and correlate (faster)
# +
# For each grouping factor, measure how well each principal component
# separates the subgroups: PC scores within each subgroup are sorted, the
# subgroup blocks are concatenated in every possible order, and the best
# (largest absolute) correlation of that sequence with its index is kept.
factors = list(groups.keys())
Rs = np.zeros((len(factors), 3))      # best |Pearson r| per factor x PC
rhos = np.zeros((len(factors), 3))    # best |Spearman rho| per factor x PC
for ii, factor in enumerate(groups):
    scores1 = []
    scores2 = []
    scores3 = []
    for i in range(len(groups[factor])):
        idxs = np.array(np.where(np.isin(models, groups[factor][i])==True)).flatten()
        scores1.append(sorted(df.iloc[idxs, 0].values))
        scores2.append(sorted(df.iloc[idxs, 1].values))
        scores3.append(sorted(df.iloc[idxs, 2].values))
    # Try every ordering of the subgroup blocks and keep the strongest
    # monotone trend found for each principal component.
    for idx in permutations(range(len(scores1))):
        s1 = []
        s2 = []
        s3 = []
        for i in idx:
            s1 += scores1[i]
            s2 += scores2[i]
            s3 += scores3[i]
        R_PC1 = pearsonr(np.arange(len(s1)), s1)[0]
        R_PC2 = pearsonr(np.arange(len(s2)), s2)[0]
        R_PC3 = pearsonr(np.arange(len(s3)), s3)[0]
        rho_PC1 = spearmanr(np.arange(len(s1)), s1)[0]
        rho_PC2 = spearmanr(np.arange(len(s2)), s2)[0]
        rho_PC3 = spearmanr(np.arange(len(s3)), s3)[0]
        Rs[ii, 0] = max(Rs[ii, 0], abs(R_PC1))
        Rs[ii, 1] = max(Rs[ii, 1], abs(R_PC2))
        Rs[ii, 2] = max(Rs[ii, 2], abs(R_PC3))
        rhos[ii, 0] = max(rhos[ii, 0], abs(rho_PC1))
        rhos[ii, 1] = max(rhos[ii, 1], abs(rho_PC2))
        rhos[ii, 2] = max(rhos[ii, 2], abs(rho_PC3))
# -
# ## Postprocessing
# ### Pearson
Rs2 = Rs ** 2 # coefficient of determination
#Rs = Rs / np.sum(Rs, axis=0) # conversion to percentages
#Rs2 = Rs2 / np.sum(Rs2, axis=0) # conversion to percentages
# +
#df_R = pd.DataFrame(data = Rs, columns = ['R(PC1)', 'R(PC2)', 'R(PC3)'])
#df_R['factor'] = groups.keys()
#df_R2 = pd.DataFrame(data = np.column_stack((Rs2,abs_Rs2)), columns = ['R2(PC1) [%]', 'R2(PC2) [%]', 'R2(PC3) [%]','R2(PC1)', 'R2(PC2)', 'R2(PC3)'])
# One row per factor; columns = share of PC1-3 variability explained (R^2).
df_R2 = pd.DataFrame(data = Rs2, columns = ['R2(PC1)', 'R2(PC2)', 'R2(PC3)'])
df_R2['factor'] = groups.keys()
#df_R = pd.merge(df_R, df_R2)
df_R2=df_R2.set_index('factor')
# -
# NOTE(review): Windows-only path separator; consider os.path.join / pathlib.
df_R2.to_csv("results_PCA\\"+analysis+"_pearson_variability_explained.csv")
# ### Spearman
rhos2 = rhos ** 2 # coefficient of determination
#rhos = rhos / np.sum(rhos, axis=0) # conversion to percentages
#rhos2 = abs_rhos2 / np.sum(abs_rhos2, axis=0) # conversion to percentages
# +
#df_rho = pd.DataFrame(data = rhos, columns = ['Rho(PC1)', 'Rho(PC2)', 'Rho(PC3)'])
#df_rho['factor'] = groups.keys()
#df_rho2 = pd.DataFrame(data = np.column_stack((rhos2,abs_rhos2)), columns = ['Rho2(PC1) [%]', 'Rho2(PC2) [%]', 'Rho2(PC3) [%]','Rho2(PC1)', 'Rho2(PC2)', 'Rho2(PC3)'])
# One row per factor; columns = share of PC1-3 variability explained (rho^2).
df_rho2 = pd.DataFrame(data = rhos2, columns = ['Rho2(PC1)', 'Rho2(PC2)', 'Rho2(PC3)'])
df_rho2['factor'] = groups.keys()
#df_rho = pd.merge(df_rho, df_rho2)
df_rho2=df_rho2.set_index('factor')
# -
df_rho2
# NOTE(review): Windows-only path separator; consider os.path.join / pathlib.
df_rho2.to_csv("results_PCA\\"+analysis+"_spearman_variability_explained.csv")
|
01_1_PCA.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Basic Machine Learning Models
# * Author : Harrytsz
# * Github : https://github.com/harrytsz/Deep_Learning_Models
# ## 1. Logistic Regression
# You can get the MNIST dataset from: http://yann.lecun.com/exdb/mnist/
# * Training set images: train-images-idx3-ubyte.gz (9.9 MB, 60,000)
# * Training set labels: train-labels-idx1-ubyte.gz (29 KB, 60,000)
# * Test set images: t10k-images-idx3-ubyte.gz (1.6 MB, 10,000)
# * Test set labels: t10k-labels-idx1-ubyte.gz (5KB, 10,000)
# **Imports:**
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
# **Hyper Parameters:**
input_size = 784       # 28x28 MNIST images, flattened
num_classes = 10       # digits 0-9
num_epochs = 5         # passes over the training set
batch_size = 100       # samples per mini-batch
learning_rate = 1e-3   # SGD step size
# ## Download MNIST Data
# **Get Dataset:**
# +
# train (bool, optional): If True, creates dataset from ``training.pt``,otherwise from ``test.pt``
# train=True selects the 60k training split; ToTensor scales pixels to [0, 1].
train_dataset = torchvision.datasets.MNIST(root='./data/',train=True,transform=transforms.ToTensor(),download=True)
# Test split (10k images); relies on the files downloaded by the call above.
test_dataset = torchvision.datasets.MNIST(root='./data/',train=False,transform=transforms.ToTensor())
# -
# **DataLoader:**
# torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, sampler=None,
# batch_sampler=None, num_workers=0, collate_fn=<function default_collate>,
# pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None)
# **Parameters:**
#
# * dataset (Dataset) – dataset from which to load the data.
#
# * batch_size (int, optional) – how many samples per batch to load (default: 1).
#
# * shuffle (bool, optional) – set to True to have the data reshuffled at every epoch (default: False).
#
# * sampler (Sampler, optional) – defines the strategy to draw samples from the dataset. If specified, shuffle must be False.
# * batch_sampler (Sampler, optional) – like sampler, but returns a batch of indices at a time. Mutually exclusive with batch_size, shuffle, sampler, and drop_last.
# * num_workers (int, optional) – how many subprocesses to use for data loading. 0 means that the data will be loaded in the main process. (default: 0)
# * collate_fn (callable, optional) – merges a list of samples to form a mini-batch.
# * pin_memory (bool, optional) – If True, the data loader will copy tensors into CUDA pinned memory before returning them.
# * drop_last (bool, optional) – set to True to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If False and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: False)
# * timeout (numeric, optional) – if positive, the timeout value for collecting a batch from workers. Should always be non-negative. (default: 0)
# * worker_init_fn (callable, optional) – If not None, this will be called on each worker subprocess with the worker id (an int in [0, num_workers - 1]) as input, after seeding and before data loading. (default: None)
#
# **Data Loader:**
# +
# Shuffle training batches every epoch; keep test order deterministic.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size, shuffle=False)
#----------------------------#
# Peek at every 100th batch to sanity-check shapes (expected: 100x1x28x28).
for i, (images, labels) in enumerate(train_loader):
    if (i % 100 == 0):
        print(images.size(), labels)
print("#----------------------------------------------------------------#")
print("Train_Loader Type: ", type(train_loader))
# -
# ## CrossEntropyLoss Introduction:
# torch.nn.CrossEntropyLoss(weight=None, size_average=None, ignore_index=-100, reduce=None, reduction='mean')
#
# * This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class.
# * It is useful when training a classification problem with C classes.
# * The input is expected to contain scores for each class.
# * This criterion expects a class index (0 to C-1) as the target for each value of a 1D tensor of size minibatch
#
# **公式:**
# $$loss(x, class) = - log(\frac{exp(x[class])}{\sum_{j}{exp(x[j])}}) = -x[class] + log(\sum_{j}{exp(x[j])})$$
# * x[class] x[\text {class}]x[class]:给实际类的打分
# * x 的形状:(N, C)where C = number of classes
# * class 的形状:(N) where each value is 0 ≤ targets[i]≤C−10≤targets[i]≤C−1.
# ## Define and Training the Logistic Regression Model
# +
# Define Logistic Regression Model
class LR(nn.Module):
    """Multinomial logistic regression: a single affine layer, raw logits out.

    The softmax is deliberately omitted from ``forward`` because
    ``nn.CrossEntropyLoss`` applies log-softmax internally.
    """

    def __init__(self, input_dims, output_dims):
        super().__init__()
        # One fully-connected layer mapping input features to class scores.
        self.linear = nn.Linear(input_dims, output_dims, bias=True)

    def forward(self, x):
        return self.linear(x)
# Instantiate the model: 784 pixel inputs -> 10 class logits.
LR_Model = LR(input_size, num_classes)
# Define the loss function of Logistic Regression
# (CrossEntropyLoss = LogSoftmax + NLLLoss, averaged over the batch).
criterion = nn.CrossEntropyLoss(reduction='mean')
# Define Optimizer
optimizer = torch.optim.SGD(LR_Model.parameters(), lr=learning_rate)
# Training the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Reshape the image batch to (batch_size, input_size), i.e. (100, 784).
        images = images.reshape(-1, 28*28)
        # Forward
        y_pred = LR_Model(images)
        loss = criterion(y_pred, labels)
        # Backward: clear stale gradients, backprop, then take one SGD step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i % 100 == 0):
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# -
# ## Test The Model
# 在测试阶段,为了运行内存效率,就不需要计算梯度了
# pytorch 默认每一次前向传播都会计算梯度
# Evaluation: gradients are not needed, so disable autograd tracking to
# save memory (PyTorch records the graph for every forward pass by default).
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28*28)
        outputs = LR_Model(images)
        # torch.max returns (max_values, max_indices); the indices along
        # dim=1 are the predicted classes. Use `_` for the unused values
        # instead of shadowing the builtin `max` as the original did.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        # .item() converts the 0-dim tensor to a plain Python int so the
        # accuracy below prints as a number, not a tensor repr.
        correct += (predicted == labels).sum().item()
    print('Accuracy of the model on the 10000 test images: {} %.'.format(100*correct/total))
# ## Save The Model
torch.save(LR_Model.state_dict(), 'model.ckpt')
|
Basic_Machine_Learning_Models/Basic Machine Learning Models.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LeetCode #1021. Remove Outermost Parentheses
#
# ## Question
#
# https://leetcode.com/problems/remove-outermost-parentheses/
#
# A valid parentheses string is either empty (""), "(" + A + ")", or A + B, where A and B are valid parentheses strings, and + represents string concatenation. For example, "", "()", "(())()", and "(()(()))" are all valid parentheses strings.
#
# A valid parentheses string S is primitive if it is nonempty, and there does not exist a way to split it into S = A+B, with A and B nonempty valid parentheses strings.
#
# Given a valid parentheses string S, consider its primitive decomposition: S = P_1 + P_2 + ... + P_k, where P_i are primitive valid parentheses strings.
#
# Return S after removing the outermost parentheses of every primitive string in the primitive decomposition of S.
#
# Example 1:
#
# Input: "(()())(())"
# Output: "()()()"
#
# Explanation:
#
# The input string is "(()())(())", with primitive decomposition "(()())" + "(())".
# After removing outer parentheses of each part, this is "()()" + "()" = "()()()".
#
# Example 2:
#
# Input: "(()())(())(()(()))"
# Output: "()()()()(())"
#
# Explanation:
#
# The input string is "(()())(())(()(()))", with primitive decomposition "(()())" + "(())" + "(()(()))".
# After removing outer parentheses of each part, this is "()()" + "()" + "()(())" = "()()()()(())".
#
# Example 3:
#
# Input: "()()"
# Output: ""
# Explanation:
# The input string is "()()", with primitive decomposition "()" + "()".
# After removing outer parentheses of each part, this is "" + "" = "".
#
# Note:
#
# S.length <= 10000
# S[i] is "(" or ")"
# S is a valid parentheses string
# ## My Solution
def removeOuterParentheses(S):
    """Strip the outermost parentheses from each primitive part of S.

    Scans once, tracking the nesting depth: an opening paren is kept
    only when it is not the opener of a primitive (depth > 0 before it),
    and a closing paren only when it is not the closer (depth > 0 after
    decrementing).
    """
    kept = []
    depth = 0
    for ch in S:
        if ch == '(':
            if depth > 0:        # not the outermost opener
                kept.append(ch)
            depth += 1
        else:                    # ch is ')' for a valid parentheses string
            depth -= 1
            if depth > 0:        # not the outermost closer
                kept.append(ch)
    return ''.join(kept)
# Sanity check against Example 1 (expected output: "()()()").
S = "(()())(())"
removeOuterParentheses(S)
# ## My Result
#
# __Runtime__ : 32 ms, faster than 64.74% of Python online submissions for Remove Outermost Parentheses.
#
# __Memory Usage__ : 12.1 MB, less than 38.46% of Python online submissions for Remove Outermost Parentheses.
|
LeetCode/LeetCode_1021RemoveOutermostParentheses.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# gradient descent
# derivatives = change in value with change in arguments (indicates min, max, saddle).
# Interested in direction of fastest decrease in cost function (-nablaC)
# +
import numpy as np
def gradient_descent(gradient, start, learn_rate, epoch):
    """Minimise a function by plain (fixed-step) gradient descent.

    Parameters
    ----------
    gradient : callable
        Returns the gradient of the objective at a given point.
    start : float or numpy array
        Initial guess. Left unmodified; the update is out-of-place.
    learn_rate : float
        Step-size multiplier.
    epoch : int
        Number of update steps to take.

    Returns
    -------
    The point reached after ``epoch`` steps.
    """
    vector = start
    for _ in range(epoch):
        # Out-of-place update: the original `vector += diff` mutated the
        # caller's array in place whenever `start` was a numpy ndarray.
        vector = vector - learn_rate * gradient(vector)
    return vector
# -
# Converges toward the minimiser of v**2 at 0: 10 * (1 - 2*0.2)**10 ~ 0.0605.
gradient_descent(gradient=lambda v: 2 * v, start=10.0, learn_rate=0.2, epoch = 10)
# +
import matplotlib.pyplot as plt
import numpy as np
def graph(formula, x_range):
    """Plot ``formula`` (a callable) evaluated elementwise over ``x_range``."""
    x = np.array(x_range)
    #^ use x as range variable
    y = formula(x)
    #^ ^call the lambda expression with x
    #| use y as function result
    plt.plot(x,y)
    plt.show()
graph(lambda x : (2 * x), range(-10, 10))
# ^use a lambda expression
# +
#concerned with the problem of finding the rate of change of a function with respect to the variable on which it depends
# +
import numpy as np
from sklearn.metrics import mean_squared_error
def gradient_descent(X, y, learningrate = 0.05, epoch = 10):
    '''
    Fit a straight line y = m*x + b by full-batch gradient descent.

    Parameters
    ----------
    X, y : numpy arrays of equal length
        Training inputs and targets.
    learningrate : float
        Step size applied to both parameter updates.
    epoch : int
        Number of full-batch update steps.

    Returns
    -------
    m, b : float
        Fitted slope and intercept.
    log : list of (m, b) tuples
        Parameter values after each epoch.
    mse : list of float
        Mean squared error after each epoch.
    '''
    m, b = 0.33, 0.48  # initial parameters
    log, mse = [], []  # lists to store learning process
    N = len(X)         # number of samples
    for _ in range(epoch):
        f = y - (m*X + b)  # residuals
        # Gradient-descent updates for m and b. Bug fix: the original
        # referenced an undefined name `lr`; the parameter is `learningrate`.
        m -= learningrate * (-2 * X.dot(f).sum() / N)
        b -= learningrate * (-2 * f.sum() / N)
        log.append((m, b))
        # Numerically identical to sklearn.metrics.mean_squared_error(y, m*X + b),
        # computed with numpy so the function has no sklearn dependency.
        mse.append(np.mean((y - (m*X + b)) ** 2))
    return m, b, log, mse
|
gradient descent.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **NOTE:** This Notebook is downloaded from Kaggle and is therefore intended to be used as a Kaggle Kernel
# + [markdown] papermill={"duration": 0.012076, "end_time": "2021-10-12T05:35:25.457297", "exception": false, "start_time": "2021-10-12T05:35:25.445221", "status": "completed"} tags=[]
# # 📦 Packages and Basic Setup
# + _kg_hide-input=true _kg_hide-output=true papermill={"duration": 29.173356, "end_time": "2021-10-12T05:35:54.641630", "exception": false, "start_time": "2021-10-12T05:35:25.468274", "status": "completed"} tags=[]
# %%capture
# -------- Basic Packages -------- #
import os
import gc
import sys
gc.enable()
import math
import time
import torch
import numpy as np
import pandas as pd
from sklearn import model_selection
# !pip install --upgrade -q transformers tokenizers
# -------- Output Prettification ✨ -------- #
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from transformers import logging
# NOTE(review): the second call wins here — only errors are shown.
logging.set_verbosity_warning()
logging.set_verbosity_error()
# -------- Custom Library -------- #
# Make the bundled "coffee" helper package importable.
wrapperdir = "../input/d/sauravmaheshkar/coffee"
sys.path.append(wrapperdir)
# -------- Weights and Biases Setup -------- #
import wandb
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
# Pull the W&B API key from Kaggle secrets so it is never hard-coded.
api_key = user_secrets.get_secret("WANDB_API_KEY")
wandb.login(key=api_key);
# + [markdown] papermill={"duration": 0.009388, "end_time": "2021-10-12T05:35:54.661529", "exception": false, "start_time": "2021-10-12T05:35:54.652141", "status": "completed"} tags=[]
# # 📃 Configuration
# + _kg_hide-input=false _kg_hide-output=false papermill={"duration": 0.020254, "end_time": "2021-10-12T05:35:54.691333", "exception": false, "start_time": "2021-10-12T05:35:54.671079", "status": "completed"} tags=[]
# Single source of truth for the run: model, tokenizer, training,
# optimizer, scheduler, and Weights & Biases settings.
CONFIG = dict(
    # Model
    model_type = 'rembert',
    model_name_or_path = "google/rembert",
    config_name = "google/rembert",
    output_head_dropout_prob = 0.0,
    gradient_accumulation_steps = 2,
    # Tokenizer
    tokenizer_name = "google/rembert",
    max_seq_length = 400,
    doc_stride = 135,  # overlap window when splitting long contexts
    # Training
    epochs = 1,
    folds = 4,
    train_batch_size = 2,
    eval_batch_size = 8,
    # Optimizer
    optimizer_type = 'AdamW',
    learning_rate = 1.5e-5,
    weight_decay = 1e-2,
    epsilon = 1e-8,
    max_grad_norm = 1.0,
    # Scheduler
    decay_name = 'cosine-warmup',
    warmup_ratio = 0.1,  # fraction of total steps used for LR warmup
    logging_steps = 100,
    # Misc
    output_dir = 'output',
    seed = 21,
    # W&B
    competition = 'chaii',
    _wandb_kernel = 'sauravm'
)
# + [markdown] papermill={"duration": 0.009222, "end_time": "2021-10-12T05:35:54.710285", "exception": false, "start_time": "2021-10-12T05:35:54.701063", "status": "completed"} tags=[]
# # 💿 Dataset
# + _kg_hide-input=true _kg_hide-output=true papermill={"duration": 1.328287, "end_time": "2021-10-12T05:35:56.047834", "exception": false, "start_time": "2021-10-12T05:35:54.719547", "status": "completed"} tags=[]
# Official chaii train/test splits plus external Hindi QA data
# (MLQA + XQuAD) used to augment training.
train = pd.read_csv('../input/d/sauravmaheshkar/coffee/data/official_data/train.csv')
test = pd.read_csv('../input/d/sauravmaheshkar/coffee/data/official_data/test.csv')
external_mlqa = pd.read_csv('../input/d/sauravmaheshkar/coffee/data/external_data/mlqa_hindi.csv')
external_xquad = pd.read_csv('../input/d/sauravmaheshkar/coffee/data/external_data/xquad.csv')
external_train = pd.concat([external_mlqa, external_xquad])
def create_folds(data, num_splits):
    """Assign a stratified fold id (new column 'kfold') to every row.

    Stratifies on the 'language' column so each fold has the same
    language mix. Mutates ``data`` in place and also returns it.
    """
    data["kfold"] = -1
    kf = model_selection.StratifiedKFold(n_splits=num_splits, shuffle=True, random_state=42)
    for f, (t_, v_) in enumerate(kf.split(X=data, y=data['language'])):
        data.loc[v_, 'kfold'] = f
    return data
train = create_folds(train, num_splits=5)
# External data keeps kfold = -1 so it is never used as validation and
# appears in every training split.
external_train["kfold"] = -1
external_train['id'] = list(np.arange(1, len(external_train)+1))
train = pd.concat([train, external_train]).reset_index(drop=True)
def convert_answers(row):
    """Pack an (answer_start, answer_text) pair into a SQuAD-style dict."""
    start, text = row[0], row[1]
    return {'answer_start': [start], 'text': [text]}
# Build the 'answers' column in the format the QA pipeline expects.
train['answers'] = train[['answer_start', 'answer_text']].apply(convert_answers, axis=1)
# + [markdown] papermill={"duration": 0.009323, "end_time": "2021-10-12T05:35:56.067125", "exception": false, "start_time": "2021-10-12T05:35:56.057802", "status": "completed"} tags=[]
# # ⚙️ Helper Function
# + _kg_hide-input=true _kg_hide-output=true papermill={"duration": 0.179741, "end_time": "2021-10-12T05:35:56.256249", "exception": false, "start_time": "2021-10-12T05:35:56.076508", "status": "completed"} tags=[]
# %%capture
from coffee.helpers import make_model, make_loader, make_optimizer, make_scheduler
from coffee.utils import set_seed
def init_training(args, data, fold):
    """Build everything one cross-validation fold needs for training.

    Seeds the RNGs, creates the output directory, and returns the model
    (moved to GPU), its config/tokenizer, the optimizer, LR scheduler,
    train/valid dataloaders, and an empty result dict for bookkeeping.

    Raises
    ------
    ValueError
        If no CUDA device is available (CPU training is unsupported).
    """
    set_seed(CONFIG["seed"])
    if not os.path.exists(CONFIG["output_dir"]):
        os.makedirs(CONFIG["output_dir"])
    # model
    model_config, tokenizer, model = make_model(args)
    if torch.cuda.device_count() >= 1:
        print('Model pushed to {} GPU(s), type {}.'.format(
            torch.cuda.device_count(),
            torch.cuda.get_device_name(0))
        )
        model = model.cuda()
    else:
        raise ValueError('CPU training is not supported')
    print("✅ Model Initialized")
    # data loaders
    train_dataloader, valid_dataloader = make_loader(args, data, tokenizer, fold)
    print("✅ DataLoaders Initialized")
    # optimizer
    optimizer = make_optimizer(args, model, strategy = 'a')
    print("✅ Optimizer Initialized")
    # scheduler: steps count optimizer updates, hence the division by the
    # gradient-accumulation factor.
    num_training_steps = math.ceil(len(train_dataloader) / CONFIG["gradient_accumulation_steps"]) * CONFIG["epochs"]
    if CONFIG["warmup_ratio"] > 0:
        num_warmup_steps = int(CONFIG["warmup_ratio"] * num_training_steps)
    else:
        num_warmup_steps = 0
    print(f"Total Training Steps: {num_training_steps}, Total Warmup Steps: {num_warmup_steps}")
    scheduler = make_scheduler(args, optimizer, num_warmup_steps, num_training_steps)
    print("✅ Scheduler Initialized")
    # best_val_loss starts at +inf so the first epoch always checkpoints.
    result_dict = {
        'epoch':[],
        'train_loss': [],
        'val_loss' : [],
        'best_val_loss': np.inf
    }
    return (
        model, model_config, tokenizer, optimizer, scheduler,
        train_dataloader, valid_dataloader, result_dict
    )
# + [markdown] papermill={"duration": 0.009042, "end_time": "2021-10-12T05:35:56.275369", "exception": false, "start_time": "2021-10-12T05:35:56.266327", "status": "completed"} tags=[]
# # 🔥 Training
# + papermill={"duration": 0.032039, "end_time": "2021-10-12T05:35:56.317121", "exception": false, "start_time": "2021-10-12T05:35:56.285082", "status": "completed"} tags=[]
from coffee.engine import Trainer, Evaluator
def run(data, fold):
args = CONFIG
run = wandb.init(project='chaii',
entity='sauravmaheshkar',
group='rembert',
job_type='train',
config=CONFIG)
model, model_config, tokenizer, optimizer, scheduler, train_dataloader, \
valid_dataloader, result_dict = init_training(args, data, fold)
wandb.watch(model)
trainer = Trainer(model, tokenizer, optimizer, scheduler)
evaluator = Evaluator(model)
train_time_list = []
valid_time_list = []
for epoch in range(CONFIG["epochs"]):
result_dict['epoch'].append(epoch)
# Train
torch.cuda.synchronize()
tic1 = time.time()
result_dict = trainer.train(
args, train_dataloader,
epoch, result_dict
)
torch.cuda.synchronize()
tic2 = time.time()
train_time_list.append(tic2 - tic1)
# Evaluate
torch.cuda.synchronize()
tic3 = time.time()
result_dict = evaluator.evaluate(
valid_dataloader, epoch, result_dict
)
torch.cuda.synchronize()
tic4 = time.time()
valid_time_list.append(tic4 - tic3)
output_dir = os.path.join(CONFIG["output_dir"], f"checkpoint-fold-{fold}")
if result_dict['val_loss'][-1] < result_dict['best_val_loss']:
print("{} Epoch, Best epoch was updated! Valid Loss: {: >4.5f}".format(epoch, result_dict['val_loss'][-1]))
result_dict["best_val_loss"] = result_dict['val_loss'][-1]
os.makedirs(output_dir, exist_ok=True)
torch.save(model.state_dict(), f"{output_dir}/pytorch_model.bin")
model_config.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
print(f"Saving model checkpoint to {output_dir}.")
print()
evaluator.save(result_dict, output_dir)
print(f"Total Training Time: {np.sum(train_time_list)}secs, Average Training Time per Epoch: {np.mean(train_time_list)}secs.")
print(f"Total Validation Time: {np.sum(valid_time_list)}secs, Average Validation Time per Epoch: {np.mean(valid_time_list)}secs.")
torch.cuda.empty_cache()
del trainer, evaluator
del model, model_config, tokenizer
del optimizer, scheduler
del train_dataloader, valid_dataloader, result_dict
gc.collect()
run.finish()
# + papermill={"duration": 25951.97874, "end_time": "2021-10-12T12:48:28.307575", "exception": false, "start_time": "2021-10-12T05:35:56.328835", "status": "completed"} tags=[]
# Run every fold sequentially; each call manages its own W&B run.
for fold in range(CONFIG["folds"]):
    print();print()
    print('-'*50)
    print(f'FOLD: {fold}')
    print('-'*50)
    run(train, fold)
|
notebooks/coffee-rembert.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Simple cells demonstrating that notebook state persists between cells.
a = 10
a
# + pycharm={"name": "#%%\n"}
b = 100
b
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
a
b
# -
print(a)
print(b)
# # Jupyterの練習
# - リスト1
# - リスト2
# - リスト3
#
# + [markdown] pycharm={"name": "#%% md\n"}
#
# + pycharm={"name": "#%%\n"}
# # %matplotlib inline
# import matplotlib.pyplot as plt
# %whos
|
.ipynb_checkpoints/hello_jupyter-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Modeling M&M Colour Distributions
#
# In this exercise, we will model M&M colour distributions using Bayesian inference.
#
# Here's the basic problem: You have a bag of M&Ms, and you want to know how many blue ones are in it. Easy, right? You open the bag, and count all the blue ones! But what if you have a [10 pound bag](https://www.mymms.com/product/bulk-mms-candy-10-lb.do?sortby=ourPicksAscend&refType=&from=fn&ecList=7&ecCategory=100601) of M&Ms? Counting them all would take you ages, wouldn't it? Or what if you had several bags. The exact number of blue M&Ms as well as the total number of M&Ms in each bag might vary! So really, what we need is a model of the *average* number of blue M&Ms per bag. We don't just need any model, but we need a *statistical* model that describes the number of blue M&Ms we would get out of a bag of M&Ms given some underlying true fraction of blue M&Ms.
#
# #### Imports
#
# This exercise initially only requires `numpy`, `scipy` and `matplotlib` for plotting. Below, you might also want to use `pymc3` for sampling the hierarchical model.
# +
# %matplotlib notebook
import matplotlib.pyplot as plt
# prettier plotting;
# comment out if seaborn is not installed
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("talk")
import numpy as np
import scipy.stats
import scipy.special
# -
# ## The Binomial Likelihood
#
# In statistics, the statistical model for how one draws observations given some underlying process is called the *likelihood*.
#
# In our case, we have two observations: the total number of M&Ms in a bag $N$, and the number of blue M&Ms out of that total number, $k$.
# There are only two things the M&Ms can be in our (simplified) model: blue and not-blue. It is worth noting at this point that virtually any model used to make sense of data is always a *simplification* of the true underlying process. In reality, M&Ms come in six different colours. We have *simplified* this to just two. This is fine as long as blue M&Ms are all we care about. If we suddenly also cared about green M&Ms, we'd need to make our model more complicated to account for this (more on this later)!
#
# Back to our blue M&Ms. Every time you draw an M&M out of a bag, you get one of two outcomes: blue or not-blue. In a more statistical language, a draw of an M&M out of a bag is called a *trial*, drawing a blue M&M is called a *success* (a not-blue M&M is called a *failure*). You can do this $N$ times and then record the number of successes, $k$.
#
# Assuming that there is some underlying fraction $q$ of blue M&Ms being produced and put into bags, then for every time you draw an M&M out of the bag, you will draw a blue one with probability $q$ and a not-blue one with probability $(1-q)$ (since these are our only two, mutually exclusive options, and all probabilities must sum up to $1$).
#
# $$
# p(k | N, q) = {N \choose k} q^k (1-q)^{N-k} \; .
# $$
#
# Let's talk about how to read this equation. On the right side is a probability $p$, and it's the probability of getting $k$ blue M&Ms out of a bag with $N$ total M&Ms, and an underlying fraction of $q$ blue M&Ms per total. The $|$ symbol always denotes the term *given*, which implies *truths* about the world, or things we know. In this case, we *know* that we've drawn $N$ M&Ms out of the bag, and we're trying to figure out how probable it is that $k$ will be blue, given some true underlying rate $q$. Note that here, we assume that we actually *know* what the true number of blue M&Ms per bag should be, but in reality, we don't!
# Keep this in the back of your head, we'll get back to it in a little while!
#
# On the left-hand side is the definition of the probability distribution we are interested in. The probability of drawing $k$ blue M&Ms is $q^k$ (if the draws are all independent). Then we have $N-k$ not-blue M&Ms left, and the probability of drawing those is $(1-q)^{N-k}$. The ${N \choose k}$ term in the front of the expression comes from the fact that $q^k (1-q)^{N-k}$ is the probability of one *specific* sequence. For example, you could have drawn something like \[blue, blue, not-blue, blue, not-blue, not-blue, not-blue, not-blue, blue\], which is a specific sequence. But we don't really care about whether you draw a blue or not-blue first, all we care about is the total number of blue M&Ms out of the total. The first term corrects the expression for all possible permutations of sequences that could produce $k$ blue M&Ms out of $N$ total.
#
# This expression, as a whole, is called the *binomial distribution*, and is the likelihood we're going to use.
#
# **Exercise**: Without looking at any M&Ms, take a guess for what a reasonable value might be for $q$. Then open your bag of M&Ms and take out 20, recording all blue ones. Calculate the probability of that number of blue M&Ms out of your set of 20 trials, given the value of $q$ you've chosen. How large is that probability?
#
# +
n = # number of draws out of bag
k = # add the number of blue M&Ms you drew to this variable
q = # add the value you chose for q here
# -
# Now we need to write down the equation of the binomial distribution.
#
# **Hint**: The function `scipy.special.comb` allows you to calculate the combinatorial pre-factor of the binomial distribution:
from scipy.special import comb
def binomial_distribution(n, k, q):
    """
    Calculate the probability of $k$ successes out
    of $n$ trials, given an underlying success rate $q$.

    Parameters
    ----------
    n : int
        The total number of trials

    k : int
        The number of successful draws out of $n$

    q : float, [0,1]
        The success rate

    Returns
    -------
    prob : float [0,1]
        The binomial probability of $k$ draws out
        of $n$ trials
    """
    # number of distinct orderings that give exactly k successes: "n choose k"
    bin_fac = comb(n, k)
    # probability of k independent successes
    first_prob = q ** k
    # probability of the remaining (n - k) independent failures
    second_prob = (1. - q) ** (n - k)
    # binomial probability: orderings times per-sequence probability
    prob = bin_fac * first_prob * second_prob
    # fixed: original line read "return prob return prob" (a syntax error)
    return prob
# Let's use this function to calculate the probability of our M&Ms above, given the value we assumed for the fraction of blue M&Ms:
# calculate the binomial probability
print("Probability: " + str(binomial_distribution(n, k, q)))
# ## Calculating the Likelihood Function
#
# There's a fundamental problem with the process above: it assumes we know $q$ (it's on the right side of the "|") and that $k$ is a random variable. But this is often not what we observe in reality! Often, we can make *observations* of the process (here: $k$ blue M&Ms out of $N$), and we care about the *parameters* of the underlying model (here: the success rate $q$). What we really want to know is not $p(k | N, q)$, but $p(q | N, k)$. It is important to realize that these two are not the same! For an illustrative example, consider a simpler case. Consider that you're given the information that it is raining outside. What can you conclude about the cloud cover overhead? Conversely, imagine you're being told that it is cloudy. Can you conclude with equal probability that it is also raining?
#
# So, if they're not the same, how do we get from $p(k | N, q)$, which we've shown we can calculate, to $p(q | N, k)$? In principle, nothing stops you from measuring your $k$ successes out of $N$ trials, and then calculating $p(k | N, q)$ for different values of $q$. However, there is a reason this is called a likelihood *function*: it is *not* a probability distribution of the parameter $q$, because $q$ is on the right-hand side of the "|" sign. It is fixed, known, assumed to be true. The binomial probability is a probability distribution in $k$, not $q$. This may sound subtle, but has huge consequences, one of them being that $p(k | N, q)$ as a function of $q$ does not integrate to 1, like a proper probability distribution.
#
# ## Going from Likelihood to Posterior
#
# It's not that the likelihood isn't useful: it often gives you a pretty good guess which parameter might do a good job of producing the data you've observed. But the crux is that it doesn't tell you what you want to know, because it is *not* a probability of the parameter $q$, but of the outcomes $k$.
#
# So can we calculate the actual probability we're interested in, $p(q | N, k)$?
#
# Well, this is where Bayes' theorem comes in handy. Bayes' theorem can be derived directly from some fundamental rules of probability, most importantly the *joint* probability distribution of two variables:
#
# $$
# P(A, B) = P(A|B)P(B) = P(B|A)P(A)
# $$
#
# for some generic random variables $A$ and $B$ (e.g. whether it's raining outside, and whether it is cloudy or sunny). What does this term say?
#
# Let's make a little table for the four possible outcomes (cloudy/sunny, rain/no rain):
#
# | categories | rain (r) | no rain (nr) |
# |------------|----------|--------------|
# | cloudy (c) | 0.1 | 0.4 |
# | sunny (s) | 0.0 | 0.5 |
#
# This table expresses the *joint* probabilities of all possible outcomes. For example, the joint probability of it currently being cloudy without rain is $p(\mathrm{c}, \mathrm{nr}) = 0.4$. The probability that it's both sunny and raining is zero (where would the rain come from without clouds?).
#
# What does this have to do with our case above? Well, what's the joint probability of it being both cloudy and not raining? $p(\mathrm{c}, \mathrm{nr}) = 0.4$ given our table above.
#
# Let's ask a harder question: what's the probability of it being cloudy, $p(\mathrm{c})$? To answer this question, it doesn't matter whether it's raining or not raining, so we just have to sum up both columns, $p(\mathrm{c}) = 0.1 + 0.4 = 0.5$. In reality, our variables are often continuous, so this often requires an integral instead of a simple sum.
#
# Let's ask something a little bit more complex: what's the probability that it's raining, *given* that it is cloudy, $p(\mathrm{r} | \mathrm{c})$? Note that this is *not* the same as the *joint* probability. In the latter case, I don't know it's cloudy, and I'm trying to calculate the probability that it is both cloudy and raining. In the case we're currently looking at, I already *know* that it's cloudy (maybe I've looked out of the window), and I'm curious whether I might be able to get to work without getting wet. So I already have one piece of information (it's cloudy). Because I already know this, the whole row labelled "sunny" no longer matters, and I only have two cases left (rain and not-rain). However, the sum of those two options is $0.5$, and I said earlier that probabilities must sum up to 1! So we'll need to re-normalize the probability to sum up to 1:
#
# $$
# p(\mathrm{r} | \mathrm{c}) = \frac{p(\mathrm{r}, \mathrm{c})}{p(\mathrm{c})}
# $$
#
# So the probability that it is raining given that it is cloudy is $0.1/(0.1 + 0.4) = 0.2$.
#
# If you move $p(\mathrm{c})$ on the other side, you get an expression for the joint probability:
#
# $$
# p(\mathrm{r} , \mathrm{c}) = p(\mathrm{r} | \mathrm{c})p(\mathrm{c})
# $$
#
# Note that you can turn that expression around: the joint probability for it being both cloudy and raining is:
#
# $$
# p(\mathrm{r} , \mathrm{c}) = p(\mathrm{c} | \mathrm{r})p(\mathrm{r})
# $$
#
# You can put these two together, and you've got Bayes rule as stated above (I'm going to go back to the generic variables $A$ and $B$ for this):
#
# $$
# P(A | B) = \frac{p(B|A)P(A)}{P(B)} \, .
# $$
#
# And this is Bayes' rule! This particular theorem has many more implications than simply tallying up probabilities as we've done in the example above. In particular, there are fundamental philosophical differences between Bayesian statistics and its alternative--often also called frequentist statistics--in how one sees probabilities. In Bayesian statistics, almost anything can be a random variable, and Bayesians see probabilities as encoding our uncertainty or lack of knowledge about the world. Frequentists tend to have a more literal view of the world, and interpret probabilities as frequencies of truly random events, e.g. rolls of dice.
#
# ## The Posterior Probability Distribution
#
# What does all of this have to do with our M&Ms? Well, above, we have basically written down the blue-print for how to get from $p(k | N, q)$ to $p(q | N, k)$. We can stick these particular variables in our equation above:
#
# $$
# p(q | N, k) = \frac{p(k | N, q) p(q)}{p(k)} \, .
# $$
#
# In theory, this tells us exactly how to calculate the probability of the *parameter* $q$ that we're looking for, given that we've drawn $k$ blue M&Ms out of a bag with $N$ M&Ms total. $p(q | N, k)$ is generally called the *posterior probability distribution*. We're not done, though. In particular, we've written down the equation, and we know how to calculate $p(k | N, q)$, but what are $p(q)$ and $p(k)$?
#
# I've made a big production above that $p(k | N, q)$ isn't normalized, and that this is important. The product $p(k | N, q) p(q)$ is still not normalized, but $p(q | N, k)$ must be, so $p(k)$ is effectively a normalizing constant:
#
# $$
# p(k) = \int{p(k | N, q) p(q) dq}
# $$
#
# such that the whole probability distribution integrates to 1. In practice, this is the probability of observing the data $k$ times $p(q)$, integrated over all possible values of $q$. This is also called the *marginal likelihood* or *evidence*. While this no longer depends on $q$, this doesn't mean it has no assumptions. For example, we've assumed above that our data can be modelled by a binomial distribution. This may not be true, and we should probably have included another variable $B$ on the given side of our probabilities to indicate there's an underlying assumption there, e.g. $p(k | B)$. Notice that this looks a lot like a likelihood? Well it is, but it is now the likelihood of observing the data given the generalized assumption that the data were drawn from any binomial distribution. If we had another model for the data, say a Poisson distribution, we could also calculate $p(k | P)$ (where $P$ stands for the Poisson model) and compare the two. This is why the marginal likelihood is often used for *model comparison*.
#
# In this tutorial, we're not going to worry about comparing different kinds of (statistical) models, but instead worry about estimating the parameter $q$. For this the normalizing constant $p(k)$ is exactly that, a constant, the same for all possible values of $q$. if we're only interested in the relative probabilities of a specific $q_0$ to a different $q_1$, we can ignore that constant and write
#
# $$
# p(q | N, k) \propto p(k | N, q) p(q) \, .
# $$
#
# which is going to make our lives a whole lot easier, because $p(k)$ is often very hard to compute in practice
#
# ## Priors
#
# The one thing we *haven't* talked about yet is $p(q)$. You'll notice that this is a probability distribution of $q$ only, without the data playing a role. This is often called the **prior probability distribution**, and it encodes whatever prior knowledge you might have about this parameter before you've looked at the data. For example, you might know that there are six colours in a package of M&M, so you might conclude that it's extremely unlikely that $q=1$, i.e. that there are only blue M&Ms in your bag.
#
# **Exercise**: Think about what you know about M&Ms. Do you have any prior knowledge about how many M&Ms there might be in a bag? Think about the number you picked for $q$ when you calculated the binomial probability earlier. Why did you pick that value?
#
# **Careful**: Of course, you've already seen the data! This is generally not how you go about things, so you're going to have to pretend you haven't (or take a new bag :) ).
#
# Note that I've said earlier that $p(q)$ is a probability *distribution*, so it has to be more than one value. It has to encode your knowledge about $q$ for all possible values of $q$, which can in principle be anywhere in the range between 0 and 1. One simple choice is to make all possible values of $q$ equally likely, but we've already said earlier that this is probably not a good assumption, because we don't think our bag will be all blue M&Ms. In general, this kind of prior is called a *uniform distribution*, and while it may seem like the choice that is least affected by what you know, this is in practice *not* always true! We won't go into the details here of why or when this is the case, but be mindful that this is something you might have to think about in practice.
# There's another thing to be aware of with the uniform distribution: it makes a very, very strong assumption about what values $q$ is allowed to take. If we set the prior to be uniform between $0$ and $1$, this would be an okay choice, because these are all the values $q$ can take in practice. However, imagine you picked a prior for $q$ between 0.5 and 1. You have just assumed that $q$ can *never, ever* be lower than 0.5, *no matter* what your data tell you! This is a really strong assumption to make, and you'd better be really sure that it's a reasonable one!
# In practice, it's often better to choose distributions that fall off sharply, but retain some (small, but not impossible) prior probability in all theoretically allowed values, unless you're absolutely confident that these values cannot be true.
#
# ### Conjugate Priors
#
# So in principle, you could choose any distribution for $q$. Maybe you eat a package of M&Ms every day, so you have a pretty good feeling for $q$. You could choose a normal distribution around your estimated value of $q = \mu$, assign some narrow width $\sigma$ to the distribution, and you'd have a perfectly good prior: $p(q | \mu, \sigma) \sim \mathcal{N}(\mu, \sigma)$. Note that in this case $\mu$ and $\sigma$ define the shape of the prior distribution, and are called **hyperparameters**. They're given (i.e we've set them in advance), so they're on the right-hand side of the "|".
#
# One issue with this is that you don't just want to calculate the prior $p(q | \mu, \sigma)$, but the posterior $p(q | N, k) \propto p(k | N, q) p(q | \mu, \sigma)$, and there's no guarantee that the latter will be an analytically solvable expression for any choice of prior $p(q)$. However, for most likelihood functions, there do exist functions that you can use as priors that will lead to analytical expressions for the posterior. These are called **conjugate priors** and are a good choice when you don't have much prior information about your parameter $q$ and/or the conjugate prior matches the shape of what you *do* know about $q$.
#
# The conjugate prior for the binomial distribution is the [beta distribution](https://en.wikipedia.org/wiki/Beta_distribution). This distribution has two parameters, $\alpha$ and $\beta$ and is defined as
#
# $$
# p(x | \alpha ,\beta ) =\mathrm{constant} \cdot x^{\alpha -1}(1-x)^{\beta -1} \; .
# $$
#
# It can take many different shapes.
#
# **Exercise**: Calculate the prior probability density for different values of $\alpha$ and $\beta$, and plot the result. How does the shape of the distribution change for different values of the two parameters? Which combination of parameters makes a good prior for $q$ in your opinion?
#
# **Hint**: You don't have to write your own version of the beta-distribution. The `scipy.stats` package contains a large list of distributions ready-made for you, including the [beta distribution](https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.stats.beta.html).
# +
qtrial = np.linspace(0, 1, 500) # trial values of q
alpha = # set the value for alpha
beta = # set the value for beta
# set up the probability distribution
beta_dist = scipy.stats.beta(alpha, beta)
# calculate the probability density for qtrial
beta_pdf = beta_dist.pdf(qtrial)
# -
# plot the results
fig, ax = plt.subplots(1, 1, figsize=(6,4))
ax.plot(qtrial, beta_pdf, lw=2, color="black")
# fixed: `set_label` only tags the Axes artist for legends; `set_xlabel`
# is the call that actually labels the x-axis
ax.set_xlabel(r"Parameter $q$")
ax.set_ylabel(r"Prior distribution $p(q)$")
ax.set_title("beta-distribution prior")
# Chosen values for the hyper-parameters
# * $\alpha = $
# * $\beta = $
#
# **Exercise**: Share your results with the class and discuss. Did you all pick similar values? Are your choices significantly different to those of others? How do your assumptions differ?
#
# It is important to notice that there is no one correct choice for a prior, because by its very definition, it depends on your *prior knowledge* about the problem you're trying to solve! Someone who has eaten M&Ms regularly since childhood might have a different knowledge about the fraction of blue M&Ms in a bag than someone who has never had any before today! This may at first seem like a disadvantage, because making different assumptions about $q$ seems like it's not very objective, and science is supposed to be objective, right?
#
# Well, it's not that easy, because the idea that science is absolutely objective is itself a fallacy. Whenever we write down a model for observations, we *always* make assumptions (as for example, we pointed out explicitly above with the binomial model), and those assumptions can differ from researcher to researcher and change over time.
# A lack of explicit prior probability distribution does *not* equal a lack of assumptions. The assumptions might not be explicit, but they exist. An advantage of Bayesian statistics is that it requires you to state your assumptions explicitly, which means they can be examined and discussed like anything else we do.
#
# ### Calculating the Posterior
#
# Okay, now we've got all of our components in place, which means we can calculate our posterior probability density. And there are some good news: because we've chosen a conjugate prior for our likelihood, the posterior is analytical. In fact, the posterior to a binomial likelihood and a beta-prior is also a beta-distribution,
#
# $$
# p(q | k, N) = \mathrm{Beta}(\alpha+k,\beta+N-k)
# $$
#
# **Exercise**: Calculate both the prior and the posterior for your chosen values of $\alpha$ and $\beta$ and plot them in the same figure. How has the posterior changed from your prior?
# +
### First, repeating this from the prior above
qtrial = np.linspace(0, 1, 500) # trial values of q
alpha = # set the final value for alpha
beta = # set the final value for beta
# set up the probability distribution
beta_prior = scipy.stats.beta(alpha, beta)
# calculate the probability density for qtrial
beta_prior_pdf = beta_dist.pdf(qtrial)
### Now let's calculate the posterior
a_post = # alpha + k
b_post = # beta + N - k
print("The alpha parameter of the posterior is: " + str(a_post))
print("The beta parameter of the posterior is: " + str(b_post))
# set up the probability distribution
beta_posterior =
# calculate PDF
beta_post_pdf =
# +
# plot the results
fig, ax = plt.subplots(1, 1, figsize=(6,4))
ax.plot(qtrial, beta_prior_pdf, lw=2, color="black", label="prior")
ax.plot(qtrial, beta_post_pdf, lw=2, color="black",
        linestyle="dashed", label="posterior")
# fixed: `set_label` does not label the axis; use `set_xlabel`
ax.set_xlabel(r"Parameter $q$")
ax.set_ylabel(r"Probability distribution $p(q)$")
ax.set_title("beta-distribution prior and posterior")
ax.legend()
plt.tight_layout()
# -
# **Exercise**: Imagine that you'd chosen values for $\alpha$ and $\beta$ that are very unlikely to be true (e.g. a distribution that rises towards $q=1$). Repeat the comparison between prior and posterior above with these unlikely values. Does the different prior affect the results? How?
#
# **Important Note**: The above exercise, i.e. to change the prior and go back to re-calculate the posterior, is an academic exercise only! In practice, you **cannot** go back and change your prior once you've looked at your data and calculated your posterior! The prior *only* encodes knowledge about $q$ *before* you looked at the data. If you look at the data, then change your prior and calculate the posterior again, you've effectively used the data twice! In practice, this will lead you to be unreasonably overconfident in your results. Once you've looked at your data, your only real solution is to gather more data and use the posterior from your current analysis as a prior for the future (more M&Ms! Oh No! :) ).
#
#
# ### Adding More Information
#
# These are the results for one package of M&Ms. Can we actually make this better? Yes, because you have classmates all around you who also have counted blue M&Ms!
#
# **Exercise**: Tally up the total number of blue M&Ms counted by everyone in the class, and the total number of M&Ms from everyone. Then use the new numbers for $k$ and $N$ to calculate and plot the posterior as well as the prior.
# +
n_total = # add number of all M&Ms here
k_total = # add number of all blue M&Ms here
qtrial = np.linspace(0, 1, 500) # trial values of q
alpha = # set the final value for alpha
beta = # set the final value for beta
# set up the probability distribution
beta_prior = scipy.stats.beta(alpha, beta)
# calculate the probability density for qtrial
beta_prior_pdf = beta_dist.pdf(qtrial)
### Now let's calculate the posterior
a_post = # alpha + k
b_post = # beta + N - k
print("The alpha parameter of the posterior is: " + str(a_post))
print("The beta parameter of the posterior is: " + str(b_post))
# set up the probability distribution
beta_posterior =
# calculate PDF
beta_post_pdf =
# +
# plot the results
fig, ax = plt.subplots(1, 1, figsize=(6,4))
ax.plot(qtrial, beta_prior_pdf, lw=2, color="black", label="prior")
ax.plot(qtrial, beta_post_pdf, lw=2, color="black",
        linestyle="dashed", label="posterior")
# fixed: `set_label` does not label the axis; use `set_xlabel`
ax.set_xlabel(r"Parameter $q$")
ax.set_ylabel(r"Probability distribution $p(q)$")
ax.set_title("beta-distribution prior and posterior")
ax.legend()
plt.tight_layout()
# -
# ## Markov Chain Monte Carlo
#
# ### Or: What to do when your posterior is not analytical.
#
# In practice, you will often end up in situations where conjugate priors are not a good choice, and your posterior will not be analytical. What you do in this case depends on what you want to know. For example, you might only be interested in the *most probable* value of $q$. In this case, optimization algorithms are often a good choice. This is sometimes also your only option for example if the likelihood function is very expensive to calculate.
#
# However, often the posterior probability can be complex, and trying to find the most probable value isn't good enough. Imagine you had a probability distribution with two roughly equally tall peaks, at 0.2 and 0.8, with a valley of low probability in between. An optimization algorithm will always end up in one of the two peaks, and will give you a single value, but you might never find out about the other peak!
#
# So what can we do? If you need to map out the probability distribution as a whole, there are several approaches. The simplest and straightforward is to make a grid in $q$, e.g. of a 100 points, and calculate $p(q | N, k)$ for each of those points, and then plot the result. Easy, right? This works well for problems with very low dimensions, like ours, where we only have a single parameter. What if you don't have a single parameter, but 50? You now need to sample 100 points in each of those 50 dimensions, meaning you need $100^{50}$ points. If your posterior takes a microsecond to calculate, you'll still need longer than the age of the universe to calculate all of those points! This is clearly impossible.
#
# So can we do something smarter than a grid? Yes! In fact, we can find clever ways to jump through parameter space in such a way that we'll evaluate our posterior often in regions where the posterior is large, and less often in regions where the posterior is low. There are a whole range of different algorithms that can do this, but **Markov Chain Monte Carlo (MCMC)** is the most common and most popular one.
#
#
# **TODO: FINISH MCMC PART**
# ## A slightly more complex model ...
#
# Let's make our problem above more complicated. What if we're not just interested in the blue M&Ms, but want to know the distribution of all six colour? Well, where the binomial distribution only considered *success* and *failure*, there is a generalization to this distribution that considers *multiple categorical outcomes* (in our case six colours). In this case, we don't have a single $k$ given $N$ trials, but multiple $\mathbf{k} = \{k_1, k_2, ..., k_l\}$ for $l$ possible outcomes. In our case, $l=6$, and each $k_i$ stands for a single colour (e.g. $k_0 = \mathrm{blue}$,$k_1 = \mathrm{green}$, ...). Similarly, we now have a vector $\mathbf{q} = \{q_1, q_2, ..., q_l\}$ for the underlying true fraction of each colour.
#
# This generalization is the [multinomial distribution](https://en.wikipedia.org/wiki/Multinomial_distribution), defined as:
#
# $$
# p(\mathbf{k} | \mathbf{q}, N)=
# \begin{cases}
# \frac{N!}{k_1! k_2! ... k_l!}q_1^{k_1}q_2^{k_2} ... q_l^{k_l},& \text{when } \sum_{i=1}^{l}k_i=N \\
# 0, & \text{otherwise}
# \end{cases}
# $$
#
# Our measurements are now the number of M&Ms for each colour. Our parameters are the underlying fractions $q_i$ for each colour. We now have a six-dimensional measurement, and six parameters for our new model.
#
# **Exercise**: Define a six-element vector with your prior expectations for what you think the different $q_i$ should be. Do you think all colours are represented equally?
#
# +
q_blue = # fraction of blue M&Ms
q_green = # fraction of green M&Ms
q_red = # fraction of red M&Ms
q_yellow = # fraction of yellow M&Ms
q_orange = # fraction of orange M&Ms
q_brown = # fraction of brown M&Ms
q_all = np.array([q_blue, q_green, q_red,
q_yellow, q_orange, q_brown])
# -
# Now tally up all the colours in your package of M&Ms and write down the result:
# +
k_blue = # blue M&Ms
k_green = # green M&Ms
k_red = # red M&Ms
k_yellow = # yellow M&Ms
k_orange = # orange M&Ms
k_brown = # brown M&Ms
# all measurements together
k_all = np.array([k_blue, k_green, k_red,
k_yellow, k_orange, k_brown])
n_total = # total number of M&Ms in package
# -
# Let's calculate the multinomial probability for these measurements:
# +
# define the distribution
mult = scipy.stats.multinomial(n=n_total, p=q_all)
# calculate the probability for our measurements:
# fixed: the multinomial is a discrete distribution, so scipy exposes a
# probability *mass* function `pmf`; there is no `pdf` method
print("multinomial probability: " + str(mult.pmf(k_all)))
# -
# Ideally, we'd like to calculate a posterior probability for this, too, so we'll need to define a prior for $\mathbf{q}$. The conjugate prior for the multinomial distribution is a [*Dirichlet distribution*](https://en.wikipedia.org/wiki/Dirichlet_distribution), the multivariate generalization of the beta-ditribution. The Dirichlet distribution appears fairly often in problems with categorical variables and is very useful to know. A nice conceptual introduction can be found [here](http://blog.bogatron.net/blog/2014/02/02/visualizing-dirichlet-distributions/).
#
# For our 6 different categories (colours), the Dirichlet distribution has six parameters called *concentration parameters*, $\mathbf{\alpha} = \{\alpha_1, \alpha_2, ..., \alpha_l\} \, , \, \alpha_i > 0$.
# Note that it is only defined on the interval $(0,1)$, and also only in the region where $\sum_{i=1}^{l}q_i = 1$ (remember: our $q_i$ are relative fractions of colour $i$, and if we take all fractions for all colours, they must make up all of our M&Ms).
#
# Of course, `scipy.stats` also has an implementation of the [Dirichlet distribution](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.dirichlet.html#scipy.stats.dirichlet).
#
# In practice, the PDF of the Dirichlet distribution is a bit tricky to plot, because of the way that your trial values of $q_i$ need to sum up to $1$. You can look at [this illustration](https://en.wikipedia.org/wiki/Dirichlet_distribution#/media/File:Dirichlet-3d-panel.png) to see how the PDF changes for different values of $\alpha$.
#
# **Exercise**: Let's plot the PDF of a Dirichlet distribution with two categories, i.e. two concentration parameters $\alpha_i$, and plot the results. Repeat for different values and combinations of $\alpha_1$ and $\alpha_2$. How does the distribution change? What do you think are reasonable values for the different values of $\alpha_i$?
# +
alpha1 = # add your guess for alpha1
alpha2 = # add your guess for alpha2
alpha = [alpha1, alpha2] # add
# define the dirichlet distribution
dirichlet = scipy.stats.dirichlet(alpha=alpha)
# -
# trial values for q1, kept strictly inside (0, 1): the Dirichlet pdf is
# not defined on the simplex boundary (scipy raises when a component is 0
# and the matching concentration parameter is < 1)
x1 = np.linspace(1e-6, 1.0 - 1e-6, 1000)
x2 = 1.0 - x1  # complement, so each column (x1, x2) sums to 1
pdf = dirichlet.pdf([x1, x2])
# +
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8,4))
ax1.plot(x1, pdf, lw=2, color="black")
ax1.set_xlim(0,1)
ax1.set_xlabel(r"$q_1$")
ax2.plot(x2, pdf, lw=2, color="black")
ax2.set_xlim(0,1)
ax2.set_xlabel(r"$q_2$")
# -
# Now we can set up our posterior inference!
#
# First, set your concentration parameters for each of the values of $\alpha_i$:
# +
alpha_blue = # blue M&Ms concentration parameter
alpha_green = # green M&Ms concentration parameter
alpha_red = # red M&Ms concentration parameter
alpha_yellow = # yellow M&Ms concentration parameter
alpha_orange = # orange M&Ms concentration parameter
alpha_brown = # brown M&Ms concentration parameter
# all parameters together
alpha_all = np.array([alpha_blue, alpha_green, alpha_red,
alpha_yellow, alpha_orange, alpha_brown])
# -
# The posterior distribution of a multinomial likelihood with a Dirichlet prior is also a Dirichlet distribution, with concentration parameter $\mathbf{\alpha}_{\mathrm{posterior}} = \mathbf{\alpha}_{\mathrm{prior}} + \mathbf{k}$:
# +
alpha_post = alpha_all + k_all
dir_post = scipy.stats.dirichlet(alpha=alpha_post)
# -
# For a 6-dimensional distribution, it's much harder to think about where the Dirichlet distribution is even defined (it should lie on a 5-dimensional volume in six dimensions for which $\sum_{i=1}^{l}q_i = 1$. Instead of calculating the posterior for a grid of values for $q_i$, we're just going to draw samples directly from the posterior distribution and then plot them:
# generate random samples from the posterior
# (1,000,000 draws x 6 categories of float64 is roughly 48 MB; reduce
# `size` if memory is tight -- the histograms stay smooth well below this)
post_rvs = dir_post.rvs(size=1000000)
# +
# plot marginal distributions: one histogram per colour fraction q_i,
# built from the posterior samples drawn above
fig, axes = plt.subplots(2, 3, figsize=(8,6), sharex=True, sharey=True)
# flatten the array of axis objects
axes = np.hstack(axes)
# we have six colours, so we're going to loop over each
for i in range(6):
    # density=True normalizes the histogram so it approximates the pdf
    axes[i].hist(post_rvs[:,i], bins=50, histtype="stepfilled",
                 color="black", alpha=0.4, density=True)
    axes[i].set_xlabel(r"$q_%i$"%i)
    # set the y-axis labels only on the left-most plots
    if i == 0 or i == 3:
        axes[i].set_ylabel("posterior pdf")
# automatically improve spacings between subplots
plt.tight_layout()
# -
# ## Bayesian Hierarchical Modelling
#
# Now that we can model all colours at once, it's time to let you in on a little secret: M&Ms in the US are produced by two different factories, one in Tennessee and one in New Jersey. The interesting part is that they produce different distributions of colours! Why? Nobody is really sure (except probably the Mars Company, which makes M&Ms).
#
# So each of you has their own package of M&Ms, and you've all recorded the number of different colours and calculated the posterior distribution for your parameters, but now you'd like to figure out which factory your M&Ms came from. However, while you know that the two different factories make different colour distributions, you don't know the distributions each makes, and you also don't know which factory your particular package came from! This seems like an insurmountable lack of knowledge, but fear not! Through the power of sharing information between you, you'll be able to figure all of that out.
#
# In the previous examples, you pooled your information for the entire class in order to improve your posterior. However, we glossed over the fact that your packages did not come from the same factory! How can we take better account of that fact? Through Bayesian hierarchical modelling!
#
# In the previous models you've built you had a prior distribution on your parameters, and the hyperparameters of these prior distribution were fixed. They were numbers you chose based on your prior information and intuition of the problem. In a hierarchical model, the parameters describing the prior are *not* fixed, but something that we *infer* along with the parameters of the colour distributions. Instead of describing prior knowledge, they describe the *population* of data sets, in our case the *population* of bags.
#
# We go from a model like this
#
# $$
# p(q | \{\mathbf{k}_b\}_{b=1}^{B}) \propto p(q | \alpha) \prod_{b=1}^{B} p(\mathbf{k}_b | q)
# $$
#
# where $\alpha$ were fixed hyperparameters, to adding one more layer of parameters:
#
# $$
# p(\{q\}_{b=1}^{B}, \alpha | \{\mathbf{k}_b\}_{b=1}^{B}) \propto p(\alpha | \beta) \prod_{b=1}^{B}p(\mathbf{k}_b | q_b) p(q_b | \alpha)
# $$
#
# where now $q_b$ isn't shared anymore among the individual data sets (i.e. bags of M&Ms), and we're inferring the population parameters $\alpha$ along with the $q_b$ for each bag of M&Ms.
#
# In our case, the difference to our previous model is that we now have *two* colour distributions--one for each factory--and that each bag comes from one of those factories based on some unknown mixture distribution.
#
# How can we write that down? Well, we are going to introduce a new variable $\theta$ to describe the probability distribution of a bag of M&Ms coming from the New Jersey factory as opposed to the Tennessee factory. And we're also going to give each bag a new variable $z_b$ drawn from the discrete distribution for $\theta$ which describes the assignment of each individual bag to come from a certain factory. There we have our hierarchy in the model: Each bag has a probability distribution of coming from the NJ or TN factory, and together, these probabilities are drawn from a prior describing the overall proportions of *all* bags coming from either factory. We're going to infer both together.
#
# The rest of the model doesn't really change, except that we need a prior for $\theta$. Much like our initial example, where we only had two possible outcomes, we only have two factories, so our prior in this case is also a beta-distribution.
|
code/.ipynb_checkpoints/ATA_Modelling_M&Ms-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np

# Source distribution over the four input symbols (uniform).
px = np.array([1/4, 1/4, 1/4, 1/4], dtype=np.float32)
# Fixed prior over the two bottleneck clusters (uniform).
pz = np.array([1/2, 1/2], dtype=np.float32)
# Conditional p(y|x): the first two inputs map mostly to label 0,
# the last two mostly to label 1.
py_x = np.array([
    [0.9, 0.1],
    [0.9, 0.1],
    [0.1, 0.9],
    [0.1, 0.9]
], dtype=np.float32)


def bottleneck(pz_x, py_z, beta=1.0):
    """Evaluate the two terms of an information-bottleneck objective.

    Parameters
    ----------
    pz_x : (4, 2) array
        Encoder p(z|x); each row is a distribution over clusters.
    py_z : (2, 2) array
        Decoder p(y|z); each row is a distribution over labels.
    beta : float
        Weight on the relevance (KL) term.

    Returns
    -------
    (mi, kl) : mi is I(X;Z) measured against the fixed prior ``pz``;
        kl is ``-beta`` times the expected log-ratio
        E[ sum_y p(y|x) log( p(y|z) / p(y|x) ) ] under the joint p(x,z).
    """
    # Joint p(x, z) implied by the encoder and the fixed input marginal.
    joint_xz = px[:, np.newaxis] * pz_x
    # Product of marginals p(x) p(z); mi is the KL between joint and product.
    independent = px[:, np.newaxis] * pz[np.newaxis, :]
    mi = np.sum(joint_xz * np.log(joint_xz / independent))

    # Per-(x, z) expected log-ratio over labels y (axis 2), then averaged
    # under the joint.  Ratio direction matches the original implementation.
    per_pair = np.sum(
        py_x[:, np.newaxis, :] * np.log(py_z[np.newaxis, :, :] / py_x[:, np.newaxis, :]),
        axis=2,
    )
    kl = -beta * np.sum(per_pair * joint_xz)
    return mi, kl
# +
# Random search over encoder/decoder pairs: sample candidate distributions
# from flat Dirichlet priors and keep the pair with the lowest objective.
best_loss = None
best_pz_x = None
best_py_z = None
for i in range(10000):
    # Candidate encoder p(z|x): one categorical row per input symbol.
    pz_x = np.random.dirichlet(np.ones(pz.shape[0]), size=(px.shape[0]))
    # Candidate decoder p(y|z): one categorical row per cluster.
    py_z = np.random.dirichlet(np.ones(py_x.shape[1]), size=(pz.shape[0]))
    # Large beta heavily weights the relevance (KL) term over compression.
    mi, kl = bottleneck(pz_x, py_z, beta=100.0)
    loss = mi + kl
    # Guard against NaN/inf from log(0) when a sampled distribution has a
    # (numerically) zero entry.
    if not np.any(np.isnan(loss)) and np.all(np.isfinite(loss)):
        if best_loss is None or loss < best_loss:
            best_loss = loss
            best_pz_x = pz_x
            best_py_z = py_z
print(best_loss)
print(best_pz_x)
print(best_py_z)
|
demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#Multipanel Figures
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# +
#define an array of angles and sines and cosines using numpy w linspace
# Sample the interval [0, 2*pi] at 100 evenly spaced angles.
x = np.linspace(0, 2 * np.pi, 100)
# linspace includes the endpoint, so the last sample is exactly 2*pi.
print(x[-1], 2 * np.pi)
# Base-frequency and 4x-frequency sine/cosine curves for the panels below.
y, z = np.sin(x), np.cos(x)
w, v = np.sin(4 * x), np.cos(4 * x)
# -
# to make a two panel plot side by side:
# +
#call subplots to generate a multipanel figure
# 'axis array': one row, two columns of panels.
f, axarr = plt.subplots(1, 2)

# Draw each curve on its own panel with labeled axes and a LaTeX title.
panels = [
    (y, 'sin(x)', r'$\sin(x)$'),
    (z, 'cos(x)', r'$\cos(x)$'),
]
for ax, (curve, ylab, title) in zip(axarr, panels):
    ax.plot(x, curve)
    ax.set_xlabel('x')
    ax.set_ylabel(ylab)
    ax.set_title(title)  # LaTeX rendering
# -
# ## adjustments
# +
#call subplots to generate a multipanel figure
# 'axis array': one row, two columns of panels.
f, axarr = plt.subplots(1, 2)

# Same two panels as above, drawn in a loop.
for ax, (curve, ylab, title) in zip(axarr, [(y, 'sin(x)', r'$\sin(x)$'),
                                            (z, 'cos(x)', r'$\cos(x)$')]):
    ax.plot(x, curve)
    ax.set_xlabel('x')
    ax.set_ylabel(ylab)
    ax.set_title(title)  # LaTeX rendering

# Add more horizontal space between the panels.
f.subplots_adjust(wspace=0.8)
# Fix the aspect ratios: 'equal' makes the data ranges equal on screen,
# while pi makes the second panel square (aspect = ratio of axis ranges).
axarr[0].set_aspect('equal')
axarr[1].set_aspect(np.pi)
# -
# ## legends
#
# keep the square figure, merge them into one, remove the titles, and add legends
# +
#adjust size
# Square canvas for the merged, single-axes figure.
fig = plt.figure(figsize=(6, 6))

# Plot the four curves (same order as before), each with a legend label.
for curve, label in [(y, r'$y = \sin(x)$'),
                     (z, r'$y = \cos(x)$'),
                     (w, r'$y = \sin(4x)$'),
                     (v, r'$y = \cos(4x)$')]:
    plt.plot(x, curve, label=label)

plt.xlabel(r'$x$')
plt.ylabel(r'$y(x)$')
plt.xlim([0, 2 * np.pi])
plt.ylim([-1.2, 1.2])
# Semi-transparent legend frame in the upper right-hand corner (loc=1).
plt.legend(loc=1, framealpha=0.95)
# 'gca' gets the current axes; this aspect makes the plot square.
plt.gca().set_aspect(np.pi / 1.2)
# -
|
multipanel_figs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Answers to Assignment 3 - Python Fundamentals I
#
# Complete the tasks below. You should turn in a single Jupyter notebook named `3_first_last.ipynb` (substitute your first and last name); please run Kernel > Restart & Run All on your notebook before turning in. Also turn in the input CSV file used in question B3. For questions where Python scripts are used instead of the Jupyter notebook (questions A1, B1, B2), instructions are provided about pasting output/scripts into Markdown cells in your notebook.
# #### Answer to A. Python basics, strings, printing (Shaw Exercises 1–10)
#
# 1. Take the file of Python code you generated in Exercise 1, called `ex1.py`, and run it on the command line. Copy the command and output from your terminal and paste it in a markdown cell in your Jupyter notebook. (Ex1)
# Content of `ex1.py`:
# ```python
# # ex1.py
# print("Hello World!")
# print("Hello Again")
# print("I like typing this.")
# print("This is fun.")
# print('Yay! Printing.')
# print("I'd much rather you 'not'.")
# print('I "said" do not touch this.')
# ```
# Answer:
# ```bash
# $ python ex1.py
# Hello World!
# Hello Again
# I like typing this.
# This is fun.
# Yay! Printing.
# I'd much rather you 'not'.
# I "said" do not touch this.
# ```
# Note that the symbols \`\`\` around a block of text render that text as code not plaintext. A single newline between rows of plaintext must be separated with a `<BR>` to be rendered on separate lines.
# 2. Print a sentence that contains a phrase in double quotes; print a sentence that contains an apostrophe. (Ex1)
# Answer:
# print the sentence with a comma separating it into two strings for readability
# (print() joins comma-separated arguments with a single space)
print('The Kuiper belt\'s over 100,000 "KBOs" (Kuiper belt objects)',
      'are found 30 to 50 AUs from the Sun.')
# 3. Deliberately enter five incorrect commands (in separate cells) and interpret the error messages. (Ex1)
# Answer:
# NOTE: the next five statements are *intentionally* invalid -- the assignment
# asks for five incorrect commands and an interpretation of each error message.
# Do not "fix" them.
# print something -- which version of python is this?
print "No parentheses"
# Missing parentheses in call to 'print'. Python 3 requires parentheses around those objects to be printed.
# a sentence with an apostrophe
print('It's my birthday')
# Invalid syntax. The single quote `'` either needs to be wrapped in double quotes `"` or escaped with `\`.
# let's try to increment x by 1
x = 5
x++
# Invalid syntax. Python does not support the `x++` increment operator or its variations. You can use `x += 1` or `x = x + 1`.
# list comprehension
x+1 for x in range(3)
# Invalid syntax. The list comprehension must be wrapped in square brackets like so: `[x+1 for x in range(3)]`.
# trying in vain to assign to a tuple
t = ('a', 'b', 'c')
t[3] = 'd'
# Just what it says: 'tuple' object does not support item assignment. If you need to change a tuple, you should probably just use a list instead.
# 4. Add comments to your code in \#2 and \#3 explaining what is happening. (Ex2)
# Answer: see above
# 5. Write and evaluate five mathematical expressions. (Ex3)
# Answer:
2 + 5      # addition
2 * 5      # multiplication
2**5       # exponentiation
2/5        # true division (always a float in Python 3)
2*11/7     # evaluates left to right: (2*11)/7
# Optional: Use the many features of the `math` module.
import math
math.pi
# The mathematical constant $\pi$ differs from the quotient 22/7, an approximation of $\pi$, by only 0.04%.
(22/7 - math.pi) / math.pi   # relative error of the 22/7 approximation
# 6. Assign values to three numeric and three string variables. (Ex4)
# Answer:
# Three numeric variables (the third derived from the first two).
num1 = 7
num2 = 11
num3 = num1 + num2
# Three string variables.
str1 = 'first'
str2 = 'second'
str3 = 'third'
# 7. Print values of the six variables you assigned in \#6. (Ex4–5)
# Answer:
# note we can hit enter after the open-parentheses
print('The sum of the %s number (%s) and the %s number (%s) is the %s (%s).' % (
    str1, num1, str2, num2, str3, num3))
# 8. Print two strings in raw format (%r formatter) and two strings in general/string format (%s formatter) with formatting characters. (Ch6)
# Answer:
# notice how the tab characters are rendered
# %r shows the repr (escape sequences visible); %s shows the printed form.
print('%r\t%r\n%s\t%s' % ('hello', 'A\tB', 'hello', 'A\tB'))
# 9. Concatenate two strings into a third string, then find the length of all three strings.
# Answer:
s1 = 'Bird is '
s2 = 'the word'
s3 = s1 + s2  # concatenation
len(s3)  # len(s3) == len(s1) + len(s2)
# 10. Print the three strings from \#9 with a tab between the first and second and a newline between the second and third.
#
# Answer:
print('%s\t%s\n%s' % (s1, s2, s3))
# 11. Print the three strings from \#9 with a stored formatter. (Ch7–8)
# Answer:
# The format string is just a value: it can be stored in a variable and reused.
fmtr = '%s\t%s\n%s'
print(fmtr % (s1, s2, s3))
# 12. Print a piece of text with five lines using both newline characters and a text block. (Ex9)
# Answer:
# Adjacent string literals are concatenated automatically by Python.
print('A mathematician, prophetic,\n'
      'invented a language, herpetic.\n'
      'decidedly brilliant,\n'
      'syntacticly elegant,\n'
      'Made ideas far less hypothetic.\n')
# A triple-quoted block keeps the literal newlines instead of \n escapes.
print('''A mathematician, prophetic,
invented a language, herpetic.
decidedly brilliant,
syntacticly elegant,
Made ideas far less hypothetic.''')
# 13. Print a string containing a backslash, single-quote, double-qoute, newline, and tab. (Ex10)
# Answer:
print('This is a backslash \\, single-quote \', double-qoute ", newline,\nand a\ttab.')
# #### Answer to B. Taking input, reading and writing files, functions (Shaw Exercises 11-26)
#
# 1. Write some code, without using functions, that calculates the average of 5 numbers. Do it three different ways:
#
# - Write a .py file that takes input from the command line using `input()`. After the script works, paste the text of the file into your Jupyter notebook.
# - Write a .py file that takes input from the command line using `argv`. After the script works, paste the text of the file into your Jupyter notebook.
# - Enter code into two Jupyter notebooks cells: the first stores value as variables, and the second computes the average.
# Answer for 1-1:
# ```python
# # assign3_qB1-1.py
# print('Hello! I am Average-Bot! Let me average five numbers please!')
# n1 = float(input('Number 1? '))
# n2 = float(input('Number 2? '))
# n3 = float(input('Number 3? '))
# n4 = float(input('Number 4? '))
# n5 = float(input('Number 5? '))
# print('Shazzam! The average is %s!' % ((n1+n2+n3+n4+n5)/5))
# ```
# Notice we have to convert the entered numbers (strings) to floats.
# Answer for 1-2:
# ```python
# # assign_qB1-2.py
# from sys import argv
#
# x = len(argv)
# if x != 6:
# print('Error! Only %s arguments!\n,'
# 'Average-Bot requires five numbers as arguments please!' % x)
# else:
# n1 = float(argv[1])
# n2 = float(argv[2])
# n3 = float(argv[3])
# n4 = float(argv[4])
# n5 = float(argv[5])
# print('Shazzam! The average is %s!' % ((n1+n2+n3+n4+n5)/5))
# ```
# Again, notice we have to convert the entered numbers (strings) to floats. What would happen if we converted them to ints instead?
# Answer for 1-3:
numbers = [1, 2, 3, 4, 5]
# Divide by len(numbers) rather than a hard-coded 5 so the average stays
# correct if the list of inputs changes length.
average = sum(numbers) / len(numbers)
average
# It's a lot easier to get input from the user when you are the user!
# 2. Using functions, write some code that takes two strings, prints them with the first letter capitalized, prints them with all letters capitalized, prints the first and last letter of each, prints the length of each, and then prints the concatenation of the two strings. Do it two different ways:
#
# - Write a .py file that uses `argv`. After the script works, paste the text of the file into your Jupyter notebook.
# - In your Jupyter notebook, comment out the `argv` portions and hard code in the values of your strings. Then make sure the code runs the same.
# Answer for 2-1:
# ```python
# # assign3_qB2.py
# from sys import argv
#
# s1 = argv[1] # 'apple'
# s2 = argv[2] # 'banana'
#
# def exerciseB2(string1, string2):
# print(string1.capitalize(), string2.capitalize())
# print(string1.upper(), string2.upper())
# print(string1[0], string1[-1], string2[0], string2[-1])
# print(len(string1), len(string2))
# print(len(string1)+len(string2))
#
# exerciseB2(s1, s2)
# ```
# Answer for 2-2:
# +
# from sys import argv
s1 = 'apple' # argv[1]
s2 = 'banana' # argv[2]


def exerciseB2(string1, string2):
    """Print the views of two strings the task asks for: capitalized,
    upper-cased, first/last letters, individual lengths, and finally
    their concatenation."""
    print(string1.capitalize(), string2.capitalize())
    print(string1.upper(), string2.upper())
    print(string1[0], string1[-1], string2[0], string2[-1])
    print(len(string1), len(string2))
    # Bug fix: the assignment asks to print the *concatenation* of the two
    # strings, but the original printed the sum of their lengths.
    print(string1 + string2)


exerciseB2(s1, s2)
# -
# 3. Using a text editor, create a comma-separated values (CSV) file with 5 columns and 5 rows. Save it in the same directory as your Jupyter notebook. In the Jupyter notebook, read and print the file in different ways, and write new files, as follows:
#
# - Read your CSV file using `read()`, `readline()`, or `readlines()`, and print the output to the screen (`print()` command is optional in notebooks!).
# - Do the same but use a `with` block and a different one of `read()`, `readline()`, or `readlines()`.
# - Using either of the two above methods, then change one row of data, and write your data to a new CSV file.
# - Read your CSV file using Pandas and display the resulting DataFrame.
# - Save your DataFrame to a new file using Pandas.
# Answer for 3-1:
# Open, read the whole file, print it, then close the handle explicitly.
txt = open('../../data/assign3_qB3.csv')
print(txt.read())
txt.close()
# Answer for 3-2:
# The with-block closes the file automatically when the block exits.
with open('../../data/assign3_qB3.csv', 'r') as f:
    print(f.read())
# Answer for 3-3:
with open('../../data/assign3_qB3.csv', 'r') as f:
    data = f.readlines()
# Replace the third line (index 2) -- readlines() keeps each trailing
# newline, so the replacement string supplies its own '\n'.
data[2] = 'row2,17,18,19,20\n'
with open('assign3_qB3a.csv', 'w') as target:
    for row in data:
        target.write(row)
data
# Answer for 3-4:
import pandas as pd
# index_col=0 uses the first CSV column as the DataFrame index.
df = pd.read_csv('../../data/assign3_qB3.csv', index_col=0)
df
# Answer for 3-5:
df.to_csv('assign3_qB3b.csv')
|
assignments/answers/assignment3_answers.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Recursion vs Loops
# ## MSDS-432 - Kiley - Week 3 Assignment
# ### Assignment Overview
# The mini-programming assignment requires us to implement and test the performance of a recursive factorial algorithm manually applied in Python.
#
# The algorithm's performance will be measured using Big O notation. <br>
# **Big O Notation** is a way to communicate the number of operations required to perform an algorithm in the worst-case scenario. The number of operations is represented by n.
#
# For both recursive and loop factorial algorithms, the worst-case scenario is also the best-case scenario, where the algorithm must be applied n times as it works through n! . This requires that the algorithm operate in O(n) time.
#
# This assignment is git controlled. The repo was prepared using git bash.
#
# # Setting up working environment, define and test key functions
# The packages selected in "In[1]" are all the packages required for the subsequent operations. <br>
# They are named based on standard python naming conventions.
# Set up working environment
import numpy as np
import time
import pandas as pd
import matplotlib.pyplot as plt
# **fact** function leverages code from *Grokking Algorithms*; the code was sourced from github.
# https://github.com/egonSchiele/grokking_algorithms/blob/master/03_recursion/python/03_factorial.py
#
#
# We will define it as a function so that it may be applied to multiple trials consistently and quickly.
# Recursively define factorial
def fact(x):
    """Return x! computed recursively.

    The base case is x <= 1 rather than x == 1 so that fact(0) returns 1
    (0! is defined as 1) instead of recursing past zero until the
    interpreter's recursion limit is hit.
    """
    if x <= 1:
        return 1
    else:
        return x * fact(x - 1)
# **loop_fact** function leverages code from *the crazy programer*; the code was adapted from the link below.
#
# https://www.thecrazyprogrammer.com/2017/04/python-program-find-factorial-number-using-loop.html
#
#
# We will define it as a function so that it may be applied to multiple trials consistently and quickly.
# Python program to find the factorial of a number provided by the user.num=6
def loop_fact(num):
    """Return num! using an iterative running product.

    Starting the range at 2 skips the no-op multiplication by 1; for
    num < 2 the range is empty and the function returns 1.
    """
    product = 1
    for factor in range(2, num + 1):
        product *= factor
    return product
# Check that algorithm works
# Sanity check: all three approaches must agree that 5! == 120.
print('The results for the recursive method are:', fact(5))
print('The results for the loop method are:', loop_fact(5))
print('The results for the manual method are:', 5*4*3*2*1)
# **Results of the test: The algorithms works as expected.** <br>
# The assignment has asked that the algorithms be tested at various values of random numbers between 100 & 500.
# To ensure that the same tests are performed on each data set, a function will be generated to capture the requested tests.
#
# The requested output are:
# * The number being factorized
# * The output of the factorization
# * Time (in milliseconds) to run
#
# **Note** The factorized value for these requested numbers is too large to be stored in a numpy array. Because of this the resulting output is 0.
#
# These output will be put into a pandas dataframe called "df".
# # Generate And Test Random Data
# The random seed generator provided in the project requirements returned the value 922.<br>
# Leveraging a seed as such creates reproducible work.<br>
# That seed has been set below.
#
# Per the requirements, use a random number generator with a uniform probability of each value occurring to generate a single array of length 10. The values are to be between 100 & 500.
#
# Randomly generated values will be rounded to the nearest whole number so that taking their factorial is well defined.
#
# +
# Set Random Seeds
# NOTE: np.random.seed() returns None, so SEED is None -- the call is made
# for its side effect of seeding the global RNG (seed value 922).
SEED = np.random.seed(922)

# Create array 1
# Ten uniform random integers in [100, 500) for the factorial trials.
a1 = list(np.random.randint(100,500,10))

print("The array's values are: ", a1)
print("The array's length is: ", len(a1))
print("The max value in the array is: ", np.max(a1))
print("The min value in the array is: ", np.min(a1))
# +
# Create array 2, basic array to test function
# Small known values whose factorials fit comfortably in native ints.
a2 = [1,2,3,4,5,6,7,8,9,10]

print("The array's values are: ", a2)
print("The array's length is: ", len(a2))
print("The max value in the array is: ", np.max(a2))
print("The min value in the array is: ", np.min(a2))
# -
# Here we define a function to perform the test for both versions of the algorithm.
#
# This ensures that we get consistent output and can quickly iterate through the trials.
def run_trial(method, sel_array):
    """Apply *method* to every value in *sel_array*, timing each call.

    Parameters
    ----------
    method : callable taking one value (e.g. fact or loop_fact).
    sel_array : iterable of inputs to time the method against.

    Returns
    -------
    (results, timings) where results[i] == method(sel_array[i]) and
    timings[i] is the wall-clock duration of that call in milliseconds,
    rounded to 6 decimal places.
    """
    # Create an empty list to store results
    fun_results = []
    # Create an empty list to store timer results
    fun_timer = []
    # Loop through the array, timing each individual call.
    for value in sel_array:
        # time.perf_counter() replaces time.clock(), which was deprecated in
        # Python 3.3 and removed in Python 3.8.
        start = time.perf_counter()
        fun_results.append(method(value))
        end = time.perf_counter()
        fun_timer.append(round((end - start)*1000, 6))  # Convert to milliseconds
    return fun_results, fun_timer
#
# # Run Trials and Create Pandas Data Frame
#
# Most of the work has been completed, this next step passes the generated data to the selected functions defined previously. The arrays are then summarized into a pandas data frame for further analysis.
# Run with Recursive Function
# Unpack both outputs from a single call: the original called run_trial()
# twice per method (once for each tuple element), which redid all the work
# and reported timings from a different execution than the results shown.
results, timer = run_trial(fact, a1)

# Run with Loop Function
results2, timer2 = run_trial(loop_fact, a1)
# +
# Create pandas dataframe of trials
# One row per trial: the input number, its factorial, and the two timings
# (milliseconds) collected above.
df = pd.DataFrame({'number':a1,'factorial':results,
                   'recursive_timer':timer, 'loop_timer':timer2})

# Print pandas dataframe of trials
print(df)
# -
# The resulting data frame produces results that are consistent with our expectations.
#
# **Note: Once again the factorial output is too large to be stored in a numpy array and shows up at 0, using a2, the testing array, the correct values are generated in the output**
#
# # Visualize Results
# +
# Create plot of time versus the number being factorialised:
# blue dots = recursive timings, red dots = loop timings.
# Each series is given a label so plt.legend() has handles to show --
# the original called legend() on unlabeled plots, which produces an
# empty legend (and a "no handles with labels" warning).
plt.plot(df['number'], df['recursive_timer'], 'bo', label='recursive')
plt.plot(df['number'], df['loop_timer'], 'ro', label='loop')

# Label Graph
plt.ylabel('Time in Milliseconds')
plt.xlabel('Number to factorial')
plt.title('Recursion vs. Loop Processing Time')

# Create Legend and define axes limits [xmin, xmax, ymin, ymax]
plt.legend()
plt.axis([0, 550, -1, 2])
plt.show()
# -
# The resulting graphic shows all tests performed as part of this assignment.
# The algorithm operates extremely quickly and hardly takes any time to run.
#
# **Recursion does appear to be slower than the loop timer, which is consistent with expectations.**
#
# Data engineers are required to consider how to ensure that an operation can perform effectively at scale. This exercise helps to reinforce the value of measuring and testing algorithms as they scale.
|
3_recursion_v_loops.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:ap-northeast-2:806072073708:image/datascience-1.0
# ---
# +
import sagemaker
print(sagemaker.__version__)
sess = sagemaker.Session()
# + language="sh"
# wget -N https://sagemaker-sample-data-us-west-2.s3-us-west-2.amazonaws.com/autopilot/direct_marketing/bank-additional.zip
# unzip -o bank-additional.zip
# -
bucket = sess.default_bucket()
prefix = 'sagemaker/DEMO-automl-dm/'
s3_input_data = sess.upload_data(path="./bank-additional/bank-additional-full.csv", key_prefix=prefix+'input')
print(s3_input_data)
# ! head bank-additional/bank-additional-full.csv
from sagemaker.automl.automl import AutoML

# Configure the AutoPilot job: predict the 'y' column, cap each candidate's
# training at 600 s, try up to 250 candidates, and stop the whole job after
# 3600 s regardless of progress.
auto_ml_job = AutoML(
    role = sagemaker.get_execution_role(),
    sagemaker_session = sess,
    target_attribute_name = 'y',
    output_path = 's3://{}/{}/output'.format(bucket,prefix),
    max_runtime_per_training_job_in_seconds = 600,
    max_candidates = 250,
    total_job_runtime_in_seconds = 3600
)

# Launch asynchronously: wait=False returns immediately; progress is polled
# below with wait_for().
auto_ml_job.fit(inputs=s3_input_data, logs=False, wait=False)
# +
from time import sleep
def wait_for(job, state):
    """Block until *job* leaves the secondary status *state* or terminates.

    Polls job.describe_auto_ml_job() once a minute while the job is still
    'InProgress' and its secondary status matches *state*, then prints the
    final (status, secondary status) pair.  *state* may be a single status
    name or a tuple of names.

    Fixes over the original: the passed-in *job* is actually used (the
    original shadowed the parameter and read the global auto_ml_job), and
    membership tests compare whole status names via tuples -- the original
    ``in ('InProgress')`` / ``in state`` were accidental substring tests on
    bare strings.
    """
    states = (state,) if isinstance(state, str) else tuple(state)
    desc = job.describe_auto_ml_job()
    job_status = desc['AutoMLJobStatus']
    job_sec_status = desc['AutoMLJobSecondaryStatus']
    if job_status not in ('Stopped', 'Failed'):
        while job_status == 'InProgress' and job_sec_status in states:
            sleep(60)
            desc = job.describe_auto_ml_job()
            job_status = desc['AutoMLJobStatus']
            job_sec_status = desc['AutoMLJobSecondaryStatus']
        print(job_status, job_sec_status)
# -
wait_for(auto_ml_job, 'AnalyzingData')
# +
job = auto_ml_job.describe_auto_ml_job()
#print(job)
job_candidate_notebook = job['AutoMLJobArtifacts']['CandidateDefinitionNotebookLocation']
job_data_notebook = job['AutoMLJobArtifacts']['DataExplorationNotebookLocation']
print(job_candidate_notebook)
print(job_data_notebook)
# + magic_args="-s $job_candidate_notebook $job_data_notebook" language="sh"
# aws s3 cp $1 .
# aws s3 cp $2 .
# -
wait_for(auto_ml_job, 'FeatureEngineering')
wait_for(auto_ml_job, 'ModelTuning')
# +
import pandas as pd
from sagemaker.analytics import ExperimentAnalytics
job = auto_ml_job.describe_auto_ml_job()
exp = ExperimentAnalytics(
experiment_name=job['AutoMLJobName']+'-aws-auto-ml-job',
metric_names=['Objective:F1']
)
df = exp.dataframe()
print(df)
#print("Number of jobs: ", len(df))
#if (len(df) !=0):
# df = pd.concat([df['Objective:f1'], df.drop(['ObjectiveMetric'], axis=1)], axis=1)
# df.sort_values('ObjectiveMetric', ascending=0)[:5]
# -
job_best_candidate = auto_ml_job.best_candidate()
print(job_best_candidate['CandidateName'])
print(job_best_candidate['FinalAutoMLJobObjectiveMetric'])
# +
import boto3
job_outputs_prefix = '{}/output/{}'.format(prefix, job['AutoMLJobName'])
s3_bucket = boto3.resource('s3').Bucket(bucket)
s3_bucket.objects.filter(Prefix=job_outputs_prefix).delete()
# -
|
Chapter 03/AutoPilot with SageMaker SDK.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Preparation
#
# Load necessary modules and initialize relevant devices
# %run seisidd/tomo_init.py
mode.set('dryrun')
A_shutter = get_shutter('dryrun')
tomostage = get_motors('dryrun')
preci = tomostage.preci
samX = tomostage.samX
ksamX = tomostage.ksamX
ksamZ = tomostage.ksamZ
samY = tomostage.samY
psofly = get_fly_motor(mode='dryrun')
tomostage.psofly = psofly
det = get_detector(mode='dryrun')
tomostage.preci
# You can check the __predefined variables and functions__ in the following two dictionaries:
#
# ```
# keywords_vars = {} # {name: short description}
# keywords_func = {} # {name: short descciption}
# ```
#
# or the corresponding functions:
#
# ```
# list_predefined_vars()
# list_predefined_func()
# ```
mode
print('func:')
list_predefined_func()
# __Fill__ out the __experiment details__ below for the metadata handler (MongoDB)
RE.md['beamline_id'] = 'APS 6-ID-D'
RE.md['proposal_id'] = 'internal test'
RE.md['pid'] = os.getpid()
RE.md['login_id'] = USERNAME + '@' + HOSTNAME
RE.md['BLUESKY_VERSION'] = bluesky.__version__
RE.md['OPHYD_VERSION'] = ophyd.__version__
RE.md['apstools_VERSION'] = apstools.__version__
RE.md['SESSION_STARTED'] = datetime.isoformat(datetime.now(), " ")
# # Initialization
#
# Perform control initialization for all related hardware.
mode.set('dryrun')
# Install global __suspenders__ as a safeguard for this experiment
# +
import apstools.devices as APS_devices
from bluesky.suspenders import SuspendFloor

aps = APS_devices.ApsMachineParametersDevice(name="APS")

# monitor ring current: suspend scans when aps.current drops below 2 and
# resume once it recovers above 10 (ring-current units; presumably mA --
# confirm against the APS device documentation).
suspend_APS_current = SuspendFloor(aps.current, 2, resume_thresh=10)
RE.install_suspender(suspend_APS_current)

# monitor shutter status
# NOTE: do not install right away since we need to close the shutter to collect dark field
suspend_A_shutter = SuspendFloor(A_shutter.pss_state, 1)
# -
# Import the __necessary__ predefined plan stubs
# +
from seisidd.tomo_plans import tomo_scan
scan_cfg = 'seisidd/config/tomo_scan_template.yml'
# -
# Make sure check the scan with _summarize_plan_ before running
from bluesky.simulators import summarize_plan
summarize_plan(tomo_scan(det, tomostage, A_shutter, suspend_A_shutter, scan_cfg, init_motors_pos))
RE(tomo_scan(det, tomostage, A_shutter, suspend_A_shutter, scan_cfg, init_motors_pos))
|
tomo_template_deprecated.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Functions
# ## Type-Along Notebook
# ***
# ## Learning Objectives
# In this lesson you will:
#
# 1. Learn the fundamentals of functions in Python
# 2. Understand the difference between local and global variables
# 3. Write Python code to define and call your own functions
# 4. Import a module to expand Python's functionality
#
# ## New modules covered in this lesson:
# >- `random`
#
# ## New functions covered in this lesson:
# >- `random.randint()`
#
#
# ## Links to topics and functions:
# >- <a id='def'></a>[def Statement](#def-Statements-with-Parameters)
# >- <a id='LocalGlobal'></a>[Local and Global Scope](#Local-and-Global-Scope)
# >- <a id='random'></a>[random() Function](#Return-values-and-return-statements)
# >- <a id='HW'></a> [Homework](#Homework)
#
# #### References: Sweigart(2015, pp. 61-77)
#
# #### Great tool for testing code and seeing how your code is working: http://pythontutor.com/visualize.html#mode=display
# + [markdown] slideshow={"slide_type": "slide"}
# # Initial Notes on Functions
# >- We have learned how to use some Python built-in functions such as: `print()`, `input()`, and `list.append()`
# >- A main reason for the use of functions is to group code that gets executed multiple times
# >>- If you find yourself duplicating code that is usually a cue that you could write a function to be more efficient in your code
# >- One of the strengths of Python is that people are continually creating functions for various tasks
# >>- Many people rely on various Python modules with built-in functions to write code
# >>- However, there could be instances where you would need to create your own functions
# >>- Regardless if you ever have to write your own functions in practice or not, it is still good to understand the fundamentals of functions
#
# ### Some Function Basic Notes and "Talking" Functions
#
# 1. "Calling" a function is when you write the function in your code: print(), input(), etc are "calls" to these functions
# 2. "Passing" in values, called arguments, is when you input something into the function. For example:
# >- In general: print(*argument*)
# >- A specific argument: print('Hello World')
# >>- Here we are "calling" the print function and "passing" the value/argument 'Hello World' into it
# 3. A *parameter* is a variable that an argument is stored in when a function is called
#
# Let's work through some examples to get more familiar with functions.
# -
# ##### Notice when we run the previous cell we do not see an output. Why?
# ##### Can we "pass" our buffs() function to another function?
# ##### Why did we get the extra line of output of 'None'?
# >- Because we did not specify a return statement, Python automatically added a return None value to our function
# >>- So, when we called buffs() in the print() function, the code runs through all of the buffs() function plus prints the return value
# >>- More on function returns later in this lesson
# ##### Could we write the same program without a function?
# ### The answer to the previous question is, yes, you could get those 4 print() statements without building a function
# But consider this programming scenario:
# >- You have a program with hundreds of lines of code
# >- You need to print the previous 4 print statements multiple times throughout your longer program
# >- Then, someone requests that instead of 'BUFFS', we want 'Buffaloes!' printed
# >>- If you use a function to define and call the 4 print statements, you only have to update 'BUFFS' to 'Buffaloes' one time in the function
# >>- If you copy and pasted those 4 lines throughout your program, you would have to update 'BUFFS' to 'Buffaloes' every where in your code
# # `def` Statements with Parameters
#
# ## Task: create a function that:
# 1. Defines a function named, `hi`, with one parameter, `name`
# 2. When the function is called and a `name` is passed to it:
# >- Prints "Hello {name}" on one line
# >- Prints "What did you do this weekend, {name}" on the other line
#
# Note: the {name} in the print statements should be values passed to the function.
# + slideshow={"slide_type": "slide"}
# -
# #### Call the function, `hi()`, and pass your name to it
# #### Notes on the previous example
# 1. def hi(name): #here we are defining a function named, hi(), with a *parameter*, name.
# 2. The next two lines of code define what our function will do. Print two statements.
# 3. Then in the next cell we "called" our hi(name) function
# >- Then we "passed" specific values to the name *parameter*
# ### Could we write our previous function to ask for user inputted name values? Let's try it.
# Now call your hello() function and see what happens
#
# ##### Q: What value is stored in the name variable now?
#
# Try it and see what we get if we ask Python to show us name
# #### We should have gotten a "NameError: name 'name' is not defined". Why?
# >- Notice that we defined the name variable inside of the function defintion, or within it's local scope
# # Local and Global Scope
#
# Parameters and variables that are assigned in a called function are said to exist in that function's `Local Scope`
# >- A variable that exists in a local scope, is called a *local variable*
#
# Parameters and variables that are assigned outside all functions are said to exist in the `Global Scope`
# >- A variable that exists in the global scope is called a *global variable*
#
# Some Notes on `scope`
# >- Think of the scope as a storage container for variables
# >>- Once the scope is destroyed, all the values stored in the scope's variables are forgotten
# >- There is only one global scope and it is created when your program begins
# >>- When the program terminates, the global scope is destroyed and all variable values are forgotten
# >- A local scope is created whenever a function is called
# >>- Any variables defined within the function belong to the local scope
# >>- When a function returns, the local scope is destroyed and any values in the local variables are forgotten
#
# Why do we need to understand scope?
# 1. Code written in the global scope cannot use any local variables
# 2. But, a local scope can access global variables
# 3. Code written in one function's local scope cannot use variables from another local scope
# 4. You can use the same name for different variables if they are in different scopes
#
# Why does Python use different scopes?
# >- Mainly to help with debugging our code
# >>- As programs get to be 100's if not 1000's of lines of code scope becomes more important
# >>- If all variables were global, it is usually harder to debug a program
# >>- By using local variables, the error code can more accurately point us to the potential problem
#
# Let's work through some examples to get familiar with global and local scope
#
# <a id='top'></a>[TopPage](#Teaching-Notes)
#
# #### What is the current value of eggs?
# ##### On your own: walk through the previous example in the Python visualization tool to see what is going on
#
# http://pythontutor.com/visualize.html#mode=display
#
# Q: Why is the output 1234 and not 0?
# #### Another example to help understand local and global variables
# ##### Now lets run this code through the visualizer tool
# >- Take note of when the local scopes are defined and then destroyed as we step through the code
#
# http://pythontutor.com/visualize.html#mode=display
# ### 4 Rules to determine whether a variable is local or global
# 1. If a variable is being used in the global scope(always outside of functions), then it is always global
# 2. If there is a global statement for a variable in a function, it is a global variable
# >- For example, the following uses the global statement to define eggs as a global variable
# def spam():
# global eggs
# eggs = 'spam'
# 3. If the global statement is not used and the variable is used in an assignment statement in a function, it is a local variable
# >- For example, eggs is local to the bacon() function below:
# def bacon():
# eggs = 1234
# 4. But if a variable is used in a function but not in an assignment statement, it is a global variable.
# >- For example, because eggs in the following code is not used in an assignment statement it is global
# def ham():
# print(eggs)
#
# ## Return values and return statements
# >- Definition: a *return value* is the value that a function call evaluates to.
# >>- If a function does not have a return statement, Python automatically adds return None
# >>- The None value is the only value of the NoneType data type
# # Creating a `Magic 8 Ball` Program With a Function
# Task: Create a Magic 8 Ball program that randomly returns the various answers of the Magic 8 Ball game to the screen.
# 1. Create a function called `getAnswer` that has the parameter variable, `answerNumber`
# 2. Pick 9 of the possible responses from the Magic 8 Ball designed by Mattel in the 1950s
# 3. Design a control flow in your `getAnswer` function that will output the various Magic 8 Ball text
# 4. Create a variable, `randNum`, that is assigned random numbers from 1-9
# 5. Print a user's fortune when the function is called
# ### First, let's define our Magic 8 Ball responses in a function, `getAnswer`
# >- If you don't know possible values of the Magic 8 Ball, google it and find out
# #### Now, let's try inputting a few values into our new function to see if it is working correctly.
# ### Now, let's figure out a way to get random numbers as values for our `answerNumber` parameter
# #### Importing the module, `random`
# >- Google random functions for Python and see what you get.
# >- You should find the module, `random`, with a function in it called, `randint(a, b)`
# >>- The documentation on `randint(a,b)` says that it returns random values in a range with a,b parameters inclusive
# ##### Check for help and see the various methods within the `random` module
# >- Note the `randint()` method...
# ##### Let's try the `randint()` method
# >- Run this cell about 10 times and note the output each time
# #### Now, how do we combine the random number generator with our `getAnswer` function to generate random Magic 8 Ball responses?
# 1. What does our function ask for in terms of parameter(s)?
# 2. What does the `random.randint()` function return?
# 3. How can we write code that combines (1) and (2) to generate random Magic 8 ball responses?
# ##### Here's one way by creating a new variable, `randNum`, and assigning it random integers
# ##### Here's another way by passing the `randint()` values directly into our function
# # Exception Handling
# >- Exception handling is writing code so that our program can try and fix errors instead of completely crashing with error codes
# >>- We handle errors with the `try` and `except` statements
# >>- Good exception statements will try and point the user to the mistake so they can easily fix it
#
# Let's look at some examples of how to do this
# ### Task: create a function, `percent()`, that accepts two arguments, `num` and `denom`, and returns the quotient
# ##### Now, pass the `percent` function to several print statements with various values for `num` and `denom`
# ##### But what if someone was using our `percent(num, denom)` function and passed a 0 value to the `denom` parameter?
# ##### How can we fix the division by zero error in our function?
# >- Here's one way using the error code
# ##### Another way using an `if` statement
# <a id='top'></a>[TopPage](#Teaching-Notes)
# # Homework
# >- See the homework notebook for function practice problems
|
Week 4/Functions_Student.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# 
# # Automated Machine Learning
# _**Regression with Aml Compute**_
#
# ## Contents
# 1. [Introduction](#Introduction)
# 1. [Setup](#Setup)
# 1. [Data](#Data)
# 1. [Train](#Train)
# 1. [Results](#Results)
# 1. [Test](#Test)
#
#
# ## Introduction
# In this example we use the Hardware Performance Dataset to showcase how you can use AutoML for a simple regression problem. The Regression goal is to predict the performance of certain combinations of hardware parts.
#
# If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace.
#
# In this notebook you will learn how to:
# 1. Create an `Experiment` in an existing `Workspace`.
# 2. Configure AutoML using `AutoMLConfig`.
# 3. Train the model using local compute.
# 4. Explore the results.
# 5. Test the best fitted model.
# ## Setup
#
# As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
# +
import logging
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.core.dataset import Dataset
from azureml.train.automl import AutoMLConfig
# -
# This sample notebook may use features that are not available in previous versions of the Azure ML SDK.
# Sanity-check the SDK version against the one the notebook was authored with.
print("This notebook was created using version 1.19.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
# +
# Connect to the AzureML workspace described by the local config.json.
ws = Workspace.from_config()

# Choose a name for the experiment.
experiment_name = 'automl-regression'

experiment = Experiment(ws, experiment_name)

# Summarise workspace/experiment details in a one-row table for display.
output = {}
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Run History Name'] = experiment_name

# Show full cell contents without truncation: -1 was deprecated (and later
# removed) by pandas; None is the supported way to disable the width limit.
pd.set_option('display.max_colwidth', None)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
# -
# ### Using AmlCompute
# You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource.
# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

# Choose a name for your CPU cluster
cpu_cluster_name = "reg-cluster"

# Verify that cluster does not exist already
try:
    # Reuse the cluster if it is already provisioned in this workspace.
    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    # Not found: provision a new autoscaling CPU cluster (up to 4 nodes).
    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',
                                                           max_nodes=4)
    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)

# Block until provisioning has finished so later cells can submit runs to it.
compute_target.wait_for_completion(show_output=True)
# -
# ## Data
#
# ### Load Data
# Load the hardware dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model.
# +
# Load the hardware performance dataset straight from the public blob store.
data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/machineData.csv"
dataset = Dataset.Tabular.from_delimited_files(data)

# Split the dataset into train and test datasets
train_data, test_data = dataset.random_split(percentage=0.8, seed=223)

# Name of the target column the model will learn to predict.
label = "ERP"
# -
# ## Train
#
# Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.
#
# |Property|Description|
# |-|-|
# |**task**|classification, regression or forecasting|
# |**primary_metric**|This is the metric that you want to optimize. Regression supports the following primary metrics: <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>|
# |**n_cross_validations**|Number of cross validation splits.|
# |**training_data**|(sparse) array-like, shape = [n_samples, n_features]|
# |**label_column_name**|(sparse) array-like, shape = [n_samples, ], targets values.|
#
# **_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)
# + tags=["automlconfig-remarks-sample"]
# AutoML run settings; see the AutoMLConfig documentation for the full list.
automl_settings = {
    "n_cross_validations": 3,
    "primary_metric": 'r2_score',
    "enable_early_stopping": True,
    "experiment_timeout_hours": 0.3,  # for real scenarios we recommend a timeout of at least one hour
    "max_concurrent_iterations": 4,
    "max_cores_per_iteration": -1,  # -1 = use all available cores on each node
    "verbosity": logging.INFO,
}

automl_config = AutoMLConfig(task = 'regression',
                             compute_target = compute_target,
                             training_data = train_data,
                             label_column_name = label,
                             **automl_settings
                             )
# -
# Call the `submit` method on the experiment object and pass the run configuration. Execution of remote runs is asynchronous. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous.
# Submit the AutoML experiment; show_output=False returns immediately because
# remote execution is asynchronous.
remote_run = experiment.submit(automl_config, show_output = False)
# +
# If you need to retrieve a run that already started, use the following code
#from azureml.train.automl.run import AutoMLRun
#remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')
# -
# Display the run object (renders a status summary/link in a notebook).
remote_run
# ## Results
# #### Widget for Monitoring Runs
#
# The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.
#
# **Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details.
from azureml.widgets import RunDetails

# Live-updating widget tracking the child iterations of the AutoML run.
RunDetails(remote_run).show()

# Block until the remote run finishes before querying its results below.
remote_run.wait_for_completion()
# ### Retrieve the Best Model
#
# Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.
# Best run/model over all iterations according to the primary metric.
best_run, fitted_model = remote_run.get_output()
print(best_run)
print(fitted_model)

# #### Best Model Based on Any Other Metric
# Show the run and the model that has the smallest `root_mean_squared_error` value (which turned out to be the same as the one with largest `spearman_correlation` value):
lookup_metric = "root_mean_squared_error"
best_run, fitted_model = remote_run.get_output(metric = lookup_metric)
print(best_run)
print(fitted_model)

# #### Model from a Specific Iteration
# Show the run and the model from the third iteration:
iteration = 3
third_run, third_model = remote_run.get_output(iteration = iteration)
print(third_run)
print(third_model)
# ## Test
# +
# Convert the tabular datasets to pandas and split off the 'ERP' label column,
# filling missing values with 0 in both features and labels.
test_data = test_data.to_pandas_dataframe()
y_test = test_data['ERP'].fillna(0)
# drop(columns=...) replaces the positional-axis form drop('ERP', 1), which
# was deprecated in pandas 1.x and removed in pandas 2.0.
test_data = test_data.drop(columns='ERP')
test_data = test_data.fillna(0)

train_data = train_data.to_pandas_dataframe()
y_train = train_data['ERP'].fillna(0)
train_data = train_data.drop(columns='ERP')
train_data = train_data.fillna(0)
# +
# Score both splits with the fitted model and compute the residuals
# (truth minus prediction) used by the plots below.
y_pred_train = fitted_model.predict(train_data)
y_residual_train = y_train - y_pred_train

y_pred_test = fitted_model.predict(test_data)
y_residual_test = y_test - y_pred_test
# +
# %matplotlib inline
from sklearn.metrics import mean_squared_error, r2_score

# Set up a multi-plot chart: train residuals on the left, test on the right.
f, (a0, a1) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[1, 1], 'wspace':0, 'hspace': 0})
f.suptitle('Regression Residual Values', fontsize = 18)
f.set_figheight(6)
f.set_figwidth(16)

# Plot residual values of training set.
a0.axis([0, 360, -100, 100])
a0.plot(y_residual_train, 'bo', alpha = 0.5)
# Reference line at residual = 0.
a0.plot([-10,360],[0,0], 'r-', lw = 3)
# NOTE(review): the text y-positions (170/140) lie outside the y-limits set
# above (-100..100), so these annotations may not be visible -- confirm.
a0.text(16,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_train, y_pred_train))), fontsize = 12)
a0.text(16,140,'R2 score = {0:.2f}'.format(r2_score(y_train, y_pred_train)),fontsize = 12)
a0.set_xlabel('Training samples', fontsize = 12)
a0.set_ylabel('Residual Values', fontsize = 12)

# Plot residual values of test set.
a1.axis([0, 90, -100, 100])
a1.plot(y_residual_test, 'bo', alpha = 0.5)
a1.plot([-10,360],[0,0], 'r-', lw = 3)
a1.text(5,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred_test))), fontsize = 12)
a1.text(5,140,'R2 score = {0:.2f}'.format(r2_score(y_test, y_pred_test)),fontsize = 12)
a1.set_xlabel('Test samples', fontsize = 12)
# Hide the duplicate y tick labels on the shared axis.
a1.set_yticklabels([])

plt.show()
# -
# %matplotlib inline
# Predicted vs. true values on the test split.
# color='' (empty string) is not a valid matplotlib color specification and
# raises a ValueError on current matplotlib; blue contrasts with the green
# truth markers.
test_pred = plt.scatter(y_test, y_pred_test, color='b')
test_test = plt.scatter(y_test, y_test, color='g')
plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)
plt.show()
|
how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Parte 2: Intro al Aprendizaje Federado
#
# En la sección previa, aprendimos sobre punteros de tensores (PointerTensors), que crean la infraestructura subyacente que necesitamos para preservar la privacidad con aprendizaje profundo. En esta sección, veremos cómo utilizar estas herramientas básicas para implementar nuestro primer algoritmo de aprendizaje profundo con preservación de privacidad, el aprendizaje federado.
#
# Autores:
# - <NAME> - Twitter: [@iamtrask](https://twitter.com/iamtrask)
#
# Traductores:
# - <NAME> - Twitter: [@arturomf94](https://twitter.com/arturomf94)
# - <NAME> - Twitter: [@ricardopretelt](https://twitter.com/ricardopretelt)
#
#
# ### ¿Qué es el aprendizaje federado?
#
# Es una manera simple y poderosa para entrenar modelos de aprendizaje profundo. Si piensas sobre datos de entrenamiento, siempre son el resultado de algún tipo de proceso de recolección. La gente (por medio de dispositivos) generan datos al registrar eventos del mundo real. Normalmente, estos datos son agregados a una ubicación central y única de tal manera que uno pueda entrenar un modelo de aprendizaje de máquina. El aprendizaje federado le da la vuelta a esto.
#
# En lugar de llevar los datos de entrenamiento al modelo (o servidor central), uno lleva el modelo a los datos de entrenamiento (donde sea que estén).
#
# La idea es que, de esta forma, la única copia permanente de los datos pertenece a quien los está creando, y así mantiene el control de su acceso. Cool, no?
# # 2.1 Un Ejemplo de Aprendizaje Federado de Juguete
#
# Comencemos entrenando un modelo de juguete con el método centralizado. Este modelo es de lo más simple. Primero necesitamos:
#
# - Un conjunto de datos de juguete
# - Un modelo
# - Una lógica de entrenamiento básica para que el modelo se ajuste a los datos.
#
# Nota: Si esta API no te es familiar visita [fast.ai](http://fast.ai) y toma el curso antes de continuar con este tutorial.
import torch
from torch import nn
from torch import optim
# +
# A toy dataset: four 2-feature points; the label equals the first feature.
data = torch.tensor([[0,0],[0,1],[1,0],[1,1.]], requires_grad=True)
target = torch.tensor([[0],[0],[1],[1.]], requires_grad=True)

# A toy model: a single linear layer mapping 2 inputs to 1 output.
model = nn.Linear(2,1)

def train():
    """Fit the global `model` to `data`/`target` with 20 steps of plain SGD."""
    # Training logic
    opt = optim.SGD(params=model.parameters(),lr=0.1)
    for iter in range(20):

        # 1) erase previous gradients (if they exist)
        opt.zero_grad()

        # 2) make a prediction
        pred = model(data)

        # 3) calculate the squared-error loss
        loss = ((pred - target)**2).sum()

        # 4) backpropagate to find which weights caused the loss
        loss.backward()

        # 5) update those weights
        opt.step()

        # 6) print our progress
        print(loss.data)
# -

train()
# ¡Y ahí lo tienes! Hemos entrenado un modelo básico con un método convencional. Todos nuestros datos están agregados en nuestra máquina local y podemos usarlos para hacerle actualizaciones al modelo. El aprendizaje federado, sin embargo, no funciona así. Así que modifiquemos este ejemplo para hacerlo con aprendizaje federado.
#
# Lo que necesitamos:
#
# - Crear un par de trabajadores
# - Hacer que los punteros entrenen los datos en cada trabajador.
# - Actualizar la lógica de entrenamiento para que haga aprendizaje federado.
#
# Nuevos Pasos de Entrenamiento:
# - Mandar el modelo al trabajador correcto.
# - Entrenarlo con los datos que se encuentran ahí.
# - Recuperar el modelo y repetir con el próximo trabajador.
import syft as sy

# Hook PySyft into torch so tensors gain the .send()/.get() federated API.
hook = sy.TorchHook(torch)

# +
# Create a pair of (simulated, in-process) workers that will hold the data.
bob = sy.VirtualWorker(hook, id="bob")
alice = sy.VirtualWorker(hook, id="alice")
# +
# A toy dataset
data = torch.tensor([[0,0],[0,1],[1,0],[1,1.]], requires_grad=True)
target = torch.tensor([[0],[0],[1],[1.]], requires_grad=True)

# Get pointers to the training data on each worker by
# sending half of the rows to bob and half to alice.
data_bob = data[0:2]
target_bob = target[0:2]

data_alice = data[2:]
target_alice = target[2:]

# Initialize a toy model
model = nn.Linear(2,1)

# .send() moves each tensor to its remote worker; the local names now hold
# pointers to the remote data, not the data itself.
data_bob = data_bob.send(bob)
data_alice = data_alice.send(alice)
target_bob = target_bob.send(bob)
target_alice = target_alice.send(alice)

# Organize the pointers into a list of (features, labels) pairs per worker.
datasets = [(data_bob,target_bob),(data_alice,target_alice)]

opt = optim.SGD(params=model.parameters(),lr=0.1)
# +
def train():
    """Round-robin federated training: visit each worker, train on its data."""
    # Training logic
    opt = optim.SGD(params=model.parameters(),lr=0.1)
    for iter in range(10):

        # NEW) iterate over each worker's dataset
        for data,target in datasets:

            # NEW) send the model to the worker that holds this data
            model.send(data.location)

            # 1) erase previous gradients (if they exist)
            opt.zero_grad()

            # 2) make a prediction (executed remotely on the worker)
            pred = model(data)

            # 3) calculate the squared-error loss
            loss = ((pred - target)**2).sum()

            # 4) backpropagate to find which weights caused the loss
            loss.backward()

            # 5) update those weights
            opt.step()

            # NEW) retrieve the updated model (with gradients) from the worker
            model.get()

            # 6) print our progress
            print(loss.get()) # NEW) .get() is needed because loss is remote

# federated averaging
# -

train()
# ## ¡Muy bien!
#
# ¡Y voilà! Ahora estamos entrenando un modelo de aprendizaje profundo muy simple utilizando aprendizaje federado. Mandamos el modelo a cada trabajador, generamos un nuevo gradiente, y luego recuperamos el gradiente en nuestro servidor local donde actualizamos nuestro modelo global. En ningún punto de este proceso vemos o requerimos acceso al conjunto de datos de entrenamiento subyacente. ¡Preservamos la privacidad de Bob y Alice!
#
# ## Algunos defectos de este ejemplo
#
# Aunque este ejemplo es una buena introducción al aprendizaje federado, aún tiene algunos defectos importantes. Notablemente, cuando llamamos `model.get()` y recibimos el modelo actualizado de Bob o Alice de hecho podemos aprender mucho sobre el conjunto de entrenamiento de Bob/Alice sólo con la información de sus gradientes. ¡En algunos casos, podemos restaurar el conjunto de entrenamiento a la perfección!
#
# Así que, ¿qué podemos hacer? Bueno, la primera estrategia que utiliza la gente es **promediar el gradiente sobre múltiples individuos antes de actulizarlo en el servidor central**. Esta estrategia, sin embargo, requiere el uso de objetos PointerTensor más sofisticados. Entonces, en la siguiente sección nos tomaremos un tiempo para aprender un poco de la funcionalidad avanzada de los punteros y actualizaremos este ejemplo de aprendizaje federado.
#
# # # !Felicitaciones! - !Es hora de unirte a la comunidad!
#
# ¡Felicitaciones por completar esta parte del tutorial! Si te gustó y quieres unirte al movimiento para preservar la privacidad, propiedad descentralizada de IA y la cadena de suministro de IA (datos), puedes hacerlo de las ¡siguientes formas!
#
# ### Dale una estrella a PySyft en GitHub
#
# La forma más fácil de ayudar a nuestra comunidad es por darle estrellas a ¡los repositorios de Github! Esto ayuda a crear consciencia de las interesantes herramientas que estamos construyendo.
#
# - [Star PySyft](https://github.com/OpenMined/PySyft)
#
# ### ¡Únete a nuestro Slack!
#
# La mejor manera de mantenerte actualizado con los últimos avances es ¡unirte a la comunidad! Tú lo puedes hacer llenando el formulario en [http://slack.openmined.org](http://slack.openmined.org)
#
# ### ¡Únete a un proyecto de código!
#
# La mejor manera de contribuir a nuestra comunidad es convertirte en un ¡contribuidor de código! En cualquier momento puedes ir al _Github Issues_ de PySyft y filtrar por "Proyectos". Esto mostrará todos los tiquetes de nivel superior dando un resumen de los proyectos a los que ¡te puedes unir! Si no te quieres unir a un proyecto, pero quieres hacer un poco de código, también puedes mirar más mini-proyectos "de una persona" buscando por Github Issues con la etiqueta "good first issue".
#
# - [PySyft Projects](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3AProject)
# - [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
#
# ### Donar
#
# Si no tienes tiempo para contribuir a nuestra base de código, pero quieres ofrecer tu ayuda, también puedes aportar a nuestro *Open Collective"*. Todas las donaciones van a nuestro *web hosting* y otros gastos de nuestra comunidad como ¡hackathons y meetups!
#
# [OpenMined's Open Collective Page](https://opencollective.com/openmined)
|
examples/tutorials/translations/español/Parte 02 - Intro al Aprendizaje Federado.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# 1-D random walk: the root-mean-square displacement grows like sqrt(t).
# (2-D random walk: ?)
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np

n_person = 2000   # number of independent walkers
n_times = 500     # number of steps per walker

t = np.arange(n_times)

# Each step is -1 or +1: randint(0, 2) draws from [0, 2), i.e. {0, 1}.
steps = 2 * np.random.randint(0,2,(n_person,n_times)) - 1

# Cumulative sum along axis=1 turns each walker's step sequence into its
# position over time.
amount = np.cumsum(steps,axis=1)
sd_amount = amount ** 2

# Average the squared displacement over all walkers (axis=0) per time step.
mean_sd_amount = sd_amount.mean(axis=0)

plt.xlabel(r"$t$")
# Fixed LaTeX: the original read r"$sqrt{\langle (\delta x)^2 \langle}$" --
# it was missing the backslash on \sqrt and closed with \langle instead of
# \rangle, so mathtext failed to render the label.
plt.ylabel(r"$\sqrt{\langle (\delta x)^2 \rangle}$")

# 'g.' = green point markers (measured RMS), 'r-' = solid red line (sqrt(t)).
plt.plot(t,np.sqrt(mean_sd_amount),'g.',t,np.sqrt(t),'r-')
# -
# 1-D random walk again, with tiny sizes and prints to inspect each
# intermediate array.
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np

n_person = 20
n_times = 5

t = np.arange(n_times)
print(t)

# Steps of -1/+1 for 20 walkers over 5 time steps.
steps = 2 * np.random.randint(0,2,(n_person,n_times)) - 1
print(steps)

# Positions: cumulative sum of steps along time (axis=1).
amount = np.cumsum(steps,axis=1)
print(amount)
print(amount.shape)

sd_amount = amount ** 2
print(sd_amount)

# Mean squared displacement across walkers (axis=0).
mean_sd_amount = sd_amount.mean(axis=0)
print(mean_sd_amount)
# +
# Polynomial fitting demo: fit a cubic to noisy sqrt(x) samples.
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

n_dots = 20    # number of noisy sample points
n_order = 3    # degree of the fitted polynomial

x = np.linspace(0,1,n_dots)
# Ground truth sqrt(x) plus uniform noise in [0, 0.2).
y = np.sqrt(x) + 0.2*np.random.rand(n_dots)

# polyfit returns the coefficients; poly1d wraps them as a callable polynomial.
p = np.poly1d(np.polyfit(x,y,n_order))
print(p.coeffs)

t = np.linspace(0,1,200)
# 'ro' = red circle markers for the samples; '-' = solid line for the fit.
plt.plot(x,y,'ro',t,p(t),'-')
# -
# Monte Carlo estimate of pi: sample points uniformly in the unit square and
# measure the fraction that lands inside the quarter circle of radius 1.
import numpy as np

n_dots = 10000000

# Uniform coordinates in [0, 1).
x = np.random.random(n_dots)
y = np.random.random(n_dots)

# Euclidean distance of every point from the origin.
distance = np.hypot(x, y)

# Keep only the points that fall strictly inside the quarter circle.
in_circle = distance[distance < 1]

# The quarter circle covers pi/4 of the unit square, so scaling the hit
# rate by 4 estimates pi.
pi = 4.0 * in_circle.size / n_dots
print(pi)
|
scikit/ch02_02.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gabrielalastra/TwitterAPI_COVID_2022/blob/main/data_collected.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="kqBhdg2pErcU"
import tweepy as tw
import pandas as pd
import json
# + [markdown] id="Qtz0CVZ4E_1G"
# <h3 align='right'>Twitter API Keys and Authorization</h3>
# <hr color='cian'>
# + id="u9AiAf_7E2lh"
# Twitter API credentials (redacted). Replace with your own keys from the
# Twitter developer portal before running.
twKey = 'xxxx'
twKeysecret = 'xxxxx'
twbearertoken = 'xxxx'
twtoken = 'xxx'
twtokensecret = 'xxxxx'

# + id="1438XNUdE647"
# OAuth 1.0a authentication: consumer key/secret plus access token/secret.
auth=tw.OAuthHandler(twKey, twKeysecret)
auth.set_access_token(twtoken, twtokensecret)

# + id="uOZDWL4sE87o"
# wait_on_rate_limit pauses automatically when the API rate limit is hit.
api=tw.API(auth, wait_on_rate_limit=True)
# + [markdown] id="X2Gbon9mFhxw"
# <h3 align='right'>Collecting the Data</h3>
# <h4 align='right'>27.jan.2022</h4>
# <hr color='cian'>
# + [markdown] id="uVXzhv_6LWDM"
# <h5 align='right'>English</h5>
# + id="33xHq3N_E-b_"
# Accumulator for the English tweets: one list of values per field.
# Initialising every key to a list (instead of fromkeys(None)) lets the
# collection loop below append directly, rather than depending on the
# AttributeError fallback that None values triggered.
diciEN = {key: [] for key in ['created_at',
                              'text',
                              'user',
                              'source',
                              'favorite_count',
                              'retweet_count']}
# + id="_JtzdulTFvq5"
# "-filter:retweets" excludes retweets, but the operator must be separated
# from the search term by a space -- the original "COVID-filter:retweets"
# glued them together, so the filter never applied.
word = "COVID" + ' -filter:retweets'
# NOTE(review): Cursor(api.search, ...) assumes tweepy < 4; in tweepy 4.x the
# method was renamed api.search_tweets. Verify the installed version.
cursor = tw.Cursor(api.search, q=word, lang='en', count=100).items(300)
# + id="UrUU3DTOGJL1"
# Collect the selected fields from each returned status into diciEN.
# Control flow leans on exceptions:
#   - KeyError: the field is missing from the tweet JSON -> record ''.
#   - bare except: when diciEN[key] is still None (from fromkeys), .append
#     raises AttributeError and the value becomes the first list element.
# NOTE(review): the KeyError branch assigns a bare string when the slot is
# still None; a later .append on that string falls through to the bare
# except and discards earlier values. This only behaves if the first tweet
# carries every field -- confirm against the API payload.
for i in cursor:
    for key in diciEN.keys():
        try:
            value=i._json[key]
            diciEN[key].append(value)
        except KeyError:
            value=''
            if(diciEN[key] is None):
                diciEN[key]=value
            else:
                diciEN[key].append(value)
        except:
            diciEN[key]=[value]

# + id="lhqvYx6IKZ7-"
# Persist the raw English collection for later analysis.
with open('diciEN_COVID_jan2022.json', 'w') as fp:
    json.dump(diciEN, fp, indent=4)
# + [markdown] id="FfJ1Av1wLfBX"
# <h5 align='right'>Deutsch</h5>
# + id="rCZlAKVNLjMG"
# Accumulator for the German tweets, initialised to lists for direct appends
# (same fix as the English collector: fromkeys(None) forced the loop to rely
# on exception handling for the first value of each field).
diciDE = {key: [] for key in ['created_at',
                              'text',
                              'user',
                              'source',
                              'favorite_count',
                              'retweet_count']}
# + id="WePq9Xe8Lp7X"
# "-filter:retweets" needs a leading space to act as a search operator
# rather than being glued onto the search term.
word = "COVID" + ' -filter:retweets'
cursorDE = tw.Cursor(api.search, q=word, lang='de', count=100).items(300)
# + id="a5_WG3RIL8fL"
# Collect the selected fields from each German status into diciDE.
# Same exception-driven flow as the English loop above: KeyError records '',
# and the bare except seeds a fresh list when append fails.
for i in cursorDE:
    for key in diciDE.keys():
        try:
            value=i._json[key]
            diciDE[key].append(value)
        except KeyError:
            value=''
            if(diciDE[key] is None):
                diciDE[key]=value
            else:
                diciDE[key].append(value)
        except:
            diciDE[key]=[value]

# + id="hUHbkmr3MoCh"
# Persist the raw German collection for later analysis.
with open('diciDE_COVID_jan2022.json', 'w') as fp:
    json.dump(diciDE, fp, indent=4)
# + colab={"base_uri": "https://localhost:8080/", "height": 478} id="pGfQK26GKLoh" outputId="f22d62f6-7ccf-4ba2-f0c4-db8d46448fc6"
#testing
# Quick sanity check: materialise each language's collection as a DataFrame
# (one column per field) and eyeball the first rows.
COVID_de= pd.DataFrame.from_dict(diciDE)
COVID_de.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 444} id="D0Iq3bdcKSI4" outputId="69d2969d-a4f4-4afa-87a1-e9a9301271c9"
COVID_en = pd.DataFrame.from_dict(diciEN)
COVID_en.head()
# + id="ySma723dKVWV"
|
data_collected.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests
import re
from bs4 import BeautifulSoup as bs
# Ask for a title and fetch the YTS browse page for it.
# The original prompt 'What''s the movie?' relied on adjacent-string
# concatenation and displayed "Whats the movie?"; escape the apostrophe
# instead. input() already returns str, so the str() wrapper is dropped.
movie = input("What's the movie?\t")
url = "https://yts.unblockit.dev/browse-movies/" + movie + "/all/all/0/latest/0/all"
page = requests.get(url)
# NOTE(review): no parser argument -- BeautifulSoup will pick whichever
# parser is installed and emit a warning; pass 'html.parser' to pin it.
soup = bs(page.content)
soup
# +
title = []
link = []
pages = []

def find_title(soup):
    """Append the text of every movie-title anchor on the page to `title`."""
    # Attribute filters must be a dict; the original passed the *set*
    # {'class','browse-movie-title'}, which is not a valid class filter.
    for a in soup.findAll('a', {'class': 'browse-movie-title'}):
        title.append(a.getText())

def find_link(soup):
    """Append the href of every movie-title anchor on the page to `link`."""
    for a in soup.findAll('a', {'class': 'browse-movie-title'}):
        link.append(a['href'])

find_title(soup)
find_link(soup)
# -
# Pair each title with its link for display: "<title> =======> <url>".
pages = [name + ' ' + '=======>' + ' ' + href for name, href in zip(title, link)]
pages

# Let the user pick a movie by its 1-based position in the list above.
# (Prompt typo "Whick" fixed.)
mov = int(input('Which Movie in this(select in number)'))
print('You chose ' + pages[mov-1])

# Fetch the chosen movie's detail page.
movie_url = link[mov-1]
mov_pg = requests.get(movie_url)
mov_soup = bs(mov_pg.content)
mov_soup
clarities = []
clar_link = []
result = []

def dwn_link(mov_soup):
    """Collect quality labels and download URLs from the movie detail page."""
    # The quality links live inside <p class="hidden-xs hidden-sm">.
    # As elsewhere, the attribute filter must be a dict -- the original
    # passed the set {'class','hidden-xs hidden-sm'}.
    for p_tag in mov_soup.findAll('p', {'class': 'hidden-xs hidden-sm'}):
        # Renamed the loop variable from `link` to avoid shadowing the
        # module-level `link` list of movie URLs.
        for anchor in p_tag.findAll('a', {'href': True}):
            clarities.append(anchor.getText())
            clar_link.append(anchor['href'])

dwn_link(mov_soup)

# Pair each quality label with its download URL for display.
for quality, href in zip(clarities, clar_link):
    result.append(quality + ' ' + '=====>' + ' ' + href)

print('Available Links for the Movie ===>\t' + title[mov-1])
result
|
YTS MX Scraper.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# ## Configuration
# +
method = 'gram-ood*' # or 'gram-ood'
model = 'vgg' # or 'resnet', 'densenet', 'mobilenet'
# NOTE(review): `method` and `model` are never read below in this cell --
# confirm whether they are consumed elsewhere or are leftover configuration.

# Ensemble the per-backbone OOD deviation scores by summation.
devs = np.loadtxt('final/final_vgg')
vals = np.loadtxt('final/test_vgg')
names = np.genfromtxt('final/names_vgg', dtype='str')

# NOTE(review): `devs` is accumulated with +=, but `vals` and `names` are
# overwritten at each step, so only the densenet test scores/names survive.
# Verify this asymmetry is intentional (it is harmless only if every file
# shares the same image ordering).
devs += np.loadtxt('final/final_mobilenet')
vals = np.loadtxt('final/test_mobilenet')
names = np.genfromtxt('final/names_mobilenet', dtype='str')

devs += np.loadtxt('final/final_resnet')
vals = np.loadtxt('final/test_resnet')
names = np.genfromtxt('final/names_resnet', dtype='str')

devs += np.loadtxt('final/final_densenet')
vals = np.loadtxt('final/test_densenet')
names = np.genfromtxt('final/names_densenet', dtype='str')

# Min-max normalise both score vectors into [0, 1].
vals_norm = (vals - vals.min()) / (vals.max() - vals.min())
devs_norm = (devs - devs.min()) / (devs.max() - devs.min())

# Map image name -> normalised ensembled deviation score.
dic = {x:y for x,y in zip(names, devs_norm)}
# Rewrite the UNK column of the submission with the normalised OOD score.
data = pd.read_csv('OOD.csv')
cols = ['image', 'MEL', 'NV', 'BCC', 'AK', 'BKL', 'DF', 'VASC', 'SCC', 'UNK']

vals = list()
for _, row in data.iterrows():
    img_name = row['image']
    # The image's normalised ensemble deviation becomes its UNK score.
    new_unk = dic[img_name]
    new_row = row[cols].values
    # Optional hard thresholding (currently disabled):
    # if new_unk > 0.02:
    #     new_unk = 1.0
    new_row[-1] = new_unk
    vals.append(new_row)

new_df = pd.DataFrame(vals, columns=cols)

# Only the combined ("all") submission is written; the per-model variants are
# kept here, commented out, for reference.
#new_df.to_csv("task3/sub_vgg.csv", index=False)
#new_df.to_csv("task3/sub_mobilenet.csv", index=False)
new_df.to_csv("task3/sub_all.csv", index=False)
#new_df.to_csv("task3/sub_resnet.csv", index=False)
#new_df.to_csv("task3/sub_densenet.csv", index=False)
# Fraction of samples whose normalised test score exceeds 0.03.
thres = 1
# NOTE(review): `thres` is never used; the hard-coded 0.03 below is the
# effective threshold. Also `vals` was rebound above to the list of
# submission rows, so len(vals) is the row count -- confirm it matches
# len(vals_norm), otherwise the printed ratio is off.
t = 0
for v in vals_norm:
    if v > 0.03:
        t+=1
print(t/len(vals))
|
isic_submit/merge/.ipynb_checkpoints/merge_pred_unk-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Importing the necessary modules
#
# Note: For the successful loading of all the modules, it is necessary that LightGBM is installed in the current python environment.
#
# The assumed directory structure is,
# 1. Current dir: ../
# 2. Data dir: ../data/ contains pose/train, pose/test/, labels.csv, files.txt
# 3. Code dir: ../code/
# 4. Execution file dir: ../code/Sign_Language_Recognition_Ashutosh_Vyas_01601649.ipynb
# 5. Utilities dir: ../code/util/ contains helpers.py, vis.py, straified_groupk.py, results_plots_evalutaion.py
# 6. Feature processing dir: ../code/feature_engineering contains data_augmentation.py, feature_extractors_4D_array.py, feature_preprocessing.py, features_4D_array.py
# +
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from os.path import join as pjoin
import util.vis as V
import util.helpers as H
import data_analysis
import csv
import random
import gc
from glob import glob
import sklearn as sk
from sklearn import preprocessing
import feature_engineering.feature_preprocessing as feat_prepro
import feature_engineering.feature_extractors_4D_array as feat_extract
from feature_engineering.data_augmentation import SLRImbAugmentation
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, GroupKFold
from util.stratified_group_cv import StratifiedGroupKFold
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from util.results_plots_evaluation import map3_scorer
import util.results_plots_evaluation as results
from sklearn.metrics import accuracy_score
import util.helpers as kaggle_submission
from sklearn.svm import SVC
from lightgbm import LGBMClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
print("Imports done...")
# -
# ### Initialize feature extraction flags and data paths
# +
np.seterr(all='raise', divide='raise', over='raise', under='raise', invalid='raise')
rng = np.random.RandomState(42)
startTime= datetime.now()
# #splits for cross-validation and #frames for interpolation
n_splits = 5
interpolated_total_frames = 15
# Initialize features extraction flags
face_flag = True #False for feature set 1
body_flag = True #False
hand_flag = True #False
physics_flag = True
trajectory_flag = True #False
linear_flag = True
angular_flag = False
std_flag = False
velocity_flag = False
acceleration_flag = False
remove_keypoints = True #False
save_plot = False
# 137 keypoint indices to remove if remove_keypoints is True
# current list is for lower-body and many face keypoints
unwanted_keypoints=[10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94]
# Paths to load the data
DATA_DIR = '../data'
POSE_DIR = '../data/pose'
TRAIN_DIR = POSE_DIR + "/train"
TEST_DIR = POSE_DIR + "/test"
# -
# ### Load the data and interpolate
# +
# Read labels.csv into a pandas dataframe for convenience
full_dataframe = pd.read_csv(pjoin(DATA_DIR, "labels.csv"))
full_dataframe['Data'] = full_dataframe['File'].apply(lambda title: np.load(pjoin(TRAIN_DIR, title + ".npy")))
print(full_dataframe.head())
# 4D data as (n_samples, n_frames, n_keypoints, n_coords)
samples_centered_4D_array = feat_prepro.interpolate_allsamples(full_dataframe.Data, interpolated_total_frames=interpolated_total_frames, x_resolution=1.0, y_resolution=1.0)
print("\nInterpolated training data shape",samples_centered_4D_array.shape)
# -
# ### Train-test split if necessary
# + active=""
# ### Convert to code block if needed. The split is non-stratified.
# # Train and test split required only for Confusion Matrix or if comparing test scores with Kaggle
# X_train, X_valid, y_train, y_valid, group_train, group_valid = train_test_split(samples_centered_4D_array, np.asarray(full_dataframe.Label), np.asarray(full_dataframe.Person), test_size=0.25, random_state=42, shuffle=True, stratify=None)#=np.asarray(full_dataframe.Label))
#
# print("Training shape 4D split",X_train.shape)
# print("Validation shape 4D split",X_valid.shape)
#
# -
# ### Data augmentation
# + active=""
# ### Initialize the augmentaion class, convert to code block
# slr_obj = SLRImbAugmentation()
# aug_samp, aug_label, aug_group = slr_obj.fit(X=X_train, y=y_train, groups=group_train, augmentation_factor=3)
#
#
# -
# ### Extract features
#
# Extract features as per above enabled flags
# +
### Use this block if just performing cross-validation on original data
### NOT FOR Train-test split and augmentation, see next block for that.
print("\nExtracting features for training data")
X_train = feat_extract.main_feature_extractor(array_4D_data=samples_centered_4D_array, face=face_flag, body=body_flag, hands=hand_flag, physics=physics_flag, trajectory=trajectory_flag, linear_flag=linear_flag, angular_flag=angular_flag, std_flag=std_flag, velocity_flag=velocity_flag, acceleration_flag=acceleration_flag, remove_keypoints=remove_keypoints, unwanted_keypoints=unwanted_keypoints)
y_train = np.asarray(full_dataframe.Label)
group_train = np.asarray(full_dataframe.Person)
print("Training shape",X_train.shape)
print("NAN values:",np.isnan(X_train).sum())
# + active=""
# ### Convert this to code block for using with augmentation
# print("\nExtracting features for training data")
# X_train = feat_extract.main_feature_extractor(array_4D_data=aug_samp, face=face_flag, body=body_flag, hands=hand_flag, physics=physics_flag, trajectory=trajectory_flag, linear_flag=linear_flag, angular_flag=angular_flag, std_flag=std_flag, velocity_flag=velocity_flag, acceleration_flag=acceleration_flag, remove_keypoints=remove_keypoints, unwanted_keypoints=unwanted_keypoints)
#
# y_train = aug_label
# group_train = aug_group
# print("Training shape with augmentation",X_train.shape)
# print("NAN values:",np.isnan(X_train).sum())
# + active=""
# ### To be used with train-test split enabled, convert to code block
#
# print("\nExtracting features for validation data")
# X_valid = feat_extract.main_feature_extractor(array_4D_data=X_valid, face=face_flag, body=body_flag, hands=hand_flag, physics=physics_flag, trajectory=trajectory_flag, linear_flag=linear_flag, angular_flag=angular_flag, std_flag=std_flag, velocity_flag=velocity_flag, acceleration_flag=acceleration_flag, remove_keypoints=remove_keypoints, unwanted_keypoints=unwanted_keypoints)
# print("Validation shape",X_valid.shape)
# print("NAN values:",np.isnan(X_valid).sum())
# -
# ### Initialize objects for Scaling, PCA, Cross-validation, Feature selection
# +
### Standard Scaler
scl = StandardScaler()
# scl = RobustScaler()
### PCA
# pca_obj = PCA(n_components=0.95, random_state=42)
### Cross validator
# cvld = StratifiedShuffleSplit(n_splits=n_splits, test_size=0.2, train_size=None, random_state=42)
# cvld = StratifiedGroupKFold(n_splits=n_splits, shuffle=True, random_state=42)
cvld = GroupKFold(n_splits=n_splits)
### Feature Selector
feature_selector = VarianceThreshold(threshold=0.0)
# feature_selector = SelectKBest(k=int(0.5*X_train.shape[1]))
### Flush the RAM before training
gc.collect()
# -
# ### Initialize one estimator
#
# Note: The hyper-parameters are set and initialized as per the GridSearchCV tuning.
# +
### Estimator
### Enable and select only one at a time
# estimator = LogisticRegression(C=0.275, tol=1e-4, max_iter=5000, penalty='l2', class_weight=None, multi_class='ovr', random_state=42, n_jobs=-1)
# estimator = SVC(C=6.5, decision_function_shape='ovo', kernel='rbf', degree=3, gamma='scale', coef0=0.0, shrinking=True, probability=True, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, break_ties=False, random_state=42)
# estimator = SVC(C=6.5, decision_function_shape='ovr', kernel='rbf', degree=3, gamma='scale', coef0=0.0, shrinking=True, probability=True, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, break_ties=False, random_state=42)
# estimator = SVC(C=0.0775, decision_function_shape='ovo', kernel='linear', degree=3, gamma='scale', coef0=0.0, shrinking=True, probability=True, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, break_ties=False, random_state=42)
# estimator = SVC(C=0.0775, decision_function_shape='ovr', kernel='linear', degree=3, gamma='scale', coef0=0.0, shrinking=True, probability=True, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, break_ties=False, random_state=42)
# estimator = RandomForestClassifier(n_estimators=130, max_depth=16, min_samples_split=2, min_samples_leaf=1, max_features='auto', max_leaf_nodes=150, bootstrap=True, criterion='entropy', min_weight_fraction_leaf=0.0, min_impurity_decrease=0.0, min_impurity_split=None, oob_score=True, n_jobs=-1, random_state=42, verbose=0, warm_start=False, class_weight=None, ccp_alpha=0.0, max_samples=None)
# estimator = LGBMClassifier(boosting_type='gbdt', num_leaves=15, max_depth=15, learning_rate=0.525, n_estimators=102, objective='multiclass', min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, importance_type='gain', subsample_for_bin=200000, class_weight=None, random_state=42, n_jobs=-1, silent=True)
# estimator = GaussianNB(priors=None, var_smoothing=110)
########### Below are the estimators for body+mean features
estimator = LogisticRegression(C=0.9, tol=1e-4, max_iter=5000, penalty='l2', class_weight=None, multi_class='ovr', random_state=42, n_jobs=-1)
# estimator = LGBMClassifier(boosting_type='gbdt', num_leaves=18, max_depth=5, learning_rate=0.525, n_estimators=170, objective='multiclass', min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, importance_type='gain', subsample_for_bin=200000, class_weight=None, random_state=42, n_jobs=-1, silent=True)
print("\nTraining the model", str(estimator))
# -
# ### Initialize the pipeline object with above configurations
# pipe = Pipeline([('scale', scl), ('reduce_dims', pca_obj), ('clf', estimator)])
pipe = Pipeline([('selection', feature_selector), ('scale', scl), ('clf', estimator)])
# ### Initialize and perform hyperparameter tuning with grid search
#
# Note: Currently GridSearchCV object is initialized for Logistic Regression. Refer Appendix at the end of this file for parameter grid for other estimators.
# +
### Grid Search CV
# Hyper-parameter grid for the pipeline's classifier step ('clf').
# See the Appendix at the end of this file for grids for other estimators.
param_grid = dict(clf__C=[0.85, 0.9, 0.95]) # Refer Appendix
print("Running GSCV.....")
# Group-aware CV (cvld) keeps each person's samples within a single fold;
# scoring uses the custom MAP@3 scorer.
grid = GridSearchCV(pipe, param_grid=param_grid, cv=cvld, n_jobs=-1, verbose=100, scoring=map3_scorer)
grid.fit(X_train, y_train, groups=group_train)
print(grid.best_params_)
print(grid.best_estimator_)
print(grid.best_score_)
# The refit best estimator is used for the Kaggle submission below.
pipe_submission = grid.best_estimator_
# NOTE(review): the training data is passed as both train and validation
# here, so the reported "validation" MAP@3 is actually a training-set score.
map3_trn, map3_vld = results.predict_print_results(pipe_submission, X_train, X_train, y_train, y_train)
# + active=""
# ### Enable this only if generating learning curve, validation curve, confusion matrix plots
#
# ### Learning Curves
# print("\nPlotting Learning Curve.....")
# title = str(estimator)
# results.plot_learning_curve(pipe_submission, X_train, y_train, groups=group_train, title=title, shuffle=True, ylim=(0.0, 1.1), cv=cvld, n_jobs=-1, save_plot=save_plot)
#
# ### Validation Curves
# print("\nPlotting Validation Curve.....")
# param_name = 'estim__C'
# param_range = np.logspace(-2,1,4) #pipe_submission.set_params(estim__C=)
# results.plot_validation_curve(pipe_submission, X_train, y_train, param_name=param_name, param_range=param_range, ylim=(0.0, 1.1), groups=group_train, cv=cvld, xlog=False, save_plot=save_plot)
#
# ### Confusion Matrices
# print("\nPlotting Confusion Matrix.....")
# results.plot_con_mat(pipe_submission, X_valid, y_valid, display_labels=np.unique(full_dataframe.Gloss), xticks_rotation='horizontal', save_plot=save_plot)
#
# -
# ### Testing and Kaggle submission
#
# Note: Currently testing is performed with GridSearchCV best estimator.
# +
### Create a submission using the test set data and write the submission file using the provided code
# Deterministic (sorted) ordering so predictions line up with the expected
# submission row order.
all_test_files = sorted(glob(pjoin(TEST_DIR, '*.npy')))
test_samples = []
for numpy_file in all_test_files:
    sample = np.load(numpy_file)
    test_samples.append(sample)
# Interpolate each test sample to the same fixed frame count as training.
samples_centered_4D_array_test = feat_prepro.interpolate_allsamples(test_samples, interpolated_total_frames=interpolated_total_frames, x_resolution=1.0, y_resolution=1.0)
print("Interpolated test data shape",samples_centered_4D_array_test.shape)
print("\nExtracting features for testing data")
# Feature flags must match the training-time extraction exactly.
X_test = feat_extract.main_feature_extractor(array_4D_data=samples_centered_4D_array_test, face=face_flag, body=body_flag, hands=hand_flag, physics=physics_flag, trajectory=trajectory_flag, linear_flag=linear_flag, angular_flag=angular_flag, std_flag=std_flag, velocity_flag=velocity_flag, acceleration_flag=acceleration_flag, remove_keypoints=remove_keypoints, unwanted_keypoints=unwanted_keypoints)
print("Test shape",X_test.shape)
print("NAN values:",np.isnan(X_test).sum())
test_probas = pipe_submission.predict_proba(X_test)
# NOTE(review): the filename says LightGBM but the active estimator above is
# LogisticRegression — confirm before submitting.
fname_txt = 'main10_LightGBM_body_phy_traj'#'main1_logreg'
H.create_submission(test_probas, '{txt}.csv'.format(txt=fname_txt))
print("\nKaggle submission {txt}.csv generated. Check the current directory.".format(txt=fname_txt))
print("\n~~~~~##### Done #####~~~~~\n")
timeElapsed = datetime.now() - startTime
print('Time elpased (hh:mm:ss.ms) {}'.format(timeElapsed))
# -
# ## Appendix
# ### Parameter grid for various estimators
# Feature set 1: mean value of x-y-c over 15 frames i.e. 137*3 = 411 features
#
# Feature set 2: Removing keypoints. (body+face+hands) + mean of x-y-c + trajectory = 14+3+16 + 216 + 8 = 256 features
#
# #### Logistic Regression
# Feature set 1: param_grid = dict(clf__C=[0.25, 0.275, 0.30])
# Feature set 2: param_grid = dict(clf__C=[0.85, 0.875, 0.9, 0.925, 0.95])
#
# #### Support Vector Classifier 4 models
# 1. With RBF kernel and OneVsOne decision function: param_grid = dict(clf__C=[5.75, 6.0, 6.25, 6.5, 6.75, 7.0], clf__gamma=['scale', 'auto'])
# 2. With RBF kernel and OneVsRest decision function: param_grid = dict(clf__C=[5.75, 6.0, 6.25, 6.5, 6.75, 7.0], clf__gamma=['scale', 'auto'])
# 3. With Linear kernel and OneVsOne decision function: param_grid = dict(clf__C=[0.075, 0.0775, 0.08, 0.0825, 0.085], clf__gamma=['scale', 'auto'])
# 4. With Linear kernel and OneVsRest decision function: param_grid = dict(clf__C=[0.075, 0.0775, 0.08, 0.0825, 0.085], clf__gamma=['scale', 'auto'])
#
# #### Random Forest
# Note: Please select 1 or 2 or 3 parameter at a time to reduce the computation time.
#
# param_grid = dict(clf__n_estimators=[128, 130, 132, 134, 136], clf__max_depth=[12, 14, 16, 18], clf__max_features=['log', 'auto', None], clf__max_leaf_nodes=[130, 140, 150, 160, None], clf__bootstrap=[True, False], clf__criterion=['gini', 'entropy'], clf__max_samples=[0.2, 0.4, 0.6, 0.8, None])
#
#
# #### Light Gradient Boosting Machine - LightGBM
# Note: Please select 1 or 2 or 3 parameter at a time to reduce the computation time.
#
# Feature set 1: param_grid = dict(clf__num_leaves=[11, 13, 15, 17, 19], max_depth=[11, 12, 13, 14, 15, 16], learning_rate=[0.5, 0.525, 0.55, 0.575, 0.6, 0.625, 0.65], n_estimators=[101, 102, 103, 104, 105], clf__importance_type=['split', 'gain'])
#
# Feature set 2: param_grid = dict(clf__num_leaves=[16, 18, 20], max_depth=[4, 5, 6, 7], learning_rate=[0.5, 0.525, 0.55], n_estimators=[140, 150, 160, 170, 180])
#
#
# #### Gaussian Naive Bayes
# param_grid = dict(clf__var_smoothing=[90, 95, 100, 105, 110, 115, 120])
|
Sign_Language_Recognition_Ashutosh_Vyas_01601649.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Manipulating word embeddings
#
# In this week's assignment, you are going to use a pre-trained word embedding for finding word analogies and equivalence. This exercise can be used as an Intrinsic Evaluation for the word embedding performance. In this notebook, you will apply linear algebra operations using NumPy to find analogies between words manually. This will help you to prepare for this week's assignment.
# +
import pandas as pd # Library for Dataframes
import numpy as np # Library for math functions
import pickle # Python object serialization library. Not secure
word_embeddings = pickle.load( open( "word_embeddings_subset.p", "rb" ) )
len(word_embeddings) # there should be 243 words that will be used in this assignment
# -
# Now that the model is loaded, we can take a look at the word representations. First, note that the _word_embeddings_ is a dictionary. Each word is the key to the entry, and the value is its corresponding vector presentation. Remember that square brackets allow access to any entry if the key exists.
countryVector = word_embeddings['country'] # Get the vector representation for the word 'country'
print(type(countryVector)) # Print the type of the vector. Note it is a numpy array
print(countryVector) # Print the values of the vector.
# It is important to note that we store each vector as a NumPy array. It allows us to use the linear algebra operations on it.
#
# The vectors have a size of 300, while the vocabulary size of Google News is around 3 million words!
#Get the vector for a given word:
def vec(w):
    """Return the embedding vector (a NumPy array) for the word `w`.

    Looks the word up in the module-level `word_embeddings` dict;
    raises KeyError if `w` is not in the loaded subset.
    """
    return word_embeddings[w]
# ## Operating on word embeddings
#
# Remember that understanding the data is one of the most critical steps in Data Science. Word embeddings are the result of machine learning processes and will be part of the input for further processes. These word embedding needs to be validated or at least understood because the performance of the derived model will strongly depend on its quality.
#
# Word embeddings are multidimensional arrays, usually with hundreds of attributes that pose a challenge for its interpretation.
#
# In this notebook, we will visually inspect the word embedding of some words using a pair of attributes. Raw attributes are not the best option for the creation of such charts but will allow us to illustrate the mechanical part in Python.
#
# In the next cell, we make a beautiful plot for the word embeddings of some words. Even if plotting the dots gives an idea of the words, the arrow representations help to visualize the vector's alignment as well.
# +
words = ['oil', 'gas', 'happy', 'sad', 'city', 'town', 'village', 'country', 'continent', 'petroleum', 'joyful']
bag2d = np.array([vec(word) for word in words]) # Convert each word to its vector representation
bag2d
# -
bag2d[0][3], bag2d[0][2]
# +
import matplotlib.pyplot as plt # Import matplotlib
fig, ax = plt.subplots(figsize = (10, 10)) # Create custom size image
col1 = 3 # Select the column for the x axis
col2 = 2 # Select the column for the y axis
# Print an arrow for each word
for word in bag2d:
ax.arrow(0, 0, word[col1], word[col2], head_width=0.005, head_length=0.005, fc='r', ec='r', width = 1e-5)
ax.scatter(bag2d[:, col1], bag2d[:, col2]); # Plot a dot for each word
# Add the word label over each dot in the scatter plot
for i in range(0, len(words)):
ax.annotate(words[i], (bag2d[i, col1], bag2d[i, col2]))
plt.show()
# -
# Note that similar words like 'village' and 'town' or 'petroleum', 'oil', and 'gas' tend to point in the same direction. Also, note that 'sad' and 'happy' look close to each other; however, the vectors point in opposite directions.
#
# In this chart, one can figure out the angles and distances between the words. Some words are close in both kinds of distance metrics.
# ## Word distance
#
# Now plot the words 'sad', 'happy', 'town', and 'village'. In this same chart, display the vector from 'village' to 'town' and the vector from 'sad' to 'happy'. Let us use NumPy for these linear algebra operations.
# +
words = ['sad', 'happy', 'town', 'village']
bag2d = np.array([vec(word) for word in words]) # Convert each word to its vector representation
fig, ax = plt.subplots(figsize = (10, 10)) # Create custom size image
col1 = 3 # Select the column for the x axe
col2 = 2 # Select the column for the y axe
# Print an arrow for each word
for word in bag2d:
ax.arrow(0, 0, word[col1], word[col2], head_width=0.0005, head_length=0.0005, fc='r', ec='r', width = 1e-5)
# print the vector difference between village and town
village = vec('village')
town = vec('town')
diff = town - village
ax.arrow(village[col1], village[col2], diff[col1], diff[col2], fc='b', ec='b', width = 1e-5)
# print the vector difference between sad and happy
sad = vec('sad')
happy = vec('happy')
diff = happy - sad
ax.arrow(sad[col1], sad[col2], diff[col1], diff[col2], fc='b', ec='b', width = 1e-5)
ax.scatter(bag2d[:, col1], bag2d[:, col2]); # Plot a dot for each word
# Add the word label over each dot in the scatter plot
for i in range(0, len(words)):
ax.annotate(words[i], (bag2d[i, col1], bag2d[i, col2]))
plt.show()
# -
# ## Linear algebra on word embeddings
#
# In the lectures, we saw the analogies between words using algebra on word embeddings. Let us see how to do it in Python with Numpy.
#
# To start, get the **norm** of a word in the word embedding.
print(np.linalg.norm(vec('town'))) # Print the norm of the word town
print(np.linalg.norm(vec('sad'))) # Print the norm of the word sad
# ## Predicting capitals
#
# Now, applying vector difference and addition, one can create a vector representation for a new word. For example, we can say that the vector difference between 'France' and 'Paris' represents the concept of Capital.
#
# One can move from the city of Madrid in the direction of the concept of Capital, and obtain something close to the corresponding country to which Madrid is the Capital.
# +
capital = vec('France') - vec('Paris')
country = vec('Madrid') + capital
print(country[0:5]) # Print the first 5 values of the vector
# -
# We can observe that the vector 'country' that we expected to be the same as the vector for Spain is not exactly it.
diff = country - vec('Spain')
print(diff[0:10])
# So, we have to look for the closest words in the embedding that matches the candidate country. If the word embedding works as expected, the most similar word must be 'Spain'. Let us define a function that helps us to do it. We will store our word embedding as a DataFrame, which facilitate the lookup operations based on the numerical vectors.
# +
# Create a dataframe out of the dictionary embedding. This facilitate the algebraic operations
keys = word_embeddings.keys()
data = []
for key in keys:
data.append(word_embeddings[key])
embedding = pd.DataFrame(data=data, index=keys)
# Define a function to find the closest word to a vector:
def find_closest_word(v, k = 1):
    """Return the word in the global `embedding` DataFrame nearest to vector `v`.

    Distance is squared Euclidean. `k` is accepted for interface
    compatibility but only the single closest word is returned.
    """
    # Offset of every embedding row from the query vector.
    offsets = embedding.values - v
    # Squared Euclidean distance per row (no sqrt needed for argmin).
    squared_distances = (offsets * offsets).sum(axis=1)
    # Index label (the word) of the row at minimum distance.
    return embedding.index[np.argmin(squared_distances)]
# -
# Print some rows of the embedding as a Dataframe
embedding.head(10)
# Now let us find the name that corresponds to our numerical country:
find_closest_word(country)
# ## Predicting other Countries
find_closest_word(vec('Italy') - vec('Rome') + vec('Madrid'))
print(find_closest_word(vec('Berlin') + capital))
print(find_closest_word(vec('Beijing') + capital))
# However, it does not always work.
print(find_closest_word(vec('Lisbon') + capital))
# ## Represent a sentence as a vector
#
# A whole sentence can be represented as a vector by summing all the word vectors that conform to the sentence. Let us see.
doc = "Spain petroleum city king"
vdoc = [vec(x) for x in doc.split(" ")]
doc2vec = np.sum(vdoc, axis = 0)
doc2vec
find_closest_word(doc2vec)
# **Congratulations! You have finished the introduction to word embeddings manipulation!**
|
Course 1 - Natural Language Processing with Classification and Vector Spaces/Week 3/NLP_C1_W3_lecture_nb_02.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # 1. Getting started with NumPy
# + slideshow={"slide_type": "fragment"}
#Sides of Square
square_sides = [4, 5, 6, 7, 9, 89, 99]
type(square_sides)
# -
#Calculate the Area - side*side
square_areas = [a*a for a in square_sides]
square_areas
# + slideshow={"slide_type": "fragment"}
#Can I directly calculate the square
square_areas = square_sides*square_sides
# + [markdown] slideshow={"slide_type": "subslide"}
# # Introducing NumPy
# + slideshow={"slide_type": "fragment"}
import numpy as np
sides_array = np.array(square_sides)
area_array = sides_array*sides_array
print(area_array)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Array addition example
# + slideshow={"slide_type": "fragment"}
array1 = np.array([1,2,3,4,5])
array2 = np.array([5,4,3,2,1])
array1+array2
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Calculate the BMI
# + slideshow={"slide_type": "fragment"}
heights_in_mtr = np.array([1.72, 1.75, 1.72, 1.45, 1.50, 2.10])
weights_in_kg = np.array([65, 75, 99, 33, 45, 95])
#Calculate BMI - Kg/mtr**2
bmi_array = weights_in_kg/(heights_in_mtr**2)
bmi_array
# -
# ## Quizz - What will be the result
quizz1 = np.array( [2, 2.5, False, 0.2, 333, True, "name"] )
print(quizz1)
quizz2 = np.array( [2, 2.5, False, 0.2, 333, True] ).astype('int')
quizz2
#Is it possible to create an array from tuples and dictionaries?
|
NumPy/01 Introduction_to_NumPy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import math
import matplotlib.pyplot as plt
import numpy as np
import torch
from vjf.model import VJF
# -
# Setup precision and random seeds
torch.set_default_dtype(torch.double) # using double precision
np.random.seed(0)
torch.manual_seed(0)
# +
# Generate data
T = 100. # length
dt = 1e-2 * math.pi # size of time step
xdim = 2 # state dimensionality
ydim = 20 # observation dimensionality
udim = 0 # size of control input
C = torch.randn(xdim, ydim) # loading matrix
d = torch.randn(ydim) # bias
t = torch.arange(0, T, step=dt) # time point to be evaluated
x = torch.column_stack((torch.sin(t), torch.cos(t))) # latent trajectory
x = x + torch.randn_like(x) * 0.1 # add some noise
# observation
y = x @ C + d
y = y + torch.randn_like(y) * 0.1 # add some noise
# Plot latent trajectory
fig = plt.figure()
ax = fig.add_subplot(221)
plt.plot(x.numpy())
plt.title('True state')
# +
# Setup and fit VJF
n_rbf = 100 # number of radial basis functions for dynamical system
hidden_sizes = [20] # size of hidden layers of recognition model
likelihood = 'gaussian' # gaussian or poisson
# likelihood = 'poisson' # gaussian or poisson
model = VJF.make_model(ydim, xdim, udim=udim, n_rbf=n_rbf, hidden_sizes=hidden_sizes, likelihood=likelihood)
m, logvar, _ = model.fit(y, max_iter=150) # fit and return list of state posterior tuples (mean, log variance)
m = m.detach().numpy().squeeze()
# -
# draw the latent trajectory
ax = fig.add_subplot(222)
plt.plot(m)
plt.title('Posterior mean')
# +
# Draw the inferred velocity field
def grid(n, lims):
    """Build an n-by-n evaluation grid over the square `lims` x `lims`.

    Returns the meshgrid coordinate matrices X and Y plus the flattened
    (n*n, 2) array of grid points.
    """
    axis = np.linspace(*lims, n)
    X, Y = np.meshgrid(axis, axis)
    # Row-major flatten of both coordinate matrices, paired column-wise.
    grids = np.stack([X.ravel(), Y.ravel()], axis=1)
    return X, Y, grids
ax = fig.add_subplot(223)
r = np.mean(np.abs(m).max()) # determine the limits of plot
Xm, Ym, XYm = grid(51, [-1.5*r, 1.5*r])
Um, Vm = model.transition.velocity(torch.tensor(XYm)).detach().numpy().T # get velocity
Um = np.reshape(Um, Xm.shape)
Vm = np.reshape(Vm, Ym.shape)
plt.streamplot(Xm, Ym, Um, Vm)
plt.plot(*m.T, color='C1', alpha=0.5, zorder=5)
plt.title('Velocity field')
# +
# Plot trajectory forecast
ax = fig.add_subplot(224)
x, y = model.forecast(x0=m[9, ...], n_step=int(100 / dt), noise=False) # return predicted latent trajectory and observation
x = x.detach().numpy().squeeze()
plt.plot(x)
plt.title('Forecast')
plt.tight_layout()
plt.show()
plt.close()
# -
|
notebook/tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/parthrjpt/EAS555_Projects/blob/main/ProblemSet1_555.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="s2ssQTLbrW3Q"
# ## Initial Setup
# + id="8tlKDZhqeN-r"
# %matplotlib inline
import pandas as pd
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
import random
import tensorflow as tf
# + [markdown] id="fbkDpFR_reEa"
# ## MNIST DataSet initialization using keras
# + colab={"base_uri": "https://localhost:8080/"} id="MhAmu1FtovS2" outputId="3100bcb5-1f39-4cff-89bb-a8fa871d5e6a"
from keras.datasets import mnist
(X, y), (X_test, y_test) = mnist.load_data()
print(X.shape, y.shape,X_test.shape, y_test.shape)
# # !wget http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
# # !wget http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
# # !wget http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
# # !wget http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
# # !gzip -d train-images-idx3-ubyte.gz
# # !gzip -d train-labels-idx1-ubyte.gz
# # !gzip -d t10k-images-idx3-ubyte.gz
# # !gzip -d t10k-labels-idx1-ubyte.gz
# + [markdown] id="JOjbAm7drrFK"
# ## Appending training labels to training data
#
#
#
# + id="1ZJ1osnA_kMC"
# Reshape to (n_samples, 28, 28) and pair each image with its label:
# train_data[i] = [label, image].
X= X.reshape(60000,28,28)
#train_data = {}
train_data = []
i=0
while i<len(X):
    train_data.append([y[i], X[i]])
    # train_data[i]={y[i]: X[i]}
    i=i+1
# + id="GtscFOEmhI2j"
# Collect all images of one digit (ad-hoc version of split_data below).
# NOTE(review): this loop leaves `ele` bound at module scope — beware of
# stale-global bugs in later cells.
digit_data=[]
digit=1
for ele in train_data:
    key,value=ele
    if(key==digit):
        digit_data.append(value)
# + [markdown] id="3Z84m6JhsYyu"
# ## Splitting data by the label & Calculating probability of each class
#
# + id="iFygr1GFCMji"
def split_data(data,digit):
    """Return a NumPy array of all images in `data` labelled `digit`.

    Parameters
    ----------
    data : iterable of (label, image) pairs, e.g. the `train_data` list.
    digit : int, class label to select.

    Returns
    -------
    np.ndarray stacking the matching images (empty array if none match).
    """
    # Comprehension replaces the manual loop; the original i/j counters
    # were dead code (incremented but never read).
    return np.array([value for key, value in data if key == digit])
# Per-class image subsets and class priors P(digit) = n_digit / 60000.
# NOTE(review): the repetition could be a loop, but the individual digit_k
# names are referenced later (the `digitdata` list), so they are kept.
digit_0 =split_data(train_data,0)
p0=len(digit_0)/60000
digit_1 =split_data(train_data,1)
p1=len(digit_1)/60000
digit_2 =split_data(train_data,2)
p2=len(digit_2)/60000
digit_3 =split_data(train_data,3)
p3=len(digit_3)/60000
digit_4 =split_data(train_data,4)
p4=len(digit_4)/60000
digit_5 =split_data(train_data,5)
p5=len(digit_5)/60000
digit_6 =split_data(train_data,6)
p6=len(digit_6)/60000
digit_7 =split_data(train_data,7)
p7=len(digit_7)/60000
digit_8 =split_data(train_data,8)
p8=len(digit_8)/60000
digit_9 =split_data(train_data,9)
p9=len(digit_9)/60000
probability =[p0,p1,p2,p3,p4,p5,p6,p7,p8,p9]
# + [markdown] id="6IHP1ACMsutf"
# ## Defining functions to calculate mean, standard deviation, and covariance for each class's training data
# + id="Ip7tv6wqPGnt"
def average_image(ele):
    """Return the 28x28 per-pixel mean image of the stacked images in `ele`."""
    # Flatten each image to a row, average across images, restore 28x28.
    flattened = ele.reshape(ele.shape[0], -1)
    return flattened.mean(axis=0).reshape(28, 28)
def stdev_image(digitset):
    """Return the 28x28 per-pixel standard-deviation image of `digitset`.

    Parameters
    ----------
    digitset : np.ndarray of shape (n_images, 28, 28).

    Bug fix: the original body referenced the global `ele` (leaked from an
    earlier notebook loop) instead of the `digitset` parameter, so every
    call computed the deviation of whatever `ele` last held.
    """
    sdi = digitset.reshape(digitset.shape[0], -1)
    sdi = sdi.std(axis=0)
    sdi = sdi.reshape(28, 28)
    return sdi
def cov_image(dataset,mean):
    """Per-pixel (diagonal) variance of `dataset` around the mean image.

    Parameters
    ----------
    dataset : iterable of 28x28 images for one class.
    mean : 28x28 mean image of that class.

    Returns
    -------
    np.ndarray of shape (784,): the biased per-pixel variance.

    Bug fix: the original overwrote the accumulator on every iteration and
    divided only once outside the loop, so just the LAST image contributed;
    now every image is accumulated. NOTE(review): `term * term` on a 1-D
    vector is element-wise, i.e. only the diagonal of the covariance matrix,
    not a full (784, 784) covariance — the (784,) shape is kept on purpose
    because downstream code broadcasts it against np.eye(784).
    """
    mean_flat = mean.flatten()
    acc = np.zeros_like(mean_flat, dtype=float)
    for ele in dataset:
        term = ele.reshape(ele.shape[0], -1).flatten() - mean_flat
        acc += term * term
    return acc / len(dataset)
# + [markdown] id="Y84OWznBtCCY"
# ## Task 1: Calculating Mean and Standard Deviation
# + id="kmbrogQLNZwg"
digitdata=[digit_0,digit_1,digit_2,digit_3,digit_4,digit_5,digit_6,digit_7,digit_8,digit_9]
avg=[]
std=[]
for ele in digitdata:
avg.append(average_image(ele))
std.append(stdev_image(ele))
# + [markdown] id="OS8xt0jQtL8i"
# ## Task 1: Plotting average and standard deviation based images for each class
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="eOc6fl66tETO" outputId="26120472-3abc-4de3-ae8f-4d80d93be025"
def plotimg(avgimg, stdimg):
    """Show the mean image and the standard-deviation image of one class."""
    # NOTE: the original did ``fig = plt.figure`` (no call), which only
    # bound the function object and was never used — dropped.
    plt.imshow(avgimg)
    plt.xlabel("average image")
    plt.show()
    plt.imshow(stdimg)
    plt.xlabel("standard deviation image")
    plt.show()

# Plot both images for every class 0-9.
for i in range(10):
    plotimg(avg[i], std[i])
# + [markdown] id="ePtEL1NnaaLh"
# ## Performance of Linear Discriminant Classifier vs Performance of Classsifiers used on the MNIST Dataset as mentioned on Lecun's Webpage:
#
#
# >
#
# * It is evident that the models used to classify MNIST dataset such as various Neural Networks, K-NN Classifers,SVM, etc have performed better in comparison to the LDA based classifier.
# * The Linear Discriminant Classfier implemented here is a linear machine.
# * As per Pattern Classification [1], the decision regions for a linear machine are convex and this limitation impacts the flexibility and accuracy of the classifier.
#
#
# Citations:
# [1] <NAME>., et al. Pattern Classification, John Wiley & Sons, Incorporated, 2000. ProQuest Ebook Central, https://ebookcentral-proquest-com.gate.lib.buffalo.edu/lib/buffalo/detail.action?docID=699526.
#
#
#
#
# + [markdown] id="L90ooEBUthNX"
# ## Task 2: Calculating Covariance related data for each class
# + id="_obsgC8ZIgno"
#np.linalg.inv(cov0)
import math as mt

# Diagonal covariance estimate for every class (Task 2).
cov = [np.array(cov_image(digitdata[c], avg[c])) for c in range(10)]
param = 0.385
# + [markdown] id="X7komZZktq7J"
# ## Task 2: Shifting The covariance matrix by a small parameter to avoid issue for determinant and log calculation
#
# > For this use case, the value of 0.385 was selected since it seemed to be the best fit after a few rounds of trial and error with multiple values.
#
#
# + id="YxKKB1xoluG7"
# Shift each covariance by param*I so it is invertible and its
# determinant (and hence log-determinant) is well defined (Task 2).
test_data = X_test.reshape(10000, 784, 1)
d = 784
param = 0.385
shiftedcov = [ele.reshape(784) + param * np.eye(784) for ele in cov]
invcov = [np.linalg.inv(sc) for sc in shiftedcov]
logdet = [np.log(np.linalg.det(sc)) for sc in shiftedcov]
# + id="IroblCa450mM"
# print(np.array(shiftedcov).shape)
# print(shiftedcov[0])
# np.array(invcov).shape
# print(invcov[0])
# np.log(np.linalg.det(shiftedcov[0]))
# + [markdown] id="vtfVuZVQuTyL"
# ## Task 2: Calculation for lda, functions were initially created for each term for ease of understanding and verification of logic
# + [markdown] id="KXU9EdboJXFF"
# #### **Note**: The parameter d and the term3 do not cause any significant impact to the estimations and need not be considered. (Source: Duda, <NAME>., et al. Pattern Classification, <NAME> & Sons, Incorporated, 2000. ProQuest Ebook Central, https://ebookcentral-proquest-com.gate.lib.buffalo.edu/lib/buffalo/detail.action?docID=699526.)
# + id="wfQcVNZlkQvW"
# def lda_term1(ele,avg,shiftedcov):
# #print('ele ',ele)
# #print('avg ',avg)
# #print(np.transpose(ele-avg).shape)
# e=ele-avg
# #print(e)
# e=e.reshape(784,1)
# term=np.dot(np.transpose(e),np.linalg.inv(shiftedcov))
# term=0.5*np.dot(term,(e))
# #print('term1: ',term)
# return term
# def lda_term2():
# term=0.5*784*np.log(2*mt.pi)
# #print('term2 done')
# return term
# def lda_term3(shiftedcov):
# term=0.5*np.log(np.linalg.det(shiftedcov))
# #print('term3 ', term)
# return term
# def lda_term4(prob):
# term=np.log(prob)
# #print('term4 done')
# return term
def lda(ele, avg, shiftedcov, invcov, logdet, prob):
    """Log discriminant score of sample ``ele`` for one class.

    Implements the Gaussian log-density plus log-prior:
    -(1/2)(x-mu)^T S^-1 (x-mu) - (d/2) log(2*pi) - (1/2) log|S| + log P(c)

    Parameters
    ----------
    ele : 1-D array, flattened test sample.
    avg : 1-D array, class mean (same length as ``ele``).
    shiftedcov : unused here (the inverse and log-determinant are
        precomputed); kept so existing call sites keep working.
    invcov : inverse of the shifted class covariance.
    logdet : precomputed log-determinant of the shifted covariance.
    prob : prior probability of the class.

    Returns
    -------
    float : log discriminant value; the class with the highest wins.
    """
    diff = ele - avg
    term1 = 0.5 * np.dot(np.dot(np.transpose(diff), invcov), diff)
    # Generalized: use the actual dimensionality instead of hard-coded 784
    # (identical for 784-dim MNIST vectors, works for any other size too).
    term2 = 0.5 * diff.size * np.log(2 * mt.pi)
    term3 = 0.5 * logdet
    term4 = np.log(prob)
    return -term1 - term2 - term3 + term4
# + [markdown] id="YdCmj4rXum5n"
# ## Task 2: Using the lda calculated for each class for each test datapoint to determine the max inclination for predicting the output class
# + id="5toc4W0Bjk_3"
# Classify each test sample by the class with the highest discriminant
# score; np.argmax picks the first maximum, matching list.index(max(...)).
y_pred = []
test_data = test_data.reshape(len(test_data), 784)
reshapedavg = [mu.reshape(784) for mu in avg]
for sample in test_data:
    ldavalue = [lda(sample, reshapedavg[c], shiftedcov[c], invcov[c],
                    logdet[c], probability[c]) for c in range(10)]
    y_pred.append(int(np.argmax(ldavalue)))
# + [markdown] id="6hS1GtFivzIL"
# ## Task 2: Accuracy
# + id="5HFirAV6sPh_"
# Count predictions that match the ground-truth labels.
count = sum(1 for i in range(10000) if y_test[i] == y_pred[i])
# + colab={"base_uri": "https://localhost:8080/"} id="mz45TDqVx2GW" outputId="e85542e4-4822-4304-8554-221bdfd87436"
# Report the raw correct-prediction count and overall accuracy (percent).
print("True positives: ", count)
print("Total Samples: ", len(y_test))
print("Accuracy: ",(count/len(y_test)*100),'%')
# + [markdown] id="eF1dcHyDMndp"
# ### The code completes its execution in a few minutes, including the time required for classification of the data. This can be considered an indicator of the computational performance of the code.
#
# ### The performance of the model itself can be measured using parameters like accuracy, which has been used in this case. The expected labels were compared to the predicted class labels in order to identify all the true postives. This divided by the total number of test samples gives the accuracy.
#
# ### The accuracy of the model is around 81.29%. After a few test iterations, this accuracy has been observed to be dependent on the parameter used to shift the covariance matrix. By experimenting further with different values of the parameter, one can identify the optimal accuracy possible for this use case.
|
ProblemSet1_555.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # El programa "<NAME>"
# El programa **"<NAME>"** es frecuentemente el primer programa que se escribe para aprender un nuevo lenguaje de programación y probar su sintaxis básica. El programa simplemente imprime en la pantalla la hilera de texto "Hola Mundo" o una similar.
#
# La tradición del uso de este programa en el aprendizaje de la programación de computadoras tiene su origen en el libro [_The C Programming Language_](http://en.wikipedia.org/wiki/The_C_Programming_Language_(book)), escrito en 1978 por <NAME> y <NAME>. El programa de ejemplo de este libro imprime la hilera en inglés **"hello, world"** (en minúscula, con una coma entre las palabras y sin signos de admiración). Aunque este el texto que le dio popularidad, el programa había sido utilizado previamente en el [tutorial del lenguaje de programación B](https://www.bell-labs.com/usr/dmr/www/bintro.html), de <NAME>.
#
# El sitio [The Hello World Collection](http://helloworldcollection.de/) presenta el código de "Hola Mundo" en aproximadamente 600 lenguajes de programación.
# ## "Hola Mundo" en Python
print("<NAME>")
|
nb/Hola mundo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Jupyter Notebook Examples
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Install igv-notebook
# + pycharm={"name": "#%%\n"}
# !pip install igv-notebook
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Define reference using absolute URLs (all platforms)
#
# + pycharm={"name": "#%%\n"}
import igv_notebook
# Set up the igv.js <-> notebook communication layer; must run before a
# Browser is created.
igv_notebook.init()
# Custom hg19 reference; the FASTA and its .fai index are given as
# absolute URLs, so this form works on every notebook platform.
b = igv_notebook.Browser(
    {
        "reference": {
            "id": "hg19_custom",
            "name": "Human (GRCh37/hg19)",
            "fastaURL": "https://s3.amazonaws.com/igv.broadinstitute.org/genomes/seq/hg19/hg19.fasta",
            "indexURL": "https://s3.amazonaws.com/igv.broadinstitute.org/genomes/seq/hg19/hg19.fasta.fai",
        },
        "locus": "chr22:24,376,277-24,376,350"
    })
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Define reference using _**server**_ relative URLs (Jupyter Notebook and Lab)
#
# * Assumption: Server is started from repository root
#
# + pycharm={"name": "#%%\n"}
import igv_notebook
# Set up the igv.js <-> notebook communication layer.
igv_notebook.init()
# URLs here resolve against the Jupyter server root — this assumes the
# server was started from the repository root (see the note above).
b = igv_notebook.Browser(
    {
        "reference": {
            "id": "NC_045523v3",
            "name": "SARS-COV-2",
            "fastaURL": "/examples/data/NC_045512v2.fa",
            "indexURL": "/examples/data/NC_045512v2.fa.fai",
        },
        "locus": "NC_045512v2:6,684-6,722"
    })
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Define reference using _**notebook**_ relative URLs (Jupyter Notebook Only)
#
# * Tracks are loaded after browser creation with ```load_track``` function
# * Tracks loaded with URL paths. Paths are relative to the Jupyter file tree.
#
# _**Note: specifying file urls relative to the notebook is not supported for JupyterLab.**_
# + pycharm={"name": "#%%\n"}
import igv_notebook
# Set up the igv.js <-> notebook communication layer.
igv_notebook.init()
# URLs here are relative to the notebook's own directory — supported in
# classic Jupyter Notebook only, not JupyterLab.
b = igv_notebook.Browser(
    {
        "reference": {
            "id": "NC_045523v3",
            "name": "SARS-COV-2",
            "fastaURL": "data/NC_045512v2.fa",
            "indexURL": "data/NC_045512v2.fa.fai",
        },
        "locus": "NC_045512v2:6,684-6,722"
    })
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Define reference using file paths (Jupyter Notebook and Colab Only)
#
# * _**path**_ properties (e.g. _fastaPath_ and _indexPath_ can be used to access files anywhere on the local filesystem. This example specifies
# relative file paths (relative to the notebook) for portability of this demo, however the use of _**path**_
# properties is not recommended if files are in the Jupyter file space, _**url**_ properties will in general
# yield better performance.
#
# _**Note: "path" properties for local files are not supported for JupyterLab.**_
# + pycharm={"name": "#%%\n", "is_executing": true}
import igv_notebook
# Set up the igv.js <-> notebook communication layer.
igv_notebook.init()
# "...Path" properties read files directly from the local filesystem
# (Notebook/Colab only); URL properties perform better for files that
# live inside the Jupyter file space.
b = igv_notebook.Browser(
    {
        "reference": {
            "id": "NC_045523v3",
            "name": "SARS-COV-2",
            "fastaPath": "data/NC_045512v2.fa",
            "indexPath": "data/NC_045512v2.fa.fai"
        },
        "locus": "NC_045512v2:6,684-6,722"
    })
# -
|
examples/Custom_Reference.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python для анализа данных
#
# ## Использование API. Работа с форматами XML и JSON
#
# *На основе лекции <NAME>, НИУ ВШЭ*
# *Дополнения: <NAME>, НИУ ВШЭ*
#
# # XML
# До этого мы с вами собирали данные вручную, обращаясь к html страницам, размеченным для отображения в браузере. Но данные также можно собирать и через API - — application program interface. Обычный интерфейс — это способ взаимодействия человека с программой, а API — одной программы с другой. Например, вашего скрипта на Python с удалённым веб-сервером.
#
# Для хранения веб-страниц, которые читают люди, используется язык HTML. Для хранения произвольных структурированных данных, которыми обмениваются между собой программы, используются другие языки — в частности, язык XML, похожий на HTML. Вернее было бы сказать, что XML это метаязык, то есть способ описания языков. В отличие от HTML, набор тегов в XML-документе может быть произвольным (и определяется разработчиком конкретного диалекта XML). Например, если бы мы хотели описать в виде XML некоторую студенческую группу, это могло бы выглядеть так:
#
# ```xml
# <group>
# <number>134</number>
# <student>
# <firstname>Виталий</firstname>
# <lastname>Иванов</lastname>
# </student>
# <student>
# <firstname>Мария</firstname>
# <lastname>Петрова</lastname>
# </student>
# </group>
# ```
# Для обработки XML-файлов можно использовать тот же пакет *Beautiful Soup*, который мы уже использовали для работы с HTML. Единственное различие — нужно указать дополнительный параметр `feautres="xml"` при вызове функции `BeautifulSoup` — чтобы он не искал в документе HTML-теги.
# Sample XML document (a student group) used by the parsing examples below.
group = """<group>
<number>134</number>
<student>
<firstname>Виталий</firstname>
<lastname>Иванов</lastname>
</student>
<student>
<firstname>Мария</firstname>
<lastname>Петрова</lastname>
</student>
</group>"""
# !pip install lxml
# +
from bs4 import BeautifulSoup
# NOTE(review): the text above suggests features="xml", but "lxml" (the
# HTML parser, which lower-cases tag names) is used here — confirm which
# parser is intended.
obj = BeautifulSoup(group, features="lxml")
print(obj.prettify())
# -
# Номер группы можно найти, например, вот так - для каждого объекта через точку указываем его атрибут, в который надо спуститься.
obj.group.number.text # последний атрибут текст, точно также как делали в html
# Но это работает только тогда, когда тэг уникальный. В других случаях, парсер всегда будет попадать в первый child-тэг, который он встретил по пути вниз.
obj.group.student.lastname.text # до Петровой так не добраться
# Перечислить всех студентов можно с помощью цикла (похожая структура у нас была и в обработке html).
for student in obj.group.find_all('student'):
print(student.lastname.text, student.firstname.text)
# По сути, главное отличие xml от html, что работать вы будете не со стандартизированными структурами. Поэтому перед работой придется поиграть в детективов - запросить данные и внимательно изучить расположение узлов, чтобы понять, какие тэги вас интересуют.
#
# XML легко представить в виде дерева, где есть главный узел (parent) и его "дети".
#
# 
# *Источник: Python for Everybody, C.Severance*
# Кроме BS парсить xml можно и с помощью других библиотек. Например, ElementTree.
#
# The same XML can be parsed with the standard-library ElementTree.
import xml.etree.ElementTree as ET
tree = ET.fromstring(group)
list(tree) # inspect the root's direct children
# Cинтаксис очень похож на BS. Добрались до первой фамилии.
tree.find('student').find('lastname').text
for element in tree.findall('student'):
print(element)
print(element.find('lastname').text)
# Можно немного упростить код, включив дочерний тэг в findall.
for element in tree.findall('student/lastname'):
print(element.text)
# # Задача
# По ссылке данные в формате xml.
# http://py4e-data.dr-chuck.net/comments_42.xml
#
# Посчитайте все комментарии в этом документе (поля count).
# +
import requests

# Download the XML document and total up every comments/comment/count value.
data = requests.get('http://py4e-data.dr-chuck.net/comments_42.xml').text
tree = ET.fromstring(data)
total = sum(int(element.text) for element in tree.findall('comments/comment/count'))
print(total)
# -
sum([int(element.text) for element in tree.findall('comments/comment/count')])
# # Реальный пример: wiki API
# Допустим, нам потребовалось получить список всех статей из некоторой категории в Википедии. Мы могли бы открыть эту категорию в браузере и дальше действовать теми методами, которые обсуждались выше. Однако, на наше счастье разработчики Википедии сделали удобное API. Чтобы научиться с ним работать, придётся познакомиться с [документацией](https://www.mediawiki.org/wiki/API:Main_page) (так будет с любым API), но это кажется сложным только в первый раз. Ну хорошо, в первые 10 раз. Или 20. Потом будет проще.
#
# Многие API будут требовать токена (например, ваш google логин-пароль для работы с гугл-документами), но мы сейчас работаем с открытым интерфейсом.
#
# Итак, приступим. Взаимодействие с сервером при помощи API происходит с помощью отправки специальным образом сформированных запросов и получения ответа в одном из машинночитаемых форматов. Нас будет интересовать формат XML, хотя бывают и другие (позже мы познакомимся с JSON). А вот такой запрос мы можем отправить:
#
# https://en.wikipedia.org/w/api.php?action=query&list=categorymembers&cmtitle=Category:Physics&cmsort=timestamp&cmdir=desc&format=xmlfm
#
# Строка `https://en.wikipedia.org/w/api.php` (до знака вопроса) — это *точка входа* в API. Всё, что идёт после знака вопроса — это, собственно, запрос. Он представляет собой что-то вроде словаря и состоит из пар «ключ=значение», разделяемых амперсандом `&`. Некоторые символы приходится кодировать специальным образом.
#
# Например, в адресе выше сказано, что мы хотим сделать запрос (`action=query`), перечислить элементы категории `list=categorymembers`, в качестве категории, которая нас интересует, указана `Category:Physics` (`cmtitle=Category:Physics`) и указаны некоторые другие параметры. Если кликнуть по этой ссылке, откроется примерно такая штука:
#
# ```xml
# <?xml version="1.0"?>
# <api batchcomplete="">
# <continue cmcontinue="2015-05-30 19:37:50|1653925" continue="-||" />
# <query>
# <categorymembers>
# <cm pageid="24293838" ns="0" title="Wigner rotation" />
# <cm pageid="48583145" ns="0" title="Northwest Nuclear Consortium" />
# <cm pageid="48407923" ns="0" title="<NAME>" />
# <cm pageid="48249441" ns="0" title="Phase Stretch Transform" />
# <cm pageid="47723069" ns="0" title="Epicatalysis" />
# <cm pageid="2237966" ns="14" title="Category:Surface science" />
# <cm pageid="2143601" ns="14" title="Category:Interaction" />
# <cm pageid="10844347" ns="14" title="Category:Physical systems" />
# <cm pageid="18726608" ns="14" title="Category:Physical quantities" />
# <cm pageid="22688097" ns="0" title="Branches of physics" />
# </categorymembers>
# </query>
# </api>
# ```
# Мы видим здесь разные теги, и видим, что нас интересуют теги `<cm>`, находящиеся внутри тега `<categorymembers>`.
#
# Давайте сделаем соответствующий запрос с помощью Python. Для этого нам понадобится уже знакомый модуль `requests`.
# +
# MediaWiki API entry point (everything after '?' is the query itself).
url = "https://en.wikipedia.org/w/api.php"
# Query: list the members of Category:Physics, response formatted as XML.
params = {
    'action':'query',
    'list':'categorymembers',
    'cmtitle': 'Category:Physics',
    'format': 'xml'
}
g = requests.get(url, params=params)
# -
# Как видно, список параметров мы передаем в виде обычного словаря. Посмотрим, что получилось.
g.ok
# ?g.ok # возвращает ошибку, если сервер или клиент не отвечает
# Всё хорошо. Теперь используем *Beautiful Soup* для обработки этого XML.
data = BeautifulSoup(g.text, features='xml')
print(data.prettify())
# Найдём все вхождения тега `<cm>` и выведем их атрибут `title`:
for cm in data.api.query.categorymembers("cm"):
print(cm['title'])
# Можно было упростить поиск `<cm>`, не указывая «полный путь» к ним:
for cm in data("cm"):
print(cm['title'])
# По умолчанию сервер вернул нам список из 10 элементов. Если мы хотим больше, нужно воспользоваться элементом `continue` — это своего рода гиперссылка на следующие 10 элементов.
data.find("continue")['cmcontinue']
# Мне пришлось использовать метод `find()` вместо того, чтобы просто написать `data.continue`, потому что `continue` в Python имеет специальный смысл.
#
# Теперь добавим `cmcontinue` в наш запрос и выполним его ещё раз:
params['cmcontinue'] = data.api("continue")[0]['cmcontinue']
params
g = requests.get(url, params=params)
data = BeautifulSoup(g.text, features='xml')
for cm in data.api.query.categorymembers("cm"):
print(cm['title'])
# Мы получили следующие 10 элементов из категории. Продолжая таким образом, можно выкачать её даже целиком (правда, для этого потребуется много времени).
#
# Аналогичным образом реализована работа с разнообразными другими API, имеющимися на разных сайтах. Где-то API является полностью открытым (как в Википедии), где-то вам потребуется зарегистрироваться и получить application id и какой-нибудь ключ для доступа к API, где-то попросят даже заплатить (например, автоматический поиск в Google стоит что-то вроде 5 долларов за 100 запросов). Есть API, которые позволяют только читать информацию, а бывают и такие, которые позволяют её править. Например, можно написать скрипт, который будет автоматически сохранять какую-то информацию в Google Spreadsheets. Всякий раз при использовании API вам придётся изучить его документацию, но это в любом случае проще, чем обрабатывать HTML-код. Иногда удаётся упростить доступ к API, используя специальные библиотеки.
# # JSON
#
# Другой популярный формат, в котором клиент может отдать вам данные - json. JSON расшифровывается как JavaScript Object Notation и изначально возник как подмножество языка JavaScript (пусть вас не вводит в заблуждение название, этот язык ничего не имеет общего с Java), используемое для описания объектов, но впоследствии стал использоваться и в других языках программирования, включая Python. Различные API могут поддерживать либо XML, либо JSON, либо и то, и другое, так что нам полезно научиться работать с обоими типами данных (например, wiki api могла бы выгрузить нам данные и в формате json при соответствующем запросе).
# +
url = "https://en.wikipedia.org/w/api.php"
# Same category query as before, but now requesting a JSON response.
params = {
    'action':'query',
    'list':'categorymembers',
    'cmtitle': 'Category:Physics',
    'format': 'json' # switched the format to json
}
j = requests.get(url, params=params)
j.ok # True when the HTTP request succeeded
# -
j.text
# Посмотрим, что достали. Уже сразу видно, что структура у данных совсем другая.
#
# JSON очень похож на описание объекта в Python и смысл квадратных и фигурных скобок такой же. Правда, есть и отличия: например, в Python одинарные и двойные кавычки ничем не отличаются, а в JSON можно использовать только двойные. Мы видим, что полученный нами JSON представляет собой словарь, значения которого — строки или числа, а также списки или словари, значения которых в свою очередь также могут быть строками, числами, списками, словарями и т.д. То есть получается такая довольно сложная структура данных.
#
# В данный момент тот факт, что перед нами сложная структура данных, видим только мы — с точки зрения Python, j.text это просто такая строка. Однако в модуле requests есть метод, позволяющий сразу выдать питоновский объект (словарь или список), если результат запроса возвращён в формате JSON. Так что нам не придётся использовать никакие дополнительные библиотеки.
j_data = j.json()
j_data # получили честный питоновский словарь
# Содержательная информация хранится по ключу 'query'. А уже внутри есть ключ 'categorymembers', значением которого является список всех категорий. Каждая категория отображается в виде словаря, записями которого являются разные параметры категории (например, 'title' соответствует названию, а pageid — внутреннему идентификатору в системе).
#
#
j_data['query']['categorymembers'] # привычный нам список
for cm in j_data['query']['categorymembers']: # пройдемся по нему привычным нам циклом
print(cm['title'])
# Преимущества JSON в том, что мы получаем готовый объект Python и нет необходимости использовать какие-то дополнительные библиотеки для того, чтобы с ним работать. Недостатком является то же самое: зачастую поиск информации в XML-файле может проводиться более эффективно, чем в JSON. Продемонстрируем это на уже рассмотренном примере. Чтобы получить список всех тегов <cm>, в которых хранилась информация об элементах категории в XML, мы использовали полный «путь»:
#
# ```python
# for cm in data.api.query.categorymembers("cm"):
# print(cm['title'])
# ```
#
# Однако, это можно бы сделать (в данном случае) гораздо короче. Если посмотреть на XML, то можно заметить, что в нём нет других тегов <cm>, кроме тех, которые нам нужны. С другой стороны, Beautiful Soup ищет все теги с данным именем, а не только те, которые являются потомками первого уровня для данного тега. Таким образом, код выше можно было бы переписать более коротко:
for cm in data("cm"):
print(cm['title'])
# Конечно data("cm") выглядит короче, чем q['query']['categorymembers']. В JSON мы не можем использовать подобные методы. Так что у обоих форматов есть свои плюсы и минусы.
# ## JSON (парсинг VK)
#
# Как уже говорилось выше, не все API открытые. Так, чтобы достать информацию из vk вам придется сгенерировать токен с помощью вашего аккаунта (мы выложим отдельный блокнот как это сделать, для тех, кому будет интересно). API VK отдает данные в json. Структура тут будет посложнее, чем то, что мы уже видели, поэтому давайте еще потренируемся.
# Теперь научимся еще и загружать JSON файл с диска. Для этого нам понадобится модуль json.
import json
# Load the VK wall-post dump previously saved to disk.
with open('vk.json', 'r', encoding='Utf-8') as json_data:
    res_loaded = json.load(json_data) # read the data with json.load()
# Здесь у нас выгрузка постов со стены группы ВШЭ.
res_loaded
res_loaded['items'][:2]
res_loaded.keys()
# Ключами являются `count` и `items`. Нужные нам объекты (текст постов, id автора, дата и время публикации и проч.) находятся в `items`.
res_loaded['items'][0] # первый элемент items - первый пост со всей информацией о нем
# Помимо текста поста можно найти много всего интересного. Например, тип поста (`post_type`), дата (`date`), id поста (`id`), лайки (`likes`, которые включают информацию о том, могут ли пользователи лайкать пост и публиковать его, а также собственно число лайков), репосты (`reposts`, которые включают число репостов), число просмотров (`views`), комментарии (`comments`, которые включают информацию о том, могут ли пользователи комментировать пост, и число комментариев), и так далее.
#
# Давайте остановимся на тексте поста, id автора, id поста и дате публикации. Чтобы извлечь соответствующую информацию, сохраним `items` и извлечем из них нужные поля:
# +
# Keep only the author id, post id, text and publication date of each post.
items = res_loaded['items']
full_list = [[item['from_id'], item['id'], item['text'], item['date']]
             for item in items]
# peek at a few rows
full_list[0:4]
# -
# Видно, что в двух первых постах текста не обнаружено, там только картинки, ссылки и репосты.
#
# Из этого списка списков можно легко сделать датафрейм `pandas`.
# Но если у ссылок, репостов и картинок нет текста, то наш DataFrame будет выглядеть неполноценно. Поэтому добавим условие: если текст отсутствует, вставим в DataFrame строку с этой отметкой.
# опять выберем только нужные поля
# Rebuild the row list, substituting a placeholder string for posts that
# have no text (pure images, links or reposts).
full_list = []
for item in items:
    text = item['text'] if item['text'] != "" else "Картинка, ссылка или репост"
    full_list.append([item['from_id'], item['id'], text, item['date']])
# Оставлось превратить обновленный список `items` (список списков) в датафрейм. Импортируем `pandas`.
import pandas as pd
# Создадим датафрейм:
df = pd.DataFrame(full_list)
df.head(10)
# Ура! Осталось только дать внятные названия столбцам и разобраться, почему дата представлена в таком виде. что делать со столбцами, мы уже знаем.
df.columns = ['From_id', 'Id', 'Text', 'Date_Unix']
df.head(10)
# С датой все интереснее. То, что указано в столбце `date`, это дата в виде UNIX-времени (POSIX-времени). Это число секунд, прошедших с 1 января 1970 года. Несмотря на то, что такой формат даты-времени кажется необычным, он довольно широко распространен в разных системах и приложениях. Этот факт, конечно, радует, но хочется получить дату в более человеческом формате. Давайте напишем функцию для перевода UNIX-времени в формат год-месяц-день-часы-минуты-секунды. Для этого нам понадобится модуль datetime.
from datetime import datetime
def date_norm(date):
d = datetime.fromtimestamp(date) # timestamp - UNIX-время в виде строки
str_d = d.strftime("%Y-%m-%d %H:%M:%S") # %Y-%m-%d %H:%M:%S - год-месяц-день, часы:минуты:секунды
date_norm, time_norm = str_d.split(' ') # разобьем результат на части, отделим дату от времени
return date_norm, time_norm
# Применим нашу функцию к элементам столбца date и создадим новый ‒ `date_norm`.
df['Date_Norm'] = df.Date_Unix.apply(date_norm)
df.head()
# Можно было, конечно, не разбивать на части дату и время, сохранять одной строкой. А можно написать функции, которые будут отделять дату от времени ‒ извлекать их из кортежа в date_norm.
# +
def get_date(date):
    """Return the date component (element 0) of a (date, time) pair."""
    return date[0]
def get_time(date):
    """Return the time component (element 1) of a (date, time) pair."""
    return date[1]
# -
df['Date'] = df.Date_Norm.apply(get_date)
df['Time'] = df.Date_Norm.apply(get_time)
df.head()
# Всё! Материалы о разных методах и функциях для `vk.api` можно найти в [официальной документации](https://vk.com/dev/manuals).
|
lect11_Selenium_API/2021_DPO_11_5_API_XML_JSON.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Gendist V1.0
# %load_ext autoreload
# %autoreload 2
import os
import jax
import optax
import dojax
import pickle
import jax.numpy as jnp
import flax.linen as nn
import numpy as np
import torchvision
import pandas as pd
import matplotlib.pyplot as plt
from time import time
from augly import image
from sklearn.decomposition import PCA
from datetime import datetime
from flax import serialization
from multiprocessing import Pool
from loguru import logger
from tqdm.notebook import tqdm
import gendist
from gendist import training, processing
from gendist.models import MLPDataV1
from gendist.processing import flat_and_concat_params
from gendist.training import TrainingConfig, make_cross_entropy_loss_func
# %config InlineBackend.figure_format = "retina"
plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False
# Download MNIST via torchvision and convert to NumPy arrays; labels are
# one-hot encoded for the cross-entropy loss used below.
mnist_train = torchvision.datasets.MNIST(root=".", train=True, download=True)
mnist_test = torchvision.datasets.MNIST(root=".", train=False, download=True)
n_classes = 10
X_train = np.array(mnist_train.data)
y_train = np.array(mnist_train.targets)
y_train_ohe = jax.nn.one_hot(y_train, n_classes)
# +
key = jax.random.PRNGKey(314)
batch_size = 2000
n_epochs = 150
alpha = 0.001
tx = optax.adam(learning_rate=alpha)
# +
# Augmentation: blur each image with a configurable radius (augly).
def processor(X, radius): return image.aug_np_wrapper(X, image.blur, radius=radius)
proc_class = gendist.processing.Factory(processor)
model = MLPDataV1(n_classes)
# 100 blur radii: 50 in [0.01, 1.0] plus 50 in [2.0, 3.0]; the (1, 2)
# gap is left out — it is probed later as the "trench" region.
radii = jnp.r_[np.linspace(0.01, 1.0, 50), np.linspace(2.0, 3.0, 50)]
# -
# Train one classifier per blur radius; collect its parameters and
# training accuracy to build the (config -> weights) dataset.
train_config = TrainingConfig(model, proc_class, make_cross_entropy_loss_func, tx)
configs_params = []
configs_accuracy = []
for radius in tqdm(radii):
    config = {"radius": radius}
    params, train_acc = train_config.train_model_config(key, X_train, y_train_ohe,
                                                        config, n_epochs, batch_size)
    configs_params.append(params)
    configs_accuracy.append(train_acc)
# +
date_str = datetime.now().strftime("%y%m%d")
filename = f"mnist-shift-params-{date_str}.pkl"
output_elements = {
"configs": radii,
"params": configs_params,
"metric": configs_accuracy
}
with open(filename, "wb") as f:
pickle.dump(output_elements, f)
# -
# Reload a previously saved run so the notebook can resume from here.
with open("mnist-shift-params-220309.pkl", "rb") as f:
    output_elements = pickle.load(f)
radii = output_elements["configs"]
configs_params = output_elements["params"]
configs_accuracy = output_elements["metric"]
# Flatten each parameter pytree into one row vector; the returned
# function maps a flat vector back to the pytree structure (presumably —
# confirm against dojax docs).
configs_params_flat, fn_recontruct_params = dojax.flat_and_concat_params(configs_params)
# +
# 2-D PCA of the flattened parameter vectors (colored by blur radius),
# next to the training-accuracy curve.
pca = PCA(n_components=2)
params_proj = pca.fit_transform(configs_params_flat)
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
ax[0].set_title("Projected weight dynamics: MNIST")
ax[0].scatter(*params_proj.T, c=radii)
ax[0].tick_params(axis="both", labelleft=False, labelbottom=False)
ax[0].axis("equal")
ax[0].axis("off")
ax[1].set_title("Accuracy")
ax[1].scatter(radii, configs_accuracy, c=radii)
ax[1].set_xlabel("Radius")
ax[1].set_ylabel("Accuracy");
# -
# ## Training weights
n_components = 60
pca = PCA(n_components=n_components)
params_proj = pca.fit_transform(configs_params_flat)
radii_dict = [{"radius": radius} for radius in radii]
n_train_subset = 6_000
subset_ix = pd.Series(y_train).sample(n_train_subset, random_state=314)
subset_ix = subset_ix.index.values
subset_ix
logger.add("training-weights-001-trench.log", rotation="5mb")
logger.warning("Initialising process")
# +
def processor(X, radius): return image.aug_np_wrapper(X, image.blur, radius=radius)
proc_class = gendist.processing.Factory(processor)
alpha = 0.0001
tx = optax.adam(learning_rate=alpha)
weights_model = gendist.models.MLPWeightsV1(n_components)
train_config = gendist.training.TrainingShift(weights_model, proc_class,
gendist.training.make_multi_output_loss_func,
tx)
# -
# Train the weight-prediction model from scratch (no warm start: the
# last argument is the initial weights, None here).
# Fix: the variable was misspelled ``weigths_trained``, leaving two
# near-identical names in the namespace; renamed consistently.
weights_trained = None
batch_size = 10_000
num_epochs = 10

weights_trained, losses = train_config.train(key, X_train[subset_ix], params_proj,
                                             radii_dict, batch_size, num_epochs, logger, weights_trained)

# Persist the trained weights with flax serialization.
with open("weights-dynamics-trench.params", "wb") as f:
    bytes_params = serialization.to_bytes(weights_trained)
    f.write(bytes_params)
# # Proxy zero-shot learning
radii_test = np.linspace(0.001, 3.0, 150)
# +
with open("mnist-shift-params-220309.pkl", "rb") as f:
output_elements = pickle.load(f)
radii = output_elements["configs"]
configs_params = output_elements["params"]
configs_accuracy = output_elements["metric"]
with open("weights-dynamics.params", "rb") as f:
weights_trained = weights_model.init(jax.random.PRNGKey(314), jnp.ones((1, 28 ** 2)))
weights_trained = serialization.from_bytes(weights_trained, f.read())
# -
logger.remove()
logger.add("projection-accuracy-trenches-v1.log", rotation="5mb")
X_test = np.array(mnist_test.data)
y_test = np.array(mnist_test.targets)
# +
ix = 4  # index of the single probe image used to PREDICT the weights
# For each test radius: blur one probe image, predict the classifier's
# (PCA-compressed) weights from it alone, reconstruct the full parameter
# pytree, then score those weights on the whole blurred test set.
accuracy_configs_learned = []
predicted_weights_array = []
for radius in tqdm(radii_test):
    x_test_shift = proc_class.process_single(X_test[ix], radius=radius)
    predicted_weights = weights_model.apply(weights_trained, x_test_shift.ravel())
    reconstructed_predicted_weights = pca.inverse_transform(predicted_weights)
    reconstructed_predicted_weights = fn_recontruct_params(reconstructed_predicted_weights)
    X_test_shift_ravel = proc_class(X_test, {"radius": radius})
    y_test_hat = model.apply(reconstructed_predicted_weights, X_test_shift_ravel)
    y_test_hat = y_test_hat.argmax(axis=1)
    accuracy_learned = (y_test_hat == y_test).mean()
    accuracy_configs_learned.append(accuracy_learned)
    predicted_weights_array.append(predicted_weights)
    logger.info(f"{radius=:0.4f} | {accuracy_learned=:0.4f}")
predicted_weights = weights_model.apply(weights_trained, x_test_shift.ravel())
reconstructed_predicted_weights = pca.inverse_transform(predicted_weights)
# +
import re

# Parse "radius=... | accuracy_learned=..." pairs out of the run log and plot
# accuracy as a function of the shift radius.
with open("projection-accuracy-trenches.log", "r") as f:
    logs = f.read()

regexp = re.compile(r"radius=([0-9\.]+) \| accuracy_learned=([0-9\.]+)")

log_vals = []
# The final split element is the empty string after the trailing newline.
for line in logs.split("\n")[:-1]:
    # Search once per line (the original called .search twice per line).
    match = regexp.search(line)
    log_vals.append({"radius": float(match[1]), "accuracy": float(match[2])})

log_df = pd.DataFrame(log_vals).set_index("radius")
log_df.plot()
# Dashed guides mark the radius band [1, 2] highlighted elsewhere in the analysis.
plt.axvline(x=1.0, c="tab:gray", linestyle="--")
plt.axvline(x=2.0, c="tab:gray", linestyle="--")
plt.title("One-shot weight-prediction")
# -
from sklearn.manifold import TSNE
from sklearn.decomposition import KernelPCA  # NOTE(review): imported but unused here

# +
# NOTE(review): the TSNE instance below is immediately overwritten by PCA —
# dead code unless the t-SNE projection is meant to be re-enabled.
pca_reconstruct = TSNE(n_components=2, perplexity=15, random_state=314)
pca_reconstruct = PCA(n_components=2)
predicted_matrix = np.r_[predicted_weights_array]
# Color radii inside the band (1, 2) red, everything else blue.
colors = ["tab:red" if 1 < radius < 2 else "tab:blue" for radius in radii_test]
plt.scatter(*pca_reconstruct.fit_transform(predicted_matrix).T, c=colors)
# NOTE(review): colorbar() after a scatter colored by a list of named colors
# (no scalar mappable) may raise on current matplotlib — verify this cell runs.
plt.colorbar()
# -

import seaborn as sns

# Project the predicted weights onto one principal component and show the
# distribution.
pca_reconstruct = PCA(n_components=1)
d1_reconstruct = pca_reconstruct.fit_transform(predicted_matrix)
# NOTE(review): sns.distplot is deprecated (removed in seaborn 0.14) —
# prefer sns.histplot(d1_reconstruct) on newer seaborn versions.
sns.distplot(d1_reconstruct, kde=False)
# ## ToDo:
# 1. Test on out-of-sample (trench part) of the model.
|
notebooks/007-gendist-v1-partial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Neo4j to Network X
#
# This notebook contains code for querying the neo4j knowledge graph and transforming the results of that query into a network x graph.
import networkx as nx
import os
import pandas as pd
from tqdm.notebook import tqdm
from neo4j import GraphDatabase
# You can visit this G Drive folder to get the data for this project: https://drive.google.com/drive/folders/1E-pxqkJDrS-jd_MNq6IbS-v7gGNNmrYd?usp=sharing. Insert this data into ../data/processed/
# ## Functions for coercing knowledge graph into NetworkX
# +
def getSubgraph(q, parameters=None):
    '''
    Query the knowledge graph with the Cypher query `q` and return the
    resulting nodes and relationships as a networkx MultiDiGraph.

    E.g. getSubgraph(r'MATCH (u:Cid)-[r:HYPERLINKS_TO]->(v:Cid) RETURN *')
    returns the structural graph.

    Optionally, `parameters` (a dictionary) allows Python variables to be
    passed into the Cypher query `q`:

        parameters = {'pages': ['a', 'list', 'of', 'stuff']}
        q7 = """
        MATCH (u:Cid)-[r]-(v:Cid)
        WHERE u.name IN $pages AND v.name in $pages
        RETURN *
        """
        g7 = getSubgraph(q7, parameters)
    '''
    # get credentials
    # add to .secrets: export KG_PWD="<PASSWORD>"
    KG_PWD = os.getenv("KG_PWD")

    # create connection to knowledge graph
    driver = GraphDatabase.driver(
        "bolt+s://knowledge-graph.integration.govuk.digital:7687",
        auth=("neo4j", KG_PWD),
    )
    try:
        # Consume the result while the session is still open: neo4j results
        # are not guaranteed to be valid after the session closes.
        with driver.session() as session:
            results = session.run(q, parameters)

            # create networkx graph object
            G = nx.MultiDiGraph()

            # add nodes into networkx graph object
            nodes = list(results.graph()._nodes.values())
            print("Adding nodes\n")
            for node in tqdm(nodes):
                G.add_node(node.id, labels=node._labels, properties=node._properties)

            # add edges into networkx graph object
            rels = list(results.graph()._relationships.values())
            print("Adding edges\n")
            for rel in tqdm(rels):
                G.add_edge(
                    rel.start_node.id,
                    rel.end_node.id,
                    key=rel.id,
                    type=rel.type,
                    properties=rel._properties,
                )
    finally:
        # Previously the driver and session were never closed, leaking
        # connections to the database.
        driver.close()

    return G
def showGraph(g):
    """Print summary statistics for the networkx graph `g`, then draw it.

    Intended for small graphs only — drawing a large graph is slow and
    unreadable.
    """
    summary = nx.info(g)
    print(summary)
    nx.draw(g)
# -
# ## Defining subgraph based on mainstream content
# +
# mainstream content in the structural graph
q3 = r"""
MATCH (u:Mainstream)-[r:HYPERLINKS_TO]->(v:Mainstream)
RETURN *
"""
g3 = getSubgraph(q3)
showGraph(g3)
# -
# ## Defining subgraph based on functional graph
# +
# get the functional graph (pages linked by observed USER_MOVEMENT relationships)
q5 = r"""
MATCH (u:Cid)-[r:USER_MOVEMENT]->(v:Cid)
RETURN *
"""
g5 = getSubgraph(q5)
# bare expression: shows the graph size in the notebook output
g5.number_of_nodes(), g5.number_of_edges()
# -

# write the functional graph to disk
# NOTE(review): nx.write_gpickle was removed in networkx 3.0 — use the pickle
# module directly if the project upgrades.
nx.write_gpickle(g5, "../data/processed/functional_graph.gpickle")
# ## Defining subgraph based on structural graph
# +
# get the structural graph (pages linked by HYPERLINKS_TO relationships)
q6 = r"""
MATCH (u:Cid)-[r:HYPERLINKS_TO]->(v:Cid)
RETURN *
"""
g6 = getSubgraph(q6)
# bare expression: shows the graph size in the notebook output
g6.number_of_nodes(), g6.number_of_edges()
# -

# write structural graph to disk
# NOTE(review): nx.write_gpickle was removed in networkx 3.0 — use the pickle
# module directly if the project upgrades.
nx.write_gpickle(g6, "../data/processed/structural_graph.gpickle")
# visualising nodes in the structural graph that mention 'start a business'
# NOTE(review): assumes every node carries a "text" property — a node without
# one raises KeyError here; confirm against the graph schema.
nodes = list(g6.nodes(data=True))
sabNodes = [
    node
    for node in nodes
    if "start a business" in node[1]["properties"]["text"].lower()
]
nx.draw(g6.subgraph([node[0] for node in sabNodes]))
# ## Defining subgraph based on page hits from Big Query GA data
# ### By page path
page_paths = pd.read_csv('../data/processed/page_paths.csv')
page_paths.head()
# +
parameters = {}
parameters["pages"] = page_paths["pagePath"].tolist()
q7 = f"""
MATCH (u:Cid)-[r:HYPERLINKS_TO|USER_MOVEMENT]->(v:Cid)
WHERE u.name IN $pages AND v.name in $pages
RETURN *
"""
g7 = getSubgraph(q7, parameters)
nx.info(g7)
# -
# write graph to disk
nx.write_gpickle(g7, "../data/processed/5_hits_graph.gpickle")
# #### Running checks to ensure all SaB pages are in this subgraph
sab_pages = pd.read_csv('../data/processed/sab_pages.csv')
sab_pages.head()
# percentage of SaB nodes that are in our subgraph
g7nodes = list(g7.nodes(data=True))
g7names = set([node[1]["properties"]["name"] for node in g7nodes])
len(set(sab_pages.pagePath).intersection(g7names)) / len(set(sab_pages.pagePath)) * 100
# 99.67% of SaB nodes are in this subgraph. The missing node relates to a withdrawn page, therefore, effectively 100% of the SaB nodes are in this subgraph:
set(sab_pages.pagePath) - set(sab_pages.pagePath).intersection(g7names)
# ### By content ID
content_ids = pd.read_csv('../data/processed/content_ids.csv')
content_ids.head()
# +
parameters = {}
parameters["pages"] = content_ids["contentID"].tolist()
q8 = f"""
MATCH (u:Cid)-[r:HYPERLINKS_TO|USER_MOVEMENT]->(v:Cid)
WHERE u.contentID IN $pages AND v.contentID in $pages
RETURN *
"""
g8 = getSubgraph(q8, parameters)
nx.info(g8)
# -
# write graph to disk
nx.write_gpickle(g8, "../data/processed/5_hits_per_contentID_graph.gpickle")
# percentage of SaB nodes that are in our subgraph
g8nodes = list(g8.nodes(data=True))
g8names = set([node[1]["properties"]["name"] for node in g8nodes])
len(set(sab_pages.pagePath).intersection(g8names)) / len(set(sab_pages.pagePath)) * 100
|
notebooks/neo4j_to_networkx.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Day 10
#
# The elves are in the asteroid belt, and they can't track all the asteroids -- so they don't feel safe flying around
#
# ## Part 1
#
# They need to build a new monitoring station on an asteroid, and need help figuring out which asteroid is in the best position for placing the station -- the asteroid that can detect the largest number of other asteroids. An asteroid is detected if a straight line can be drawn between the two without another asteroid blocking the way
from math import gcd, atan2, pi
from typing import List
class AsteroidBelt:
    """An asteroid field parsed from a grid of '#' (asteroid) / '.' (empty).

    Coordinates are [row, col] lists throughout, matching the puzzle's
    expected answer format.
    """

    def __init__(self, asteroid_map: str):
        # Mutable grid of single characters so asteroids can later be
        # "vaporized" in place by overwriting them with '.'.
        self.asteroid_map = [list(row) for row in asteroid_map.split("\n")]
        self.row_range = len(self.asteroid_map[0])  # number of columns
        self.col_range = len(self.asteroid_map)     # number of rows
        self._asteroid_coords = None
        self.asteroid_coords = self._asteroid_coords
        self.vaporized_asteroids = []

    @property
    def asteroid_coords(self):
        # Recomputed on every access so the list always reflects the current
        # state of the (mutable) map.
        return self.find_asteroids()

    @asteroid_coords.setter
    def asteroid_coords(self, value):
        # Stored value is never read back (the getter recomputes); the setter
        # is kept so assignment remains valid.
        self._asteroid_coords = value

    def __str__(self):
        return "\n".join("".join(row) for row in self.asteroid_map)

    def find_asteroids(self) -> List[list]:
        """Return the sorted [row, col] coordinates of every asteroid."""
        found = []
        for row in range(self.col_range):
            for col in range(self.row_range):
                if self.asteroid_map[row][col] == "#":
                    found.append([row, col])
        found.sort()
        return found

    @staticmethod
    def get_point_angle(origin: list, point: list) -> float:
        """Return the laser sweep angle from `origin` to `point`.

        Angles fall in (0, 2*pi]; a point directly above the origin maps to
        2*pi, so it sorts first when targets are ordered descending, and the
        sweep then proceeds clockwise.
        """
        delta_col = origin[1] - point[1]
        delta_row = origin[0] - point[0]
        angle = atan2(delta_col, delta_row)
        if angle > 0:
            return angle
        return angle + 2 * pi

    def get_blocked_points(self, origin: list, point: list) -> List[list]:
        """Return the map coordinates hidden behind `point` as seen from `origin`.

        Walks outward from `point` in unit steps of the reduced direction
        vector until it leaves the map. An asteroid counts as blocking itself
        (it cannot detect itself), so `origin == point` yields `[origin]`.

        Examples (5x5 map):
            get_blocked_points(origin=[2, 4], point=[2, 3]) -> [[2, 2], [2, 1], [2, 0]]
            get_blocked_points(origin=[4, 3], point=[2, 2]) -> [[0, 1]]
        """
        if origin == point:
            return [origin]

        step_row = point[0] - origin[0]
        step_col = point[1] - origin[1]
        # Reduce to the smallest integer step along the same line of sight.
        divisor = gcd(step_row, step_col)
        step_row //= divisor
        step_col //= divisor

        blocked_points = []
        next_row = point[0] + step_row
        next_col = point[1] + step_col
        # The map ends at [0, 0] on the low side; past the high side, indexing
        # raises IndexError.
        while next_row >= 0 and next_col >= 0:
            try:
                self.asteroid_map[next_row][next_col]
            except IndexError:
                break
            blocked_points.append([next_row, next_col])
            next_row += step_row
            next_col += step_col
        return blocked_points

    def get_detected_asteroids(self, origin: list) -> List[list]:
        """Return every asteroid with an unobstructed line of sight from `origin`."""
        coords = self.asteroid_coords  # snapshot; the map does not change here
        blocked = []
        for target in coords:
            for candidate in self.get_blocked_points(origin, target):
                if candidate in coords and candidate not in blocked:
                    blocked.append(candidate)
        return [asteroid for asteroid in coords if asteroid not in blocked]

    def determine_monitoring_station_location(self) -> List[list]:
        """Return the asteroid that detects the most other asteroids.

        Ties keep the earliest candidate in sorted coordinate order.
        """
        best_asteroid = []
        best_count = 0
        for candidate in self.asteroid_coords:
            detected = len(self.get_detected_asteroids(candidate))
            if detected > best_count:
                best_count = detected
                best_asteroid = candidate
        return best_asteroid

    def rotate_laser(self, station_location: List[list]):
        """Perform one full sweep, vaporizing every currently detected asteroid."""
        targets = [
            [self.get_point_angle(station_location, asteroid), asteroid]
            for asteroid in self.get_detected_asteroids(station_location)
        ]
        # Descending angle: 2*pi (straight up) is hit first, then clockwise.
        targets.sort(reverse=True)
        for _, asteroid in targets:
            row, col = asteroid
            self.asteroid_map[row][col] = "."
            self.vaporized_asteroids.append(asteroid)

    def vaporize_asteroids(self, station_location: List[list]):
        """Sweep repeatedly until only the station's own asteroid remains."""
        while len(self.asteroid_coords) > 1:
            self.rotate_laser(station_location)
asteroid_map = """.#..#
.....
#####
....#
...##"""
# Sanity checks against the first worked example (5x5 map).
asteroid_belt = AsteroidBelt(asteroid_map)
# NOTE(review): the disabled assert below passes (the 5x5 map holds 10
# asteroids) — consider re-enabling it.
#assert len(asteroid_belt.asteroid_coords) == 10
assert asteroid_belt.get_blocked_points([4, 3], [2, 2]) == [[0, 1]]
assert asteroid_belt.get_blocked_points([2, 4], [2, 3]) == [[2, 2], [2, 1], [2, 0]]
# An asteroid blocks (and therefore never detects) itself.
assert asteroid_belt.get_blocked_points([0, 0], [0, 0]) == [[0, 0]]
assert asteroid_belt.get_detected_asteroids(origin=[2, 4]) == [[0, 1], [0, 4], [2, 3], [3, 4], [4, 3]]
best_location = asteroid_belt.determine_monitoring_station_location()
assert best_location == [4, 3]
assert len(asteroid_belt.get_detected_asteroids(best_location)) == 8
asteroid_map = """......#.#.
#..#.#....
..#######.
.#.#.###..
.#..#.....
..#....#.#
#..#....#.
.##.#..###
##...#..#.
.#....####"""
asteroid_belt = AsteroidBelt(asteroid_map)
best_location = asteroid_belt.determine_monitoring_station_location()
assert best_location == [8, 5]
assert len(asteroid_belt.get_detected_asteroids(best_location)) == 33
asteroid_map = """#.#...#.#.
.###....#.
.#....#...
##.#.#.#.#
....#.#.#.
.##..###.#
..#...##..
..##....##
......#...
.####.###."""
asteroid_belt = AsteroidBelt(asteroid_map)
best_location = asteroid_belt.determine_monitoring_station_location()
assert best_location == [2, 1]
assert len(asteroid_belt.get_detected_asteroids(best_location)) == 35
asteroid_map = """.#..#..###
####.###.#
....###.#.
..###.##.#
##.##.#.#.
....###..#
..#.#..#.#
#..#.#.###
.##...##.#
.....#.#.."""
asteroid_belt = AsteroidBelt(asteroid_map)
best_location = asteroid_belt.determine_monitoring_station_location()
assert best_location == [3, 6]
assert len(asteroid_belt.get_detected_asteroids(best_location)) == 41
asteroid_map = """.#..##.###...#######
##.############..##.
.#.######.########.#
.###.#######.####.#.
#####.##.#.##.###.##
..#####..#.#########
####################
#.####....###.#.#.##
##.#################
#####.##.###..####..
..######..##.#######
####.##.####...##..#
.#####..#.######.###
##...#.##########...
#.##########.#######
.####.#.###.###.#.##
....##.##.###..#####
.#.#.###########.###
#.#.#.#####.####.###
###.##.####.##.#..##"""
asteroid_belt = AsteroidBelt(asteroid_map)
best_location = asteroid_belt.determine_monitoring_station_location()
assert best_location == [13, 11]
assert len(asteroid_belt.get_detected_asteroids(best_location)) == 210
# Puzzle input
puzzle_map = """.###..#......###..#...#
#.#..#.##..###..#...#.#
#.#.#.##.#..##.#.###.##
.#..#...####.#.##..##..
#.###.#.####.##.#######
..#######..##..##.#.###
.##.#...##.##.####..###
....####.####.#########
#.########.#...##.####.
.#.#..#.#.#.#.##.###.##
#..#.#..##...#..#.####.
.###.#.#...###....###..
###..#.###..###.#.###.#
...###.##.#.##.#...#..#
#......#.#.##..#...#.#.
###.##.#..##...#..#.#.#
###..###..##.##..##.###
###.###.####....######.
.###.#####.#.#.#.#####.
##.#.###.###.##.##..##.
##.#..#..#..#.####.#.#.
.#.#.#.##.##########..#
#####.##......#.#.####."""
# Part 1 answer: station location and its detection count for the puzzle input.
asteroid_belt = AsteroidBelt(puzzle_map)
best_location = asteroid_belt.determine_monitoring_station_location()
print(best_location)
print(len(asteroid_belt.get_detected_asteroids(best_location)))
# ## Part 2
#
# Hooray, we have a monitoring station! But now, we need to clear _all_ the surrounding asteroids with the laser that the monitoring station is equipped with.
#
# >The Elves are placing bets on which will be the 200th asteroid to be vaporized. Win the bet by determining which asteroid that will be; what do you get if you multiply its X coordinate by 100 and then add its Y coordinate? (For example, 8,2 becomes 802.)
#
# For me, I'll have to remember to reverse the x and y coordinate
test_map = """.#..##.###...#######
##.############..##.
.#.######.########.#
.###.#######.####.#.
#####.##.#.##.###.##
..#####..#.#########
####################
#.####....###.#.#.##
##.#################
#####.##.###..####..
..######..##.#######
####.##.####...##..#
.#####..#.######.###
##...#.##########...
#.##########.#######
.####.#.###.###.#.##
....##.##.###..#####
.#.#.###########.###
#.#.#.#####.####.###
###.##.####.##.#..##"""
# Validate the vaporization order against the large worked example.
test_belt = AsteroidBelt(test_map)
best_location = test_belt.determine_monitoring_station_location()
test_belt.vaporize_asteroids(best_location)
# Expected order from the puzzle text (coordinates stored as [row, col]).
assert test_belt.vaporized_asteroids[0] == [12, 11]
assert test_belt.vaporized_asteroids[9] == [8, 12]
assert test_belt.vaporized_asteroids[49] == [9, 16]
assert test_belt.vaporized_asteroids[199] == [2, 8]
assert test_belt.vaporized_asteroids[298] == [1, 11]

# Part 2 answer on the puzzle input.
puzzle_belt = AsteroidBelt(puzzle_map)
best_location = puzzle_belt.determine_monitoring_station_location()
print(best_location)
puzzle_belt.vaporize_asteroids(best_location)
# Answer format is x*100 + y where x is the column and y the row; coordinates
# are stored as [row, col], hence the swap below.
row, col = puzzle_belt.vaporized_asteroids[199]
col * 100 + row
|
day-10.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img align="center" style="max-width: 1000px" src="banner.png">
# + [markdown] colab_type="text" id="eGwNwDKEt8lG"
# <img align="right" style="max-width: 200px; height: auto" src="https://raw.githubusercontent.com/HSG-AIML-Teaching/IEMBA2022-Lab/main/lab_03/hsg_logo.png">
#
# ## Lab 03 - Supervised Machine Learning: k Nearest-Neighbors - Solutions
# IEMBA 8/9 - "Coding and Artificial Intelligence", University of St. Gallen
# + [markdown] colab_type="text" id="nYpS4wEPt8lI"
# In the last lab, we saw an application of **supervised machine learning** by using the **k Nearest-Neighbor (k NN) classifier** to classify features derived from delicious real-world **Wine samples**. You learned how to train a model and to evaluate and interpret its results. In this lab, we aim to leverage that knowledge by applying it to a set of related self-coding assignments. But before we do so let's start with a motivational video by OpenAI:
# -
# OpenAI: "Solving Rubik's Cube with a Robot Hand"
from IPython.display import YouTubeVideo
YouTubeVideo('x4O8pojMF0w', width=1000, height=500)
# + [markdown] colab_type="text" id="2Br5f8mEt8lK"
# As always, pls. don't hesitate to ask all your questions either during the lab, post them in our CANVAS (StudyNet) forum (https://learning.unisg.ch), or send us an email (using the course email).
# + [markdown] colab_type="text" id="D0Jnx-Ljt8lK"
# ## 1. Assignment Objectives:
# + [markdown] colab_type="text" id="ybF-i5mQt8lL"
# Similar to today's lab session, after completing today's self-coding assignments you should be able to:
#
# > 1. Know how to setup a **notebook or "pipeline"** that solves a simple supervised classification task.
# > 2. Recognize the **data elements** needed to train and evaluate a supervised machine learning classifier.
# > 3. Understand how a discriminative **k Nearest-Neighbor (kNN)** classifier can be trained and evaluated.
# > 4. Know how to use Python's sklearn library to **train** and **evaluate** arbitrary classifiers.
# > 5. Understand how to **evaluate** and **interpret** the classification results.
# + [markdown] colab_type="text" id="CZaa0qAnt8lY"
# ## 2. Setup of the Jupyter Notebook Environment
# + [markdown] colab_type="text" id="2yTCqemyt8la"
# Similarly to the previous labs, we need to import a couple of Python libraries that allow for data analysis and data visualization. In this lab will use the `Pandas`, `Numpy`, `Scikit-Learn`, `Matplotlib` and the `Seaborn` library. Let's import the libraries by the execution of the statements below:
# + colab={"base_uri": "https://localhost:8080/", "height": 70} colab_type="code" id="o3ShseCwt8lb" outputId="1254c7ff-5876-4508-8fde-5528e4d704f3"
# import the numpy, scipy and pandas data science library
import pandas as pd
import numpy as np
from scipy.stats import norm
# import sklearn data and data pre-processing libraries
from sklearn import datasets
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
# import k-nearest neighbor classifier library
from sklearn.neighbors import KNeighborsClassifier
# import sklearn classification evaluation library
from sklearn import metrics
from sklearn.metrics import confusion_matrix
# import matplotlib data visualization library
import matplotlib.pyplot as plt
import seaborn as sns
# + [markdown] colab_type="text" id="mFnbcu4yt8le"
# Enable inline Jupyter notebook plotting:
# + colab={} colab_type="code" id="uLbxWoZit8lf"
# %matplotlib inline
# + [markdown] colab_type="text" id="PsFqwDkYt8ln"
# Use the `Seaborn` plotting style in all subsequent visualizations:
# + colab={} colab_type="code" id="dMH7Y9-Ht8lo"
# NOTE(review): the 'seaborn' style alias was deprecated in matplotlib 3.6 and
# removed in 3.8; on current matplotlib use plt.style.use('seaborn-v0_8')
# instead — confirm the pinned matplotlib version.
plt.style.use('seaborn')
# + [markdown] colab_type="text" id="n9HtRmw-t8nJ"
# ## 3. k Nearest-Neighbors (kNN) Classification Assignments
# + [markdown] colab_type="text" id="TfKxtSAMt8qw"
# ### 3.1 Wine Dataset Download
# + [markdown] colab_type="text" id="OljehqMht8qw"
# Let's download the delicious **Wine Dataset** that we will use for the following assignments. It is a classic and straightforward multi-class classification dataset.
# + [markdown] colab_type="text" id="zTyJoRggt8qx"
# <img align="center" style="max-width: 600px; height: auto" src="https://github.com/GitiHubi/courseAIML/blob/master/lab_03/wine_dataset.jpg?raw=1">
#
# (Source: https://www.empirewine.com)
# + [markdown] colab_type="text" id="lYsN2L2Gt8qx"
# The data is the result of a chemical analysis of wines grown in the same region in Italy by three different cultivators (types). The dataset consists in total of **178 wines** as well as their corresponding **13 different measurements** taken for different constituents found in the three types of wine. Please, find below the list of the individual measurements (features):
#
# >- `Alcohol`
# >- `Malic acid`
# >- `Ash`
# >- `Alcalinity of ash`
# >- `Magnesium`
# >- `Total phenols`
# >- `Flavanoids`
# >- `Nonflavanoid phenols`
# >- `Proanthocyanins`
# >- `Color intensity`
# >- `Hue`
# >- `OD280/OD315 of diluted wines`
# >- `CProline`
#
# Further details on the dataset can be obtained from the following publication: *Forina, M. et al., PARVUS - "An Extendible Package for Data Exploration, Classification and Correlation.", Institute of Pharmaceutical and Food Analysis and Technologies, Via Brigata Salerno, 16147 Genoa, Italy.*
#
# Let's load the dataset and conduct a preliminary data assessment:
# + colab={} colab_type="code" id="Cwm84bmft8qy"
wine = datasets.load_wine()
# + [markdown] colab_type="text" id="ty0vOQ3Lt8q3"
# Print and inspect feature names of the dataset:
# + colab={"base_uri": "https://localhost:8080/", "height": 234} colab_type="code" id="B9HA0ItTt8q3" outputId="5e52ea54-57a5-44d8-8ee2-955b6967fa66"
wine.feature_names
# + [markdown] colab_type="text" id="Uxm1svBIt8q6"
# Print and inspect the class names of the dataset:
# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="7cg3VG6mt8q6" outputId="ccf66fdd-58b9-44ec-a963-d5d01a5256c5"
wine.target_names
# + [markdown] colab_type="text" id="Nl3CY4DVt8q8"
# Print and inspect the top 10 feature rows of the dataset:
# + colab={"base_uri": "https://localhost:8080/", "height": 365} colab_type="code" id="tI6YnJmvt8q8" outputId="bd0c259e-e9cb-4e01-87fe-03afd407fd9a"
pd.DataFrame(wine.data, columns=wine.feature_names).head(10)
# + [markdown] colab_type="text" id="_XBV6Zoht8q-"
# Print and inspect the top 10 labels of the dataset:
# + colab={"base_uri": "https://localhost:8080/", "height": 345} colab_type="code" id="coh7WqpKt8q_" outputId="472ed6f3-127f-4388-b910-c1dd853c1c40"
pd.DataFrame(wine.target).head(10)
# + [markdown] colab_type="text" id="HKVBBeXft8rB"
# Determine and print the feature dimensionality of the dataset:
# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="jrjgx9oct8rC" outputId="cd8a0b89-2c70-4487-c642-d3029e4fb706"
wine.data.shape
# + [markdown] colab_type="text" id="-oLhWbAGt8rE"
# Determine and print the label dimensionality of the dataset:
# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="wxKIAouGt8rF" outputId="37285902-42fd-42fa-8b87-142b875f8be3"
wine.target.shape
# + [markdown] colab_type="text" id="dV81H6ret8rJ"
# Plot the data distributions of the distinct features:
# + colab={"base_uri": "https://localhost:8080/", "height": 990} colab_type="code" id="I7unVIEWt8rJ" outputId="f7e68202-43e5-4759-b925-6c27a465b78a"
# init the plot
plt.figure(figsize=(10, 10))
# prepare the dataset to be plotable using seaborn
# convert to Panda's DataFrame
wine_plot = pd.DataFrame(wine.data, columns=wine.feature_names)
# add class labels to the DataFrame
wine_plot['class'] = wine.target
# plot a pairplot of the distinct feature distributions
sns.pairplot(wine_plot, diag_kind='hist', hue='class');
# + [markdown] colab_type="text" id="WbzKED-et8rK"
# ### 3.2 Dataset Pre-Processing
# + [markdown] colab_type="text" id="NgRYuUMKt8rL"
# #### 3.2.1 Feature Re-Scaling
# + [markdown] colab_type="text" id="bo6ERPyUt8rL"
# Let's re-scale the distinct feature values of the **Wine Dataset** using **Min-Max Normalization** using the `MinMaxScaler` class of the `sklearn` library:
# + colab={} colab_type="code" id="ccNkX14Vt8rM"
# init the min-max scaler
scaler = MinMaxScaler(feature_range=(0, 1), copy=True)
# min-max normalize the distinct feature values
wine_data_scaled = scaler.fit_transform(wine.data)
# + [markdown] colab_type="text" id="ZsWFNsVTt8rS"
# Print and inspect the top 10 feature rows of the normalized dataset:
# + colab={"base_uri": "https://localhost:8080/", "height": 365} colab_type="code" id="1cqcjpJZt8rT" outputId="be595c68-b074-41ee-f57a-d1846b3f63d9"
pd.DataFrame(wine_data_scaled, columns=wine.feature_names).head(10)
# + [markdown] colab_type="text" id="tHwTTkWxt8rY"
# Now that all feature values are scaled to a range between $[0,1]$, let's visualize the derived feature value distributions and inspect their distributions:
# + colab={"base_uri": "https://localhost:8080/", "height": 992} colab_type="code" id="UGDK8Me3t8rZ" outputId="3af54c21-3275-41d9-f7b2-5484669fd9aa"
# init the plot
plt.figure(figsize=(10, 10))
# prepare the dataset to be plotable using seaborn
# convert to Panda's DataFrame
wine_plot = pd.DataFrame(wine_data_scaled, columns=wine.feature_names)
# add class labels to the DataFrame
wine_plot['class'] = wine.target
# plot a pairplot of the distinct feature distributions
sns.pairplot(wine_plot, diag_kind='hist', hue='class');
# + [markdown] colab_type="text" id="k7PF3yj1t8ra"
# Excellent, the characteristics of the distinct feature value distributions remained unchanged.
# + [markdown] colab_type="text" id="jYsmr5Cwt8rb"
# #### 3.2.2 Extraction of Training- and Evaluation-Dataset
# + [markdown] colab_type="text" id="COf_ZHbrt8rb"
# We set the fraction of testing records to **30%** of the original dataset:
# + colab={} colab_type="code" id="w_Grln45t8rc"
eval_fraction = 0.3
# -
# Furthermore, let's set a random seed to insure reproducibility of the train-test split in potential future runs of the notebook:
seed = 42
# + [markdown] colab_type="text" id="ZizWtHIct8re"
# Randomly split the **Wine Dataset** into training set and evaluation set using sklearn's `train_test_split` function:
# + colab={} colab_type="code" id="Gj0XNs8Nt8rf"
# 70% training and 30% evaluation
X_train_scaled, X_eval_scaled, y_train_scaled, y_eval_scaled = train_test_split(wine_data_scaled, wine.target, test_size=eval_fraction, random_state=seed)
# + [markdown] colab_type="text" id="9FKsJVtxt8rh"
# Evaluate the training set dimensionality:
# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="-YLKYgVst8ri" outputId="a9578a7f-2b81-4cf2-b98b-1b16d1f49e14"
X_train_scaled.shape, y_train_scaled.shape
# + [markdown] colab_type="text" id="MMLky8CTt8rj"
# Evaluate the evaluation set dimensionality:
# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="nLB4I8_Gt8rj" outputId="5345efe3-e423-423c-f272-3e5d58d9de2a"
X_eval_scaled.shape, y_eval_scaled.shape
# -
# ### 3.2 k Nearest-Neighbor (kNN) Model Training and Evaluation
# <img align="center" style="max-width: 700px; height: auto" src="https://raw.githubusercontent.com/HSG-AIML-Teaching/IEMBA2022-Lab/main/lab_03/hsg_knn.png">
#
# (Courtesy: Intro to AI & ML lecture, Prof. Dr. Borth, University of St. Gallen)
# + [markdown] colab_type="text" id="yZ-KsINqt8qh"
# We recommend you to try the following exercises as part of the self-coding session:
#
# **Exercise 1: Train and evaluate the prediction accuracy of the k=1,...,40 Nearest Neighbor models.**
# -
# > Write a Python loop that trains and evaluates the prediction accuracy of all k-Nearest Neighbor parameterizations ranging from k=1,...,40 using the **Manhattan** instead of the **Euclidean** distance. Collect and print the prediction accuracy of each model respectively and compare the results.
# + colab={} colab_type="code" id="UL_44Y-qt8qi"
# ***************************************************
# INSERT YOUR SOLUTION/CODE HERE
# ***************************************************

# Task 1: define range k=1 through k=40 to be evaluated
# (range's upper bound is exclusive, hence 41)
k_range = range(1, 41)

# Task 2: init evaluation accuracy score array
eval_accuracy_scores_scaled = []

# iterate over the distinct k values
for k in k_range:

    # Task 3: init the k-NN classifier for the current k-value,
    # using the Manhattan distance as required by the exercise
    knn = KNeighborsClassifier(n_neighbors=k, metric='manhattan')

    # Task 4: train the k-NN classifier on the (min-max scaled) training data
    knn.fit(X_train_scaled, y_train_scaled)

    # Task 5: evaluate the k-NN classifier on the evaluation data
    y_eval_pred_scaled = knn.predict(X_eval_scaled)

    # Task 6: collect the classification accuracy of the current k
    # on the evaluation data
    eval_accuracy_scores_scaled.append(metrics.accuracy_score(y_eval_scaled, y_eval_pred_scaled))
# -
# **Exercise 2: Visualize the model prediction accuracy for the distinct values of k=1,...,40.**
#
# > Plot the prediction accuracy collected for each model above. The plot should display the **distinct values of k at the x-axis** and the corresponding **model prediction accuracy on the y-axis**. What kind of behaviour in terms of prediction accuracy can be observed with increasing k?
# +
# ***************************************************
# INSERT YOUR SOLUTION/CODE HERE
# ***************************************************
# ***************************************************
# prepare plot
# ***************************************************
fig = plt.figure()
ax = fig.add_subplot(111)
# ***************************************************
# Task 1: plot the classification accuracy of distinct k's
# ***************************************************
ax.plot(range(1, len(eval_accuracy_scores_scaled) + 1), eval_accuracy_scores_scaled, color='green', marker='o')
# ***************************************************
# Note: the following code lines will plot the confusion matrix (no need to change them)
# ***************************************************
# add grid
ax.grid(linestyle='dotted')
# add axis range and legends
ax.set_xlabel("[$k$-Nearest-Neighbors]", fontsize=10)
ax.set_ylabel("[% classification accuracy]", fontsize=10)
# add plot title
ax.set_title('k-NN Classification Accuracy (scaled features)', fontsize=10);
# -
# **Exercise 3: Train, evaluate and plot the prediction accuracy of the Nearest Neighbor models without feature scaling.**
#
# > Similar to the exercises above, write a Python loop that trains and evaluates the prediction accuracy of all k-Nearest Neighbor parameterizations ranging from k=1,...,40 using the **original (non feature scaled) wine dataset**. Collect and print the prediction accuracy of each model respectively and compare the results (similar to exercise 1). Plot the prediction accuracy collected for each model above. The plot should display the distinct values of k at the x-axis and the corresponding model prediction accuracy on the y-axis (similar to exercise 2). What do you observe when comparing the results of the non re-scaled with the results obtained for the scaled features?
# +
# ***************************************************
# INSERT YOUR SOLUTION/CODE HERE
# ***************************************************
# ***************************************************
# Task 1: set the evaluation fraction to 30%
# ***************************************************
eval_fraction = 0.3
# ***************************************************
# Task 2: set a random seed
# ***************************************************
seed = 42
# ***************************************************
# Task 3: conduct the 70% training and 30% evaluation split using the 'train_test_split' function
# ***************************************************
X_train, X_eval, y_train, y_eval = train_test_split(wine.data, wine.target, test_size=eval_fraction, random_state=seed)
# ***************************************************
# Task 4: define range k=1 through k=40 to be evaluated
# ***************************************************
k_range = range(1, 41)
# ***************************************************
# Task 5: init evaluation accuracy score array
# ***************************************************
eval_accuracy_scores_non_scaled = []
# ***************************************************
# we use a for-loop to iterate over the distinct k values
# ***************************************************
for k in k_range:
# ***************************************************
# Task 6: init the k-NN classifier
# ***************************************************
knn = KNeighborsClassifier(n_neighbors=k, metric='manhattan')
# ***************************************************
# Task 7: train the k-NN classifer on the training data
# ***************************************************
knn.fit(X_train, y_train)
# ***************************************************
# Task 8: evaluate the k-NN classifier on the evaluation data
# ***************************************************
y_eval_pred = knn.predict(X_eval)
# ***************************************************
# Task 9: collect the classification accuracy of the current k on the evaluation data
# ***************************************************
eval_accuracy_scores_non_scaled.append(metrics.accuracy_score(y_eval, y_eval_pred))
# ***************************************************
# prepare plot
# ***************************************************
fig = plt.figure()
ax = fig.add_subplot(111)
# ***************************************************
# Task 10: plot the classification accuracy of distinct k's
# ***************************************************
ax.plot(range(1, len(eval_accuracy_scores_non_scaled)+1), eval_accuracy_scores_non_scaled, color='green', marker='o')
# ***************************************************
# Note: the following code lines will plot the confusion matrix (no need to change them)
# ***************************************************
# add grid
ax.grid(linestyle='dotted')
# add axis range and legends
ax.set_xlabel("[$k$-Nearest-Neighbors]", fontsize=10)
ax.set_ylabel("[% classification accuracy]", fontsize=10)
# add plot title
ax.set_title('k-NN Classification Accuracy (non-scaled features)', fontsize=10);
|
lab_03/lab_03b_solution.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 64-bit ('Python39')
# name: python395jvsc74a57bd00de088ff9727ee96c4d6fff748e8a0449766a03da85633043318bd9293fa6100
# ---
# + [markdown] _uuid="b614ba0f78fa3cb457efd3a72d7c5c6d66ced8c4"
# # Exercise 02 - Functions and Getting Help !
# + [markdown] _uuid="463c02da36c6f974212f336d68fa9fdec63b29f2"
# ## 1. Complete Your Very First Function
#
# Complete the body of the following function according to its docstring.
#
# *HINT*: Python has a builtin function `round`
# + _uuid="eb7401ab1739f5310e7b400a67c55e9025866d5d"
def round_to_two_places(num):
    """Return the given number rounded to two decimal places.
    >>> round_to_two_places(3.14159)
    3.14
    """
    # Exercise stub: intentionally left unimplemented (returns None).
    # A working solution is defined below and shadows this definition.
    # Replace this body with your own code.
    # ("pass" is a keyword that does literally nothing. We used it as a placeholder,
    # so that it will not raise any errors,
    # because after we begin a code block, Python requires at least one line of code)
    pass
# -
def round_to_two_places(num):
    """Round `num` to two decimal places, print it, and return it.

    The exercise docstring asks for the rounded value to be *returned*;
    the original only printed it (returning None). The print is kept so
    existing console output is unchanged.

    >>> round_to_two_places(3.14159)  # prints, then returns 3.14
    """
    num = round(num, 2)
    print('The number after rounded to two decimal places is: ', num)
    return num
round_to_two_places(3.4455)
# + [markdown] _uuid="2c6c12ce2992453c0e7c06d813e3bdf1dbfdad73"
# ## 2. Explore the Built-in Function
# The help for `round` says that `ndigits` (the second argument) may be negative.
# What do you think will happen when it is? Try some examples in the following cell?
#
# Can you think of a case where this would be useful?
# + _uuid="310ae90a6f40836427b7563e595dbb1156283ceb"
print(round(122.3444,-3))
print(round(122.3456,-2))
print(round(122.5454,-1))
print(round(122.13432,0))
#round with ndigits <=0 - the rounding will begin from the decimal point to the left
# + [markdown] _uuid="36e83961f42db06503a116a777185ac25b759788"
# ## 3. More Function
#
# Given the candy-sharing problem: friends Alice, Bob and Carol tried to split candies evenly. For the sake of their friendship, any candies left over would be smashed. For example, if they collectively bring home 91 candies, they will take 30 each and smash 1.
#
# Below is a simple function that will calculate the number of candies to smash for *any* number of total candies.
#
# **Your task**:
# - Modify it so that it optionally takes a second argument representing the number of friends the candies are being split between. If no second argument is provided, it should assume 3 friends, as before.
# - Update the docstring to reflect this new behaviour.
# + _uuid="2bbed479de86e23c7a812c7ae0782735fd764195"
def to_smash(total_candies, n=3):
    """Return the number of leftover candies that must be smashed after
    distributing the given number of candies evenly between `n` friends.

    If `n` is not given, it defaults to 3 friends, as in the original
    problem. (Docstring updated to document the second argument, as the
    exercise requires.)

    >>> to_smash(91)
    1
    >>> to_smash(32, 5)
    2
    """
    return total_candies % n
# -
print('#no. of candies to smash = ', to_smash(31))
print('#no. of candies to smash = ', to_smash(32,5))
# + [markdown] _uuid="ec7d50eeeb167b91ecae5c682d65b9e1fc5fcf13"
# ## 4. Taste some Errors
#
# It may not be fun, but reading and understanding **error messages** will help you improve solving problem skills.
#
# Each code cell below contains some commented-out buggy code. For each cell...
#
# 1. Read the code and predict what you think will happen when it's run.
# 2. Then uncomment the code and run it to see what happens. *(**Tips**: In the kernel editor, you can highlight several lines and press `ctrl`+`/` to toggle commenting.)*
# 3. Fix the code (so that it accomplishes its intended purpose without throwing an exception)
#
# <!-- TODO: should this be autochecked? Delta is probably pretty small. -->
# + _uuid="dd3955780a593bff78face0dc46093f4deb533ab"
round_to_two_places(9.9999)
# + _uuid="2a89511b6b1bd89b05998b705bf3db5088841a43"
x = -10
y = 5
# Which of the two variables above has the smallest absolute value?
smallest_abs = min(abs(x),abs(y))
print(smallest_abs)
# + _uuid="6ae2b82d1003c4bc7fe4d503e7d9ddeca3f95b1e"
def f(x):
    """Return the absolute value of x."""
    return abs(x)
print(f(5))
# + [markdown] _uuid="dc94a365a7d7f7f974348e9e5ec597a2d5036c6b"
# ## 5. More and more Functions
#
# For this question, we'll be using two functions imported from Python's `time` module.
#
# ### Time Function
# The [time](https://docs.python.org/3/library/time.html#time.time) function returns the number of seconds that have passed since the Epoch (aka [Unix time](https://en.wikipedia.org/wiki/Unix_time)).
#
# <!-- We've provided a function called `seconds_since_epoch` which returns the number of seconds that have passed since the Epoch (aka [Unix time](https://en.wikipedia.org/wiki/Unix_time)). -->
#
# Try it out below. Each time you run it, you should get a slightly larger number.
# + _uuid="6c219975b461ccac77cdb0cb8dd3527c0cef52fd"
# Importing the function 'time' from the module of the same name.
# (We'll discuss imports in more depth later)
from time import time
t = time()
print(t, "seconds since the Epoch")
# + [markdown] _uuid="5b75363a5c9d514c41cc10f263083f16b078c81c"
# ### Sleep Function
# We'll also be using a function called [sleep](https://docs.python.org/3/library/time.html#time.sleep), which makes us wait some number of seconds while it does nothing particular. (Sounds useful, right?)
#
# You can see it in action by running the cell below:
# + _uuid="bc124bfa7c9ac5510fae42cb4c241768ab241502"
from time import sleep
duration = 5
print("Getting sleepy. See you in", duration, "seconds")
sleep(duration)
print("I'm back. What did I miss?")
# + [markdown] _uuid="c4e97a4e8267f2c210ada2949b084fa8c9742c9d"
# ### Your Own Function
# With the help of these functions, complete the function **`time_call`** below according to its docstring.
#
# <!-- (The sleep function will be useful for testing here since we have a pretty good idea of what something like `time_call(sleep, 1)` should return.) -->
# + _uuid="100e5013aea45c09e249bf1476d103b1f98daba4"
def time_call(fn, arg):
    """Return the amount of time the given function takes (in seconds) when called with the given argument.
    """
    from time import time
    started = time()
    fn(arg)
    return time() - started
# + [markdown] _uuid="45ba3ad3926ba61d0c9febfc129d8118b9f01518"
# How would you verify that `time_call` is working correctly? Think about it...
# +
#solution? use sleep function?
# + [markdown] _uuid="472d1778e82e6b7300823a7934ecb55a35a1bb42"
# ## 6. 🌶️ Reuse your Function
#
# *Note: this question depends on a working solution to the previous question.*
#
# Complete the function below according to its docstring.
# + _uuid="21abdac6bcd885ef4916cf56a360aee37bce2479"
def slowest_call(fn, arg1, arg2, arg3):
    """Return the amount of time taken by the slowest of the following function
    calls: fn(arg1), fn(arg2), fn(arg3)
    """
    # Bug fix: the slowest call is the one with the *largest* elapsed time,
    # so we need max() here — the original used min() (the fastest call).
    slowest = max(time_call(fn, arg1), time_call(fn, arg2), time_call(fn, arg3))
    return slowest
print(slowest_call(sleep,1,2,3))
# + [markdown] _uuid="d1c0353110c1b9578af9bbc20e4a9e0f6717a34e"
# # Keep Going
|
python-for-data/Ex02 - Functions and Getting Help.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Question1
# # Unit Testing
# ! pip install pylint
# ! pip install unittest2
# ! pip install unittest
# # PyLint
# %%writefile check_prime_number.py
'''
This is a module to check whether the given number is prime.
'''
def prime(num):
    '''
    Check whether the given number is prime; print and return the verdict.

    Bug fix: the original printed "It is a Prime Number" for *every*
    num > 1, because `break` only left the loop and execution then fell
    through to the "prime" message. A for/else now reports "prime" only
    when no divisor was found. The message is also returned (callers
    previously got None from `return print(...)` and ignored it) so the
    result can be checked programmatically.
    '''
    if num > 1:
        for i in range(2, num):
            if (num % i) == 0:
                # Found a divisor: num is composite.
                break
        else:
            # Loop finished without a break, i.e. no divisor exists.
            msg = "It is a Prime Number"
            print(msg)
            return msg
    # num <= 1, or a divisor was found above.
    msg = "It is not a Prime Number"
    print(msg)
    return msg
n = int(input("enter the number :"))
prime(n)
# ! pylint "check_prime_number.py"
# # Unittest
# +
# %%writefile capitalizeText.py
def capText(string_To_Cap):
    # Capitalise the first letter of every word in the given string.
    titled = string_To_Cap.title()
    return titled
# +
# %%writefile test.py
'''
this is the test file in which we are going to check out the py file with the help of unittest
'''
import unittest
import capitalizeText
class testPrimeNumber(unittest.TestCase):
    # NOTE(review): despite its name, this test case exercises the
    # `capText` title-casing function from the `capitalizeText` module,
    # not a prime-number check — consider renaming (e.g. TestCapText).
    def testOne(self):
        # "<NAME>" is a redaction placeholder — substitute a real name
        # and its Title Case form before running this test.
        result = capitalizeText.capText("<NAME>")
        self.assertEqual(result,"<NAME>")
    def testSecond(self):
        # str.title() capitalises the first letter of every word.
        result = capitalizeText.capText("this is a text string to test the unittest on a file")
        self.assertEqual(result,"This Is A Text String To Test The Unittest On A File")
if __name__ == "__main__":
unittest.main()
# -
# ! python test.py
# # Question 2
# # Make a small generator program for returning armstrong numbers in between 1-1000 in a generator object.
def armstrong(num):
    """Yield every Armstrong (narcissistic) number in the range 1..num-1.

    A number is an Armstrong number when the sum of its digits, each
    raised to the power of the digit count, equals the number itself
    (e.g. 153 = 1**3 + 5**3 + 3**3).

    Bug fixes vs. the original:
    - yield the number itself instead of `print(...)` (which yielded None,
      so the generator produced no usable values);
    - drop the `x > 10` guard, which wrongly skipped 1-9 (every single
      digit number is trivially an Armstrong number);
    - avoid shadowing the builtin `sum`.
    """
    for x in range(1, num):
        order = len(str(x))  # digit count = the power each digit is raised to
        digit_sum = 0
        temp = x
        while temp > 0:
            digit = temp % 10
            digit_sum += digit ** order
            temp //= 10
        if x == digit_sum:
            yield x
lst = list(armstrong(500))
|
Assignment 7 Day-9.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting started with SPARK:
#
# ### (1) - Set up a AWS Account and a t2.micro instance
# ### (2) - Installed Anaconda (Python 3.6)
# ### (3) - Installed Java
# ### (4) - Installed Scala
# ### (5) - Installed SPARK+Hadoop
#
# + active=""
# export SPARK_HOME='/home/ubuntu/spark-2.3.0-bin-hadoop2.7'
# export PATH=$SPARK_HOME:$PATH
# export PYTHONPATH=$SPARK_HOME?python:$PYTHONPATH
# -
from pyspark import SparkContext
sc = SparkContext()
# #It works!
# +
# %%writefile example.txt
first line
second line
third line
fourth line
# -
# ### RDD using textFile method
# - sc is the context
# - RDD Actions ... return values
# - RDD transformations ... return ... RDD with subset of items
#use spark context to make RDD from txt or other files.
textFile = sc.textFile('example.txt')
#simplest action is count()
textFile.count()
# grab the first line as an ACTION
textFile.first()
# - Filter Method transformation (similar to python filter)
# - only contains lines containing the ____
secfind = textFile.filter(lambda line: 'second' in line)
# That was superfast to run b/c transformations are lazily evaluated.
# - Think of transformation like a recipe
# Recipe: no output until the run action is called.
secfind
# Do it...
secfind.collect()
secfind.count()
# - You can create a complicated recipe without having to run the many commands
# # RDD TRANSFORMATIONS AND ACTIONS
# ### Important Items
#
# - **RDD** - Resilient Distributed Dataset
# - **Transformation** - Spark operation producing RDD
# - will not give an object until an action is called
# - **Action** - Spark operation producting a local object
# -
# - **Spark Job** - Sequence of transformations on data w/ a final action
# ### Creating an RDD
# - sc.parallelize(array) - create RDD of elements of array or list
# - sc.textFile(path/to/file) - create RDD of lines from file
# ### RDD Transformation
# - Create a set of instructions we want to perform o the RDD
# - BEFORE we call an ACTION and actually execute them
# - the 'recipe' pieces
# + active=""
# filter(lambda x: x %2 == 0) - discard non-even elements
# map(lambda x: x * 2) - Multiply each RDD element by 2
# map(lambda x: x.split()) - split string into words
# flatMap(lambda x: x.split()) - split each string into words and flatten the resulting sequences into one
# sample(withReplacement=True,0.25) - create a sample of 25% of elements with replacement
# union(rdd) - append rdd to existing RDD
# distinct() - remove duplicates in RDD
# sortBy(lambda x: x, ascending=False) sort elements in descending order
# -
# ### RDD Actions
# - execute your recipe
# + active=""
# collect() convert RDD to in-memory list
# take(3) first 3 elements of RDD
# top(3) top 3 elements of RDD (like after a sort)
# takeSample(withReplacement=True,3)
# sum()
# mean()
# stdev()
# -
# # Transformation and Actions Practice
# %%writefile example2.txt
first
second line
the third line
then a fourth line
# +
#from pyspark import SparkContext
#sc = SparkContext()
# -
sc.textFile('example2.txt')
text_rdd = sc.textFile('example2.txt')
# a transformation that splits every line into a list of words
words = text_rdd.map(lambda line: line.split())
# words won't do anything until we take an ACTION
words
words.collect()
#This is what the non-mapped, original RDD looks like:
text_rdd.collect()
# ### map vs flatMap
# +
# Can do transformation and action into a single line
# no memory penalty for splitting them up b/c Lazy
text_rdd.flatMap(lambda line: line.split()).collect()
#A list of all words in the text file
# -
# ### Example from video
# %%writefile services.txt
#EventId Timestamp Customer State ServiceID Amount
201 10/13/2017 100 NY 131 100.00
204 10/18/2017 700 TX 129 450.00
202 10/15/2017 203 CA 121 200.00
206 10/19/2017 202 CA 131 500.00
203 10/17/2017 101 NY 173 750.00
205 10/19/2017 202 TX 121 200.00
services = sc.textFile('services.txt')
# may need to clean or manipulate...
services.take(2)
services.map(lambda line: line.split())
# +
#transform string into a list
services.map(lambda line: line.split()).take(3)
# -
# remove the hashtag before EventID
services.map(lambda line: line[1:] if line[0]=='#' else line).collect()
clean = services.map(lambda line: line[1:] if line[0]=='#' else line)
clean = clean.map(lambda line: line.split())
clean.collect()
# +
# Practice grabbing fields...
# Total sales by state
# list of tuple pairs
# Needed for the key to work
clean.map(lambda lst: (lst[3],lst[-1])).collect()
# -
pairs = clean.map(lambda lst: (lst[3],lst[-1]))
# +
# reduceByKey assumes data comes in key: item tuples
# It automatically assumes 1st column is key
rekey = pairs.reduceByKey(lambda amt1,amt2 : amt1 + amt2)
# -
rekey.collect()
# +
# Notes... there are still single quotes, so the numbers are concatenated strings
# Need to fix...
rekey = pairs.reduceByKey(lambda amt1,amt2 : float(amt1) + float(amt2))
# -
rekey.collect()
# ### A lot of SPARK is cleaning up data to get it into the form you can use it
clean.collect()
# +
# Grab (State, Amount)
step1 = clean.map(lambda lst: (lst[3],lst[-1]))
# Reduce by key
step2 = step1.reduceByKey(lambda amt1,amt2 : float(amt1) + float(amt2))
# Get rid of State, Amount titles
step3 = step2.filter(lambda x: not x[0]=='State')
# Sort results by amount
# sort by 2nd item in tuple, descending
step4 = step3.sortBy(lambda stAmt : stAmt[1],ascending=False)
step4.collect()
# -
# ### General
# ### Use tuple unpacking for readability
# - Replace indexing with tuple unpacking
#
x = ['ID','State','Amount']
def func1(lst):
    # Grab the amount, which sits in the record's final slot.
    last_item = lst[-1]
    return last_item
# +
# If you come back in the future, a lst[3] isn't readable
# so... ... ...
def func2(id_st_amt):
    # Tuple-unpack the record so each field gets a readable name,
    # then hand back just the amount field.
    record_id, state, amount = id_st_amt
    return amount
# -
func1(x)
func2(x)
# #### Function 2 is much much more readable later on.
|
Udemy-Python-DS-and-ML-Bootcamp/SPARK Intro.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.1.0
# language: julia
# name: julia-1.1
# ---
# +
using Revise
using Bilevel
using RigidBodyDynamics
using MeshCatMechanisms
using MeshCat
using Plots
using LinearAlgebra
using StaticArrays
using Interpolations
# +
urdf = joinpath("..", "..", "urdf", "little_dog", "little_dog2d.urdf")
mechanism = parse_urdf(Float64, urdf)
floor = findbody(mechanism, "floor")
point = Point3D(default_frame(floor), SVector([0.,0.,0.]...))
normal = FreeVector3D(default_frame(floor), SVector([0.,0.,1.]...))
floor_obs = Obstacle(floor, point, normal, :xyz, 80.)
obstacles = [floor_obs]
env = Environment(mechanism, urdf, obstacles);
# -
mvis = MechanismVisualizer(mechanism, URDFVisuals(urdf));
open(mvis)
# IJuliaCell(mvis)
# +
N = 8
Δt = 0.025
q0 = [0., -.275, 0., 0., 0., 0., 0., pi/4, pi/4, -pi/4, -pi/4, -pi/2, -pi/2, pi/2, pi/2]
v0 = zeros(num_velocities(mechanism))
sim_data = get_sim_data_indirect(mechanism,env,Δt)
null_ctrl! = (u,t,x) -> u[:] .= 0.
x0 = MechanismState(mechanism)
set_configuration!(x0, q0)
set_velocity!(x0, v0)
setdirty!(x0)
traj_sim = Bilevel.simulate(sim_data,null_ctrl!,x0,N)
setanimation!(mvis, traj_sim[6], traj_sim[1])
# +
q_nom = copy(traj_sim[1][end])
q_min = min.(q_nom, [-5., -.5, -.2*pi, 0., 0., 0., 0., -3.5, -3.5, -3.5, -3.5, -3.1, -3.1, -3.1, -3.1])
q_max = max.(q_nom, [5., -0.1, .2*pi, 0., 0., 0., 0., 2.4, 2.4, 2.4, 2.4, 1., 1., 1., 1.])
x_goal = 0.1
N = 10
Δt = 0.05
# Register boundary constraints for the trajectory optimisation:
# start at the nominal pose at rest; end with the base past x_goal,
# the legs back in their initial configuration, and zero velocity.
# Closes over the notebook-level globals q_nom, x_goal, N, mechanism.
function add_prob_constraints!(sim_data)
    vs = sim_data.vs
    # t = 1: full configuration pinned to the nominal pose ...
    add_eq!(sim_data, :cq1, num_positions(mechanism), x -> vs(x, :q1) - q_nom)
    # add_eq!(sim_data, :cq1, num_positions(mechanism), x -> vs(x, :q1)[1] - q_nom[1])
    # ... and zero initial velocity.
    add_eq!(sim_data, :cv1, num_velocities(mechanism), x -> vs(x, :v1))
    # t = N: base x-coordinate must reach at least x_goal (inequality).
    add_ineq!(sim_data, :cqNx, 1, x -> x_goal - vs(x, Symbol("q", N))[1])
    # add_eq!(sim_data, :cqNlegs, num_positions(mechanism)-1, x -> vs(x, Symbol("q", N))[2:end] - q_nom[2:end])
    # t = N: all non-base coordinates equal their t = 1 values (periodic legs).
    add_eq!(sim_data, :cqNlegs, num_positions(mechanism)-1, x -> vs(x, Symbol("q", N))[2:end] - vs(x, :q1)[2:end])
    # t = N: come to rest.
    add_eq!(sim_data, :cvN, num_velocities(mechanism), x -> vs(x, Symbol("v", N)))
    # for n = 1:N
    #     add_eq!(sim_data, Symbol("cq1", n), 2, x -> vs(x, Symbol("q", n))[8:9] + vs(x, Symbol("q", n))[11:-1:10])
    #     add_eq!(sim_data, Symbol("cq2", n), 2, x -> vs(x, Symbol("q", n))[12:13] + vs(x, Symbol("q", n))[15:-1:14])
    # end
end
# Register a quadratic control-effort objective: the sum of uᵀu over
# every control step 1..N-1 (closes over the notebook-level global N).
function add_prob_obj!(sim_data)
    for n = 1:N-1
        add_obj!(sim_data, Symbol("u", n), x -> sim_data.vs(x, Symbol("u", n))' * sim_data.vs(x, Symbol("u", n)))
    end
end
# Build box constraints (x_min, x_max) over the full decision vector.
# ±1e19 plays the role of "unbounded" for the SNOPT solver.
function get_prob_limits(sim_data)
    x_min = -1e19*ones(sim_data.vs.num_vars)
    x_max = 1e19*ones(sim_data.vs.num_vars)
    # joint limits
    add_box_con_snopt!(x_min, x_max, sim_data, :q, q_min, q_max, 1:N)
    # pin every time step h to exactly Δt (equal lower and upper bound)
    add_box_con_snopt!(x_min, x_max, sim_data, :h, [1. * Δt], [1. * Δt], 1:N-1)
    # torques limits
    u_limit = 1.
    add_box_con_snopt!(x_min, x_max, sim_data, :u, -u_limit * ones(num_velocities(mechanism)), u_limit * ones(num_velocities(mechanism)), 1:N-1)
    x_min, x_max
end
# Build the solver's initial guess: linearly interpolate the base
# x-position from q_nom[1] toward x_goal over the N knot points while
# keeping every other coordinate at its nominal value.
function get_prob_init(sim_data)
    x0 = zeros(sim_data.vs.num_vars)
    for n = 1:N
        qn = copy(q_nom)
        qn[1] = (n/N)*(x_goal - q_nom[1]) + q_nom[1]
        x0[sim_data.vs(Symbol("q", n))] .= qn
    end
    x0
end
# Convenience wrapper: register the constraints and objective on
# sim_data, then return (initial guess, lower bounds, upper bounds)
# ready to hand to the trajectory optimiser.
function setup_prob!(sim_data)
    add_prob_constraints!(sim_data)
    add_prob_obj!(sim_data)
    x_min, x_max = get_prob_limits(sim_data)
    x0 = get_prob_init(sim_data)
    x0, x_min, x_max
end
# -
# ## Indirect Method
sim_data = get_trajopt_data_indirect(mechanism,env,Δt,N,relax_comp=false)
x0, x_min, x_max = setup_prob!(sim_data);
# +
con_tol = 1e-3
obj_tol = 1e-3
max_iter = 10000
traj_indirect = Bilevel.trajopt(sim_data, x0=x0, x_min=x_min, x_max=x_max, verbose=1, opt_tol=obj_tol, major_feas=con_tol, minor_feas=con_tol, max_iter=max_iter);
x0 = traj_indirect[9]
setanimation!(mvis, traj_indirect[7], traj_indirect[1])
# -
setanimation!(mvis, traj_indirect[7], traj_indirect[1])
t_step = traj_indirect[7]
q_step = traj_indirect[1]
u_step = traj_indirect[2][2:end];
# ## Semidirect Method
sim_data = get_trajopt_data_semidirect(mechanism,env,Δt,N,relax_comp=false)
x0, x_min, x_max = setup_prob!(sim_data);
# +
con_tol = 1e-3
obj_tol = 1e-3
max_iter = 10000
steps = 20
for n = 1:N-1
sim_data.fric_options[n]["num_sosteps"] = steps
end
traj_semidirect = Bilevel.trajopt(sim_data, x0=x0, x_min=x_min, x_max=x_max, verbose=1, opt_tol=obj_tol, major_feas=con_tol, minor_feas=con_tol, max_iter=max_iter)
x0 = traj_semidirect[9]
setanimation!(mvis, traj_semidirect[7], traj_semidirect[1])
# -
setanimation!(mvis, traj_semidirect[7], traj_semidirect[1])
t_step = traj_semidirect[7]
q_step = traj_semidirect[1]
u_step = traj_semidirect[2][2:end];
# ## Direct Method
sim_data = get_trajopt_data_direct(mechanism,env,Δt,N,relax_comp=false)
x0, x_min, x_max = setup_prob!(sim_data);
# +
con_tol = 1e-3
obj_tol = 1e-3
max_iter = 1000
steps = 20
for n = 1:N-1
sim_data.fric_options[n]["num_sosteps"] = steps
end
traj_direct = Bilevel.trajopt(sim_data, x0=x0, x_min=x_min, x_max=x_max, verbose=1, opt_tol=obj_tol, major_feas=con_tol, minor_feas=con_tol, max_iter=max_iter)
x0 = traj_direct[9]
setanimation!(mvis, traj_direct[7], traj_direct[1])
# -
setanimation!(mvis, traj_direct[7], traj_direct[1])
t_step = traj_direct[7]
q_step = traj_direct[1]
u_step = traj_direct[2][2:end];
# # steps
# +
reps = 5
ttraj = t_step
qtraj = q_step
utraj = u_step
for i = 1:(reps-1)
ttraj = vcat(ttraj, t_step .+ ttraj[end])
qtraj = vcat(qtraj, map(q -> q + vcat(qtraj[end][1],zeros(length(q)-1)), q_step))
utraj = vcat(utraj, u_step)
end
setanimation!(mvis, ttraj, qtraj)
# -
# ## Simulation
# +
ctrl_itp = Vector()
utraj_mat = hcat(utraj...)
for i = 1:num_velocities(mechanism)
# TODO support variable step size
push!(ctrl_itp, Interpolations.CubicSplineInterpolation(0.:Δt:Δt*(N-1)*reps,utraj_mat[i,:],extrapolation_bc=0.))
end
# Feed-forward controller: fill `u` by evaluating each joint's cubic
# spline interpolant at time t (ctrl_itp is the notebook-level vector
# of per-joint interpolations built from the optimised torques).
# NOTE(review): the /100 looks like a manual rescaling of the torque
# magnitude for the finer simulation timestep — confirm intent.
function feedforward_ctrl!(u,t,x)
    u[:] = map(ctrl_itp) do itp
        itp(t) / 100.
    end
end
# +
N = 100
Δt = 0.005
q0 = qtraj[1]
v0 = zeros(num_velocities(mechanism))
sim_data = get_sim_data_indirect(mechanism,env,Δt)
state0 = MechanismState(mechanism)
set_configuration!(state0, q0)
set_velocity!(state0, v0)
setdirty!(state0)
traj_sim_ctrl = Bilevel.simulate(sim_data,feedforward_ctrl!,state0,N);
setanimation!(mvis, traj_sim_ctrl[6], traj_sim_ctrl[1])
# -
maximum(abs.(vcat(traj_semidirect[3]...)))
|
notebooks/ISRR/.ipynb_checkpoints/Little Dog-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# If on google colab, you will need to change the hardware accelerator to GPU under Runtime > Change Runtime Type.
#
# Then clone and install the cbmos github repository via
#
# `!git clone https://github.com/somathias/cbmos.git`
#
# `%cd cbmos`
#
# `!pip install -e .`
#
# +
import numpy as np
import matplotlib.pyplot as plt
import cbmos
import cbmos.force_functions as ff
import cbmos.solvers.euler_forward as ef
import cbmos.cell as cl
# -
# assuming GPU backend is available and accessible through CuPy
import cupy as cp
# Define two models both using the forward Euler method as a solver - one using NumPy as a hpc_backend and the other using CuPy
dim = 2 # let's have a two-dimensional model
cbmmodel_numpy = cbmos.CBMModel(ff.Linear(), ef.solve_ivp, dim, hpc_backend=np)
cbmmodel_cupy = cbmos.CBMModel(ff.Linear(), ef.solve_ivp, dim, hpc_backend=cp)
# Let's generate a large set of random numbers...
seed = 845
np.random.seed(seed) # set seed for reproducibility if necessary
N = 10000
y_np = np.random.uniform(-10, 10, size=(N, dim))
# ...which we can use as initial coordinates of our (non-proliferating) cell population.
cell_list = []
N = 1000
for i in range(N):
cell_list.append(cl.Cell(i, y_np[i, :], proliferating=False))
# Now time the models on the mechanical relaxation of our random cell population.
t_data = np.linspace(0, 1, 101)
# %time history = cbmmodel_numpy.simulate(cell_list, t_data, {}, {"dt": 0.01}, seed=seed)
# %time history = cbmmodel_cupy.simulate(cell_list, t_data, {}, {"dt": 0.01}, seed=seed)
|
examples/benchmark.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Logistic Regression Project
#
# The goal of this project is to create a Classification Model which predicts whether or not a person has presence of heart disease based on physical features of that person such as age,sex, cholesterol, etc...
#
# ## Imports
#
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# ## Data
#
# This database contains 14 physical attributes based on physical testing of a patient. Blood samples are taken and the patient also conducts a brief exercise test. The "goal" field refers to the presence of heart disease in the patient. It is integer (0 for no presence, 1 for presence). In general, to confirm 100% if a patient has heart disease can be quite an invasive process, so if we can create a model that accurately predicts the likelihood of heart disease, we can help avoid expensive and invasive procedures.
#
# Content
#
# Attribute Information:
#
# * age
# * sex
# * chest pain type (4 values)
# * resting blood pressure
# * serum cholestoral in mg/dl
# * fasting blood sugar > 120 mg/dl
# * resting electrocardiographic results (values 0,1,2)
# * maximum heart rate achieved
# * exercise induced angina
# * oldpeak = ST depression induced by exercise relative to rest
# * the slope of the peak exercise ST segment
# * number of major vessels (0-3) colored by flourosopy
# * thal: 3 = normal; 6 = fixed defect; 7 = reversable defect
# * target:0 for no presence of heart disease, 1 for presence of heart disease
#
# Original Source: https://archive.ics.uci.edu/ml/datasets/Heart+Disease
#
# Creators:
#
# Hungarian Institute of Cardiology. Budapest: <NAME>, M.D.
# University Hospital, Zurich, Switzerland: <NAME>, M.D.
# University Hospital, Basel, Switzerland: <NAME>, M.D.
# V.A. Medical Center, Long Beach and Cleveland Clinic Foundation: <NAME>, M.D., Ph.D.
df = pd.read_csv('C:\\Users\\fnorouzi\\Documents\\data science course\DATA\heart.csv')
# # Exploratory Data
df.info()
df.describe().transpose()
df.head(8)
df['target'].unique()
df.columns
# ### Visualization
sns.countplot(x='target',data=df)
# Running a pairplot to display the relationships between some of the columns
df.columns
col_list = ['age','trestbps', 'chol','thalach','target']
sns.pairplot(df[col_list],hue='target')
# Correlation between all the columns
plt.figure(figsize=(12,8))
sns.heatmap(df.corr(),cmap='magma',annot=True)
#
# # ML section
#
# ## Train | Test Split and Scaling
#
#
X = df.drop('target',axis=1)
y = df['target']
# test size = 10%
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=101)
# Scaler and normalizing the X train and test set feature data
scaler = StandardScaler()
scaled_X_train = scaler.fit_transform(X_train)
scaled_X_test = scaler.transform(X_test)
# ## Logistic Regression Model
#
from sklearn.linear_model import LogisticRegressionCV
log_model = LogisticRegressionCV()
log_model.fit(scaled_X_train,y_train)
log_model.C_
log_model.get_params()
# Model coefficients
log_model.coef_
# Model coefficients plot
coefs = pd.Series(index=X.columns,data=log_model.coef_[0])
coefs = coefs.sort_values()
plt.figure(figsize=(10,6))
sns.barplot(x=coefs.index,y=coefs.values);
# ---------
#
# ## Model Performance Evaluation
from sklearn.metrics import confusion_matrix,classification_report,plot_confusion_matrix
y_pred = log_model.predict(scaled_X_test)
confusion_matrix(y_test,y_pred)
plot_confusion_matrix(log_model,scaled_X_test,y_test)
plt.savefig('confusion_matrix.png')
print(classification_report(y_test,y_pred))
# ### Performance Curves
#
from sklearn.metrics import plot_precision_recall_curve,plot_roc_curve
plot_precision_recall_curve(log_model,scaled_X_test,y_test)
plt.savefig('precision_recall_curve.png')
plot_roc_curve(log_model,scaled_X_test,y_test)
plt.savefig('roc_curve.png')
# Sample patient
patient = [[ 54. , 1. , 0. , 122. , 286. , 0. , 0. , 116. , 1. ,
3.2, 1. , 2. , 2. ]]
X_test.iloc[-1]
y_test.iloc[-1]
log_model.predict(patient)
log_model.predict_proba(patient)
|
Projects/Logistic Regression/Logistic-Regression-Project.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
def creat_list(input_fn=input):
    """Interactively build a list of [semester, course-id] pairs.

    Prompts for one mandatory course per semester and optionally a second
    one, stopping after 10 courses, after the last semester, or when the
    user answers 'q' at the follow-up prompt.

    Parameters
    ----------
    input_fn : callable, optional
        Prompt function used to read user input. Defaults to the built-in
        ``input`` (so existing callers are unchanged); injectable for
        testing.

    Returns
    -------
    list[list[str]]
        ``[semester, course_id]`` pairs in selection order.

    Bug fixes vs. the original:
    - ``while sign != 'y' or sign != 'n'`` was a tautology (always True);
      replaced with an explicit ``while True`` + break on valid answers.
    - the loop could index past the 7-entry semester list while
      ``count < 10`` and raise IndexError; now also bounded by the list.
    - the success message after ``return`` was unreachable; it now runs.
    """
    semesters = ['2020 Spring', '2020 Summer', '2020 Fall', '2021 Spring',
                 '2021 Summer', '2021 Fall', '2022 Spring']
    courses = []
    count = 0
    index = 0
    while count < 10 and index < len(semesters):
        print(f'Select Courses for the {semesters[index]} Semester ({index + 1}): ')
        courses.append([semesters[index], input_fn('- Choose the first courseid (CS XXXX): ')])
        count += 1
        # Keep asking until we get a recognised answer.
        while True:
            sign = input_fn('Do you want to add a new course this semester? (y/n)')
            if sign == 'y':
                courses.append([semesters[index], input_fn('- Choose the second courseid (CS XXXX): ')])
                count += 1
                break
            elif sign == 'n':
                break
            elif sign == 'q':
                print('Interrupted by the user')
                return courses
            else:
                print('keyword error')
        index += 1
    print('Course list successfully generated.')
    return courses
MyCourse = creat_list()
mycourse_df = pd.DataFrame(MyCourse, columns=['Semester', 'CourseId'])
courses_df = pd.read_csv('../CourseListCreater/OMSCSCourseList.csv', usecols=['CourseId','Name','Link'])
mycourse_df = mycourse_df.join(courses_df.set_index('CourseId'), on='CourseId')
mycourse_df.to_csv('MyCourses.csv')
# +
# CS 6515 | CS 6290 | CS 6200 CS 6300 | CS 6250 CS 6291 | CSE 6220 | CS 6035 CS 6265 | CS 6457
# -
mycourse_df
|
MyCourse/MyCourses.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# ## Introduction
#
# This repo explains how to use R to analyze real estate data that I scraped. The dataset has 5064 rows, covering the real estate lists from most cities in the Metro Vancouver area on Jul 20, 2021. However, the website that I used only allows to scrape at most 25 pages for each city using the start URL, so some lists from the cities are missing.
#
# To know how I scraped the data, check my another repo [RealEsatelist_scrape_clean_transform][1].
# If you want to scrape the whole list of each city in Vancouver, check [my article, which uses another website as an example][2]. However, it will not allow you to scrape land size for houses. But, it allows you to scrape list type if you change the URL format and add list type parameters inside.
#
# ### Interested to see the insights found directly? Go check the highlight texts at the bottom of this notebook.
#
# [1]:https://github.com/EvaWang2020/RealEsateDataAnalysis
# [2]:https://evaanalytics.wixsite.com/website/post/use-scrapy-to-real-estate-data
# +
Data=read.csv("C://replace_with_your_path/realestatelist_totallistappend.csv")
Data$Price <- as.numeric(gsub('[$,]', '', Data$Price))
# below is a peak of the data structure
head(Data)
# -
# below is a summary of the data. You can see the min, max, median, quartiles of each column
summary(Data)
# +
# calculate the average price per floor square foot (sf)
Average_sf_cost=Data$Price/Data$Floor_size
# calculate land size
Landsize=(Data$Land_width)*(Data$Land_depth)
# add them as columns to the table
Data$Average_sf_cost <- Average_sf_cost
Data$Landsize <- Landsize
# calculate the average price per floor square foot
# Ignore the rows with empty and zero values in column Floor_size
library (dplyr)
Subset_Data<- filter(Data, Data$Floor_size>0 & !is.na(Data$Floor_size) )
Subset_Avg_sf_price = mean(Subset_Data$Average_sf_cost,na.rm = TRUE)
round(Subset_Avg_sf_price)
# +
# calculate the average floor sf price by list type
# you can add more filters, such as setting up bedroom quantity
library(dplyr)
Subset_Data<- filter(Data, Data$Floor_size>0 & !is.na(Data$Floor_size) )
# Subset_Data<- filter(Subset_Data, Subset_Data$Bedroom_qty==1)
# Subset_Data<- filter(Subset_Data, Subset_Data$Bathroom_qty==1)
# Subset_Data<- filter(Subset_Data, Subset_Data$City=='Vancouver')
# %>% is a pipe operator
ex1 <- Subset_Data %>%
group_by(List_type) %>%
summarize(Avg_price = round(mean(Average_sf_cost,na.rm = TRUE)),
Max_price = round(max(Average_sf_cost,na.rm = TRUE)),
Min_price = round(min(Average_sf_cost,na.rm = TRUE)),
List_count = length(Price))
ex1 %>%
arrange(Avg_price)
# +
# calculate the average sf price by city for apartment (Apt/Condo) lists
# you can add more filters, such as setting up bedroom quantity
library(ggplot2)
Subset_Data<- filter(Data, Data$Floor_size>0 & !is.na(Data$Floor_size))
Subset_Data<- filter(Subset_Data, Subset_Data$List_type=='Apt/Condo')
# Subset_Data<- filter(Subset_Data, Subset_Data$Bedroom_qty==1)
# Subset_Data<- filter(Subset_Data, Subset_Data$Bathroom_qty==1)
ex1<-Subset_Data %>%
group_by(City) %>%
summarize(Avg_price = mean(Average_sf_cost,na.rm = TRUE),List_Count = length(Price) )
options(repr.plot.width=6, repr.plot.height=4)
ggplot(ex1, aes(y = City, x =Avg_price, Color=City,fill=City)) +
geom_bar(stat = "identity", width=0.4, show.legend = FALSE) +
theme_classic() +
#scale_x_reordered() +
labs(
y = "City",
x = "Price (CAD)",
title = paste(
"Average Floor Square Foot Price"
)
)
# +
# Frequency (count) of the average sf price for House listings only.
# More filters can be enabled below, e.g. restricting to specific cities.
Subset_Data<- filter(Data, Data$Floor_size>0 & !is.na(Data$Floor_size) )
Subset_Data<- filter(Subset_Data, Subset_Data$List_type=='House')
# Subset_Data<- filter(Subset_Data, Subset_Data$City=='Vancouver')
data=hist(Subset_Data$Average_sf_cost,plot=FALSE,breaks=12)
ymax=max(data[[2]]) # data[[2]] = $counts; its max sets the Y-axis upper limit
options(repr.plot.width=6, repr.plot.height=4)
# data[[4]] = $mids (bin midpoints); plotted against the counts as a line+points chart
plot(data[[4]],data[[2]],type='o',
ylab="Frequency / list quantity",xlab="Average square foot price (CAD)",main="Frequency of Average Square Foot Price - House" ,frame.plot='TRUE',axes=FALSE,ylim=c(0,ymax+1), col="red") # ylim defines the number range of the Y axis
axis(1,data[[4]])
axis(2)
# +
# Frequency (count) of the average sf price for apartment (Apt/Condo) listings only.
# More filters can be enabled below, e.g. restricting to specific cities.
Subset_Data1<- filter(Data, Data$Floor_size>0 & !is.na(Data$Floor_size) )
Subset_Data1<- filter(Subset_Data1, Subset_Data1$List_type=='Apt/Condo')
# Subset_Data1<- filter(Subset_Data1, Subset_Data1$City=='Vancouver')
data=hist(Subset_Data1$Average_sf_cost,plot=FALSE,breaks=12)
ymax=max(data[[2]]) # data[[2]] = $counts; its max sets the Y-axis upper limit
options(repr.plot.width=6, repr.plot.height=4)
# data[[4]] = $mids (bin midpoints); plotted against the counts as a line+points chart
plot(data[[4]],data[[2]],type='o',
ylab="Frequency / list quantity",xlab="Average square foot price (CAD)",main="Frequency of Average Square Foot Price - Appt" ,frame.plot='TRUE',axes=FALSE,ylim=c(0,ymax+1),col="blue") # ylim defines the number range of the Y axis
axis(1,data[[4]])
axis(2)
# +
# Zoom in on a smaller price range for specific downtown-area apartment listings.
# More filters can be enabled, e.g. other city regions.
Subset_Data<- filter(Data, Data$Floor_size>0 & !is.na(Data$Floor_size) )
Subset_Data<- filter(Subset_Data, Subset_Data$List_type=='Apt/Condo')
# Subset_Data<- filter(Subset_Data, Subset_Data$City=='Vancouver' )
Subset_Data<- filter(Subset_Data, Subset_Data$City_region=='Downtown West' | Subset_Data$City_region=='Coal Harbour' | Subset_Data$City_region=='Cambie' | Subset_Data$City_region=='Yaletown')
# xlim narrows the view to 500-1800 CAD/sf; breaks=30 gives finer bins than above.
hist(Subset_Data$Average_sf_cost,
main="Histogram for Appt List",
xlab="Average square foot price (CAD) ",
border="blue",
col="yellow",
xlim=c(500,1800),
las=1,
breaks=30 )
# -
# Correlation matrix of price/size variables for House listings, per city.
# Some house listings lack a land size (missing lot dimensions), so after the
# NA filters the row count is smaller than the raw list count per city.
Array=unique(Data$City, incomparables = FALSE)
n=length(Array)
for(i in 1:n ){
  A = Array[i]
  # Keep rows with a usable floor size and complete values for every
  # variable entering the correlation. (A duplicated Landsize NA-filter
  # in the original was removed — it repeated the first filter verbatim.)
  Subset_Data<- filter(Data, Data$Floor_size>0 & !is.na(Data$Floor_size) )
  Subset_Data<- filter(Subset_Data, !is.na(Subset_Data$Landsize))
  Subset_Data<- filter(Subset_Data, !is.na(Subset_Data$Bedroom_qty))
  Subset_Data<- filter(Subset_Data, !is.na(Subset_Data$Bathroom_qty))
  Subset_Data<- filter(Subset_Data, Subset_Data$List_type=='House')
  Subset_Data<- filter(Subset_Data, Subset_Data$City==A)
  # x: avg sf price, y: bedrooms, z: bathrooms, l: floor size, m: land size
  x=Subset_Data$Average_sf_cost
  y=Subset_Data$Bedroom_qty
  z=Subset_Data$Bathroom_qty
  l=Subset_Data$Floor_size
  m=Subset_Data$Landsize
  X=cbind(x,y,z,l,m)
  print(paste('Correlation Matrix of house; City:', A))
  print(round(cor(X),5))
  print('--------------------------------------------------------')
}
# ### How to understand the results? Using the correlation matrix of Richmond as an example:
# >Between x and x, the correlation coefficient is 1.0000. The number suggests x and x are strongly positively correlated. Of course, they are !
#
# >Between x and m, the correlation coefficient is 0.00092. The number suggests x and m are only slightly positively correlated. __<span style="color:red">This means land size
# contributes little to the average square foot price of houses in Richmond</span>__ (_please notice: here the average square foot price is an aggregation between list
# price and floor size, not between list price and land size_).
#
#
# - __x is a centralized average square foot price vector__
# - __y is a centralized average bedroom quantity vector__
# - __z is a centralized average bathroom quantity vector__
# - __l is a centralized average floor size vector__
# - __m is a centralized average land size vector__
# [1] "Correlation Matrix of house; City: Richmond"
# x y z l m
# x 1.00000 -0.19968 -0.16357 -0.07772 0.00092
# y -0.19968 1.00000 0.59397 0.52311 0.02755
# z -0.16357 0.59397 1.00000 0.71028 0.02315
# l -0.07772 0.52311 0.71028 1.00000 0.28391
# m 0.00092 0.02755 0.02315 0.28391 1.00000
# ### Using the correlation matrix of Vancouver as another example:
# >Between x and m, the correlation coefficient is 0.39713, which is much bigger when compared with Richmond. This result means x and m are
# positively correlated. It suggests that the land size contributes significantly to the average square foot price of the
# houses in Vancouver. In other words, __<span style="color:red">the land itself is much more valuable in Vancouver than in Richmond and contributes significantly to the average square foot price of Vancouver houses</span>__.
# [1] "Correlation Matrix of house; City: Vancouver"
# x y z l m
# x 1.00000 -0.20501 0.00929 0.12361 0.39713
# y -0.20501 1.00000 0.68523 0.51877 0.12949
# z 0.00929 0.68523 1.00000 0.73558 0.39265
# l 0.12361 0.51877 0.73558 1.00000 0.67532
# m 0.39713 0.12949 0.39265 0.67532 1.00000
# Correlation matrix of price/size variables for apartment listings, per city.
# (Land size is omitted — apartments do not carry lot dimensions.)
Array=unique(Data$City, incomparables = FALSE)
n=length(Array)
for(i in 1:n ){
A = Array[i]
# Keep rows with a usable floor size and complete bedroom/bathroom counts.
Subset_Data<- filter(Data, Data$Floor_size>0 & !is.na(Data$Floor_size) )
Subset_Data<- filter(Subset_Data, !is.na(Subset_Data$Bedroom_qty))
Subset_Data<- filter(Subset_Data, !is.na(Subset_Data$Bathroom_qty))
Subset_Data<- filter(Subset_Data, Subset_Data$List_type=='Apt/Condo')
Subset_Data<- filter(Subset_Data, Subset_Data$City==A)
# x: avg sf price, y: bedrooms, z: bathrooms, l: floor size
x=Subset_Data$Average_sf_cost
y=Subset_Data$Bedroom_qty
z=Subset_Data$Bathroom_qty
l=Subset_Data$Floor_size
X=cbind(x,y,z,l)
print(paste('Correlation Matrix of apartment; City:', A))
print(round(cor(X),5))
print('--------------------------------------------------------')
}
# ### Here we use Langley as an example:
# > Between x and l, the correlation coefficient is -0.26667. The number means x and l are negatively correlated. This suggests that __<span style="color:red">in Langley, the bigger the apartment
# is, the cheaper each square foot is</span>__ .
#
# Pasted console output — commented out so this cell parses as valid R:
# [1] "Correlation Matrix of apartment; City: Langley"
#          x        y        z        l
# x  1.00000 -0.16166 -0.07615 -0.26667
# y -0.16166  1.00000  0.58518  0.68132
# z -0.07615  0.58518  1.00000  0.63153
# l -0.26667  0.68132  0.63153  1.00000
# ### Let compare with Vancouver and Surrey
# > For Vancouver, between x and l, the correlation coefficient is 0.40166. This result means x and l are
# positively correlated. __<span style="color:red">It suggests that the bigger the apartment is in Vancouver, the more expensive the average square foot is. This is the opposite situation of Langley</span>__.
#
# > For Surrey, between x and l, the correlation coefficient is -0.15118. This means x and l are
# slightly negatively correlated. __<span style="color:red">It suggests in Surrey, the smaller the apartment is, the higher the average square foot price is. This shows similarity with Langley</span>__.
# +
# Pasted console output — commented out so this cell parses as valid R:
# [1] "Correlation Matrix of apartment; City: Vancouver"
#         x       y       z       l
# x 1.00000 0.30241 0.38590 0.40166
# y 0.30241 1.00000 0.77728 0.68787
# z 0.38590 0.77728 1.00000 0.79829
# l 0.40166 0.68787 0.79829 1.00000
# [1] "Correlation Matrix of apartment; City: Surrey"
#          x        y        z        l
# x  1.00000 -0.14714  0.05086 -0.15118
# y -0.14714  1.00000  0.69331  0.64490
# z  0.05086  0.69331  1.00000  0.73708
# l -0.15118  0.64490  0.73708  1.00000
# -
# ### Why Bowen Island has so many "NA"?
# > Because Bowen Island does not have apartment lists, only house lists
#[1] "Correlation Matrix of apartment; City: Port Moody"
#            x        y       z       l
#x NA NA NA NA
#y NA NA NA NA
#z NA NA NA NA
#l NA NA NA NA
# ### Limitation of this analysis:
# - Error of Average. Average can be deceiving. For example, it was found that in the Vancouver apartment market, the bigger the unit is, the more expensive each square foot might be. This is true for the overall average situation. However, when you break down the numbers, you might find some districts in Vancouver show the same trends as Langley and Surrey, which is the smaller the unit is, the higher each square foot is. [Check my short article related to risk of aggregation][2]
# - The dataset has only one day of list data. Later, I will add more data to the dataset. [Check my repo to see how I collected the data.][1]
# - the website that I used only allows to scrape at most 25 pages for each city using the start URL, so some lists from the cities are missing. If you want to scrape the whole list of each city in Vancouver, check [my article, which uses another website as an example][3]. However, it will not allow you to scrape land size for houses. But, it allows you to scrape list type if you change the URL format and add list type parameters inside.
# [1]:https://github.com/EvaWang2020/RealEsateDataAnalysis
# [2]:https://github.com/EvaWang2020/Analyze-real-estae-lists-using-R/blob/main/Risk%20of%20average%20and%20generalization.pdf
# [3]:https://evaanalytics.wixsite.com/website/post/use-scrapy-to-real-estate-data
|
RealEstateListAnalysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Develop the function to read in images
# +
import numpy as np
import matplotlib.pyplot as plt
from MorphSED.morphsed import image, image_atlas
import glob
import matplotlib as mpl
mpl.rc("xtick", direction="in", labelsize=18)
mpl.rc("ytick", direction="in", labelsize=18)
mpl.rc("xtick.major", width=1., size=8)
mpl.rc("ytick.major", width=1., size=8)
mpl.rc("xtick.minor", width=1., size=5)
mpl.rc("ytick.minor", width=1., size=5)
# %load_ext autoreload
# %autoreload 2
# -
# ### Load a single image
# +
img = image('MorphSED/examples/data/PG0050+124_sci.fits', unit='adu')
print(img.wcs_rotation.to('degree'))
print(img.pixel_scales)
mean, med, std = img.sigma_clipped_stats()
vmin = med - 0 * std
vmax = med + 1000 * std
ax = img.plot(vmin=vmin, vmax=vmax, a=1e-4)
img.plot_direction(ax, (24, 19))
plt.show()
# -
# ### Load a list of images
# +
flist = sorted(glob.glob('data/NGC1400/*'))
image_list = [image(f, unit='adu') for f in flist]
band_list=['panstarrs.g_P1', 'panstarrs.r_P1', 'panstarrs.i_P1', 'panstarrs.z_P1', 'panstarrs.y_P1']
imgs = image_atlas(image_list=image_list, band_list=band_list)
img = imgs['panstarrs.r_P1']
mean, med, std = img.sigma_clipped_stats()
vmin = med - 0 * std
vmax = med + 100 * std
img.plot(vmin=vmin, vmax=vmax, a=1e-3)
plt.show()
# -
|
examples/demo_read_images.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# Load the hemibrain traced-connection edge list (pre_id, post_id, weight rows).
conn=pd.read_csv('/home/maria/Documents/Connectomics/exported-traced-adjacencies/traced-total-connections.csv')
print(conn)
# Work with a plain ndarray from here on; columns: [0]=pre, [1]=post, [2]=weight.
conn=np.array(conn)
print(conn.shape)
print(np.unique(conn[:,0]).shape)  # number of distinct presynaptic neurons
# Neuron metadata table (body id, type, instance name).
dat=pd.read_csv('/home/maria/Documents/Connectomics/exported-traced-adjacencies/traced-neurons.csv')
print(dat)
dat['instance'].values
def conn_mat(conn):
    """Build a dense directed connectivity matrix from an edge list.

    Parameters
    ----------
    conn : ndarray of shape (n_edges, 3)
        Rows of (pre_neuron_id, post_neuron_id, weight).

    Returns
    -------
    ndarray
        Square matrix whose rows/columns are indexed by the *sorted* unique
        neuron ids; entry [pre, post] holds the edge weight (0 if absent).
    """
    # np.union1d already returns a sorted, unique array. The original
    # round-trip through a Python set (np.array(list(set(...)))) made the
    # row/column ordering hash-dependent and thus non-deterministic.
    uniq = np.union1d(conn[:, 0], conn[:, 1])
    map_neurons_to_inds = {nid: j for j, nid in enumerate(uniq)}
    conn_m = np.zeros((uniq.shape[0], uniq.shape[0]))
    for row in range(conn.shape[0]):
        pre = map_neurons_to_inds[conn[row, 0]]
        post = map_neurons_to_inds[conn[row, 1]]
        conn_m[pre, post] = conn[row, 2]
    return conn_m
def conn_mat_undirected(conn):
    """Build a dense symmetric (undirected) connectivity matrix from an edge list.

    Parameters
    ----------
    conn : ndarray of shape (n_edges, 3)
        Rows of (pre_neuron_id, post_neuron_id, weight).

    Returns
    -------
    ndarray
        Symmetric square matrix indexed by the *sorted* unique neuron ids.
        Each edge writes its weight to both [pre, post] and [post, pre];
        when both directions appear in the input, the later row wins
        (this matches the original assignment semantics).
    """
    # np.union1d is sorted and unique; avoids the hash-ordered set round-trip
    # of the original, which made the index assignment non-deterministic.
    uniq = np.union1d(conn[:, 0], conn[:, 1])
    map_neurons_to_inds = {nid: j for j, nid in enumerate(uniq)}
    conn_m = np.zeros((uniq.shape[0], uniq.shape[0]))
    for row in range(conn.shape[0]):
        pre = map_neurons_to_inds[conn[row, 0]]
        post = map_neurons_to_inds[conn[row, 1]]
        weight = conn[row, 2]
        conn_m[pre, post] = weight
        conn_m[post, pre] = weight
    return conn_m
# Build and persist both matrix variants (paths are machine-specific).
conn_undir=conn_mat_undirected(conn)
np.save('/media/maria/DATA1/Documents/NeuralData/conn_undir_drosophila.npy',conn_undir)
conn_m=conn_mat(conn)
import matplotlib.pyplot as plt
# Quick visual sanity check of the directed matrix.
plt.imshow(conn_m)
np.save('conn_drosophila.npy',conn_m)
|
Drosophila.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="E_R0bsVyO7Fe"
# # 0.0 Problema
# + [markdown] id="E1EcA_CnPX8L"
# **Objetivo**: Se aproximar o máximo possivel do gráfico feito pela Finacial Times.
#
#
#
# [<img src="https://www.ft.com/__origami/service/image/v2/images/raw/https%3A%2F%2Fd6c748xw2pzm8.cloudfront.net%2Fprod%2F2cc43cb0-d713-11ea-8827-2bf5fdc831fb-fullwidth.png?dpr=1&fit=scale-down&quality=highest&source=next&width=1260">](https://www.ft.com/content/272354f2-f970-4ae4-a8ae-848c4baf8f4a)
#
# Para isto, é interessante quebrar o escopo em vários escopos:
# - [ ] Definir o que vai ser Estudado
# - [ ] Importar Bibliotecas
# - [ ] Limpar os Dados
# - [ ] Plotar o Gráfico
#
#
# + [markdown] id="ao4JzwrxRGK9"
# Para esse notebook, vamos utilizar a curva de Retail, dos estados do Brasil.
#
# Fizemos então as seguintes definições:
#
# - Tratar Apenas dos estados Brasileiros
# - Tratar Apenas do campo *RETAIL* (ida ao varejo)
# - Gerar um gráfico mais próximo do FT.
#
#
# Analisando a foto do FT, temos alguns escopos dentro do gráfico:
# - Gerar um gráfico mais próximo do FT.
# - É um gráfico agrupado pela data *(eixo X)* pelo RETAIL *(eixo Y)*
# - Ter um Estado em Destaque e o resto em fundo
# - Ter o Valor Minimo da Curva
# - Ter o Valor Maximo da Curva
# - Ter a variação do Mínimo com o último valor
#
# Dado isso, é possivel que teremos algumas funções
#
# - Agrupar
# - Minimo
# - Ultimo
# - Variacao
#
# + [markdown] id="lt37OZd7LuAX"
# # 1.0 Importando Bibliotecas
# + id="6bIUoo146lAh"
import pandas as pd
import numpy as np
import plotly.express as px
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
# + [markdown] id="SLai8iIlLx-9"
# # 2.0 Importando Dataset
# + id="PA9IgEaqLtXH"
# Google COVID-19 Community Mobility Report (large CSV, downloaded each run).
link = "https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv"
df = pd.read_csv(link)
# + id="1KMXQ7qqL6zt"
df.head(2)
# + [markdown] id="q4QDI0gtL8ra"
# # 3.0 Limpando os Dados
# + [markdown] id="zwcjX6VbNKRU"
# ## 3.1 Limpando os Nomes
# + id="Zt5Qv1SrL76o"
## Keep only Brazil rows and the 4 columns of interest
## (positions 2,3,7,8: state, city, date, retail mobility).
df = df.loc[df.country_region == "Brazil",:].iloc[:,[2,3,7,8]]
## Rename columns to the Portuguese names used throughout the notebook.
df.columns = ["estado","cidade","data","ida_ao_varejo"]
## Normalise the capital's state name.
# NOTE(review): inplace replace on an attribute-accessed column can trigger
# pandas chained-assignment warnings — confirm it takes effect on this version.
df.estado.replace('Federal District','Distrito Federal', inplace=True)
# + id="HL-D4Rb7MV5g"
df.head(2)
# + [markdown] id="2nx9SZUBNJW6"
# ## 3.2 Transformando em Série Temporal
# + id="SYTYevhbMWeQ"
## Convert the date column (string) to datetime.
df.data = pd.to_datetime(df.data)
## Use the date as the index...
df.index = df.data
## ...and drop the now-redundant date column.
df.drop(labels="data",axis=1,inplace=True)
# Strip the "State of " prefix so state names are bare.
df.estado = df.estado.str.replace('State of ','')
# + id="2rvpjcaDN8v8"
df.head(2)
# + [markdown] id="jLr0zw3vOC_L"
# # 4.0 Função Groupby
#
# + id="qvS57USTOCNB"
## Criar uma função para agrupar o Dataset
## E ele já irá fazer a média e destacar do grupo
def agrupar(df,agruparPor,estados = ''):
if(estados):
return df.groupby(by=agruparPor).mean().unstack()['ida_ao_varejo'][estados].rolling(window=7).mean()
else:
estados = df.estado.unique().tolist()
estados.pop(0)
return df.groupby(by=agruparPor).mean().unstack()['ida_ao_varejo'][estados].rolling(window=7).mean()
# + [markdown] id="8LDsX3KESnq9"
# # 5.0 Plotagem do Gráfico
# + [markdown] id="Q0VxfqFLSqsS"
# ## 5.1 Função Mínimo, Último e sua Variação em Porcentagem
# + id="Oqmfx0LmS3N1"
## Vamos pegar o Index do valor minimo
## E o valor do minimo
## Retornar o index e o Ultimo Valor
def getMinimo(serie):
index = serie.idxmin()
min = serie.min()
return index,min
# + id="gXL8Syn6S4ip"
## Vamos pegar o Index do ultimo valor
## E o valor do último
## Retornar o index e o Ultimo Valor
def getUltimo(serie):
index = serie.tail(1).index.item()
ultimo = serie.iloc[-1]
return index,ultimo
# + id="bbED5tOLS9U-"
## Vamos dividir o ultimo pelo minimo, multiplicar por 100 e tirar 100
## depois usamos o abs para transformar em positivo e passamos o str para
## transformar em String, depois concatemos com % e retornamos
def getVariacao(minimo,ultimo):
return '{0:.2f}'.format(abs(((ultimo/minimo)*100)-100)) + '%'
# + [markdown] id="aFgxaN9YULom"
# ## 5.2 Plotagem 1 Estado
# + [markdown] id="1iEIRoScY-s5"
# Para testar vamos iniciar apenas com 1 estado.
# + id="rKs0CEf1UkbV"
## Gather the values for the single-state (Rio Grande do Norte) test plot.
# Highlighted series: RN only.
df_RN = agrupar(df,[df.index,df.estado],'Rio Grande do Norte')
# Background series: every state.
df_todos_estados = agrupar(df,[df.index,df.estado])
# Min/last indices & values and the percentage variation for RN.
indexMin_RN, min_RN = getMinimo(df_RN.dropna())
indexUltimo_RN, ultimo_RN = getUltimo(df_RN.dropna())
variacao_RN = getVariacao(min_RN,ultimo_RN)
# + id="LCW2ltFUYphn"
# Minimum date formatted for the annotation label (dd/mm/yyyy).
data_RN = indexMin_RN.strftime("%d/%m/%Y")
# + id="93zgBD6IZFX6"
## Em verde é as modificações desse código
## zorder o quanto pra cima o bagulho ta
## FT-style single-panel plot: RN highlighted over all states in grey.
## zorder controls stacking: higher values draw on top.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10,8))
## Highlighted RN line.
df_RN.plot(linewidth=6,zorder=3,color='#35d0ba', alpha=1,ax=ax)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles,labels, ncol=2)
# Hard-coded month tick labels (Aug/Sep) matching the data window.
ax.set_xticklabels(["","Ago","","","","","","Set"])
ax.set_xlabel("")
# Dot + labels at the minimum (value above, date below).
ax.scatter(indexMin_RN,min_RN,marker='o',color='#ff9234',linewidth=6,zorder=4)
ax.annotate('{0:.2f}'.format(min_RN),xy=(indexMin_RN,min_RN+1),fontsize=12,color='#ff9234',weight='bold',ha='right',path_effects=[path_effects.withStroke(linewidth=4,foreground='w')])
ax.annotate(data_RN,xy=(indexMin_RN,min_RN-5),fontsize=12,color='#ff9234',weight='bold',ha='right',path_effects=[path_effects.withStroke(linewidth=4,foreground='w')])
# Dot + label at the last observation.
ax.scatter(indexUltimo_RN,ultimo_RN,marker='o',color='#d92027',linewidth=6,zorder=4)
ax.annotate('{0:.2f}'.format(ultimo_RN),xy=(indexUltimo_RN,ultimo_RN+2),fontsize=12,color='#ff9234',weight='bold',ha='right',path_effects=[path_effects.withStroke(linewidth=4,foreground='w')])
# Dashed guide lines from the minimum to the last point.
# Timestamps are converted to Period because the pandas plot uses a period axis.
ax.plot([pd.Period(indexMin_RN.ctime(), 'D'), pd.Period(indexUltimo_RN.ctime(), 'D')], [min_RN, min_RN], color="#ff9234", linestyle="--", linewidth=2)
ax.plot([pd.Period(indexUltimo_RN.ctime(), 'D'), pd.Period(indexUltimo_RN.ctime(), 'D')], [min_RN, ultimo_RN], color="#d92027", linestyle="--", linewidth=2)
# Percentage-variation annotation.
ax.annotate(variacao_RN,xy=(indexUltimo_RN,-50),fontsize=14,color='#d92027',weight='bold',ha='right',path_effects=[path_effects.withStroke(linewidth=4,foreground='w')])
## FT salmon background.
fig.patch.set_facecolor('#fff1e5')
ax.set_facecolor('#fff1e5')
## All other states as thin grey context lines.
df_todos_estados.plot(legend=False,color="grey",linewidth=1, alpha=0.4, ax=ax)
## Title.
ax.set_title("Recuperação do RN",fontsize=14,color='black',weight='bold',ha='center')
plt.show()
# + [markdown] id="zU_dkFaZotkg"
# ## 5.3 Plotagem para Estados Selecionados
# + id="KHKYR5SJaHZd"
## Five-panel version: one highlighted state per panel, all states in grey behind.
fig, ax = plt.subplots(nrows=1,ncols=5,figsize=(32,8))
# States to highlight, one per column.
estados = ["Pará", "Piauí", "Rio Grande do Norte","São Paulo","Santa Catarina"]
df_todos_estados = agrupar(df,[df.index,df.estado])
for i,estado in enumerate(estados):
    # Grouped-by-index-and-state series for the highlighted state.
    df_destaque = agrupar(df,[df.index,df.estado],estado)
    ## Other states as thin grey context lines.
    df_todos_estados.plot(legend=False,color="grey",linewidth=1, alpha=0.4, ax=ax[i])
    ## Highlighted state on top.
    df_destaque.plot(linewidth=6,zorder=3,color='#35d0ba', alpha=1,ax=ax[i])
    # Min / last / variation for the annotations.
    indexMin, min = getMinimo(df_destaque.dropna())
    indexUltimo, ultimo = getUltimo(df_destaque.dropna())
    variacao = getVariacao(min,ultimo)
    # Dot + label at the minimum.
    ax[i].scatter(indexMin,min,marker='o',color='#ff9234',linewidth=6,zorder=4)
    ax[i].annotate('{0:.2f}'.format(min),xy=(indexMin,min+1),fontsize=14,color='black',weight='bold',ha='right',path_effects=[path_effects.withStroke(linewidth=4,foreground='w')])
    # Dot + label at the last observation.
    ax[i].scatter(indexUltimo,ultimo,marker='o',color='#ff9234',linewidth=6,zorder=4)
    ax[i].annotate('{0:.2f}'.format(ultimo),xy=(indexUltimo,ultimo+2),fontsize=14,color='black',weight='bold',ha='right',path_effects=[path_effects.withStroke(linewidth=4,foreground='w')])
    # Percentage-variation annotation.
    ax[i].annotate(variacao,xy=(indexUltimo,-50),fontsize=14,color='#d92027',weight='bold',ha='right',path_effects=[path_effects.withStroke(linewidth=4,foreground='w')])
    # Dashed guide lines; Timestamps converted to Period for the period axis.
    ax[i].plot([pd.Period(indexMin.ctime(), 'D'), pd.Period(indexUltimo.ctime(), 'D')], [min, min], color="#ff9234", linestyle="--", linewidth=2)
    ax[i].plot([pd.Period(indexUltimo.ctime(), 'D'), pd.Period(indexUltimo.ctime(), 'D')], [min, ultimo], color="#d92027", linestyle="--", linewidth=2)
    ## FT salmon background.
    fig.patch.set_facecolor('#fff1e5')
    ax[i].set_facecolor('#fff1e5')
    fig.patch.set_visible('#fff1e5')
    # ax[i].spines['right'].set_visible(False)
    # ax[i].spines['left'].set_visible(False)
    # ax[i].spines['top'].set_visible(False)
    ax[i].set_title(estado,fontsize=14,color='black',weight='bold',ha='center')
    ax[i].xaxis.grid(False)
    ax[i].set_facecolor('#fff1e5')
    ax[i].set_xlabel("")
    # Small green arrow decoration.
    ax[i].arrow(6, 7, -2.5, -2.5, head_width = 0.5,
    head_length = 0.5, fc ='g', ec ='g')
    # Hard-coded month tick labels (Aug/Sep).
    ax[i].set_xticklabels(["","Ago","","","","","","Set"])
    ax[i].xaxis.set_tick_params(labelsize=12)
    ax[i].yaxis.set_tick_params(labelsize=12)
    # Only the outer panels show y tick labels (on the right).
    if (i==0) or (i==4):
        ax[i].yaxis.tick_right()
    else:
        ax[i].set_yticklabels([])
# + [markdown] id="-Q74ONFPf9zX"
# ## 6.0 Analise para o Medium
# + id="IndAOMnphOrV"
# Mapping of every Brazilian state to its macro-region; used to add a
# 'regiao' column via Series.map below.
Regioes = {
    'Acre':'Norte',
    'Amapá':'Norte',
    'Amazonas':'Norte',
    'Pará':'Norte',
    'Rondônia':'Norte',
    'Roraima':'Norte',
    'Tocantins': 'Norte',
    'Alagoas': 'Nordeste',
    'Bahia': 'Nordeste',
    'Ceará': 'Nordeste',
    'Maranhão': 'Nordeste',
    'Paraíba': 'Nordeste',
    'Pernambuco': 'Nordeste',
    'Piauí': 'Nordeste',
    'Rio Grande do Norte': 'Nordeste',
    'Sergipe': 'Nordeste',
    'Goiás': 'Centro-Oeste',
    'Distrito Federal': 'Centro-Oeste',
    'Mato Grosso': 'Centro-Oeste',
    'Mato Grosso do Sul': 'Centro-Oeste',
    'Espírito Santo': 'Sudeste',
    'Minas Gerais': 'Sudeste',
    'Rio de Janeiro': 'Sudeste',
    'São Paulo': 'Sudeste',
    'Paraná': 'Sul',
    'Santa Catarina': 'Sul',
    'Rio Grande do Sul': 'Sul'
}
df['regiao'] = df.estado.map(Regioes)
# + id="UM9plp7aGzPV"
# Build per-region state lists. `.where` leaves non-matching rows as NaN,
# so `unique()` typically starts with a NaN entry, which `pop(0)` removes.
sul = df.estado.where(df.regiao == 'Sul').unique().tolist()
sul.pop(0)
norte = df.estado.where(df.regiao == 'Norte').unique().tolist()
norte.pop(0)
norte.pop(0)  # NOTE(review): drops a second entry — confirm which state this removes
print(norte)
nordeste = df.estado.where(df.regiao == 'Nordeste').unique().tolist()
nordeste.pop(0)
centro_oeste = df.estado.where(df.regiao == 'Centro-Oeste').unique().tolist()
centro_oeste.pop(0)
sudeste = df.estado.where(df.regiao == 'Sudeste').unique().tolist()
sudeste.pop(0)
# (A verbatim recomputation of `sul` that duplicated the first two lines
# was removed — df is unchanged in between, so the result was identical.)
# + id="CdzcRnnqIRka"
# Print the maximum smoothed retail value per region.
# NOTE(review): `agrupar` already applies a 7-day rolling mean; rolling
# again here smooths twice — confirm this is intentional.
gp = agrupar(df,[df.index,df.estado])
for regiao in [norte,nordeste,centro_oeste,sudeste,sul]:
    print(gp[regiao].rolling(window=7).mean().dropna().max())
# + id="Ig5BPAn8Iiux"
## Five-panel regional version: each panel highlights one state; background
## lines are drawn per region (all five regions are overplotted in each panel).
fig, ax = plt.subplots(nrows=1,ncols=5,figsize=(32,8))
# One highlighted state per panel.
estados = ["Tocantins", "Goiás", "Espírito Santo","Minas Gerais","Rio Grande do Sul"]
df_todos_estados = agrupar(df,[df.index,df.estado])
for i,estado in enumerate(estados):
    # Series for the highlighted state.
    df_destaque = agrupar(df,[df.index,df.estado],estado)
    for regiao in [norte,nordeste,centro_oeste,sudeste,sul]:
        df_regiao = agrupar(df,[df.index,df.estado],regiao)
        ## Regional states as thin grey context lines.
        df_regiao.plot(legend=False,color="grey",linewidth=1, alpha=0.4, ax=ax[i])
    ## Highlighted state on top.
    df_destaque.plot(legend=True, linewidth=4,zorder=3,color='#35d0ba', alpha=1,ax=ax[i])
    # Min / last / variation for the annotations.
    indexMin, min = getMinimo(df_destaque.dropna())
    indexUltimo, ultimo = getUltimo(df_destaque.dropna())
    variacao = getVariacao(min,ultimo)
    # Dot + label at the minimum.
    ax[i].scatter(indexMin,min,marker='o',color='#ff9234',linewidth=6,zorder=4)
    ax[i].annotate('{0:.2f}'.format(min),xy=(indexMin,min+1),fontsize=14,color='black',weight='bold',ha='right',path_effects=[path_effects.withStroke(linewidth=4,foreground='w')])
    # Dot + label at the last observation.
    ax[i].scatter(indexUltimo,ultimo,marker='o',color='#ff9234',linewidth=6,zorder=4)
    ax[i].annotate('{0:.2f}'.format(ultimo),xy=(indexUltimo,ultimo+2),fontsize=14,color='black',weight='bold',ha='right',path_effects=[path_effects.withStroke(linewidth=4,foreground='w')])
    # Percentage-variation annotation.
    ax[i].annotate(variacao,xy=(indexUltimo,-50),fontsize=14,color='#d92027',weight='bold',ha='right',path_effects=[path_effects.withStroke(linewidth=4,foreground='w')])
    # Dashed guide lines; Timestamps converted to Period for the period axis.
    ax[i].plot([pd.Period(indexMin.ctime(), 'D'), pd.Period(indexUltimo.ctime(), 'D')], [min, min], color="#ff9234", linestyle="--", linewidth=2)
    ax[i].plot([pd.Period(indexUltimo.ctime(), 'D'), pd.Period(indexUltimo.ctime(), 'D')], [min, ultimo], color="#d92027", linestyle="--", linewidth=2)
    ## FT salmon background.
    fig.patch.set_facecolor('#fff1e5')
    ax[i].set_facecolor('#fff1e5')
    fig.patch.set_visible('#fff1e5')
    # ax[i].spines['right'].set_visible(False)
    # ax[i].spines['left'].set_visible(False)
    # ax[i].spines['top'].set_visible(False)
# Panel titles carry the region names (reuses loop variable i after the loop above).
for i,regiao in enumerate(['Norte','Nordeste','Centro-Oeste','Sudeste','Sul']):
    ax[i].set_title(regiao,fontsize=14,color='black',weight='bold',ha='center')
    ax[i].xaxis.grid(False)
    ax[i].set_facecolor('#fff1e5')
    ax[i].set_xlabel("")
    # Small green arrow decoration.
    ax[i].arrow(0, 3, 5, -2.5, head_width = 0.5,
    head_length = 0.5, fc ='g', ec ='g')
    # Hard-coded month tick labels (Aug/Sep).
    ax[i].set_xticklabels(["","Ago","","","","","","Set"])
    ax[i].xaxis.set_tick_params(labelsize=12)
    ax[i].yaxis.set_tick_params(labelsize=12)
    # Only the outer panels show y tick labels (on the right).
    if (i==0) or (i==4):
        ax[i].yaxis.tick_right()
    else:
        ax[i].set_yticklabels([])
# + id="W1fXgXblOpQ7"
## Second regional five-panel chart with a different set of highlighted states.
fig, ax = plt.subplots(nrows=1,ncols=5,figsize=(32,8))
# NOTE(review): "<NAME>" below is anonymization residue — restore the
# intended state name before running.
estados = ["Pará", "Maranhão", "<NAME>","Minas Gerais","Paraná"]
df_todos_estados = agrupar(df,[df.index,df.estado])
for i,estado in enumerate(estados):
    # Series for the highlighted state.
    df_destaque = agrupar(df,[df.index,df.estado],estado)
    for regiao in [norte,nordeste,centro_oeste,sudeste,sul]:
        df_regiao = agrupar(df,[df.index,df.estado],regiao)
        ## Regional states as thin grey context lines.
        df_regiao.plot(legend=False,color="grey",linewidth=1, alpha=0.4, ax=ax[i])
    ## Highlighted state on top.
    df_destaque.plot(legend=True, linewidth=4,zorder=3,color='#35d0ba', alpha=1,ax=ax[i])
    # Min / last / variation for the annotations.
    indexMin, min = getMinimo(df_destaque.dropna())
    indexUltimo, ultimo = getUltimo(df_destaque.dropna())
    variacao = getVariacao(min,ultimo)
    # Dot + label at the minimum.
    ax[i].scatter(indexMin,min,marker='o',color='#ff9234',linewidth=6,zorder=4)
    ax[i].annotate('{0:.2f}'.format(min),xy=(indexMin,min+1),fontsize=14,color='black',weight='bold',ha='right',path_effects=[path_effects.withStroke(linewidth=4,foreground='w')])
    # Dot + label at the last observation.
    ax[i].scatter(indexUltimo,ultimo,marker='o',color='#ff9234',linewidth=6,zorder=4)
    ax[i].annotate('{0:.2f}'.format(ultimo),xy=(indexUltimo,ultimo+2),fontsize=14,color='black',weight='bold',ha='right',path_effects=[path_effects.withStroke(linewidth=4,foreground='w')])
    # Percentage-variation annotation.
    ax[i].annotate(variacao,xy=(indexUltimo,-50),fontsize=14,color='#d92027',weight='bold',ha='right',path_effects=[path_effects.withStroke(linewidth=4,foreground='w')])
    # Dashed guide lines; Timestamps converted to Period for the period axis.
    ax[i].plot([pd.Period(indexMin.ctime(), 'D'), pd.Period(indexUltimo.ctime(), 'D')], [min, min], color="#ff9234", linestyle="--", linewidth=2)
    ax[i].plot([pd.Period(indexUltimo.ctime(), 'D'), pd.Period(indexUltimo.ctime(), 'D')], [min, ultimo], color="#d92027", linestyle="--", linewidth=2)
    ## FT salmon background.
    fig.patch.set_facecolor('#fff1e5')
    ax[i].set_facecolor('#fff1e5')
    fig.patch.set_visible('#fff1e5')
    # ax[i].spines['right'].set_visible(False)
    # ax[i].spines['left'].set_visible(False)
    # ax[i].spines['top'].set_visible(False)
# Panel titles carry the region names (reuses loop variable i after the loop above).
for i,regiao in enumerate(['Norte','Nordeste','Centro-Oeste','Sudeste','Sul']):
    ax[i].set_title(regiao,fontsize=14,color='black',weight='bold',ha='center')
    ax[i].xaxis.grid(False)
    ax[i].set_facecolor('#fff1e5')
    ax[i].set_xlabel("")
    # Small green arrow decoration.
    ax[i].arrow(0, 3, 5, -2.5, head_width = 0.5,
    head_length = 0.5, fc ='g', ec ='g')
    # Hard-coded month tick labels (Aug/Sep).
    ax[i].set_xticklabels(["","Ago","","","","","","Set"])
    ax[i].xaxis.set_tick_params(labelsize=12)
    ax[i].yaxis.set_tick_params(labelsize=12)
    # Only the outer panels show y tick labels (on the right).
    if (i==0) or (i==4):
        ax[i].yaxis.tick_right()
    else:
        ax[i].set_yticklabels([])
|
1_Figure.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Задание 1
# +
import json

# Task 1: build a user_id -> purchase category mapping from the JSON-lines log.
purchase = {}
with open('purchase_log.txt') as f:
    next(f)  # skip the header line
    for line in f:
        record = json.loads(line.strip())
        purchase[record['user_id']] = record['category']
# NOTE: the redundant trailing f.close() was removed — the `with` block
# already closes the file on exit.
# -
# ## Задание 2
# +
import json

# Task 2: join the visit log with the purchase categories and write funnel.csv.
i = 0  # kept: the preview cell below reuses this counter
purchase = {}
with open('purchase_log.txt') as f:
    next(f)  # skip the header line
    for line in f:
        record = json.loads(line.strip())
        purchase[record['user_id']] = record['category']
with open('visit_log.csv', 'r') as m, open('funnel.csv', 'w') as l:
    for line_m in m:
        user_id = line_m.strip().split(',')[0]
        # membership test directly on the dict (O(1)); the original's
        # `.keys()` wrapper was redundant
        if user_id in purchase:
            l.write(line_m.strip() + ',' + purchase[user_id] + '\n')
# NOTE: the redundant trailing f/m/l .close() calls were removed — the
# `with` blocks already close the files on exit.
# +
# Preview the first lines of the generated funnel file.
# NOTE(review): relies on `i` being initialised in the previous cell —
# confirm cells are executed in order.
with open('funnel.csv') as l:
    for line_l in l:
        print(line_l)
        i = i+1
        if i>6:
            break
l.close()  # redundant: the `with` block has already closed the file
# -
|
.ipynb_checkpoints/read-write_DZ-2-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.10 ('mv')
# language: python
# name: python3
# ---
# ## Image classificaton for dataset CIFAR10
# +
import os
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.utils import to_categorical, plot_model
from keras.callbacks import ModelCheckpoint
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
from keras.models import Sequential, Model, model_from_json
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Dropout, Flatten, Conv2D, Conv2DTranspose, MaxPool2D, UpSampling2D, BatchNormalization, Input, ZeroPadding2D, Concatenate
# custom package imports
from Helpers_Classification import helper_model
from Helpers_Classification import helper_data
from Helpers_Classification import helper_stats
# -
# ### Load train and test dataset
# +
# NOTE: specify destination paths
srcPath = r'C:\Users\vase_\Downloads\ComputerVision\Data\Cifar10'
dstResultsPath = r'C:\Users\vase_\Downloads\ComputerVision\Data\Results'
dstModelsPath = r'C:\Users\vase_\Downloads\ComputerVision\Data\Models'
# --- variables ---
imgDims = {'rows': 32, 'cols': 32}  # CIFAR-10 image size
num_classes = 10
image_depth = 3  # RGB channels
# --- load and format data ---
# load full dataset into memory - image data and labels
x_train, y_train = helper_data.read_images_cifar(os.path.join(srcPath, 'train'), image_depth)
x_test, y_test = helper_data.read_images_cifar(os.path.join(srcPath, 'test'), image_depth)
# one-hot encode the test labels now; train labels are encoded later,
# after trimming, in the "Create model" cell
y_test = to_categorical(y_test, num_classes)
print(f'Training dataset shape: {x_train.shape}')
print(f'Number of training samples: {x_train.shape[0]}')
print(f'Number of test samples: {x_test.shape[0]}')
# -
# ### Create training version
# +
# --- paths ---
version = 'LC_12'  # unique tag for this training run
# optimization hyperparameters
batch_size = 128
epochs = 50
lr = 0.0001
# create folders to save data from the current execution
resultsPath = os.path.join(dstResultsPath, version)
modelsPath = os.path.join(dstModelsPath, version)
if not os.path.exists(resultsPath):
    os.mkdir(resultsPath)
else:
    # abort instead of overwriting results of a previous run.
    # Fix: `raise SystemExit` replaces the bare `exit(1)` -- `exit` is a
    # site-module helper that is not guaranteed to exist outside an
    # interactive interpreter.
    print(f"Folder name {version} exists.")
    raise SystemExit(1)
if not os.path.exists(modelsPath):
    os.mkdir(modelsPath)
# -
# ### Create model
# +
# trim training data and one-hot encoding of labels
x_train_trim, y_train_trim = helper_data.trim_train_data(x_train,y_train,None)
y_train_trim = to_categorical(y_train_trim, num_classes)
# create validation dataset (image and label data is shuffled in both datasets)
X_train, X_val, Y_train, Y_val = train_test_split(x_train_trim, y_train_trim,
                                                  test_size=0.2, # assign random 20% of the samples to the validation set
                                                  random_state=42) # fixed random seed enables repeatability of sample choice across executions
# --- construct model ---
#model = helper_model.construct_model(num_classes) # build model architecture
# Three conv stages (32 -> 64 -> 128 filters). Each stage: two 3x3 conv
# layers with batch norm, then 2x2 max-pooling and progressively stronger
# dropout (0.2 / 0.3 / 0.4).
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
#model.add(MaxPool2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.4))
# classifier head
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax')) # softmax for multi-class classification
# compile model
model.compile(loss=categorical_crossentropy, # categorical crossentropy for multi-class classification
              optimizer=Adam(lr=lr),
              metrics=['accuracy'])
# SGD(lr=lr, momentum=0.0, decay=0.0)
# --- fit model ---
model_checkpoint = ModelCheckpoint(filepath=os.path.join(modelsPath, 'checkpoint-{epoch:03d}-{val_accuracy:.4f}.hdf5'), # epoch number and val accuracy will be part of the weight file name
                                   monitor='val_accuracy', # metric to monitor when selecting weight checkpoints to save
                                   verbose=1,
                                   save_best_only=True) # True saves only the weights after epochs where the monitored value (val accuracy) is improved
# -
# ### Train model
history = model.fit(X_train, Y_train,
                    batch_size=batch_size, # number of samples to process before updating the weights
                    epochs=epochs,
                    callbacks=[model_checkpoint],  # saves best-val-accuracy weights each epoch
                    verbose=0,
                    validation_data=(X_val, Y_val))
# ### Save and evaluate model
# +
# save model architecture
print(model.summary()) # parameter info for each layer
with open(os.path.join(modelsPath, 'modelSummary.txt'), 'w') as fh: # save model summary
    model.summary(print_fn=lambda x: fh.write(x + '\n'))
plot_model(model, to_file=os.path.join(modelsPath, 'modelDiagram.png'), show_shapes=True) # save diagram of model architecture
# save model configuration and weights
model_json = model.to_json() # serialize model architecture to JSON
# fix: removed a redundant nested os.path.join call
with open(os.path.join(modelsPath, 'model.json'), "w") as json_file:
    json_file.write(model_json)
model.save_weights(os.path.join(modelsPath, 'model.h5')) # serialize weights to HDF5
print("Saved model to disk.")
# --- save training curves and logs ---
helper_stats.save_training_logs(history=history, dst_path=modelsPath)
# --- apply model to test data ---
Y_test_pred = model.predict(x_test, verbose=1)
# --- evaluate model ---
# accuracy
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# confusion matrix
labels = list(range(10))  # class ids 0..9
# convert one-hot encoded vectors to 1D list of classes
y_test_list = np.argmax(y_test, axis=1)
Y_test_pred_list = np.argmax(Y_test_pred, axis=1)
# fix: pass `labels` by keyword -- it is keyword-only in modern scikit-learn,
# so the old positional call raises TypeError there
cm = confusion_matrix(y_test_list, Y_test_pred_list, labels=labels) # takes 1D list of classes as input
# plot confusion matrix
target_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
fig = helper_stats.plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=None, normalize=False)
fig.savefig(os.path.join(modelsPath, 'confusionMatrix.png'), dpi=fig.dpi) # save confusion matrix as figure
# --- save misclassified test samples ---
# find indices of misclassified samples
missed = [ind for ind, elem in enumerate(Y_test_pred_list) if elem != y_test_list[ind]]
for i in missed:
    cv2.imwrite(os.path.join(resultsPath, str(i).zfill(6) + '_' + str(Y_test_pred_list[i]) + '_' + str(y_test_list[i]) + '.png'),
                (x_test[i] * 255).astype(np.uint8)) # transform value range back to [0, 255]
    # file name: OrdinalNumberOfSample_PredictedClass_TrueClass.png
# -
# ### Load and evaluate model
# +
# load model
# NOTE: paths point at a previously trained version (LC_11)
srcModelPath = r'C:\Users\vase_\Downloads\ComputerVision\Data\Models\LC_11\model.json'
srcWeightsPath = r'C:\Users\vase_\Downloads\ComputerVision\Data\Models\LC_11\model.h5'
model = helper_model.load_model(srcModelPath,srcWeightsPath)
model.compile(loss=categorical_crossentropy, # categorical crossentropy for multi-class classification
              optimizer=Adam(lr=lr),
              metrics=['accuracy'])
# # parameter info for each layer
print(model.summary())
# --- apply model to test data ---
Y_test_pred = model.predict(x_test, verbose=1)
# --- evaluate model ---
# accuracy
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# confusion matrix
labels = list(range(10))  # class ids 0..9
# convert one-hot encoded vectors to 1D list of classes
y_test_list = np.argmax(y_test, axis=1)
Y_test_pred_list = np.argmax(Y_test_pred, axis=1)
# fix: `labels` is keyword-only in modern scikit-learn; the positional
# call raised TypeError there
cm = confusion_matrix(y_test_list, Y_test_pred_list, labels=labels) # takes 1D list of classes as input
# plot confusion matrix
target_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
fig = helper_stats.plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=None, normalize=False)
# -
|
Clasification/imgclassificationcifar10.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [40, 30]
# +
import fastai as fai
from fastai.basic_data import DataLoader
import numpy as np
from torch.utils.data import SubsetRandomSampler
import pandas as pd
import seaborn as sns
from os import environ
from torch import optim
from bisect import bisect_left
import time
from tqdm import tqdm
from fastai.callbacks import EarlyStoppingCallback
from functools import partial
from src.data.dataset import *
from src.model.model_bn import *
# + tags=["parameters"]
# Experiment configuration, overridable via environment variables
# (the "parameters" tag marks this as a papermill-style parameter cell).
optimizer = environ.get('optimizer', 'Adam')
num_workers= int(environ.get('num_workers', '8'))
batch_size=int(environ.get('batch_size', '2048'))
n_epochs=int(environ.get('n_epochs', '500'))
batch_norm = environ.get('batch_norm', 'True') == 'True'  # env vars are strings
dataset= environ.get('dataset', 'data/speedup_dataset2.pkl')
loss_func = environ.get('loss_func', 'MSE')
log = environ.get('log', 'True') == 'True'  # train on log-transformed speedups
wd = float(environ.get('weight_decay', '0.01'))
cuda_device = environ.get('cuda_device', 'cuda:0')
# hidden layer widths and matching dropout probabilities, space-separated
layers_sizes = list(map(int, environ.get('layers', '300 200 120 80 30').split()))
drops = list(map(float, environ.get('dropouts', '0.2 0.2 0.1 0.1 0.1').split()))
device = torch.device(cuda_device if torch.cuda.is_available() else "cpu")
# +
def train_model(model, criterion, optimizer, dataloader, num_epochs=100):
    """Run a standard train/val loop on `dataloader` (a dict with 'train'
    and 'val' loaders) and return a list of (train_loss, val_loss) pairs,
    one per epoch.  Uses the module-level `device`.
    """
    since = time.time()
    losses = []
    train_loss = 0
    model = model.to(device)
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                #scheduler.step()
                model.train()
            else:
                model.eval()
            running_loss = 0.0
            # Iterate over data.
            for inputs, labels in tqdm(dataloader[phase], total=len(dataloader[phase])):
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    assert outputs.shape == labels.shape
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # print(loss.item())
                # statistics
                running_loss += loss.item()
                #running_corrects += torch.sum((outputs.data - labels.data) < e)/inputs.shape[0]
            # mean loss over all batches of this phase
            epoch_loss = running_loss / len(dataloader[phase])
            print('{} Loss: {:.4f}'.format(
                phase, epoch_loss))
            # record the pair only once the val phase finishes, pairing it
            # with the train loss stored during the train phase
            if phase == 'val':
                losses.append((train_loss, epoch_loss))
            else:
                train_loss = epoch_loss
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    return losses
def get_results_df(dl, model, log=False):
    """Evaluate `model` on all samples of DataLoader `dl` and return a
    DataFrame with columns: index, name, sched_name, prediction, target,
    abs_diff, APE, SMAPE and the (interchange, tile, unroll) schedule bits.

    If ``log`` is True, predictions/targets are un-standardised and
    exponentiated (presumably undoing a log+standardise transform applied
    by the dataset -- TODO confirm against DatasetFromPkl).
    """
    df = pd.DataFrame()
    indices = dl.sampler.indices
    inputs, targets = dl.dataset[indices]
    names = [dl.dataset.programs[dl.dataset.restricted_program_indexes[i]].name for i in indices]
    sched_names = [dl.dataset.schedules[i].name for i in indices]
    inputs = torch.Tensor(inputs)
    model.eval()
    preds = model(inputs.to(device))
    # one (interchange, tile, unroll) bit-triple per sampled schedule
    interchange, tile, unroll = zip(*[dl.dataset.schedules[index].binary_repr for index in indices])
    preds = preds.cpu().detach().numpy().reshape((-1,))
    targets = targets.reshape((-1,))
    if log:
        preds = np.exp(preds*dl.dataset.std + dl.dataset.mean)
        targets = np.exp(targets*dl.dataset.std + dl.dataset.mean)
    df['index'] = indices
    df['name'] = names
    df['sched_name'] = sched_names
    df['prediction'] = preds
    df['target'] = targets
    df['abs_diff'] = np.abs(preds - targets)
    # absolute / symmetric percentage errors, in percent
    df['APE'] = np.abs(df.target - df.prediction)/df.target * 100
    df['SMAPE'] = 100*np.abs(df.target - df.prediction)/((np.abs(df.target) + np.abs(df.prediction))/2)
    df['interchange'] = interchange
    df['tile'] = tile
    df['unroll'] = unroll
    return df
def train_dev_split(dataset, batch_size, num_workers, log=False, seed=42):
    """Split the pickled dataset at *dataset* into train/val/test loaders.

    The first 10k samples form the test set, the next 10k the validation
    set, and everything after that the training set.  Note: ``seed`` is
    accepted for API compatibility but is not used here.
    """
    holdout = 10000  # size of each of the test and validation sets
    ds = DatasetFromPkl(dataset, maxsize=None, log=log)
    all_indices = range(len(ds))
    split_indices = {
        'test': all_indices[:holdout],
        'val': all_indices[holdout:2 * holdout],
        'train': all_indices[2 * holdout:],
    }

    def make_loader(idx):
        # each split draws its own index subset in random order
        return DataLoader(ds, batch_size=batch_size,
                          sampler=SubsetRandomSampler(idx),
                          num_workers=num_workers)

    return (make_loader(split_indices['train']),
            make_loader(split_indices['val']),
            make_loader(split_indices['test']))
def mape_criterion(inputs, targets):
    """Mean absolute percentage error, in percent.

    A small epsilon keeps the division finite when a target is zero.
    """
    eps = 1e-5
    relative_error = torch.abs(targets - inputs) / (targets + eps)
    return 100 * torch.mean(relative_error)
def smape_criterion(inputs, targets):
    """Symmetric mean absolute percentage error, in percent.

    Robustness fix: a small epsilon is added to the denominator so that a
    sample where both prediction and target are zero contributes 0 instead
    of NaN (matching the epsilon guard already used in mape_criterion).
    """
    eps = 1e-5
    denom = (torch.abs(targets) + torch.abs(inputs)) / 2 + eps
    return 100 * torch.mean(torch.abs(targets - inputs) / denom)
def rmse_criterion(inputs, targets):
    """Root mean squared error between predictions and targets."""
    mean_squared_error = nn.MSELoss()(inputs, targets)
    return torch.sqrt(mean_squared_error)
def get_data_with_names(dl):
    """Return (schedule names, inputs, targets) tuples for every sampled index."""
    ds = dl.dataset
    rows = []
    for idx in dl.sampler.indices:
        x, y = ds[idx]
        rows.append((ds.get_sched_name(idx), x, y))
    names, X, Y = zip(*rows)
    return names, X, Y
def get_schedule_data(dl, schedule):
    # Return the sampled indices whose schedule matches *schedule*.
    # `binary_repr` and `schedule` appear to be 0/1 vectors: an elementwise
    # sum equal to 1 means the two bits differ, so requiring every sum != 1
    # keeps only schedules identical to the requested one.
    # NOTE(review): assumes both vectors are strictly binary -- confirm.
    dataset = dl.dataset
    indices = [index for index in dl.sampler.indices
               if np.all(np.array(dataset.schedules[index].binary_repr) + np.array(schedule) != 1)]
    return indices
def get_data_with_prog_names(dl):
    """Return (program names, inputs, targets) tuples for every sampled index."""
    ds = dl.dataset
    rows = []
    for idx in dl.sampler.indices:
        x, y = ds[idx]
        rows.append((ds.get_prog_name(idx), x, y))
    names, X, Y = zip(*rows)
    return names, X, Y
def joint_plot(df, title, val_range=None):
    """Joint plot of df['target'] vs df['prediction'] with a dotted y=x
    guide line (perfect predictions fall on it).

    Fix: the tick range is no longer a mutable list used as a default
    argument; it is built per call instead (same default values).
    """
    if val_range is None:
        val_range = list(range(-1, 15))
    ax = sns.jointplot('target', 'prediction', df, ).ax_joint
    plt.suptitle(title)
    _ = ax.set_xticks(val_range)
    _ = ax.set_yticks(val_range)
    _ = ax.plot(val_range, val_range, ':k')
class NameGetter(object):
    """Sequence-like view over a dataset's program names.

    Lets bisection search over program names by index without
    materialising the full name list.
    """

    def __init__(self, dataset):
        self.dataset = dataset

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        # delegate straight to the dataset's name lookup
        return self.dataset.get_prog_name(index)
def get_program_data(dl, prog_name):
    """Return (X, Y) tensors for every sample belonging to *prog_name*.

    Assumes the dataset is ordered by program name so that all samples of
    one program are contiguous -- TODO confirm.
    """
    # Fix: bisect_right was never imported at module level (only
    # bisect_left is), so this function raised NameError when called.
    from bisect import bisect_right

    dataset = dl.dataset
    name_g = NameGetter(dataset)
    index1 = bisect_left(name_g, prog_name, 0)
    index2 = bisect_right(name_g, prog_name, index1)
    X, Y = zip(*[dataset[index] for index in range(index1, index2)])
    return torch.Tensor(X), torch.Tensor(Y)
def joint_plot_one_program(dl, prog_name, model):
    # Joint-plot predicted vs. true values for all samples of one program.
    model.eval()
    X, Y = get_program_data(dl, prog_name)
    Y_hat = model(X.to(device))  # `device` is a module-level global
    df = pd.DataFrame()
    # NOTE(review): np.array on Y_hat assumes a CPU tensor with no grad --
    # confirm a .cpu().detach() is not needed when device is CUDA.
    df['prediction'] = np.array(Y_hat.view(-1,))
    df['target'] = np.array(Y)
    joint_plot(df, prog_name)
def joint_plot_one_schedule(dl, schedule, model, log=False):
    # Joint-plot predictions vs. targets for every sample whose schedule
    # matches *schedule* (a binary tuple).
    # NOTE(review): the `log` argument is accepted but never used, and the
    # tuple `schedule` is passed to joint_plot as the title -- confirm
    # matplotlib accepts a non-string suptitle here.
    indices = get_schedule_data(dl, schedule)
    X, Y = dl.dataset[indices]
    X, Y = torch.Tensor(X), torch.Tensor(Y)
    Y_hat = model(X.to(device))
    df = pd.DataFrame()
    df['prediction'] = np.array(Y_hat.view(-1,))
    df['target'] = np.array(Y)
    joint_plot(df, schedule)
# -
|
utils/speedup_model/utils.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Loading the iris dataset into scikit-learn
# -
# import load_iris function from datasets module
# convention is to import modules instead of sklearn as a whole
from sklearn.datasets import load_iris
# + pycharm={"name": "#%%\n"}
# save "bunch" object containing iris dataset and its attributes
# the data type is "bunch"
iris = load_iris()
type(iris)
# + pycharm={"name": "#%%\n"}
# print the iris data
# same data as shown previously
# each row represents each sample
# each column represents the features
print(iris.data)
# + [markdown] pycharm={"name": "#%% md\n"}
# # Exploring the iris dataset¶
# + pycharm={"name": "#%%\n"}
# print the names of the four features
iris.feature_names
# + pycharm={"name": "#%%\n"}
# print integers representing the species of each observation
# 0, 1, and 2 represent different species
iris.target
# + pycharm={"name": "#%%\n"}
# print the encoding scheme for species: 0 = setosa, 1 = versicolor, 2 = virginica
iris.target_names
# + pycharm={"name": "#%%\n"}
|
scratch-pad/Iris_dataset.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rajdeepd/tensorflow_2.0_book_code/blob/master/ch03/custom_TF_training_loops.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="pfi5G8ZWDeox"
# Primary imports
import tensorflow as tf
import numpy as np
# + colab={"base_uri": "https://localhost:8080/"} id="UUK3cHkqDovW" outputId="02abd1fd-6406-4fb8-bf42-7bf7c42ac4ce"
print(tf.__version__)
# + colab={"base_uri": "https://localhost:8080/"} id="-TkzuZXSD_MN" outputId="6f3f6fe0-0ae1-426b-ef1c-64c4868c37aa"
# Load the FashionMNIST dataset, scale the pixel values
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# pixel values go from [0, 255] to [0, 1]
X_train = X_train/255.
X_test = X_test/255.
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# + id="uNfUBY4mErkY"
# Define the labels of the dataset (index matches the integer class id)
CLASSES=["T-shirt/top","Trouser","Pullover","Dress","Coat",
         "Sandal","Shirt","Sneaker","Bag","Ankle boot"]
# + id="ofiUxCQpEu0p"
# Change the pixel values to float32 and reshape input data
# (NHWC layout with a single greyscale channel, as Conv2D expects)
X_train = X_train.astype("float32").reshape(-1, 28, 28, 1)
X_test = X_test.astype("float32").reshape(-1, 28, 28, 1)
# + colab={"base_uri": "https://localhost:8080/"} id="vWsjTjvwLVvL" outputId="3894cd2e-6551-47f0-dafb-9d68b069c9dc"
y_train.shape, y_test.shape
# + id="jA2ES9N7FOo4"
# TensorFlow imports
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
# + id="UMXRRKcbE9vE"
# Define utility function for building a basic shallow Convnet
def get_training_model():
    """Build a small Convnet for 28x28x1 FashionMNIST images."""
    layer_stack = [
        Conv2D(16, (5, 5), activation="relu",
               input_shape=(28, 28, 1)),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(32, (5, 5), activation="relu"),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.2),
        Flatten(),
        Dense(128, activation="relu"),
        # one output per class; softmax gives class probabilities
        Dense(len(CLASSES), activation="softmax"),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    return model
# + id="DDXOr6FhFvXw"
# Define loss function and optimizer
loss_func = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
# + id="uXfw22IZF5Rw"
# Average the loss across the batch size within an epoch
train_loss = tf.keras.metrics.Mean(name="train_loss")
# NOTE(review): the validation-loss metric is named "test_loss" while the
# variable is valid_loss -- presumably a leftover; confirm before relying
# on metric names in logs.
valid_loss = tf.keras.metrics.Mean(name="test_loss")
# Specify the performance metric
train_acc = tf.keras.metrics.SparseCategoricalAccuracy(name="train_acc")
valid_acc = tf.keras.metrics.SparseCategoricalAccuracy(name="valid_acc")
# + id="w_CcwaadLI8e"
# Batches of 64
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).batch(64)
test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test)).batch(64)
# + id="-JgN5TWzLroE"
# Train the model
@tf.function
def model_train(features, labels):
    # One optimisation step on a single batch; updates the global
    # `model`, `train_loss` and `train_acc` objects.
    # Define the GradientTape context
    with tf.GradientTape() as tape:
        # Get the probabilities
        predictions = model(features)
        # Calculate the loss
        loss = loss_func(labels, predictions)
    # Get the gradients
    gradients = tape.gradient(loss, model.trainable_variables)
    # Update the weights
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    # Update the loss and accuracy
    train_loss(loss)
    train_acc(labels, predictions)
# + id="LBPBn762LwZ1"
# Validating the model
@tf.function
def model_validate(features, labels):
    # Forward pass only; updates the global valid_loss/valid_acc metrics.
    predictions = model(features)
    v_loss = loss_func(labels, predictions)
    valid_loss(v_loss)
    valid_acc(labels, predictions)
# + id="jleWU_WoPXfN"
# A shallow Convnet
model = get_training_model()
# + id="946iV6qSXZXo"
# Grab random images from the test and make predictions using
# the model *while it is training* and log them using WnB
def get_sample_predictions():
    # Sample 25 random test images and collect the model's predicted labels.
    predictions = []
    images = []
    random_indices = np.random.choice(X_test.shape[0], 25)
    for index in random_indices:
        image = X_test[index].reshape(1, 28, 28, 1)
        # argmax over class probabilities -> index into CLASSES
        prediction = np.argmax(model(image).numpy(), axis=1)
        prediction = CLASSES[int(prediction)]
        images.append(image)
        predictions.append(prediction)
    # W&B logging is disabled; `images`/`predictions` are currently unused.
    #wandb.log({"predictions": [wandb.Image(image, caption=prediction)
    #           for (image, prediction) in zip(images, predictions)]})
# + colab={"base_uri": "https://localhost:8080/"} id="yZE9_IhjL4lM" outputId="3cb8c06d-dbd0-46f1-e6f5-f8a85c597488"
# Train the model for 5 epochs
for epoch in range(5):
    # Run the model through train and test sets respectively
    for (features, labels) in train_ds:
        model_train(features, labels)
    for test_features, test_labels in test_ds:
        model_validate(test_features, test_labels)
    # Grab the results
    (loss, acc) = train_loss.result(), train_acc.result()
    (val_loss, val_acc) = valid_loss.result(), valid_acc.result()
    # Clear the current state of the metrics so each epoch starts fresh
    train_loss.reset_states(), train_acc.reset_states()
    valid_loss.reset_states(), valid_acc.reset_states()
    # Local logging
    template = "Epoch {}, loss: {:.3f}, acc: {:.3f}, val_loss: {:.3f}, val_acc: {:.3f}"
    print (template.format(epoch+1,
                           loss,
                           acc,
                           val_loss,
                           val_acc))
    get_sample_predictions()
# + id="1MtHl_cggzdS"
|
Chapter 03/custom_TF_training_loops.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Global experiment settings: 20 epochs over a 1280-sample MNIST subset.
epochs = 20
n_train_items = 1280
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
# +
import syft as sy  # <-- NEW: import the Pysyft library
hook = sy.TorchHook(torch)  # <-- NEW: hook PyTorch ie add extra functionalities to support Federated Learning
# simulation functions
def connect_to_workers(n_workers):
    """Spin up *n_workers* simulated (in-process) PySyft workers."""
    workers = []
    # worker ids are 1-based: worker1, worker2, ...
    for i in range(n_workers):
        workers.append(sy.VirtualWorker(hook, id=f"worker{i+1}"))
    return workers
workers = connect_to_workers(n_workers=20)
# +
class Arguments():
    """Bundle of training hyperparameters (stand-in for argparse)."""

    def __init__(self):
        # optimisation settings
        self.batch_size = 64
        self.test_batch_size = 64
        self.epochs = epochs  # module-level global set at the top of the notebook
        self.lr = 0.01
        self.momentum = 0.5
        # runtime settings
        self.no_cuda = False
        self.seed = 1
        self.log_interval = 3  # print training loss every N batches
        self.save_model = False
args = Arguments()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)  # reproducible weight init / shuffling
device = torch.device("cuda" if use_cuda else "cpu")
# extra DataLoader settings that only make sense with CUDA
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
# +
# federated_train_loader = sy.FederatedDataLoader( # <-- this is now a FederatedDataLoader
# datasets.MNIST('../data', train=True, download=True,
# transform=transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.1307,), (0.3081,))
# ]))
# .federate(workers), # <-- NEW: we distribute the dataset across all the workers, it's now a FederatedDataset
# batch_size=args.batch_size, shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST('../data', train=False, transform=transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.1307,), (0.3081,))
# ])),
# batch_size=args.test_batch_size, shuffle=True, **kwargs)
# Plain (non-federated) MNIST train loader; batches are sent to workers
# manually in train() below.
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))  # standard MNIST mean/std
    ])),
    batch_size=args.batch_size
)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST('../data', train=False, download=True, transform=transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.1307,), (0.3081,))
# ])),
# batch_size=args.test_batch_size
# )
test_MNIST = datasets.MNIST('../data', train=False, download=True, transform=transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
]))
#---
# Materialise only the first n_train_items/batch_size batches as a plain
# list, so training touches a small fixed subset.
less_train_dataloader = [
    ((data), (target))
    for i, (data, target) in enumerate(train_loader)
    if i < n_train_items / args.batch_size
]
# less_test_dataloader = [
# ((data), (target))
# for i, (data, target) in enumerate(test_loader)
# if i < n_train_items / args.batch_size
# ]
test_dataloader = torch.utils.data.DataLoader(test_MNIST, batch_size=args.test_batch_size)
# +
# from PIL import Image
# import numpy
# #mnist_dataset.__getitem__(2)[1]
# a = (mnist_dataset.__getitem__(0)[0]).numpy()
# a.dtype = 'uint8'
# print(a)
# Image.fromarray(a[0], mode= 'P')
# -
class Net(nn.Module):
    """LeNet-style CNN for 28x28x1 MNIST digits, 10 output classes."""

    def __init__(self):
        super(Net, self).__init__()
        # two conv blocks: 1 -> 20 -> 50 channels, 5x5 kernels, stride 1
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        # after two conv+pool stages the feature map is 4x4x50
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        out = out.view(-1, 4*4*50)
        out = F.relu(self.fc1(out))
        logits = self.fc2(out)
        # log-probabilities; paired with F.nll_loss in the training code
        return F.log_softmax(logits, dim=1)
def model_init(workers, Net):
    """Return one model reference per worker.

    NOTE(review): every list entry references the SAME object that was
    passed in, not a copy -- all workers share one model instance.
    Confirm this is intended before relying on per-worker aggregation.
    """
    return [Net for _ in workers]
def opt_init(model_list):
    """Create one SGD optimizer per model, using the shared args.lr."""
    return [optim.SGD(model.parameters(), lr=args.lr) for model in model_list]
# +
def train(args, device, less_train_dataloader, opt_list, epoch, workers):
    """One federated epoch: each batch is trained on worker batch_idx % n,
    then all models in the global `model_list` are averaged in place.
    """
    global model_list
    ## start training and record the model into model_list
    for batch_idx, (data, target) in enumerate(less_train_dataloader): # <-- now it is a distributed dataset
        # round-robin assignment of batches to workers
        model_on_worker = model_list[batch_idx%len(workers)]
        model_on_worker.train()
        model_on_worker.send(workers[batch_idx%len(workers)]) # <-- NEW: send the model to the right location
        data_on_worker = data.send(workers[batch_idx%len(workers)])
        target_on_worker = target.send(workers[batch_idx%len(workers)])
        data_on_worker, target_on_worker = data_on_worker.to(device), target_on_worker.to(device)
        opt_list[batch_idx%len(workers)].zero_grad()
        output = model_on_worker(data_on_worker)
        loss = F.nll_loss(output, target_on_worker)
        loss.backward()
        opt_list[batch_idx%len(workers)].step()
        model_on_worker.get() # <-- NEW: get the model back
        model_list[batch_idx%len(workers)] = model_on_worker #When len(dataloader) is longer than the len(worker) send and get must be modified
        #model_list here is full of the model which has trained on the workers, there are all different now.
        if batch_idx % args.log_interval == 0:
            loss = loss.get() # <-- NEW: get the loss back
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, len(less_train_dataloader) * args.batch_size,
                100. * batch_idx / len(less_train_dataloader), loss.item()))
    ##Aggregation time
    # Sum every parameter tensor across the worker models, then write the
    # average back into each model with set_().
    new_model = []
    tmp_model = Net().to(device)  # NOTE(review): tmp_model is created but never used
    with torch.no_grad():
        for p in model_list[0].parameters():
            new_model.append(0)  # one accumulator slot per parameter tensor
        for m in model_list:
            for par_idx, par in enumerate(m.parameters()):
                #average the model_list
                new_model[par_idx] = new_model[par_idx]+par.data
        # we get new model in list format and need to set_ to model
        for worker in range(len(workers)):
            for par_idx in range(len(new_model)):
                list(model_list[worker].parameters())[par_idx].set_(new_model[par_idx]/len(workers))
        #init model with new_model
# -
def test(args, model, device, test_loader):
    # Evaluate *model* on the test loader: mean NLL loss + accuracy.
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.argmax(1, keepdim=True) # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    # NOTE(review): len(test_loader)*batch_size over-counts samples when the
    # last batch is partial -- confirm the dataset size is a batch multiple.
    test_loss /= len(test_loader)*(args.batch_size)
    #Since the test loader here is a list, we can get the len by * it with batch.size
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader)* (args.batch_size),
        100. * correct / (len(test_loader)*args.batch_size)))
# +
# %%time
#optimizer = optim.SGD(model.parameters(), lr=args.lr) # TODO momentum is not supported at the moment
# Run federated training: one model reference per worker, aggregated every epoch.
model_list = model_init(workers, Net().to(device))
opt_list = opt_init(model_list)
# not finish in train, finish latter
pars = [list(model.parameters()) for model in model_list]
for epoch in range(1, args.epochs + 1):
    train(args, device, less_train_dataloader, opt_list, epoch, workers)
    print("After training")
    # after aggregation every entry holds the averaged weights, so
    # evaluating the first model is representative
    test(args, model_list[0], device, test_dataloader)
if (args.save_model):
    # fix: `model` was undefined at this point (NameError when
    # save_model=True); save the aggregated first model instead
    torch.save(model_list[0].state_dict(), "mnist_cnn.pt")
# +
# Scratch cell: experiment with sending a model/tensor to a virtual worker.
model = Net()
tttt = sy.VirtualWorker(hook, id="tttt")
tttt.clear_objects()
model = model.send(tttt)
# NOTE(review): t is float64 with shape [1, 28, 28]; the conv net expects a
# float32 batch shaped [N, 1, 28, 28] -- confirm this cell is expected to fail.
t = torch.ones([1,28,28], dtype=torch.float64)
t_on_worker = t.send(tttt)
print((t_on_worker))
# NOTE(review): the LOCAL tensor `t` is passed here, not `t_on_worker`,
# even though the model now lives on the remote worker.
model(t)
# print(workers[0].current_objects())
# +
# Scratch cell: copy the model to the first three workers, run a forward
# pass remotely, then move each copy to an aggregator worker.
model = Net()
model_list = []
model_ondevice = []
aggregater = sy.VirtualWorker(hook, id="aggregater")
for batch_idx, (data, target) in enumerate(less_train_dataloader):
    data_on_device = data.send(workers[batch_idx])
    if batch_idx<3:
        model_ondevice.append(model.copy().send(workers[batch_idx]))
        print(model_ondevice[batch_idx].location)
        pre = model_ondevice[batch_idx](data_on_device)
        model_ondevice[batch_idx].move(aggregater)
# NOTE(review): `model` itself was never sent, and Net defines no `.weight`
# attribute (only conv1/conv2/fc1/fc2) -- these two lines likely fail; confirm.
print(model.location)
print(model.weight.data)
# -
# --- scratch cell: copy model parameters, double them, load into a fresh Net ---
with torch.no_grad():
    print(model_list[0].parameters())
model.parameters()
model.send(workers[0])  # fix: was misspelled `model.sned(...)`
# fix: the old code appended to `test`, which is the test() *function*
# defined above (AttributeError); collect parameters in a fresh list.
param_tensors = []
for i in model.parameters():
    param_tensors.append(i.data)
new_test = []
for i in param_tensors:
    new_test.append(i + i)  # doubled copy of every parameter tensor
tmp_model = Net()
with torch.no_grad():
    for i, par in enumerate(tmp_model.parameters()):
        par.set_(new_test[i])  # load the doubled parameters in place
for i in tmp_model.parameters():
    print(i)
x = list(torch.tensor((1, 2)))
for i in x:
    i + i
model.fc2.weight.data.numpy()
model.fc2.bias.set_
|
shuffle.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %matplotlib inline
import numpy as np
from matplotlib import rc
import deepdish as dd
# use a 20pt serif font for every matplotlib figure
font = {'family' : 'serif',
        'weight' : 'normal',
        'size' : 20}
rc('font', **font)
#rc('text',usetex=True)
import matplotlib.pyplot as plt
import h5py
# +
all_n = np.arange(-10, 2.5 ,0.5) # we cut off n = 2.5, 3, 3.5, 4 because Cloudy
def get_index_number(n, z = None, z_index = None):
    # Map a density n and either a redshift z or a redshift bin z_index to
    # (density index, redshift index, 1-based Cloudy run number).
    # NOTE(review): the first all_n assignment below is immediately
    # overwritten by the second, so the wider grid (up to n=3.5) is used.
    all_n = np.arange(-10, 2.5 , 0.5) # we cut off n = 2.5, 3, 3.5, 4 because Cloudy
    all_n = np.arange(-10, 4.0 , 0.5)
    # redshift grid of the Cloudy runs
    all_z = np.array([0.0000e+00, 1.2202e-01, 2.5893e-01, 4.1254e-01, 5.8489e-01, 7.7828e-01, 9.9526e-01, 1.2387e+00,
                      1.5119e+00, 1.8184e+00, 2.1623e+00, 2.5481e+00, 2.9811e+00, 3.4668e+00, 4.0119e+00, 4.6234e+00,
                      5.3096e+00, 6.0795e+00, 6.9433e+00, 7.9125e+00, 9.0000e+00, 1.0220e+01, 1.1589e+01, 1.3125e+01,
                      1.4849e+01])
    index1 = np.argmin( np.abs(all_n - n))  # nearest density bin
    if z_index is None and z is None:
        index2 = 0 # redshift zero
    elif z_index is None:
        index2 = np.argmin( np.abs(all_z - z))  # nearest redshift bin
    elif z is None:
        if z_index < 0:
            z_index = np.size(all_z) + z_index  # allow from-the-end indexing
        index2 = z_index
    else:
        print "Cannot provide both a redshift value and redshift bin value"
        raise ValueError
    # run numbering is 1-based and iterates redshift fastest
    run_num = (index1)*len(all_z) + (index2 + 1)
    return index1, index2, run_num
#j = 0
#for i in np.arange(1, 626,1):
#
# n_z_list[i-1] = (all_n[j], all_z[(i-1) % (np.size(all_z))])
# j = j + 1
# if j >= np.size(all_n):
# j = 0
#n_z_dict = {}
#
#i = 1
#for t in n_z_list:
# n_z_dict[t] = i
# i = i + 1
#i = 1
#n_z_dict_2 = {}
#for t in n_z_list:
# n_z_dict_2[(t[0], (i-1)%(np.size(all_z)))] = i
# i = i + 1
# -
# +
# Pick which Cloudy cooling-table variant to load; the 'thin'/'shielded'
# variants are opened as raw HDF5, the other two were saved with deepdish.
data_type = 'thin'
if data_type == 'thin':
    data = h5py.File('./CloudyData_UVB=HM2012.h5')
elif data_type == 'shielded':
    data = h5py.File('./CloudyData_UVB=HM2012_shielded.h5')
elif data_type == 'shielded_cooling_only':
    data = dd.io.load('./CloudyData_UVB=HM2012_shielded_cooling_only.h5')
elif data_type == 'noUVB':
    data = dd.io.load('./CloudyData_noUVB.h5')
#data = h5py.File('./CloudyData_UVB=HM2012_shielded_cooling_only.h5')
#data = dd.io.load('./CloudyData_UVB=HM2012_shielded.h5')
#data = h5py.File('CloudyData_UVB=HM2012.h5')
#data = h5py.File('CloudyData_noUVB.h5')
# +
n = -2    # log10 number density (used for labels below)
Z = 0.01  # metallicity in solar units; scales the metal cooling/heating terms
# +
# Pull the cooling/heating rate tables out of the loaded file.  Heating is
# absent from some table variants (e.g. the cooling-only file), so fall back
# gracefully when the key is missing.
metal_cool = data['CoolingRates']['Metals']['Cooling']
try:
    metal_heat = data['CoolingRates']['Metals']['Heating']
except KeyError:
    print('failed to load heating')
prim_cool = data['CoolingRates']['Primordial']['Cooling']
try:
    prim_heat = data['CoolingRates']['Primordial']['Heating']
except KeyError:
    print('failed to load heating')
# -
# +
#print T
# -
#
#
# Choose your n and z here
#
#
plot_n = [-2, 0, 2]  # log10 densities, one per panel
z = None             # redshift value (None -> use z_index instead)
z_index = -1         # -1 selects the last redshift bin in the table
# +
# Temperature grid of the tables: 161 points, log-spaced 10^1..10^9 K,
# plotted as log10 T.
T = np.logspace(1, 9, 161)
T = np.log10(T)
fig, ax = plt.subplots(1, 3)
for i in [0, 1, 2]:
    n = plot_n[i]
    print(n, z, z_index)
    index1, index2, run_num = get_index_number(n, z = z, z_index = z_index)
    try:
        # NOTE(review): this overrides the redshift bin chosen above via
        # z_index and always plots redshift bin 0 -- looks like a debugging
        # leftover; confirm which redshift is actually intended.
        index2 = 0
        total_cool = metal_cool[index1][index2] * Z + prim_cool[index1][index2]
        #print prim_cool[index1][index2]
        total_heat = metal_heat[index1][index2] * Z + prim_heat[index1][index2]
        net = total_cool - total_heat
    except Exception:
        # heating tables may be missing (see load cell): plot cooling only
        total_cool = metal_cool[index1] * Z + prim_cool[index1]
        print('failed to compute heating and net ')
    ax[i].plot(T, np.log10(total_cool), lw = 3, color = 'black', ls = ':', label = 'cooling')
    try:
        ax[i].plot(T, np.log10(total_heat), lw = 3, color = 'black', ls = '--', label = 'heating')
    except Exception:
        print('failed to plot heating')
    try:
        ax[i].plot(T, np.log10(np.abs(net)), lw = 3, color = 'black', ls = '-', label = '|cooling - heating|')
    except Exception:
        print('failed to plot cooling - heating')
    if i == 0:
        ax[i].legend(loc='best')
    ax[i].set_xlabel('T (K)')
    ax[i].set_ylabel(r'$log(\Lambda$ [(erg cm$^{3}$ s$^{-1}$)])')
    ax[i].set_title(r'n = %.3f - Z = %0.2f Z$_{\odot}$'%(10**n,Z))
    ax[i].xaxis.set_ticks(np.arange(1, 9.1, 1))
    ax[i].set_xlim(1, 9)
    ax[i].set_ylim(-30, -21)
fig.set_size_inches(24, 8)
plt.tight_layout()
plt.show()
# output filename matches the table variant loaded above
if data_type == 'thin':
    fig.savefig('cooling_rates_opticallythin.png')
elif data_type == 'shielded':
    fig.savefig('cooling_rates_shielded.png')
elif data_type == 'shielded_cooling_only':
    fig.savefig('cooling_rates_shielded_cooling_only.png')
elif data_type == 'noUVB':
    fig.savefig('cooling_rates_noUVB.png')
# -
# +
# Load both table variants side by side for a direct comparison.
# Open read-only: these cells only read the rates.
thin = h5py.File('./CloudyData_UVB=HM2012.h5', 'r')
shield = h5py.File('./CloudyData_UVB=HM2012_shielded.h5', 'r')
thin_metal_c = thin['CoolingRates']['Metals']['Cooling']
thin_metal_h = thin['CoolingRates']['Metals']['Heating']
shield_metal_c = shield['CoolingRates']['Metals']['Cooling']
shield_primordial_c = shield['CoolingRates']['Primordial']['Cooling']
shield_metal_h = shield['CoolingRates']['Metals']['Heating']
shield_primordial_h = shield['CoolingRates']['Primordial']['Heating']
# -
# +
#Z =
# Densities for the six panels (log10 n) and the metallicity used to scale
# the metal contribution to the rates.
plot_n = [-2, -1, 0, 1, 2, 3]
Z = 0.1
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
print(Z)
axis_tuple = [(0,0),(0,1),(0,2),(1,0),(1,1),(1,2)]  # panel order, row-major
z = 0.0        # redshift value to plot
z_index = None
def _plot_data(ax, x, y, color, label):
if np.size(y[y<0]) > 0:
ax.plot(x[y<0], np.log10(np.abs(y[y<0])), lw = 3, color = color, ls = '--')
ax.plot(x[y>0], np.log10(np.abs(y[y>0])), lw = 3, color = color, ls = '-', label = label)
# fill in the gaps between the two regions if there are any - don't attempt if there are multiple
test = np.where(y<0)[0]
if all( test[1:] == (test[:-1] + 1)):
heat_max_index = np.argmax(y[y<0])
cool_min_index = np.argmin(y[y>0]) + np.size(y[y<0]) + 1
ax.plot(x[heat_max_index:cool_min_index], np.log10(np.abs(y[heat_max_index:cool_min_index])),
lw = 3, color = color, ls = '-')
else:
print 'cannot plot the separate lines'
else:
ax.plot(x, np.log10(np.abs(y)), lw = 3, color = color, ls = '-', label = label)
return
for i in [0,1,2,3,4,5]:
    n = plot_n[i]
    index1, index2, run_num = get_index_number(n, z = z, z_index = z_index)
    #
    # Plot Forbes et al. type cooling
    # this should be shielded primordial + optically thin metal
    #
    total_cool = thin_metal_c[index1][index2] * Z + shield_primordial_c[index1][index2]
    total_heat = thin_metal_h[index1][index2] * Z + shield_primordial_h[index1][index2]
    net = total_cool - total_heat
    # from here on `i` is the (row, col) tuple indexing the axis grid
    i = axis_tuple[i]
    _plot_data(ax[i], T, net, 'orange', 'Inconsistent Model')
    # fully self-consistent model: shielded metal + shielded primordial
    total_cool = shield_metal_c[index1][index2] * Z + shield_primordial_c[index1][index2]
    total_heat = shield_metal_h[index1][index2] * Z + shield_primordial_h[index1][index2]
    net = total_cool - total_heat
    _plot_data(ax[i], T, net, 'black', 'Consistent Metal Cooling Rates')
    # BUG FIX: `i` is a (row, col) tuple at this point, so the previous
    # test `i == 0` was never true and the legend was never drawn.
    if i == (0, 0):
        ax[i].legend(loc='best')
    # ax[i].set_title(r'n = %.3f - Z = %0.2f Z$_{\odot}$'%(10**n,Z))
    # annotate each panel with its density instead of a title
    x_ann = 1.2
    y_ann = -21.5
    ax[i].annotate(r'n = %.3f'%(10**n), xy=(x_ann,y_ann),xytext=(x_ann,y_ann))
    ax[i].xaxis.set_ticks(np.arange(1,9.1,1))
    ax[i].set_xlim(1,9)
    ax[i].set_ylim(-30, -21)
# shared axis labels: x on the bottom row, y on the left column
xlabel = r'log(T [K])'
ax[(1,0)].set_xlabel(xlabel)
ax[(1,1)].set_xlabel(xlabel)
ax[(1,2)].set_xlabel(xlabel)
ylabel = r'log($\Lambda$ [(erg cm$^{3}$ s$^{-1}$])'
ax[(0,0)].set_ylabel(ylabel)
ax[(1,0)].set_ylabel(ylabel)
plt.minorticks_on()
fig.set_size_inches(24,16)
plt.tight_layout()
plt.show()
fig.savefig('cooling_model_comparison')
# -
|
grackle/cooling_curve.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Day 23 - Concurrency
#
# * https://adventofcode.com/2019/day/23
#
# We get to wire up 50 Intcode CPUs today. Clearly we need queues here! I could re-tool my Intcode module to use asyncio coroutines throughout, but we can also use threads. I'll use that here, via [`concurrent.futures`](https://docs.python.org/3/library/concurrent.futures.html) to provide a ready-made threadpool, and the standard [`queue` module](https://docs.python.org/3/library/queue.html) to provide the 'network layer', which is simply a mapping with 50 queues (so I don't have to special-case address 255).
#
# We need a way of shutting down the CPUs, however. I'm going to use special queue value for this, a sentinel that when received simply raises a halt exception. The simplest sentinel value in Python is the `None` object, which also makes it easy to type hint.
# +
from __future__ import annotations
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from queue import Empty, SimpleQueue
from typing import (
ContextManager,
Generator,
Iterator,
List,
Mapping,
NamedTuple,
Optional,
TYPE_CHECKING
)
from intcode import CPU, Halt, Instruction, InstructionSet, base_opcodes
NETWORK_SIZE = 50
class Packet(NamedTuple):
    """A single network message: an (x, y) pair of Intcode values."""
    x: int
    y: int
class Network(ContextManager["Network"]):
    """The message fabric: one inbound queue per NIC address, plus the
    special broadcast queue at address 255.

    Used as a context manager; leaving the context drops a ``None``
    sentinel onto every queue, telling each reader to halt.
    """

    _queues: Mapping[int, SimpleQueue[Optional[Packet]]]

    def __init__(self):
        queues = {}
        for addr in range(NETWORK_SIZE):
            queues[addr] = SimpleQueue()
        self.broadcast = queues[255] = SimpleQueue()
        self._queues = queues

    def __exit__(self, *exc) -> None:
        # shutdown: every reader receives the None sentinel
        for inbox in self._queues.values():
            inbox.put(None)

    def __getitem__(self, addr: int) -> SimpleQueue[Optional[Packet]]:
        return self._queues[addr]

    def reader(self, addr: int) -> Iterator[int]:
        """Yield the incoming value stream for *addr*: x then y per packet,
        -1 whenever the inbox stays empty for 100ms, Halt on shutdown."""
        inbox = self._queues[addr]
        while True:
            try:
                received = inbox.get(timeout=0.1)
            except Empty:
                # nothing waiting; the CPU polls with -1
                yield -1
            else:
                if received is None:
                    raise Halt
                yield received.x
                yield received.y
NetworkDriver = Generator[None, int, None]  # accepts ints via send(), yields None


class NetworkCard(ContextManager[NetworkDriver]):
    """Output side of one CPU's network interface.

    Entering the context yields a primed generator (the "driver"); the
    CPU's output instruction feeds it values one at a time via ``send``.
    The driver groups every three values into (dest, x, y) and delivers
    Packet(x, y) to the destination queue.
    """

    address: int  # this card's network address
    network: Network  # shared message fabric
    _runner: Optional[NetworkDriver] = None  # live driver, or None when powered down

    def __init__(self, address: int, network: Network) -> None:
        self.address = address
        self.network = network

    def powerdown(self) -> None:
        # Close the driver generator (if any) so it cannot be reused.
        if self._runner is not None:
            self._runner.close()
            self._runner = None

    def __enter__(self) -> NetworkDriver:
        if self._runner is None:
            self._runner = self.run()
            # prime the runner, so it is waiting for input
            next(self._runner)
        return self._runner

    def __exit__(self, *exc) -> None:
        self.powerdown()

    def run(self) -> NetworkDriver:
        """Driver coroutine: consume (dest, x, y) triples and route packets."""
        network = self.network
        while True:
            # the three yields below receive, in order, the destination
            # address and then the packet's x and y values
            dest = yield None
            packet = Packet((yield None), (yield None))
            try:
                network[dest].put(packet)
            except KeyError:
                # unknown network destination
                pass

    def receive(self) -> Iterator[int]:
        """Input stream for the CPU: first its own address, then its inbox."""
        yield self.address
        yield from self.network.reader(self.address)
def networked_intcode(memory: List[int], address: int, network: Network) -> None:
    """Run one Intcode CPU wired to the network at the given address.

    Opcode 3 (input) reads from the card's receive stream (the address
    first, then packet values / -1 polls); opcode 4 (output) feeds the
    card's driver coroutine.  Runs until the reader raises Halt.
    """
    networkcard = NetworkCard(address, network)
    with networkcard as driver:
        opcodes: InstructionSet = {
            **base_opcodes,
            # input: pull the next value from this card's receive stream
            3: Instruction(partial(next, networkcard.receive()), output=True),
            # output: push the value into the driver coroutine
            4: Instruction(driver.send, 1),
        }
        CPU(opcodes).reset(memory).execute()
def run_network(memory: List) -> int:
    """Boot all NETWORK_SIZE networked CPUs and return the y value of the
    first packet that arrives at the broadcast address (255)."""
    with ThreadPoolExecutor(max_workers=NETWORK_SIZE) as executor:
        with Network() as network:
            futures = []
            for addr in range(NETWORK_SIZE):
                futures.append(
                    executor.submit(networked_intcode, memory, addr, network)
                )
            first = network.broadcast.get(timeout=15)
            # leaving the Network context sends the halt sentinel; cancel
            # whatever has not started yet
            for future in futures:
                future.cancel()
    return first.y
# -
import aocd
# Fetch the puzzle input and parse the comma-separated Intcode program.
data = aocd.get_data(day=23, year=2019)
memory = list(map(int, data.split(',')))
print("Part 1:", run_network(memory))
# ## Part 2, concurrency and synchronisation
#
# We now need an extra thread that implements the NAT, which needs to know if the network is idle. This can be tricky with multiple threads all racing to send and receive. Note that we can't do this with the threading *Event* primitive, because checking on all 50 events still leaves room for race conditions (events we already checked could be set or cleared before we checked on all of them); we need a sort of reverse [barrier](https://en.wikipedia.org/wiki/Barrier_(computer_science)).
#
# Since I already have a `Network` class, I just used a lock and a set of flags, and a sum of flags set (easily incremented or decremented as needed). The NAT can then just check the sum, if it is 50 all CPUs are idle.
# +
from threading import Lock
class MonitoringNetwork(Network):
    """Network that additionally tracks, per address, whether each CPU is
    idle (blocked on an empty inbox), so the NAT can inspect ``idle``.

    ``idle`` is True only when all NETWORK_SIZE CPUs are idle at once; the
    flags, count and aggregate are updated under one lock so readers never
    observe a torn count.
    """

    _idle_flags: List[bool]  # per-address idle flag
    _idle_lock: Lock         # guards _idle_flags / _idle_level / idle
    _idle_level: int = 0     # number of currently-idle CPUs
    idle: bool = False       # True iff every CPU is idle

    def __init__(self):
        super().__init__()
        self._idle_flags = [False] * NETWORK_SIZE
        self._idle_lock = Lock()

    # NOTE: __exit__ and __getitem__ were previously duplicated verbatim
    # from Network; the inherited implementations are used instead.

    def reader(self, addr: int) -> Iterator[int]:
        """Like Network.reader, but flags the CPU idle while its inbox is
        empty and busy again as soon as a packet arrives."""
        queue = self._queues[addr]
        while True:
            try:
                packet = queue.get(timeout=0.1)
            except Empty:
                self._set_idle_flag(addr, True)
                yield -1
                continue
            if packet is None:
                raise Halt
            self._set_idle_flag(addr, False)
            yield packet.x
            yield packet.y

    def _set_idle_flag(self, addr: int, flag: bool) -> None:
        # Update the per-address flag and the aggregate count atomically.
        flags = self._idle_flags
        with self._idle_lock:
            if flags[addr] != flag:
                self._idle_level += 1 if flag else -1
                # use the named constant instead of a magic 50
                assert 0 <= self._idle_level <= NETWORK_SIZE
                self.idle = self._idle_level == NETWORK_SIZE
                flags[addr] = flag
def nat(network: MonitoringNetwork) -> int:
    """NAT monitor loop.

    Remembers the last packet sent to the broadcast address (255).  When
    the whole network is idle, delivers that packet to address 0; returns
    the first y value delivered twice in a row (the puzzle answer).
    """
    queue = network.broadcast
    addr_zero = network[0]
    last_received: Optional[Packet] = None
    last_sent_y: Optional[int] = None
    while True:
        if last_received is not None and network.idle:
            addr_zero.put(last_received)
            # two consecutive deliveries with the same y -> done
            if last_received.y == last_sent_y:
                return last_sent_y
            last_sent_y = last_received.y
        try:
            # short timeout so the idle check above runs regularly
            packet = queue.get(timeout=0.1)
            if packet is not None:
                last_received = packet
        except Empty:
            pass
def run_network_with_nat(memory: List) -> int:
    """Boot the NETWORK_SIZE CPUs plus the NAT thread; return the NAT's
    answer (the first y value it delivers to address 0 twice in a row)."""
    with ThreadPoolExecutor(max_workers=NETWORK_SIZE + 1) as executor:
        with MonitoringNetwork() as network:
            cpu_futures = [
                executor.submit(networked_intcode, memory, addr, network)
                for addr in range(NETWORK_SIZE)
            ]
            answer = executor.submit(nat, network).result(timeout=30)
            # leaving the network context halts the CPUs; cancel stragglers
            for future in cpu_futures:
                future.cancel()
    return answer
# -
print("Part 2:", run_network_with_nat(memory))
|
2019/Day 23.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Deface
# +
import json
import os

# Paths on the analysis machine.  img_files is a JSON mapping
# subject -> {subject_path_anat, t1w_bids_name, ...} (note: the file has
# no .json extension but contains JSON -- see the json.load below).
bids_path = '/home/connectomics/Pulpit/mounted_data/BONNA_decide_net/data/main_fmri_study'
img_files_path = "/home/connectomics/Pulpit/mounted_data/BONNA_decide_net/"\
    "data/main_fmri_study/derivatives/img_files"
with open(img_files_path, 'r') as f:
    img_files = json.load(f)
# +
# Find every subject T1w image under the BIDS root and record the paths,
# one per line, for the deface.sh helper script.
t1w_files = [
    os.path.join(dirpath, filename)
    for dirpath, dirnames, filenames in os.walk(bids_path)
    for filename in filenames
    if ('T1w.nii' in filename) and (bids_path + '/sub' in os.path.join(dirpath, filename))
]

with open('t1w_files.txt', 'w') as f:
    f.write(''.join('%s\n' % path for path in t1w_files))
# -
# Run bash script using following syntax:
#
# ```
# ./deface.sh $(< t1w_files.txt)
# ```
# ```bash
# # #!/bin/bash
# for file in $1
# do
# echo $file
# pydeface $file
# done
# ```
# Delete not-defaced files and rename
# For each subject, delete the original (identifiable) T1w image and move
# the pydeface output into its place under the original BIDS file name.
for subject in list(img_files.keys()):
    oldfile = os.path.join(
        img_files[subject]["subject_path_anat"],
        img_files[subject]["t1w_bids_name"] + ".nii.gz"
    )
    newfile = os.path.join(
        img_files[subject]["subject_path_anat"],
        img_files[subject]["t1w_bids_name"] + "_defaced.nii.gz"
    )
    os.remove(oldfile)  # drop the non-defaced original
    os.rename(newfile, oldfile)  # defaced file takes over the original name
|
fmri_preparation/dn_fp_02_deface.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (spinningup)
# language: python
# name: spinningup
# ---
from IPython.core.debugger import set_trace
import numpy as np
import pprint
import sys
# Make the course library (lib/envs) importable from this notebook's folder.
if "../" not in sys.path:
    sys.path.append("../")
from lib.envs.gridworld import GridworldEnv

pp = pprint.PrettyPrinter(indent=2)
env = GridworldEnv()
def policy_eval(policy, env, discount_factor=1.0, theta=0.00001):
    """Iterative policy evaluation.

    Repeatedly sweeps the state space, backing up the expected return of
    each state under `policy` (Sutton & Barto, eq. 4.6), until the largest
    single-state change in a sweep falls below `theta`.

    Args:
        policy: [S, A] matrix; policy[s][a] is the probability of taking
            action a in state s.
        env: environment exposing nS, nA and dynamics env.P[s][a] as a
            list of (prob, next_state, reward, done) tuples.
        discount_factor: gamma discount factor.
        theta: convergence threshold on the value-function change.

    Returns:
        Length-nS numpy vector holding the value of each state.
    """
    V = np.zeros(env.nS)
    while True:
        delta = 0
        for state in range(env.nS):
            backup = 0
            for action, action_prob in enumerate(policy[state]):
                for prob, successor, reward, done in env.P[state][action]:
                    # expected one-step return, weighted by pi(a|s) and p(s'|s,a)
                    backup += action_prob * prob * (reward + discount_factor * V[successor])
            delta = max(delta, np.abs(backup - V[state]))
            V[state] = backup
        if delta < theta:
            return np.array(V)
# Uniform random policy: equal probability over all actions in every state.
random_policy = np.ones([env.nS, env.nA]) / env.nA
v = policy_eval(random_policy, env)
random_policy.shape
random_policy[:,1]
# +
print("Value Function:")
print(v)
print("")
print("Reshaped Grid Value Function:")
print(v.reshape(env.shape))
print("")
# -
# Test: Make sure the evaluated policy is what we expected
expected_v = np.array([0, -14, -20, -22, -14, -18, -20, -20, -20, -20, -18, -14, -22, -20, -14, 0])
np.testing.assert_array_almost_equal(v, expected_v, decimal=2)
|
6_Renforcement_Learning_Gridword/1_Policy_Evaluation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DAT210x - Programming with Python for DS
# ## Module2 - Lab3
# Import and alias Pandas
import pandas as pd
# Often, you will want to load a dataset that is missing explicit header labels. You won't know if your data lacks headers or not unless you load it up and examine the headers to see if they make sense. Pandas by default reads in the first row of data as the header. If that isn't the case for your specific data set, you will lose your first data row. Be careful!
#
# Load up the `Servo.data` dataset. Examine the headers, and adjust them as necessary, if need be.
# +
# .. your code here ..
# -
# Let's try experimenting with some slicing. Create a slice that contains all entries that have a vgain equal to 5. Then print the length of (# of samples in) that slice:
# +
# .. your code here ..
# -
# Create a slice that contains all entries having a motor equal to E and screw equal to E. Then print the length of (# of samples in) that slice:
# +
# .. your code here ..
# -
# Create a slice that contains all entries having a pgain equal to 4. Use one of the various methods of finding the mean vgain value for the samples in that slice. Once you've found it, print it:
# +
# .. your code here ..
# -
# Here's a bonus activity for you. See what happens when you display the `.dtypes` property of your dataframe!
# +
# .. your code here ..
|
Module2/.ipynb_checkpoints/Module2 - Lab3-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="W7A0gXbOTIXT"
# # Identificación de sistemas
#
#
# - Contiene una colección de notebooks de [Jupyter](https://jupyter.org/) con notas y ejemplos con el contenido propuesto para el curso **Identificación de sistemas**.
#
# - Estas notas fueron escritas por [<NAME>](https://www.ecoprofe.com/JPDP).
#
# - Los notebooks contienen código en [IPython 3](http://ipython.org/).
#
# - Este repositorio es una adaptación de diversas fuentes.
#
# + [markdown] id="nsG10RMfTIXX"
# ## 0 Introducción
#
#
#
# + [markdown] id="upGGfLJsTIXY"
# ## 1 Series de tiempo
#
# - [Modelos univariantes.](01_Series_temporales.ipynb)
# - [Polinomio autorregresivo (AR).]()
# - [Modelo autorregresivo de media móvil (ARMA).]()
# - [Modelo autorregresivo integrado de media móvil (ARIMA).]()
# - [Modelo autorregresivo de media móvil con entrada exógena (ARMAX).]()
#
# + [markdown] id="8ixBEWWsTIXZ"
# ## 2 Métodos no paramétricos
#
# - [Métodos de correlación.]()
# - [Respuesta al impulso y al escalón.]()
# - [Análisis espectral.]()
#
# + [markdown] id="8G0KkVHXTIXZ"
# ## 3 Métodos paramétricos
#
# - [Mínimos Cuadrados.]()
# - [Método de mínimos cuadrados no recursivo.]()
# - [Método de mínimos cuadrados recursivo.]()
# - [Identificación de sistemas en línea (on line).]()
# - [Modelos de predicción.]()
#
|
Indice.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="xjEPg3jvOx9x" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 459} outputId="bf7126ac-8c06-4824-8bc6-fea365e61b63" executionInfo={"status": "ok", "timestamp": 1583471190815, "user_tz": -60, "elapsed": 12176, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiXIQvk1nUvkgVE9llOxDRJEtl8eR6CCR4zodHCow=s64", "userId": "15534607621166723214"}}
# !pip install --upgrade tables
# !pip install eli5
# !pip install xgboost
# !pip install hyperopt
# + id="-iLj4n75PDPq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="0d7b734e-dd9a-4d3a-f95a-c65253da6c3d" executionInfo={"status": "ok", "timestamp": 1583471293533, "user_tz": -60, "elapsed": 2815, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiXIQvk1nUvkgVE9llOxDRJEtl8eR6CCR4zodHCow=s64", "userId": "15534607621166723214"}}
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score
from hyperopt import hp, fmin, tpe, STATUS_OK
import eli5
from eli5.sklearn import PermutationImportance
# + id="bzDoymZFPpuj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="dc7bc609-f9cd-452a-fe3e-a9cd1685a2d6" executionInfo={"status": "ok", "timestamp": 1583471307281, "user_tz": -60, "elapsed": 1440, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiXIQvk1nUvkgVE9llOxDRJEtl8eR6CCR4zodHCow=s64", "userId": "15534607621166723214"}}
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car"
# + id="Pc1BznbXPqQS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8094f342-2e98-4c59-f209-0179d93895e1" executionInfo={"status": "ok", "timestamp": 1583471321854, "user_tz": -60, "elapsed": 5712, "user": {"displayName": "Micha\u0142 Ko\u0142odziejski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiXIQvk1nUvkgVE9llOxDRJEtl8eR6CCR4zodHCow=s64", "userId": "15534607621166723214"}}
# Load the car-offers dataset (HDF5) and show its dimensions.
df = pd.read_hdf('data/car.h5')
df.shape
# + [markdown] id="E6RzRLqhPz05" colab_type="text"
# #Feature Engineering
# + id="tbGz7r14PsxU" colab_type="code" colab={}
SUFFIX_CAT = '__cat'
for feat in df.columns:
if isinstance(df[feat][0], list): continue
factorized_values = df[feat].factorize()[0]
if SUFFIX_CAT in feat:
df[feat] = factorized_values
else:
df[feat + SUFFIX_CAT] = factorized_values
# + id="YJZ-HtUqQIAL" colab_type="code" colab={}
# Parse numeric features out of their string representations; 'None' -> -1.
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))  # production year
df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0]) )  # engine power, e.g. "140 KM" -> 140
df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ','')))  # displacement, e.g. "1 998 cm3" -> 1998
# + id="OlXH8UheP8hb" colab_type="code" colab={}
def run_model(model, feats):
    """Cross-validate `model` on the global `df` with the given feature
    columns, predicting price_value.

    Returns (mean, std) of the 3-fold negative-MAE scores.
    """
    features = df[feats].values
    target = df['price_value'].values
    cv_scores = cross_val_score(
        model, features, target, cv=3, scoring='neg_mean_absolute_error'
    )
    return np.mean(cv_scores), np.std(cv_scores)
# + id="Gpd_mS4pQcXb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="408b9f49-4ac0-4fde-9c80-418e9255d29e" executionInfo={"status": "ok", "timestamp": 1583471570576, "user_tz": -60, "elapsed": 12942, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiXIQvk1nUvkgVE9llOxDRJEtl8eR6CCR4zodHCow=s64", "userId": "15534607621166723214"}}
# Feature set (presumably selected via an earlier permutation-importance
# analysis -- TODO confirm against the previous notebook).
feats = ['param_napęd__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat']

# Baseline XGBoost configuration evaluated before hyperparameter search.
xgb_params = {
    'max_depth': 5,
    'n_estimators': 50,
    'learning_rate': 0.1,
    'seed': 0
}
run_model(xgb.XGBRegressor(**xgb_params), feats)
# + [markdown] id="AlJvqZ3PYTwy" colab_type="text"
# # Hyperopt
# + id="2sYRKumsQlkp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 952} outputId="bd4cd612-ed0b-422e-e0b7-0e261445f75c" executionInfo={"status": "ok", "timestamp": 1583476806269, "user_tz": -60, "elapsed": 1326702, "user": {"displayName": "Micha\u014<NAME>\u0142odziejski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiXIQvk1nUvkgVE9llOxDRJEtl8eR6CCR4zodHCow=s64", "userId": "15534607621166723214"}}
def obj_func(params):
    """Hyperopt objective: mean cross-validated MAE (lower is better)."""
    print("Training with params: ")
    print(params)
    mean_mae, score_std = run_model(xgb.XGBRegressor(**params), feats)
    # run_model returns negative MAE, so take the absolute value as the loss
    return {'loss': np.abs(mean_mae), 'status': STATUS_OK}

# space
# Search space over XGBoost hyperparameters.
xgb_reg_params = {
    'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),
    'max_depth': hp.choice('max_depth', np.arange(5, 16, 1, dtype=int)),
    'subsample': hp.quniform('subsample', 0.5, 1, 0.05),
    'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05),
    'objective': 'reg:squarederror',
    'n_estimators': 100,
    'seed': 0,
}
## run
# TPE search with 25 evaluations; `best` holds the chosen parameter values
# (hp.choice entries are reported as indices into their option lists).
best = fmin(obj_func, xgb_reg_params, algo=tpe.suggest, max_evals=25)
best
best
# + id="N7keYSPxdzrm" colab_type="code" colab={}
|
day5.ipynb
|