code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from __future__ import absolute_import, division, print_function, unicode_literals import tensorflow as tf import cv2 import numpy as np import matplotlib.pyplot as plt import os import pathlib import tensorflow_addons as tfa from tensorflow.python.client import device_lib print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU'))) physical_devices = tf.config.list_physical_devices('GPU') tf.config.set_visible_devices( physical_devices[1], 'GPU' ) # # Data Loading path_train = 'CINIC10/train/' path_valid = 'CINIC10/valid/' path_test = 'CINIC10/test/' data_dir_train = pathlib.Path(path_train) data_dir_val = pathlib.Path(path_valid) data_dir_test = pathlib.Path(path_test) list_ds_train = tf.data.Dataset.list_files(str(data_dir_train/'*/*')) list_ds_val = tf.data.Dataset.list_files(str(data_dir_val/'*/*')) list_ds_test = tf.data.Dataset.list_files(str(data_dir_test/'*/*')) CLASS_NAMES = np.array([item.name for item in data_dir_train.glob('*')]) def get_label(file_path): parts = tf.strings.split(file_path, os.path.sep) return parts[-2] == CLASS_NAMES def decode_img(img,dsize): img = tf.image.decode_png(img, channels=3) img = tf.image.convert_image_dtype(img, tf.float32) return tf.image.resize(img, [dsize[0], dsize[1]]) def process_path(file_path): size = (299,299) label = get_label(file_path) img = tf.io.read_file(file_path) img = decode_img(img,size ) return img, label AUTOTUNE = tf.data.experimental.AUTOTUNE labeled_ds_train = list_ds_train.map(process_path, num_parallel_calls=AUTOTUNE) labeled_ds_valid = list_ds_val.map(process_path, num_parallel_calls=AUTOTUNE) labeled_ds_test = list_ds_test.map(process_path, num_parallel_calls=AUTOTUNE) def normalize(x, y): x = tf.image.per_image_standardization(x) return x, y def 
prepare_for_training(ds, cache=True, shuffle_buffer_size=1000, batch_size = 128): if cache: if isinstance(cache, str): ds = ds.cache(cache) else: ds = ds.cache() ds = ds.map(normalize) ds = ds.shuffle(buffer_size=shuffle_buffer_size) ds = ds.repeat() ds = ds.batch(batch_size) ds = ds.prefetch(buffer_size=AUTOTUNE) return ds train_ds = prepare_for_training(labeled_ds_train, batch_size = 8) val_ds = prepare_for_training(labeled_ds_valid, batch_size = 8) test_ds = prepare_for_training(labeled_ds_test, batch_size = 8) # # Xception Details # Optimization Configuration used for ImageNet: # * Optimizer: RMSprop/SGD # * Momentum: 0.9 # * Initial learning rate: 0.045 # * Learning rate decay: decay of rate 0.94 every 2 epochs # # Regularization: # * Weight decay: 1e-5 # * No dropout # * No auxilairy loss # # # Model Definition class Model: def __init__(self, n_classes): self.n_classes = n_classes def initial_block(self,inputs): x = tf.keras.layers.Conv2D(32, kernel_size=3, strides=2, padding='same')(inputs) x = tf.keras.layers.BatchNormalization()(x) x = tf.nn.relu(x) x = tf.keras.layers.Conv2D(64, kernel_size=3, padding='same')(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.nn.relu(x) return x def reg_block(self,x, n_channels_c1, n_channels_c2, pre_activation = True): res = tf.keras.layers.Conv2D(n_channels_c2, kernel_size=1,strides=2, padding='same')(x) res = tf.keras.layers.BatchNormalization()(res) if pre_activation: x = tf.nn.relu(x) x = tf.keras.layers.SeparableConv2D(n_channels_c1, kernel_size=3, padding='same')(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.nn.relu(x) x = tf.keras.layers.SeparableConvolution2D(n_channels_c2, kernel_size= 3, padding='same')(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x) x = tf.keras.layers.Add()([res, x]) return x def final_block(self,x): x = tf.keras.layers.SeparableConv2D(1536, kernel_size=3, padding='same')(x) x = 
tf.keras.layers.BatchNormalization()(x) x = tf.nn.relu(x) x = tf.keras.layers.SeparableConv2D(2048, kernel_size=3, padding='same')(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.nn.relu(x) x = tf.keras.layers.GlobalAveragePooling2D()(x) x = tf.keras.layers.Flatten()(x) x = tf.keras.layers.Dropout(0.5)(x) x = tf.keras.layers.Dense(500, activation='relu')(x) x = tf.keras.layers.Dense(self.n_classes, activation='softmax')(x) return x def entry_flow(self,inputs): x = self.initial_block(inputs) x = self.reg_block(x, 128, 128, pre_activation=False) x = self.reg_block(x, 256, 256) x = self.reg_block(x, 728, 728) return x def middle_flow(self,x): for i in range(8): initial_state = x x = tf.nn.relu(x) x = tf.keras.layers.SeparableConv2D(728,kernel_size=3,padding='same')(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.nn.relu(x) x = tf.keras.layers.SeparableConv2D(728,kernel_size=3,padding='same')(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.nn.relu(x) x = tf.keras.layers.SeparableConv2D(728,kernel_size=3,padding='same')(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Add()([x, initial_state]) return x def exit_flow(self,x): x = self.reg_block(x, 728,1024) x = self.final_block(x) return x def build_model(self): inputs = tf.keras.layers.Input(shape=(299,299,3)) x = self.entry_flow(inputs) x = self.middle_flow(x) x = self.exit_flow(x) model = tf.keras.Model(inputs, x) return model model_ = Model(10) model = model_.build_model() model.summary() # # Training def step_decay(epoch): initial_lrate = 0.045 drop = 0.94 epochs_drop = 2 lrate = initial_lrate * tf.math.pow(drop,tf.math.floor((1+epoch)/epochs_drop)) return lrate lrate = tf.keras.callbacks.LearningRateScheduler(step_decay) class LossHistory(tf.keras.callbacks.Callback): def on_train_begin(self, logs={}): self.losses = [] self.lr = [] def on_epoch_end(self, batch, logs={}): self.losses.append(logs.get('loss')) self.lr.append(step_decay(len(self.losses))) loss_history = LossHistory() 
# The LearningRateScheduler was already constructed above; reuse it instead of
# building a duplicate instance.
callbacks_list = [loss_history, lrate]

model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit(train_ds,
                    epochs=74,
                    # callbacks=callbacks_list,  # enable to apply the step-decay schedule
                    steps_per_epoch=100,
                    verbose=1,
                    validation_data=val_ds,
                    validation_steps=100)

# # Evaluation

# test_ds repeats forever, so evaluation must be bounded with `steps`.
loss_test, accuracy_test = model.evaluate(test_ds, verbose=1, steps=5)
loss_test, accuracy_test

# # Training results visualization

plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Loss Curve')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
CNNs/1-Image_Classification/6_Xception.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Dataset URL https://data.medicare.gov/data/dialysisfacility-compare
#
# ![medicare_gov_dialysis.jpeg](attachment:medicare_gov_dialysis.jpeg)

# +
# Let us import the files as pandas dataframe

# +
import pandas as pd

df = pd.read_csv('./Datasets/Input2/ESRD_QIP_-_Complete_QIP_Data_-_Payment_Year_2018.csv', header=0)
# df = spark.read.csv("/FileStore/tables/ESRD_QIP_Payment_Year_2018.csv", header="true", inferSchema="true")
df.head(2)
# -

# shape is a (rows, columns) tuple
print('Number of rows: ' + str(df.shape[0]))
print('Number of columns: ' + str(df.shape[1]))

df.head()

print(df.head(n=5))  # n=5 is the number of rows to be displayed

print(df.columns)

# Pandas abbreviates wide output, so loop to see every column name.
for column in df.columns:
    print(column)

# Count dialysis facilities in each state.
df_states = df.groupby('State').size()
print(df_states)

# Same counts, sorted in descending order.
df_states = df.groupby('State').size().sort_values(ascending=False)
print(df_states)

# Sort in descending order & limit output to the top 10 states.
df_states = df.groupby('State').size().sort_values(ascending=False).head(n=10)
print(df_states)

# # Observation: Califormia (CA) has the highest number of dialysis centers

# Filter centers by State equals California.
df_ca = df.loc[df['State'] == 'CA']
print(df_ca)

# Get a count on the different scores received by centers.
print(df.groupby('Total Performance Score').size())

# # Observations: We note that there are 276 rows with no values for Total performance score.
# # Column Performance score is a string rather than an integer

# Work on an explicit copy so the numeric conversion below does not trigger
# pandas' SettingWithCopyWarning.  The original suppressed ALL warnings with
# warnings.filterwarnings('ignore') instead of fixing the chained assignment.
df_filt = df.loc[df['Total Performance Score'] != 'No Score'].copy()
df_filt['Total Performance Score'] = pd.to_numeric(df_filt['Total Performance Score'])

# Convert and display 5 results
df_tps = df_filt[['Facility Name', 'State', 'Total Performance Score']].sort_values('Total Performance Score')
print(df_tps.head(n=5))

# +
import numpy as np

# Mean performance score per state.  String aggregation names replace the
# deprecated use of np.mean / np.size callables inside DataFrame.agg.
df_state_means = df_filt.groupby('State').agg({'Total Performance Score': 'mean'})
print(df_state_means.sort_values('Total Performance Score', ascending=False))
# -

# # Observations: IDAHO and WYOMING are the best performing dialysis center

# +
# Add a column to include the number of dialysis centers per state.
df_state_means = df_filt.groupby('State').agg({
    'Total Performance Score': 'mean',
    'State': 'size',
})
print(df_state_means.sort_values('Total Performance Score', ascending=False))
# -

# # Manual Validation
# ![Idaho_Dialysis.jpeg](attachment:Idaho_Dialysis.jpeg)
3 Compare dialysis facility ESRDv1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from Paramerator import Parameters

# ## Instantiate the Class
# #### This loads the DEFAULT dict for mapping your parameter files.
p = Parameters()

# ## Load a file's parameters
# #### (Use either bit or boolean responses for Y/N)
p.loader('dat/params1.test', 'name1')

# ### The loader knew that the file was required
# ### Now lets try another file
p.loader('dat/params2.test', 'name2')

# ## Lets look at the files loader generated

# +
with open('dat/params1.test', 'r') as fh1:  # Load dat/params1.test
    contents = fh1.read()
print('dat/params1.test' + contents)

with open('dat/params2.test', 'r') as fh2:  # Load dat/params2.test
    contents = fh2.read()
print('dat/params2.test' + contents)
# -

# ## Compare to the attributes attached to params
print(p.name1, p.name2)

# ## And call the values as you would attributes using tab. (automatic lookup)
# Try it!
p.name1.Section1.par  ## Tab complete

# ## Unlike with named tuples, param values are mutable
p.name1.Section1.param3string
p.name1.Section1.param3string = "this is a new string"
p.name1.Section1.param3string

# The param keys and sections are not mutable to prevent missing params.
p.name1.Section1 = ["this", "wont", "work"]

# ## params.loader is object type aware.
p.name1.Section1.param5dict = {"Pretty Cool": "ey?"}
type(p.name1.Section1.param5dict)

p.name2.Section2.param1, type(p.name2.Section2.param1)

# ## param_writer can save your currently loaded params
# In this example we make "new.test" in our "dat" folder
p.writer('dat/new.test', p.name2)

# ## And will prompt on overwrites, asking for new file location if desired.
p.writer('dat/new.test', p.name2)

# ## You can also generate fresh copies of parameter files using generate_defaults()
# This is a work in progress
p.generate_defaults()

p.loader('dat/mongo.info', 'mongo')
p.mongo
p.mongo.Location, p.mongo.Info

# This can be useful if you are sharing a repo and need to generate files with API keys or other sensitive data.
# In the future, I intend to make use of encryption for sensitive data, but until then, be aware that the files generated and their locations are easily visible.

# ## Lastly, here is the DEFAULTS.py dict which built our param files.
p._def_dict

# ## Use your favorite text editor to create your DEFAULT.py the way you need it. Enjoy!
Paramerator Library Notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # COCO data preprocessing
#
# This code will download the caption anotations for coco and preprocess them into an hdf5 file and a json file.
#
# These will then be read by the COCO data loader in Lua and trained on.

# lets download the annotations from http://mscoco.org/dataset/#download
import os
os.system('wget http://msvocds.blob.core.windows.net/annotations-1-0-3/captions_train-val2014.zip')  # ~19MB
os.system('unzip captions_train-val2014.zip')

import json

# Use context managers so the annotation file handles are closed promptly
# (the original passed bare open(...) calls to json.load and leaked them).
with open('annotations/captions_val2014.json', 'r') as f:
    val = json.load(f)
with open('annotations/captions_train2014.json', 'r') as f:
    train = json.load(f)

val.keys()
val['info']
len(val['images'])
len(val['annotations'])
train['images'][0]
val['images'][0]
val['annotations'][:4]

# +
import json
import os

# combine all images and annotations together
imgs = val['images'] + train['images']
annots = val['annotations'] + train['annotations']

# for efficiency lets group annotations by image
itoa = {}
for a in annots:
    imgid = a['image_id']
    if imgid not in itoa:
        itoa[imgid] = []  # all captions recorded under this image id
    itoa[imgid].append(a)

# create the json blob
out = []
for i, img in enumerate(imgs):
    imgid = img['id']
    # coco specific here, they store train/val images separately
    loc = 'train2014' if 'train' in img['file_name'] else 'val2014'
    jimg = {}
    jimg['file_path'] = os.path.join(loc, img['file_name'])
    jimg['id'] = imgid
    # gather this image's captions
    sents = [a['caption'] for a in itoa[imgid]]
    jimg['captions'] = sents
    out.append(jimg)

with open('coco_raw.json', 'w') as f:
    json.dump(out, f)
# -

itoa

# lets see what they look like
out[:10]
coco/coco_preprocess.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# loading data
df = pd.read_csv(r"C:\Users\devel\Downloads\machine_learning\PROJECT ON GIT\car_price_predictor_mine\quikr_car.csv")
df
df1 = df.copy()  # untouched copy of the raw data

# checking shape
df.shape
df.info()
df.describe()
df.isnull().sum()

# Per-column frequency tables for the six columns.
for i in range(6):
    print(df.iloc[:, i:i + 1].value_counts())

# # data quality checking
#
# ### null values
# - kms_driven 52
# - fuel_type 55
# ### change it into int
# - year 892 non-null object
# - Price 892 non-null object
# - kms_driven 840 non-null object
# ### problem in columns
# - price column has ask for price value
# - names are pretty inconsistent
# - names have company names attached to it
# - some names are spam like '<NAME>iga showroom condition with' and 'Well mentained Tata Sumo'
# - company: many of the names are not of any company like 'Used', 'URJENT', and so on.
# - year has many non-year values
# - kms_driven with kms at last.
# - It has nan values and two rows have 'Petrol' in them

# # cleaning data and feature engineering

# # price column
df["Price"].value_counts()
df = df[df["Price"] != "Ask For Price"]
df.info()
df['Price'] = df['Price'].str.replace(',', '').astype(int)
df.info()

# ### name column
df["name"].value_counts()
df["name"].isnull().sum()
# Keep only the first two words of the listing name.
# NOTE(review): joining with "" fuses the two words ("MarutiSuzuki");
# presumably " " was intended — confirm before changing, since the model
# downstream was trained on these fused values.
df["name"] = df["name"].str.split().str[0:2].str.join("")
df.head()

# ### year column
df["year"].value_counts()

# +
# Keep only rows whose year is purely numeric.  A boolean mask replaces the
# original iloc/np.where detour; fillna(False) guards against NaN entries,
# which the np.where form would (incorrectly) have treated as truthy.
df = df[df["year"].str.isnumeric().fillna(False)]
# -

df["year"] = df["year"].astype(int)
df.info()
df["year"].value_counts()

# # kms_driven
df.head()
# Strip the thousands separators and the trailing " kms" suffix.
df["kms_driven"] = df["kms_driven"].str.split(",").str.join("").str.split(" ").str[0]
df[["kms_driven"]]

# +
df = df[df["kms_driven"].str.isnumeric().fillna(False)]
df["kms_driven"] = df["kms_driven"].astype(int)
# -

df.isnull().sum()

sns.boxplot(df["kms_driven"])
df["kms_driven"].describe()
sns.displot(df["kms_driven"])
plt.hist(df["kms_driven"], bins=50)

df = df.reset_index(drop=True)
df2 = df.copy()
df2

# # six columns

# # doing EDA

# +
sns.countplot(df["fuel_type"])
df["fuel_type"].value_counts()
# -

sns.pairplot(df)

# +
df = df.dropna()
df.info()
df.to_csv("cleaned.csv")
# -

# # train and test spilt dataset
x = df[["name", "company", "year", "kms_driven", "fuel_type"]]
y = df[["Price"]]

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
X_train.info()
y_train.info()

# # creating pipeline

# +
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.compose import make_column_transformer

# +
# One-hot encode name, company and fuel_type (columns 0, 1, 4).
# NOTE(review): `sparse=False` was renamed `sparse_output` in sklearn >= 1.2.
trf1 = ColumnTransformer([("gfg", OneHotEncoder(sparse=False, handle_unknown='ignore'), [0, 1, 4])],
                         remainder='passthrough')
# Scale the numeric columns year and kms_driven (columns 2, 3).
trf2 = ColumnTransformer([("vgfdf", StandardScaler(), [2, 3])], remainder='passthrough')

# train the model
trf5 = LinearRegression()
# -

# Alternate Syntax
pipe = make_pipeline(trf1, trf2, trf5)

X_train
pipe.fit(X_train, y_train)

# Predict
y_pred = pipe.predict(X_test)
y_pred

from sklearn.metrics import r2_score
r2_score(y_test, y_pred)

# Search over 1000 train/test splits for the random_state with the best R^2.
scores = []
for i in range(1000):
    X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=i)
    pipe.fit(X_train, y_train)
    y_pred = pipe.predict(X_test)
    scores.append(r2_score(y_test, y_pred))

np.argmax(scores)
scores[np.argmax(scores)]

# Refit on the best split found above.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=np.argmax(scores))
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)

import pickle
# Use a context manager so the pickle file is flushed and closed
# (the original leaked the handle from a bare open()).
with open('LinearRegressionModel.pkl', 'wb') as f:
    pickle.dump(pipe, f)
car_dta_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <center>Laboratorium 7: Współczynnik uwarunkowania oraz układy równań liniowych - metody dokładne</center> # Instrukcja: # Na zajęciach należy wykonać poniższe zadania, a następnie sporządzić sprawozdanie zawierające odpowiedzi (w postaci kodu) z komentarzami w środowisku Jupyter Notebook i umieścić je na platformie e-learningowej. # ***Temat główny:*** # # Rozwiązać układ równań: # # ### $ Ax = b$ # # gdzie: # # $\mathbf{A} =\left[ \begin{matrix} # 1 & 8 & 2\\ # -20 & 12 & 0\\ # -3 & 5 & 17 # \end{matrix}\right]$ # # $\mathbf{b} =\left[ \begin{matrix} # 44\\ # 8\\ # 99 # \end{matrix}\right] # $ # # # metodami: # * eliminacji Gaussa, # * rozkład LU # ***Zadanie 1.*** # Zaimplementować funkcje obliczające współczynnik uwarunkowania macierzy używające w tym celu normy: # * kolumnowej # * spektralnej # * wierszowej # # Wykorzystać powyższe funkcje do obliczenia współczynnika dla macierz A oraz macierzy Hilberta o wymiarach $5x5$. # # Wskazówki: # [Definicje norm](https://pl.wikipedia.org/wiki/Norma_macierzowa) # # Pakiet SciPy posiada [metody](https://docs.scipy.org/doc/numpy/reference/routines.linalg.html) pozwalające w prosty sposób wyznaczyć np. wartości własne macierzy. Dla referencji można skorzystać również z metody obliczającej uwarunkowanie macierzy - [cond](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.cond.html#numpy.linalg.cond). # ***Zadanie 2.*** # Zaimplementować metode rozwiązywania układu równań przy pomocy eliminacji Gaussa. # # Wskazówki: # Należy przekształcić układ równań i wyświetlić uzyskaną macierz trójkątną oraz # przekształcony wektor wyrazów wolnych. 
Następnie należy rozwiązać układ metodą # podstawień wstecznych # ***Zadanie 3.*** # Zaimplementować metodę rozwiązywania układu równań przy pomocy rozkładu LU. # # Wskazówki: # Należy rozłożyć macierz A na iloczyn macierzy LU, wyświetlić te macierze i # sprawdzić czy faktycznie ich iloczyn = A. Następnie należy zapisać wyjściowy układ # równań Ax=b w postaci dwóch układów równań (z macierzami trójkątnymi), a potem # rozwiązać te układy odpowiednio metodą podstawień oraz podstawień wstecznych. # ***Zadanie 4.*** # Porównać działanie obu metod pod względem wyników i czasu działania. # ***Zadanie 5.*** # Wykorzystać [funkcje](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.solve.html#numpy.linalg.solve) # pakietu scipy do rozwiązania zadania i porównać z własną implementacją.
Metody Numeryczne 2019/Lab 8/lab8.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from dateutil.parser import parse
import missingno as msno
import seaborn as sns

# Weekend measurement data, resampled to a 10-minute grid.
weekend_data = pd.read_csv('../data/weekends.csv', index_col=0, parse_dates=True)
weekend_data = weekend_data.resample("600S").interpolate(method='time')
# weekend_data

# +
# The E+ column names are always the same.
nombres = np.genfromtxt('../idf/cubiculo_original/DosPersonas/c001/c001.csv',
                        dtype='U', delimiter=',', max_rows=1)
nombres
for i, nombre in enumerate(nombres):
    print(i, nombre)
# -


# Function to import EnergyPlus output data.
def importa(file, caso):
    """Load an E+ CSV and split it into a weekend frame (indoor temperature
    only) and a weekday frame (PMV/PPD only), indexed by 2019 timestamps."""
    nombres = ['tiempo', f'Ti_{caso}', f'PMV_{caso}', f'PPD_{caso}']
    simulate = pd.read_csv(file, names=nombres, skiprows=1)
    # E+ writes '24:00:00' for midnight, which pandas cannot parse.
    simulate.tiempo = simulate.tiempo.str.replace('24:00:00', '23:59:59')
    simulate.tiempo = '2019-' + simulate.tiempo
    simulate.tiempo = pd.to_datetime(simulate.tiempo, format='%Y- %m/%d %H:%M:%S')
    simulate.tiempo = simulate.tiempo - pd.Timedelta('10minute')
    simulate.set_index('tiempo', inplace=True)
    # Optional day-of-week column kept from the original for reference:
    # simulate['dia'] = simulate.index.day_of_week
    # .copy() before the in-place drop avoids pandas' SettingWithCopyWarning
    # (the original dropped columns in place on a boolean-mask slice).
    weekend_simu = simulate[simulate.index.day_of_week >= 5].copy()
    weekend_simu.drop([f'PMV_{caso}', f'PPD_{caso}'], axis='columns', inplace=True)
    weekday_simu = simulate[simulate.index.day_of_week < 5].copy()
    weekday_simu.drop([f'Ti_{caso}'], axis='columns', inplace=True)
    return weekend_simu, weekday_simu


# Simulations.
c0001we, c0001wd = importa('../idf/cubiculo_original/DosPersonas/c0001/c0001.csv', '0001')
c001we, c001wd = importa('../idf/cubiculo_original/DosPersonas/c001/c001.csv', '001')
c01we, c01wd = importa('../idf/cubiculo_original/DosPersonas/c01/c01.csv', '01')
c003we, c003wd = importa('../idf/cubiculo_original/DosPersonas/c003/c003.csv', '003')

# Bioclimatic strategy (EB) simulations.
c0001web, c0001wdb = importa('../idf/cubiculo_original/DosPersonas/EB/c0001/c0001EB.csv', '0001EB')
c001web, c001wdb = importa('../idf/cubiculo_original/DosPersonas/EB/c001/c001EB.csv', '001EB')
c01web, c01wdb = importa('../idf/cubiculo_original/DosPersonas/EB/c01/c01EB.csv', '01EB')
c003web, c003wdb = importa('../idf/cubiculo_original/DosPersonas/EB/c003/c003EB.csv', '003EB')

data = pd.concat([weekend_data, c0001we, c001we, c01we, c003we], axis=1)
# data
comfort = pd.concat([c0001wd, c001wd, c01wd, c003wd], axis=1)
eb = pd.concat([c0001wdb, c001wdb, c01wdb, c003wdb], axis=1)


def plot_fin_de_semana(fecha_ini, titulo, prefijo, archivo=None):
    """Scatter measured vs. simulated indoor temperature over one weekend.

    `prefijo` is the legend prefix ('Simul' or 'Simu', kept as in the original
    figures); `archivo`, when given, is where the figure is saved.
    """
    fig, ax = plt.subplots(figsize=(12, 4))
    fecha1 = parse(fecha_ini)
    fecha2 = fecha1 + pd.Timedelta('2D')
    ax.set_xlim(fecha1, fecha2)
    ax.scatter(data.index, data.TCAire, marker='.', label='Medición')
    # Same series order as the original cells: 0.001, 0.01, 0.1, 0.03 kg/s.
    for caso, flujo in [('0001', '0.001'), ('001', '0.01'), ('01', '0.1'), ('003', '0.03')]:
        ax.scatter(data.index, data[f'Ti_{caso}'], marker='.',
                   label=f'{prefijo} con c={flujo} kg/s')
    ax.set_title(titulo)
    ax.set_xlabel('tiempo [mm-dd hh]')
    ax.set_ylabel('T [°C]')
    ax.legend()
    ax.grid()
    if archivo:
        fig.savefig(archivo, dpi=100, bbox_inches=None)


plot_fin_de_semana('2019-06-08', 'Fin de semana 2019-06-08 (Dos personas)', 'Simul')
plot_fin_de_semana('2019-06-15', 'Fin de semana 2019-06-15 (Dos persona)', 'Simul',
                   '../figs/FinSemana0615_dos.png')
plot_fin_de_semana('2019-06-22', 'Fin de semana 2019-06-22 (Dos persona)', 'Simu',
                   '../figs/FinSemana0622_dos.png')

# +
# PPD of every case on the hottest day.
fig, ax = plt.subplots(figsize=(12, 4))
fecha1 = parse('2019-05-31 09:00:00')
fecha2 = fecha1 + pd.Timedelta('9H')
ax.set_xlim(fecha1, fecha2)
ax.plot(comfort.index, comfort.PPD_003, label='PPD con c=0.03 kg/s')
ax.plot(comfort.index, comfort.PPD_001, label='PPD con c=0.01 kg/s')
ax.plot(comfort.index, comfort.PPD_01, label='PPD con c=0.1 kg/s')
ax.plot(comfort.index, comfort.PPD_0001, label='PPD con c=0.001 kg/s')
ax.set_title('PPD en el día más cálido (Dos personas) ')
ax.set_xlabel('tiempo [mm-dd hh]')
ax.set_ylabel('%')
ax.legend()
ax.grid()
fig.savefig('../figs/PPD_dos.png', dpi=100, bbox_inches=None)
# -


def plot_ppd_eb(caso, titulo, archivo):
    """Compare simulated PPD with and without the bioclimatic strategy (EB)
    on the hottest day, saving the figure to `archivo`."""
    fig, ax = plt.subplots(figsize=(12, 4))
    fecha1 = parse('2019-05-31 09:00:00')
    fecha2 = fecha1 + pd.Timedelta('9H')
    ax.set_xlim(fecha1, fecha2)
    ax.plot(comfort.index, comfort[f'PPD_{caso}'], label='PPD sin EB')
    ax.plot(eb.index, eb[f'PPD_{caso}EB'], label='PPD con EB')
    ax.set_title(titulo)
    ax.set_xlabel('tiempo [mm-dd hh]')
    ax.set_ylabel('%')
    ax.legend()
    ax.grid()
    fig.savefig(archivo, dpi=100, bbox_inches=None)


# Titles reproduced verbatim from the original figures.
plot_ppd_eb('003', 'PPD para c=0.03 kg/s (Dos personas) ', '../figs/PPD003_dos.png')
plot_ppd_eb('001', 'PPD para c=0.01 kg/s (Dos personas)', '../figs/PPD001_dos.png')
plot_ppd_eb('0001', 'PPD para c=0.001 kg/s (Dos persona)', '../figs/PPD0001_dos.png')
plot_ppd_eb('01', 'PPD para c=0.1 kg/s (Dos persona)', '../figs/PPD01_dos.png')

# Clean dataframes for metrics.
junio_dos_Ti = data.truncate(before='2019-06-08 00:00:00', after='2019-06-23 23:49:00')
junio_dos_Ti.dropna(inplace=True)
# junio_una_Ti

# +
# msno.matrix(junio_dos)
# -

junio_dos_Ti.isnull().sum().sum()
junio_dos_Ti.to_csv('../data/junio_dos_Ti.csv')
comfort.to_csv('../data/comfort_dos.csv')
notebooks/006c_AnalisisCasosDosPersona.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Groverのアルゴリズムを用いた充足可能性問題の解法 # このセクションでは、Qiskit Aqua内のGroverのアルゴリズムの実装を用いた、充足可能性問題の解法を説明します。 # # ## 目次 # # 1. [序論](#introduction) # # 2. [3-SAT問題](#3satproblem) # # 3. [Qiskitでの実装](#implementation) # # 4. [演習問題](#problems) # # 5. [参考文献](#references) # ## 1. 序論 <a id='introduction'></a> # # [前のセクション](../ch-algorithms/grover.ipynb)では、非構造化探索に対するGroverのアルゴリズムについて、Qiskit Terraを用いた例と実装とともに紹介しました。Groverの探索は、古典的なコンピューターのものより二乗のオーダーで、早く正しい解を探すことができる量子アルゴリズムであることがお分かりいただけたと思います。ここでは、Groverのアルゴリズムを使用して、組合せブール値の充足可能性問題の解法を説明しましょう。 # # コンピューター・サイエンスにおいて、ブール値の充足性問題とは、与えられたブール式を満たす解が存在するかどうかを決定する問題です。言い換えると、式がTRUEと評価されるように、与えられたブール式の変数をTRUEまたはFALSEの値で置き換えることができるかどうかを問う問題になります。置き換えられる場合、式は「充足している」と言います。 一方、そのような値の割り当てが存在しない場合は、式で表される関数は、すべての可能な変数値に対してFALSEになり、式は「充足していない」と言います。 つまり、ブール式を満たす割り当てを解とすると、これは探索問題と見なすことができます。 # ## 2. 
3-SAT問題 <a id='3satproblem'></a> # # 3体充足度問題(3-SAT問題)は、次の具体的な問題が最良の説明となります。以下の様に、3つのブール変数 $v_1,v_2,v_3$ とブール関数 $f$ を考えましょう: # # $$f(v_1,v_2,v_3) = (\neg v_1 \vee \neg v_2 \vee \neg v_3) \wedge (v_1 \vee \neg v_2 \vee v_3) \wedge (v_1 \vee v_2 \vee \neg v_3) \wedge (v_1 \vee \neg v_2 \vee \neg v_3) \wedge (\neg v_1 \vee v_2 \vee v_3)$$ # # 上の関数において、右側の方程式の項の$()$の内側を、節と呼びます。つまり、この関数には5つの節があります。3-SAT問題であるため、各節には必ず3つのリテラルがあります。 例えば、最初の節には、 $ \neg v_1$、 $\neg v_2$ 、および $\neg v_3$ がリテラルとして含まれています。 記号 $\neg$ は、後続のリテラルの値を否定する(または反転する)論理NOTです。 記号 $\vee$ と $\wedge$ は、それぞれ論理ORと論理ANDになります。 $f(v_1, v_2, v_3) = 1$ と評価できる $v_1, v_2, v_3$ の値がある場合には、ブール関数 $f$ は充足します(つまり、 $f$ がTrueと評価できるということです)。 # # そのような値を見つけるための馬鹿正直な方法は、$f$の入力値の可能なすべての組み合わせを試行することです。以下の表は、 $v_1, v_2, v_3$ のすべての可能な組み合わせを試行した時に得られる表です。 説明を容易にするため、 $0$ はFalseと、 $1$ はTrueと同義とします。 # # |$v_1$ | $v_2$ | $v_3$ | $f$ | コメント | # |------|-------|-------|-----|---------| # | 0 | 0 | 0 | 1 | **解** | # | 0 | 0 | 1 | 0 | $f$ がFalseなので解ではない | # | 0 | 1 | 0 | 0 | $f$ がFalseなので解ではない | # | 0 | 1 | 1 | 0 | $f$ がFalseなので解ではない | # | 1 | 0 | 0 | 0 | $f$ がFalseなので解ではない | # | 1 | 0 | 1 | 1 | **解** | # | 1 | 1 | 0 | 1 | **解** | # | 1 | 1 | 1 | 0 | $f$ がFalseなので解ではない | # # 上の表から、この3-SAT問題が、3つの充足解 $(v_1, v_2, v_3) = (T, F, T)$ or $(F, F, F)$ or $(T, T, F)$ を持つことがわかります。 # # 一般的に、ブール関数 $f$ は、多くの節と、より多くのブール型変数を持ちます。3-SAT問題は、連言標準形(Conjunctive Normal Form、CNF)、つまり3つのリテラルの選言からなる節と一つ以上の節の連言として常に表現できることに注意してください。すなわち、3つの論理和の論理積となります。 # ## 3. 
Qiskitでの実装 <a id='implementation'></a> # # では、Qiskit Aquaを使って、3-SATの例題を解いてみましょう: # $$f(v_1,v_2,v_3) = (\neg v_1 \vee \neg v_2 \vee \neg v_3) \wedge (v_1 \vee \neg v_2 \vee v_3) \wedge (v_1 \vee v_2 \vee \neg v_3) \wedge (v_1 \vee \neg v_2 \vee \neg v_3) \wedge (\neg v_1 \vee v_2 \vee v_3)$$ # # まず、Qiskit Aquaがこの様な問題を解くために使用している入力フォーマット [DIMACS CNF](http://www.satcompetition.org/2009/format-benchmarks2009.html) ついて理解する必要があります: # # ~~~ # c example DIMACS CNF 3-SAT # p cnf 3 5 # -1 -2 -3 0 # 1 -2 3 0 # 1 2 -3 0 # 1 -2 -3 0 # -1 2 3 0 # ~~~ # # - `c` から始まる行はコメントです # - 例: `c example DIMACS CNF 3-SAT` # - 最初の非コメント行は、`p cnf nbvar nbclauses` という形である必要があります。ここで: # - `cnf` は、入力がCNF形式であることを意味します # - `nbvar` は、ファイル内に出現する変数の正確な数です # - `nbclauses` は、ファイル内に含まれる節の正確な数です # - 例: `p cnf 3 5` # - 次に、各節の行が記述されます。ここで: # - 各節は `-nbvar` から `nbvar`の間の個別の非Null値の数列で、行は`0`で終わります # - 反数 i と -i を同時に含むことはできません # - 正の数は対応する変数を意味します # - 負の数は対応する変数の否定を意味します # - 例:`-1 2 3 0` は、節 $\neg v_1 \vee v_2 \vee v_3$ に対応します。 # # 同様に、前の問題の解 $(v_1, v_2, v_3) = (T, F, T)$ , $(F, F, F)$ , $(T, T, F)$ は、`1 -2 3` , `-1 -2 -3` , `1 2 -3` と書くことができます. 
# # この例題を入力として、Grover探索に対応する Oracle を作成します。具体的には、Aquaで提供されるLogicalExpressionOracleコンポーネントを使用します。このコンポーネントは、DIMACS CNF構文文字列の解析と、対応するOracle回路の構築をサポートしています。 import numpy as np from qiskit import BasicAer from qiskit.visualization import plot_histogram # %config InlineBackend.figure_format = 'svg' # Makes the images look nice from qiskit.aqua import QuantumInstance, run_algorithm from qiskit.aqua.algorithms import Grover from qiskit.aqua.components.oracles import LogicalExpressionOracle, TruthTableOracle input_3sat = ''' c example DIMACS-CNF 3-SAT p cnf 3 5 -1 -2 -3 0 1 -2 3 0 1 2 -3 0 1 -2 -3 0 -1 2 3 0 ''' oracle = LogicalExpressionOracle(input_3sat) # `oracle` はGroverのインスタンスを作成するために使用されます: grover = Grover(oracle) # シミュレーター・バックエンドを構成し、Groverのインスタンスを実行して結果を得ることができます: backend = BasicAer.get_backend('qasm_simulator') quantum_instance = QuantumInstance(backend, shots=1024) result = grover.run(quantum_instance) print(result['result']) # 上に示される通り、与えられた3-SAT問題を充足する解が得られました。これは確かに3つの充足解の1つです。 # # シミュレーター・バックエンドを使用しているため、以下の図に示すように、完全な測定結果も返されます。3つの充足解に対応するバイナリ文字列 `000`、`011`および`101`(各文字列のビット・オーダーに注意してください)が高い確率を持っていることが見て取れます。 plot_histogram(result['measurement']) # シミュレーターが例題の解を見つけられることを確認しました。ノイズと不完全なゲートを持つ本物の量子デバイスを使用したとき、何が起こるか見てみましょう。 # # ただし、ネットワークを介して実装置に送信できる文字列の長さの制限(この回路のQASMは6万文字以上あります)があるため、上記の回路を実装置のバックエンドで実行することはできません。以下のように、実装置の`ibmq_16_melbourne`バックエンド上でコンパイルしたQASMを表示することはできます: # Load our saved IBMQ accounts and get the ibmq_16_melbourne backend from qiskit import IBMQ IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') backend = provider.get_backend('ibmq_16_melbourne') # + from qiskit.compiler import transpile # transpile the circuit for ibmq_16_melbourne grover_compiled = transpile(result['circuit'], backend=backend, optimization_level=3) print('gates = ', grover_compiled.count_ops()) print('depth = ', grover_compiled.depth()) # - # 必要とされるゲート数は、現在の短期量子コンピュータのデコヒーレンス時間に関する制限をはるかに上回ります。 つまり、充足問題や他の最適化問題を解決するGrover探索の量子回路を設計することはまだ難しいのです。 # ## 4. 
演習問題 <a id='problems'></a> # # 1. Qiskit Aquaを使用して、次の3-SAT問題を解いてください: $f(x_1, x_2, x_3) = (x_1 \vee x_2 \vee \neg x_3) \wedge (\neg x_1 \vee \neg x_2 \vee \neg x_3) \wedge (\neg x_1 \vee x_2 \vee x_3)$ 。 結果は期待したものでしたか? # # ## 5. 参考文献 <a id='references'></a> # # 1. <NAME> (2017), "An Introduction to Quantum Computing, Without the Physics", [arXiv:1708.03684 ](https://arxiv.org/abs/1708.03684) import qiskit qiskit.__qiskit_version__
i18n/locales/ja/ch-applications/satisfiability-grover.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + active=""
# Problem statement:
# Given a 32-bit signed integer, write a function that determines
# whether it is a power of four.
#
# Example 1:
#     Input: 16
#     Output: true
#
# Example 2:
#     Input: 5
#     Output: false
#
# Follow-up: can you solve it without using loops or recursion?
# -

class Solution:
    def isPowerOfFour(self, num: int) -> bool:
        """Return True iff ``num`` equals 4**k for some integer k >= 0.

        Loop-free check in two steps:
        1. ``num`` must be a positive power of two (exactly one set bit);
        2. that single set bit must sit at an even position, i.e. the
           exponent of two must be even — then 2**k is also a power of 4.
        """
        if num <= 0:
            return False
        # Exactly one bit set <=> power of two.
        single_bit = not (num & (num - 1))
        # bit_length() - 1 is the exponent k of the (single) set bit.
        return single_bit and (num.bit_length() - 1) % 2 == 0


solution = Solution()
solution.isPowerOfFour(5)
Bit Manipulation/1108/342. Power of Four.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # + import numpy as np import matplotlib.pyplot as plt import matplotlib from ipywidgets import interact # - matplotlib.rcParams.update({'font.size': 22}) def undumped(wn, x0, x_dot0, t): return x0 * np.cos(wn * t) + x_dot0 / wn * np.sin(wn * t) def underdumped(xi, wn, x0, x_dot0, t): wd = wn * np.sqrt(1 - xi**2) x = ( np.exp(-xi*wn*t) * ( x0 * np.cos(wd * t) + (xi*wn*x0 + x_dot0) / wd * np.sin(wd * t) ) ) return x def critically_dumped(wn, x0, x_dot0, t): return np.exp(-wn*t) * (x0 * (1 + wn * t) + x_dot0 * t) def overdumped(xi, wn, x0, x_dot0, t): a = xi * wn + wn * np.sqrt(xi**2 - 1) b = xi * wn - wn * np.sqrt(xi**2 - 1) x = ( (a * x0 + x_dot0) / (a - b) * np.exp(-b*t) - (b * x0 + x_dot0) / (a - b) * np.exp(-a*t) ) return x def plot_2order_free_resp(xi, wn, x0, x_dot0): t = np.linspace(0, 10, 1000) # Calculate selected if np.isclose(xi, 0): x = undumped(wn, x0, x_dot0, t) elif np.isclose(xi, 1): x = critically_dumped(wn, x0, x_dot0, t) elif 0 < xi < 1: x = underdumped(xi, wn, x0, x_dot0, t) else: x = overdumped(xi, wn, x0, x_dot0, t) # Plot selected plt.figure(figsize=(15, 8)) plt.plot(t, x, ls='-', lw=2, c='#b30000', label=f"$\\xi={xi:.2f}$") # Critical dumping x = critically_dumped(wn, x0, x_dot0, t) plt.plot(t, x, ls='--', lw=4, alpha=0.7, c='#02818a', label="$\\xi=1.00$") # No dumping x = undumped(wn, x0, x_dot0, t) plt.plot(t, x, ls='-', lw=4, alpha=0.5, c='#0570b0', label="$\\xi=0.00$") plt.ylim(-1, 1) plt.grid() plt.xlabel('t') plt.ylabel('x') plt.legend(loc='upper right') # # Respuesta libre de sistemas de segundo orden interact(plot_2order_free_resp, xi=(0.0, 3.0, 0.05), wn=(1, 4), x0=(-1.0, 1.0), x_dot0=(-10.0, 10.0));
notebooks_completos/093-Ejemplos-SistemasSegundoOrden-RespuestaLibre.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # `pandas` Part 2: this notebook is a 2nd lesson on `pandas` # ## The main objective of this tutorial is to slice up some DataFrames using `pandas` # >- Reading data into DataFrames is step 1 # >- But most of the time we will want to select specific pieces of data from our datasets # # # Learning Objectives # ## By the end of this tutorial you will be able to: # 1. Select specific data from a pandas DataFrame # 2. Insert data into a DataFrame # # ## Files Needed for this lesson: `winemag-data-130k-v2.csv` # >- Download this csv from Canvas prior to the lesson # # ## The general steps to working with pandas: # 1. import pandas as pd # >- Note the `as pd` is optional but is a common alias used for pandas and makes writing the code a bit easier # 2. Create or load data into a pandas DataFrame or Series # >- In practice, you will likely be loading more datasets than creating but we will learn both # 3. Reading data with `pd.read_` # >- Excel files: `pd.read_excel('fileName.xlsx')` # >- Csv files: `pd.read_csv('fileName.csv')` # 4. After steps 1-3 you will want to check out your DataFrame # >- Use `shape` to see how many records and columns are in your DataFrame # >- Use `head()` to show the first 5-10 records in your DataFrame # 5. 
Then you will likely want to slice up your data into smaller subset datasets
# >- This step is the focus of this lesson

# # First, check your working directory

# # Step 1: Import pandas and give it an alias

# # Step 2: Read Data Into a DataFrame
# >- Knowing how to create your own data can be useful
# >- However, most of the time we will read data into a DataFrame from a csv or Excel file
#
# ## File Needed: `winemag-data-130k-v2.csv`
# >- Make sure you download this file from Canvas and place in your working directory

# ### Read the csv file with `pd.read_csv('fileName.csv')`
# >- Set the index to column 0

# ### Check how many rows/records and columns are in the `wine_reviews` DataFrame
# >- Use `shape`

# ### Check a couple of rows of data

# ### Now we can access columns in the dataframe using syntax similar to how we access values in a dictionary

# ### To get a single value...

# ### Using the indexing operator and attribute selection like we did above should seem familiar
# >- We have accessed data like this using dictionaries
# >- However, pandas also has its own selection/access operators, `loc` and `iloc`
# >- For basic operations, we can use the familiar dictionary syntax
# >- As we get more advanced, we should use `loc` and `iloc`
# >- It might help to think of `loc` as "label based location" and `iloc` as "index based location"
#
# ### Both `loc` and `iloc` start with the row then the column

# #### Use `iloc` for index based location similar to what we have done with lists and dictionaries

# #### Use `loc` for label based location. This uses the column names vs indexes to retrieve the data we want.
# # First, let's look at index based selection using `iloc`
#
# ## As we work these examples, remember we specify row first then column

# ### Selecting the first row using `iloc`
# >- For the wine reviews dataset this is our header row

# ### To return all the rows of a particular column with `iloc`
# >- To get everything, just put a `:` for row and/or column

# ### To return the first three rows of the first column...

# ### To return the second and third rows...

# ### We can also pass a list for the rows to get specific values

# ### Can we pass lists for both rows and columns...?

# ### We can also go from the end of the rows just like we did with lists
# >- The following gets the last 5 records for country in the dataset

# ### To get the last 5 records for all columns...

# # Label-Based Selection with `loc`

# ## With `loc`, we use the names of the columns to retrieve data

# ### Get all the records for the following fields/columns using `loc`:
# >- taster_name
# >- taster_twitter_handle
# >- points

# # Notice we have been using the default index so far

# ## We can change the index with `set_index`

# # Conditional Selection
# >- Suppose we only want to analyze data for one country, reviewer, etc...
# >- Or we want to pull the data only for points and/or prices above certain criteria

# ## Which wines are from the US with 95 or greater points?

# # Some notes on our previous example:
# >- We just quickly took a dataset that has almost 130K rows and reduced it to one that has 993
# >- This tells us that less than 1% of the wines are from the US and have ratings of 95 or higher
# >- With some simple slicing using pandas we already have a decent start to an analytics project

# # Q: What are all the wines from Italy or that have a rating higher than 95?
# >- To return the results for an "or" question use the pipe `|` between your conditions

# # Q: What are all the wines from Italy or France?
# >- We can do this with an or statement or the `isin()` selector # >- Note: if you know SQL, this is the same thing as the IN () statement # >- Using `isin()` replaces multiple "or" statements and makes your code a little shorter # # Q: What are all the wines without prices? # >- Here we can use the `isnull` method to show when values are not entered for a particular column # # What are all the wines with prices? # >- Use `notnull()` # # We can also add columns/fields to our DataFrames
Week 9/Pandas_Part2_Indexing-Selecting-Assigning_STUDENT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # **anaMamiDefl**(ection) # + jupyter={"source_hidden": true} # %matplotlib widget import warnings import numpy as np import matplotlib.pyplot as plt import succolib as sl from skimage import io from matplotlib.colors import LogNorm from tqdm.auto import tqdm from scipy.optimize import curve_fit from scipy.integrate import quad # - # --- # ## SETTINGS # ### **list of runs & corresponding parameters** # + jupyter={"source_hidden": true} dataNames = {} # {filenames : parent_datasets} -- full statistics, select files to open with dataNamesToOpen # note: all the images that share the same parent dataset will be summed together # note: all the parent datasets should be named so that the alphabetical order corresponds to the scan angle order # note: make sure to set the parent folder below # 2020 Si2 -- 300MeV for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_171809480_%4.4d.tiff" % (i+1) : "Si2_searchChanneling18"}) # gain test for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_172451540_%4.4d.tiff" % (i+1) : "Si2_searchChanneling19"}) # gain test for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_173841296_%4.4d.tiff" % (i+1) : "Si2_searchChanneling20"}) # gain test for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_175737146_%4.4d.tiff" % (i+1) : "Si2_searchChanneling21"}) # gain test for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_180127537_%4.4d.tiff" % (i+1) : "Si2_searchChanneling22"}) # gain test for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_204403604_%4.4d.tiff" % (i+1) : "Si2_searchChanneling23"}) for i in range(20): 
dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_212142344_%4.4d.tiff" % (i+1) : "Si2_searchChanneling24"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_181919107_%4.4d.tiff" % (i+1) : "Si2_scan300"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182003707_%4.4d.tiff" % (i+1) : "Si2_scan301"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182100818_%4.4d.tiff" % (i+1) : "Si2_scan302"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182154130_%4.4d.tiff" % (i+1) : "Si2_scan303"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182226314_%4.4d.tiff" % (i+1) : "Si2_scan304"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182300987_%4.4d.tiff" % (i+1) : "Si2_scan305"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182338577_%4.4d.tiff" % (i+1) : "Si2_scan306"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182408642_%4.4d.tiff" % (i+1) : "Si2_scan307"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182446153_%4.4d.tiff" % (i+1) : "Si2_scan308"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182516329_%4.4d.tiff" % (i+1) : "Si2_scan309"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182544713_%4.4d.tiff" % (i+1) : "Si2_scan310"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182733248_%4.4d.tiff" % (i+1) : "Si2_scan311"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182811264_%4.4d.tiff" % (i+1) : "Si2_scan312"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182914631_%4.4d.tiff" % (i+1) : "Si2_scan313"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_182953704_%4.4d.tiff" % (i+1) : 
"Si2_scan314"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_183045296_%4.4d.tiff" % (i+1) : "Si2_scan315"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_183134103_%4.4d.tiff" % (i+1) : "Si2_scan316"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_183213695_%4.4d.tiff" % (i+1) : "Si2_scan317"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_183258950_%4.4d.tiff" % (i+1) : "Si2_scan318"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_183457326_%4.4d.tiff" % (i+1) : "Si2_scan319"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_183554493_%4.4d.tiff" % (i+1) : "Si2_scan320"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_183634750_%4.4d.tiff" % (i+1) : "Si2_scan321"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_183708820_%4.4d.tiff" % (i+1) : "Si2_scan322"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_183751605_%4.4d.tiff" % (i+1) : "Si2_scan323"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_183828236_%4.4d.tiff" % (i+1) : "Si2_scan324"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_183904045_%4.4d.tiff" % (i+1) : "Si2_scan325"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_183946252_%4.4d.tiff" % (i+1) : "Si2_scan326"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_184034236_%4.4d.tiff" % (i+1) : "Si2_scan327"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_184111116_%4.4d.tiff" % (i+1) : "Si2_scan328"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_184148251_%4.4d.tiff" % (i+1) : "Si2_scan329"}) for i in range(20): 
dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_184229276_%4.4d.tiff" % (i+1) : "Si2_scan330"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_184304627_%4.4d.tiff" % (i+1) : "Si2_scan331"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_184338827_%4.4d.tiff" % (i+1) : "Si2_scan332"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_184421362_%4.4d.tiff" % (i+1) : "Si2_scan333"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_184500603_%4.4d.tiff" % (i+1) : "Si2_scan334"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_184543731_%4.4d.tiff" % (i+1) : "Si2_scan335"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_184625617_%4.4d.tiff" % (i+1) : "Si2_scan336"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_185159560_%4.4d.tiff" % (i+1) : "Si2_scanAmo3"}) for i in range(20): dataNames.update({"Si2_scanCradle3/X1__21282342__20201008_185232888_%4.4d.tiff" % (i+1) : "Si2_bkgscanAmo3"}) # 2020 Si2 -- 600MeV for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_102042224_%4.4d.tiff" % (i+1) : "Si2_searchChanneling16"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_102231008_%4.4d.tiff" % (i+1) : "Si2_searchChanneling17"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_105804680_%4.4d.tiff" % (i+1) : "Si2_scan200"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_110113619_%4.4d.tiff" % (i+1) : "Si2_scan201"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_110234825_%4.4d.tiff" % (i+1) : "Si2_scan202"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_110309695_%4.4d.tiff" % (i+1) : "Si2_scan203"}) for i in range(20): 
dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_111152903_%4.4d.tiff" % (i+1) : "Si2_scan204"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_111237055_%4.4d.tiff" % (i+1) : "Si2_scan205"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_110542545_%4.4d.tiff" % (i+1) : "Si2_scan206"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_110615657_%4.4d.tiff" % (i+1) : "Si2_scan207"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_110711176_%4.4d.tiff" % (i+1) : "Si2_scan208"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_110741296_%4.4d.tiff" % (i+1) : "Si2_scan209"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_111329742_%4.4d.tiff" % (i+1) : "Si2_scan210"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_111439069_%4.4d.tiff" % (i+1) : "Si2_scan211"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_111511046_%4.4d.tiff" % (i+1) : "Si2_scan212"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_111539469_%4.4d.tiff" % (i+1) : "Si2_scan213"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_111639270_%4.4d.tiff" % (i+1) : "Si2_scan214"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_111711910_%4.4d.tiff" % (i+1) : "Si2_scan215"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_111746877_%4.4d.tiff" % (i+1) : "Si2_scan216"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_111820333_%4.4d.tiff" % (i+1) : "Si2_scan217"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_111848332_%4.4d.tiff" % (i+1) : "Si2_scan218"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_111918356_%4.4d.tiff" % (i+1) : "Si2_scan219"}) for 
i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_111948445_%4.4d.tiff" % (i+1) : "Si2_scan220"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_112237570_%4.4d.tiff" % (i+1) : "Si2_scan221"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_112306027_%4.4d.tiff" % (i+1) : "Si2_scan222"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_112416818_%4.4d.tiff" % (i+1) : "Si2_scan223"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_112446386_%4.4d.tiff" % (i+1) : "Si2_scan224"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_112530138_%4.4d.tiff" % (i+1) : "Si2_scan225"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_112601986_%4.4d.tiff" % (i+1) : "Si2_scan226"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_112651242_%4.4d.tiff" % (i+1) : "Si2_scan227"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_112725658_%4.4d.tiff" % (i+1) : "Si2_scan228"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_112804977_%4.4d.tiff" % (i+1) : "Si2_scan229"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_112838281_%4.4d.tiff" % (i+1) : "Si2_scan230"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_112918721_%4.4d.tiff" % (i+1) : "Si2_scan231"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_113118881_%4.4d.tiff" % (i+1) : "Si2_scanAmo2"}) for i in range(20): dataNames.update({"Si2_scanCradle2/X1__21282342__20201008_113150800_%4.4d.tiff" % (i+1) : "Si2_bkgScanAmo2"}) # 2020 Si2 -- 855MeV for i in range(10): dataNames.update({"test/X1__21282342__20201007_020203935_%4.4d.tiff" % (i+1) : "Si2_searchChanneling0"}) # gain test for i in range(10): 
dataNames.update({"test/X1__21282342__20201007_021809690_%4.4d.tiff" % (i+1) : "Si2_searchChanneling0"}) # gain test for i in range(20): dataNames.update({"test/X1__21282342__20201007_023417772_%4.4d.tiff" % (i+1) : "Si2_searchChanneling1"}) # gain test for i in range(20): dataNames.update({"test/X1__21282342__20201007_024032438_%4.4d.tiff" % (i+1) : "Si2_searchChanneling2"}) # gain test for i in range(20): dataNames.update({"test/X1__21282342__20201007_024215429_%4.4d.tiff" % (i+1) : "Si2_searchChanneling3"}) # gain test for i in range(20): dataNames.update({"test/X1__21282342__20201007_025413807_%4.4d.tiff" % (i+1) : "Si2_searchChanneling4"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_030617123_%4.4d.tiff" % (i+1) : "Si2_searchChanneling5"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_031011713_%4.4d.tiff" % (i+1) : "Si2_searchChanneling6"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_031432623_%4.4d.tiff" % (i+1) : "Si2_searchChanneling7"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_031702425_%4.4d.tiff" % (i+1) : "Si2_searchChanneling8"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_034341151_%4.4d.tiff" % (i+1) : "Si2_searchChanneling9"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_040141961_%4.4d.tiff" % (i+1) : "Si2_searchChanneling10"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_041427981_%4.4d.tiff" % (i+1) : "Si2_searchChanneling11"}) # gain test for i in range(20): dataNames.update({"test/X1__21282342__20201007_031912166_%4.4d.tiff" % (i+1) : "Si2_searchChanneling8_bkg"}) # beam off, just bkg for i in range(20): dataNames.update({"test/X1__21282342__20201007_042256691_%4.4d.tiff" % (i+1) : "Si2_scan000"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_042329147_%4.4d.tiff" % (i+1) : "Si2_scan001"}) for i in range(20): 
dataNames.update({"test/X1__21282342__20201007_042401123_%4.4d.tiff" % (i+1) : "Si2_scan002"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_042439578_%4.4d.tiff" % (i+1) : "Si2_scan003"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_042532946_%4.4d.tiff" % (i+1) : "Si2_scan004"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_042618009_%4.4d.tiff" % (i+1) : "Si2_scan005"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_042653362_%4.4d.tiff" % (i+1) : "Si2_scan006"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_042741176_%4.4d.tiff" % (i+1) : "Si2_scan007"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_042839425_%4.4d.tiff" % (i+1) : "Si2_scan008"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_042926672_%4.4d.tiff" % (i+1) : "Si2_scan009"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_042954008_%4.4d.tiff" % (i+1) : "Si2_scan010"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_043031592_%4.4d.tiff" % (i+1) : "Si2_scan011"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_043108816_%4.4d.tiff" % (i+1) : "Si2_scan012"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_043147617_%4.4d.tiff" % (i+1) : "Si2_scan013"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_045013066_%4.4d.tiff" % (i+1) : "Si2_scan014"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_045158257_%4.4d.tiff" % (i+1) : "Si2_scan015"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_045254218_%4.4d.tiff" % (i+1) : "Si2_scan016"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_045345130_%4.4d.tiff" % (i+1) : "Si2_scan017"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_045421168_%4.4d.tiff" % (i+1) : "Si2_scan018"}) for i in range(20): 
dataNames.update({"test/X1__21282342__20201007_045455081_%4.4d.tiff" % (i+1) : "Si2_scan019"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_045525576_%4.4d.tiff" % (i+1) : "Si2_scan020"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_045552744_%4.4d.tiff" % (i+1) : "Si2_scan021"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_045644016_%4.4d.tiff" % (i+1) : "Si2_scan022"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_045808520_%4.4d.tiff" % (i+1) : "Si2_scan023"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_045841272_%4.4d.tiff" % (i+1) : "Si2_scan024"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_045953167_%4.4d.tiff" % (i+1) : "Si2_scan025"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_050302311_%4.4d.tiff" % (i+1) : "Si2_scan026"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_050613782_%4.4d.tiff" % (i+1) : "Si2_scan027"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_050701428_%4.4d.tiff" % (i+1) : "Si2_scan028"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_051031748_%4.4d.tiff" % (i+1) : "Si2_scan029"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_051231659_%4.4d.tiff" % (i+1) : "Si2_scan030"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_051323394_%4.4d.tiff" % (i+1) : "Si2_scan031"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_051356619_%4.4d.tiff" % (i+1) : "Si2_scan032"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_051448850_%4.4d.tiff" % (i+1) : "Si2_scan033"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_051649537_%4.4d.tiff" % (i+1) : "Si2_scan034"}) for i in range(20): dataNames.update({"test/X1__21282342__20201007_051730577_%4.4d.tiff" % (i+1) : "Si2_scan035"}) for i in range(20): 
dataNames.update({"test/X1__21282342__20201007_051806762_%4.4d.tiff" % (i+1) : "Si2_scan036"})

# remaining datasets in data-driven form -- one tuple per acquisition:
# (subdirectory, acquisition timestamp, dataset label, nr. of frames); frame files are named
# "<subdir>/X1__21282342__<timestamp>_<NNNN>.tiff" with NNNN running from 0001 to the nr. of frames
for _subDir, _stamp, _label, _nFrames in [
    ("test", "20201007_051915536", "Si2_scan037", 20),
    ("test", "20201007_052042097", "Si2_scanAmo0", 20),
    ("test", "20201007_044548107", "Si2_bkgScan013", 20),
    ("test", "20201007_052128640", "Si2_bkgscanAmo0", 20),
    ("test", "20201007_162429101", "Si2_validateChanneling", 20),
    ("Si2_scanCradle2", "20201007_191051091", "Si2_searchChanneling12", 20),
    ("Si2_scanCradle2", "20201007_191806057", "Si2_searchChanneling13", 20),
    ("Si2_scanCradle2", "20201007_193219566", "Si2_searchChanneling14", 20),
    ("Si2_scanCradle2", "20201007_195316119", "Si2_searchChanneling15", 20),
    ("Si2_scanCradle2", "20201007_223813221", "Si2_scan100", 20),
    ("Si2_scanCradle2", "20201007_223912989", "Si2_scan101", 20),
    ("Si2_scanCradle2", "20201007_224349436", "Si2_scan102", 20),
    ("Si2_scanCradle2", "20201007_224431412", "Si2_scan103", 20),
    ("Si2_scanCradle2", "20201007_224510468", "Si2_scan104", 20),
    ("Si2_scanCradle2", "20201007_224547532", "Si2_scan105", 20),
    ("Si2_scanCradle2", "20201007_224621373", "Si2_scan106", 20),
    ("Si2_scanCradle2", "20201007_224655963", "Si2_scan107", 20),
    ("Si2_scanCradle2", "20201007_224737723", "Si2_scan108", 20),
    ("Si2_scanCradle2", "20201007_224826570", "Si2_scan109", 20),
    ("Si2_scanCradle2", "20201007_225026018", "Si2_scan110", 20),
    ("Si2_scanCradle2", "20201007_225246153", "Si2_scan111", 20),
    ("Si2_scanCradle2", "20201007_225426473", "Si2_scan112", 20),
    ("Si2_scanCradle2", "20201007_225531249", "Si2_scan113", 20),
    ("Si2_scanCradle2", "20201007_225616406", "Si2_scan114", 20),
    ("Si2_scanCradle2", "20201007_225710648", "Si2_scan115", 20),
    ("Si2_scanCradle2", "20201007_225823615", "Si2_scan116", 20),
    ("Si2_scanCradle2", "20201007_225926231", "Si2_scan117", 20),
    ("Si2_scanCradle2", "20201007_230033096", "Si2_scan118", 20),
    ("Si2_scanCradle2", "20201007_230115503", "Si2_scan119", 20),
    ("Si2_scanCradle2", "20201007_230325238", "Si2_scan120", 20),
    ("Si2_scanCradle2", "20201007_230405942", "Si2_scan121", 20),
    ("Si2_scanCradle2", "20201007_230535478", "Si2_scan122", 20),
    ("Si2_scanCradle2", "20201007_230610806", "Si2_scan123", 20),
    ("Si2_scanCradle2", "20201007_230651165", "Si2_scan124", 20),
    ("Si2_scanCradle2", "20201007_230734093", "Si2_scan125", 20),
    ("Si2_scanCradle2", "20201007_230836037", "Si2_scan126", 20),
    ("Si2_scanCradle2", "20201007_230905221", "Si2_scan127", 20),
    ("Si2_scanCradle2", "20201007_230947828", "Si2_scan128", 20),
    ("Si2_scanCradle2", "20201007_231026620", "Si2_scan129", 20),
    ("Si2_scanCradle2", "20201007_231107444", "Si2_scan130", 20),
    ("Si2_scanCradle2", "20201007_231142652", "Si2_scan131", 20),
    ("Si2_scanCradle2", "20201007_231220252", "Si2_scan132", 20),
    ("Si2_scanCradle2", "20201007_231253532", "Si2_scan133", 20),
    ("Si2_scanCradle2", "20201007_231335371", "Si2_scan134", 20),
    ("Si2_scanCradle2", "20201007_231421051", "Si2_scan135", 20),
    ("Si2_scanCradle2", "20201007_231501410", "Si2_scan136", 20),
    ("Si2_scanCradle2", "20201007_231530107", "Si2_scan137", 20),
    ("Si2_scanCradle2", "20201007_231600826", "Si2_scan138", 20),
    ("Si2_scanCradle2", "20201007_231629282", "Si2_scan139", 20),
    ("Si2_scanCradle2", "20201007_231846874", "Si2_scanAmo1", 20),
    ("Si2_scanCradle2", "20201007_231921281", "Si2_bkgscanAmo1", 20),
    # 2020 Si1
    ("test", "20201006_232722524", "Si1_searchChanneling0", 2),
    ("test", "20201006_234611493", "Si1_searchChanneling1", 10),
    ("test", "20201007_003213338", "Si1_searchChanneling2", 10),
    ("test", "20201007_005559647", "Si1_searchChanneling3", 10),
    ("test", "20201007_011405398", "Si1_searchChanneling4", 10),
]:
    for _i in range(_nFrames):
        dataNames["%s/X1__21282342__%s_%4.4d.tiff" % (_subDir, _stamp, _i+1)] = _label

# NOTE(review): the dataNames labels "Si2_bkgscanAmo0"/"Si2_bkgscanAmo1" (lowercase "scan") have no
# matching dataParams key -- dataParams defines "Si2_bkgScanAmo0"/"Si2_bkgScanAmo1" instead; confirm
# which spelling is intended before opening those datasets (a dataParams lookup would KeyError).

# + jupyter={"source_hidden": true}
# dataParams format -- {parent_dataset: [dirBeam_parent_dataset, boxPhys, boxBkg, [angle_horsa, angle_versa, x_horsa, x_versa]]}
# -- full statistics, select files to open with dataNamesToOpen
# boxPhys & boxBkg = [xPixMin, xPixMax, yPixMin, yPixMax] -- in pixel indexes!
# boxPhys is the picture interesting region for physics, whereas boxBkg is the region in which background is computed
# angle_horsa/versa are the goniometer positions in deg when meaningful, None otherwise --> channeling final plots only drawn if meaningful

_BOX_BKG = (1200, 1800, 1100, 1400)  # background box, common to every dataset

def _par(parent, boxPhys, angles):
    # build one dataParams entry -- fresh lists on every call, since entries are mutated in place later on
    return [parent, list(boxPhys), list(_BOX_BKG), list(angles)]

def _scanSeries(parent, boxPhys, firstScan, angles, xHorsa, xVersa):
    # one entry per goniometer step of an angular scan -- keys "Si2_scanNNN", horsa angle fixed at 0
    return {"Si2_scan%3.3d" % (firstScan + k): _par(parent, boxPhys, [0, a, xHorsa, xVersa])
            for k, a in enumerate(angles)}

dataParams = {}

# 2020 Si2 -- 300MeV
_box = (1100, 1500, 0, 1100)
for _s in ("Si2_searchChanneling18", "Si2_searchChanneling19", "Si2_searchChanneling20"):
    dataParams[_s] = _par(_s, _box, [0, 2.8568, 17.52, 3.15])
for _s in ("Si2_searchChanneling21", "Si2_searchChanneling22"):
    dataParams[_s] = _par(_s, _box, [3, 0, 17.52, 3.15])
dataParams["Si2_searchChanneling23"] = _par("Si2_searchChanneling23", (1000, 3000, 0, 1100), [0, 2.7993, 17.52, 3.15])
dataParams["Si2_searchChanneling24"] = _par("Si2_searchChanneling24", (950, 1350, 0, 1100), [0, 2.8611, 16.2, 3.4])
dataParams.update(_scanSeries("Si2_scanAmo3", _box, 300, [
    2.8210, 2.8240, 2.8270, 2.8300, 2.8330, 2.8358, 2.8388, 2.8418, 2.8448, 2.8478,
    2.8508, 2.8538, 2.8568, 2.8598, 2.8628, 2.8658, 2.8688, 2.8718, 2.8748, 2.8778,
    2.8808, 2.8838, 2.8868, 2.8898, 2.8928, 2.8958, 2.8988, 2.9018, 2.9048, 2.9078,
    2.9108, 2.9138, 2.9168, 2.9198, 2.9228, 2.9258, 2.9288,
], 17.52, 3.15))
dataParams["Si2_scanAmo3"] = _par("Si2_scanAmo3", _box, [3, 0, 17.52, 3.15])
dataParams["Si2_bkgScanAmo3"] = _par("Si2_scanAmo3", _box, [3, 0, 17.52, 3.15])

# 2020 Si2 -- 600MeV ()
_box = (1150, 1450, 200, 1100)
dataParams["Si2_searchChanneling16"] = _par("Si2_searchChanneling16", _box, [0, 2.8578, 17.4, 4.3])
dataParams["Si2_searchChanneling17"] = _par("Si2_searchChanneling17", _box, [0, 2.8616, 17.4, 3.3])
dataParams.update(_scanSeries("Si2_scanAmo2", _box, 200, [
    2.8369, 2.8398, 2.8428, 2.8458, 2.8488, 2.8518, 2.8548, 2.8578, 2.8608, 2.8638,
    2.8668, 2.8698, 2.8728, 2.8758, 2.8788, 2.8818, 2.8848, 2.8878, 2.8908, 2.8938,
    2.8968, 2.8998, 2.9028, 2.9058, 2.9088, 2.9118, 2.9148, 2.9178, 2.9208, 2.9238,
    2.9268, 2.9298,
], 17.4, 3.3))
dataParams["Si2_scanAmo2"] = _par("Si2_scanAmo2", _box, [3, 0, 17.4, 3.3])
dataParams["Si2_bkgscanAmo2"] = _par("Si2_scanAmo2", _box, [3, 0, 17.4, 3.3])

# 2020 Si2 -- 855MeV
_box = (1300, 1600, 200, 1100)
dataParams["Si2_searchChanneling0"] = _par("Si2_searchChanneling0", _box, [0, 2.9090, 17.5, 8.5])
for _s in ("Si2_searchChanneling1", "Si2_searchChanneling2", "Si2_searchChanneling3"):
    dataParams[_s] = _par(_s, _box, [0, 2.8616, 17.5, 4.5])
dataParams["Si2_searchChanneling4"] = _par("Si2_searchChanneling4", _box, [0, 2.9176, 17.5, 12.5])
dataParams["Si2_searchChanneling5"] = _par("Si2_searchChanneling5", _box, [0, 2.8796, 17.5, 6.5])
dataParams["Si2_searchChanneling6"] = _par("Si2_searchChanneling6", _box, [0, 2.8796, 17.5, 8.5])
dataParams["Si2_searchChanneling7"] = _par("Si2_searchChanneling7", _box, [0, 2.8616, 17.5, 4.5])
dataParams["Si2_searchChanneling8"] = _par("Si2_searchChanneling8", _box, [0, 2.8616, 17.5, 3.5])
dataParams["Si2_searchChanneling8_bkg"] = _par("Si2_searchChanneling8", _box, [0, 2.8616, 17.5, 3.5])
dataParams["Si2_searchChanneling9"] = _par("Si2_searchChanneling9", _box, [0, 2.8616, 17.5, 4.5])
dataParams["Si2_searchChanneling10"] = _par("Si2_searchChanneling10", _box, [0, 2.8616, 17.5, 4.0])
dataParams["Si2_searchChanneling11"] = _par("Si2_searchChanneling11", _box, [0, 2.8616, 17.5, 4.0])
dataParams.update(_scanSeries("Si2_scanAmo0", _box, 0, [
    2.8435, 2.8455, 2.8475, 2.8495, 2.8515, 2.8535, 2.8555, 2.8575, 2.8595, 2.8615,
    2.8635, 2.8655, 2.8675, 2.8695, 2.8717, 2.8737, 2.8757, 2.8777, 2.8797, 2.8818,
    2.8838, 2.8858, 2.8878, 2.8898, 2.8919, 2.8939, 2.8959, 2.8980, 2.9000, 2.9020,
    2.9040, 2.9060, 2.9080, 2.9100, 2.9120, 2.9140, 2.9161, 2.9181,
], 17.5, 4.0))
dataParams["Si2_scanAmo0"] = _par("Si2_scanAmo0", _box, [3, 0, 17.5, 4.0])
dataParams["Si2_bkgScan013"] = _par("Si2_scanAmo0", _box, [0, 2.8616, 17.5, 3.5])
dataParams["Si2_bkgScanAmo0"] = _par("Si2_scanAmo0", _box, [3, 0, 17.5, 4.0])

_box = (1150, 1450, 200, 1100)
dataParams["Si2_validateChanneling"] = _par("Si2_validateChanneling", _box, [0, 2.8636, 17.5, 3.4])
dataParams["Si2_searchChanneling12"] = _par("Si2_searchChanneling12", _box, [0, 2.8636, 17.5, 3.4])
dataParams["Si2_searchChanneling13"] = _par("Si2_searchChanneling13", _box, [0, 2.8636, 17.5, 3.4])
dataParams["Si2_searchChanneling14"] = _par("Si2_searchChanneling14", _box, [0, 2.8547, 17.5, 3.4])
dataParams["Si2_searchChanneling15"] = _par("Si2_searchChanneling15", _box, [0, 2.8557, 17.3, 4.1])
dataParams.update(_scanSeries("Si2_scanAmo1", _box, 100, [
    2.8305, 2.8325, 2.8345, 2.8365, 2.8385, 2.8405, 2.8425, 2.8445, 2.8465, 2.8485,
    2.8506, 2.8526, 2.8546, 2.8566, 2.8586, 2.8607, 2.8627, 2.8647, 2.8667, 2.8687,
    2.8708, 2.8728, 2.8748, 2.8768, 2.8788, 2.8809, 2.8829, 2.8849, 2.8869, 2.8889,
    2.8910, 2.8930, 2.8950, 2.8970, 2.8990, 2.9010, 2.9031, 2.9051, 2.9071, 2.9091,
], 17.2, 3.8))
dataParams["Si2_scanAmo1"] = _par("Si2_scanAmo1", _box, [3, 0, 17.2, 3.8])
dataParams["Si2_bkgScanAmo1"] = _par("Si2_scanAmo1", _box, [3, 0, 17.2, 3.8])

# 2020 Si1 -- vertical goniometer scan only: horsa angle set to -1, beam centre positions unknown (None)
_box = (1440, 1500, 0, 800)
for _s, _a in [("Si1_searchChanneling0", 1.6180), ("Si1_searchChanneling1", 1.5635),
               ("Si1_searchChanneling2", 1.6171), ("Si1_searchChanneling3", 1.5910),
               ("Si1_searchChanneling4", 1.5910)]:
    dataParams[_s] = _par(_s, _box, [-1, _a, None, None])
# -

# ### **data input & conditioning settings**

# + jupyter={"source_hidden": true}
dataPath = "./data20/camera/"  # files path (with / at the end) -- either a single string (common to all the files) or a list with same length of dataNamesToOpen

# selected datasets to be opened -- single files; then all the files that share the same parent dataset will be summed together (check dataNames)
# remember to open the required direct beam datasets specified in dataParams as well!
dataNamesToOpen = []
# select the 300MeV Si2 angular scan (labels containing "Si2_scan3") plus its amorphous reference datasets
for s in [s0 for s0 in dataNames if "Si2_scan3" in dataNames[s0]]:
    dataNamesToOpen += [s]
for s in [s0 for s0 in dataNames if "Si2_scanAmo3" in dataNames[s0]]:
    dataNamesToOpen += [s]

######################################

bProgressBar = True  # print progress bar? should be set to False when interactive mode is not available
bPlotRaw = False  # plot parent dataset raw images?
bPixel = True  # if True (False), plot the figures with physical lengths in mm (raw pixel numbers) on the axes
bLog = True  # raw images log scale?
dPixel = 16.6  # pixel size in um
dz = 6.02  # crystal-screen distance in m
# -

# ---
# ## DATA INPUT & CONDITIONING

# + jupyter={"source_hidden": true}
rawDict = {}  # raw data opening here -- dictionary rawDict

# dataPath can be a single common string or one path per file (see settings above)
dataTotToOpen = [s0 + s1 for s0, s1 in zip(dataPath, dataNamesToOpen)] if isinstance(dataPath, list) else [dataPath + s for s in dataNamesToOpen]
print("opening %d images from %d datasets..." % (len(dataTotToOpen), len({dataNames[s] for s in dataNamesToOpen})))
for i, s in enumerate(tqdm(dataNamesToOpen) if bProgressBar else dataNamesToOpen):
    rawDict[s] = io.imread(dataTotToOpen[i], as_gray=True).astype("float")

# + jupyter={"source_hidden": true}
plt.close("all")
totDict = {}  # parent datasets summing & plotting here -- dictionary totDict
for s in sorted({dataNames[s0] for s0 in dataNamesToOpen}):
    children = [ss for ss in dataNamesToOpen if dataNames[ss] == s]  # single files belonging to parent dataset s
    print("opened dataset %s consisting of %d images -- e.g. 1st one is %s with (rows, cols) = %s" % (s, len(children), children[0], str(rawDict[children[0]].shape)))
    totDict[s] = sum(rawDict[ss] for ss in children)

    # also, background subtraction from each parent dataset (mean level in the boxBkg region)
    lims = dataParams[s][2]
    totDict[s] = totDict[s] - np.mean(totDict[s][lims[2]:lims[3], lims[0]:lims[1]])
    # negative values set to 0.1 the minimum positive value for compatibility with log scale
    totDict[s] = np.where(totDict[s] <= 0, 0.1*min([min(p) for p in np.where(totDict[s] > 0, totDict[s], max([max(pp) for pp in totDict[s]]))]), totDict[s])

    if bPlotRaw:
        plt.figure(s)
        plt.title("raw dataset: " + s)
        plt.xlabel("x [mm]" if bPixel else "pixel, horsa")
        plt.ylabel("y [mm]" if bPixel else "pixel, versa")
        if bPixel:
            plt.imshow(totDict[s], extent=(0, dPixel*totDict[s].shape[1]*1e-3, 0, dPixel*totDict[s].shape[0]*1e-3), aspect="auto", norm=LogNorm() if bLog else None)
        else:
            plt.imshow(totDict[s], aspect="auto", norm=LogNorm() if bLog else None)
        plt.colorbar()
        plt.tight_layout()
        plt.show()

    # also, fix unset boundaries for fiducial area selection -- remember: x (y) is shape[1] (shape[0])
    dataParams[s][1][0] = max(dataParams[s][1][0], 0)
    dataParams[s][1][1] = min(dataParams[s][1][1], totDict[s].shape[1])
    dataParams[s][1][2] = max(dataParams[s][1][2], 0)
    dataParams[s][1][3] = min(dataParams[s][1][3], totDict[s].shape[0])
# -

# ## ANALYSIS & PLOTS

# + jupyter={"source_hidden": true}
pi = np.pi  # was a hard-coded 3.1415926535 literal

########
def proj(img, axis):
    """Project a 2d image into a 1d array -- axis = "x" ("y") to sum along the x (y) axis."""
    axisN = 1 if axis == "x" else 0
    return np.sum(img.astype("float"), axisN)

########
def projPhaseSpace(space, lims, axis, bAverage):
    """Project a slice of the 2d angular phase space into a 1d array.

    space = (x, y, z) from the phase space plot; lims = [x0, x1, y0, y1] slice boundaries;
    axis = "x" ("y") to sum along the x (y) axis; bAverage toggles nan-mean averaging.
    """
    insideX = (space[0] >= lims[0]) & (space[0] <= lims[1])
    insideY = (space[1] >= lims[2]) & (space[1] <= lims[3])
    inside = insideX & insideY  # points of the phase space falling inside lims
    lenX, lenY = space[2].shape[1], space[2].shape[0]
    xOut = np.where(inside, space[1], np.nan).flatten()
    yOut = np.where(inside, space[0], np.nan).flatten()
    zOut = np.where(inside, space[2], np.nan).flatten()
    if bAverage:
        xOutFinal, yOutFinal = [], []
        if axis == "x":
            for i in range(lenX):
                xOutFinal.append(np.nanmean(xOut[i::lenX]))
                yOutFinal.append(np.nanmean(zOut[i::lenX]))
        else:
            for i in range(lenY):
                # NOTE(review): the (i+1)*lenX-1 upper bound excludes the last element of each row
                # (slice end is already exclusive) -- confirm this is intended and not an off-by-one
                xOutFinal.append(np.nanmean(yOut[i*lenX:(i+1)*lenX-1]))
                yOutFinal.append(np.nanmean(zOut[i*lenX:(i+1)*lenX-1]))
    else:
        xOutFinal, yOutFinal = xOut if axis == "x" else yOut, zOut
    return np.array(xOutFinal), np.array(yOutFinal)
# -

# ### **direct beam or amorphous-like output beam** (dependent on data availability)
# #### *settings here below:*

# + jupyter={"source_hidden": true}
sigma0 = 70  # sigma approximate value & fit range to be set manually -- common for horsa & versa

# + jupyter={"source_hidden": true}
for sDir in {dataParams[sPhys][0] for sPhys in totDict}:
    plt.close("%s_directBeam" % sDir)
    fig, ax = plt.subplots(num="%s_directBeam" % sDir, figsize=[8, 4])
    lims = dataParams[sDir][1]

    # horizontal profile: projection along y, then gaussian fit around the maximum
    y = proj(totDict[sDir][lims[2]:lims[3], lims[0]:lims[1]], "y")
    xPix = np.linspace(lims[0]+1, lims[1], len(y))
    plt.subplot(121)
    plt.plot(xPix, y)
    plt.xlim([0, totDict[sDir].shape[1]])
    plt.xlabel("pixel, horsa")
    mean0, indMean0 = np.mean([xPix[i] for i in range(len(xPix)) if y[i] == max(y)]), np.mean([i for i in range(len(xPix)) if y[i] == max(y)])
    xFit, yFit = xPix[int(indMean0-sigma0) : int(indMean0+sigma0)], y[int(indMean0-sigma0) : int(indMean0+sigma0)]
    p0 = [max(yFit), mean0, sigma0]
    par, cov = curve_fit(sl.fGaus, xFit, yFit, p0=p0)
    plt.plot(xFit, sl.fGaus(xFit, *par), "r")
    xCentre, xWidth = par[1], par[2]

    # vertical profile: projection along x, same fit procedure
    y = proj(totDict[sDir][lims[2]:lims[3], lims[0]:lims[1]], "x")
    xPix = np.linspace(lims[2]+1, lims[3], len(y))
    # ad hoc intensity rescaling for this one dataset -- TODO confirm where the 4.93 factor comes from
    y = y*4.93 if (sDir == "Si2_searchChanneling19") else y
    plt.subplot(122)
    plt.plot(xPix, y)
    plt.xlim([0, totDict[sDir].shape[0]])
    plt.xlabel("pixel, versa")
    mean0, indMean0 = np.mean([xPix[i] for i in range(len(xPix)) if y[i] == max(y)]), np.mean([i for i in range(len(xPix)) if y[i] == max(y)])
    xFit, yFit = xPix[int(indMean0-sigma0) : int(indMean0+sigma0)], y[int(indMean0-sigma0) : int(indMean0+sigma0)]
    p0 = [max(yFit), mean0, sigma0]
    par, cov = curve_fit(sl.fGaus, xFit, yFit, p0=p0)
    plt.plot(xFit, sl.fGaus(xFit, *par), "r")
    yCentre, yWidth = par[1], par[2]

    fig.suptitle("input or amorphous-like output beam from " + sDir)
    plt.tight_layout()
    plt.show()

    # appending [xCentre, yCentre] to dataParams entries
    for sPhys in set([s for s in dataParams if dataParams[s][0] == sDir] + [sDir]):
        dataParams[sPhys] += [[xCentre, yCentre]]

    print("dataset %s used to study direct beam..." % sDir)
    print("beam CENTRE in pixel nr.: (x, y) = (%f, %f)" % (xCentre, yCentre))
    print("beam CENTRE in phys. coord.: (x, y) = (%f, %f) mm" % (xCentre*dPixel*1e-3, yCentre*dPixel*1e-3))
    print("beam SIZE in pixel nr.: (x, y) = (%f, %f)" % (xWidth, yWidth))
    print("beam SIZE in phys. coord.: (x, y) = (%f, %f) mm" % (xWidth*dPixel*1e-3, yWidth*dPixel*1e-3))
    print("--> beam centre coordinates (in pixel nr.) added to dataParams entries related to %s" % sDir)
# -

# ### **angular phase space**
# #### *settings here below:*

# + jupyter={"source_hidden": true}
bCompute = True  # angular phase space analysis only done if requested (check dataNamesToOpen)
# note: this also enables the channeling effects section
bIncludeDirBeam = False  # if True (False), direct beam data are (not) included in the phase space
bXSide = False  # if True (False), the horizontal (vertical) phase space is studied
bLog = False  # toggle phase space contour plot log scale
nLevels = 50  # number of colour levels for the phase space contour plot
coeffX = (2 * pi / 360) * 1e6  # input angle conversion factor into urad
coeffY = 1e6  # output angle conversion factor into urad
xRange = [49500+100, 50500+150, -500, 800]  # plot range -- format: [x0, x1, y0, y1] or None (i.e. automatic range)
cmap = "jet"  # colormap

# + jupyter={"source_hidden": true}
if bCompute:  # all this is done only if requested
    # get the correct input angles and output angles
    x, y, z = [], [], []
    actualData = {s: dataParams[s] for s in totDict} if bIncludeDirBeam else {s: dataParams[s] for s in totDict if dataParams[s][0] != s}
    for s in actualData:
        lims = actualData[s][1]
        inAngles = actualData[s][3]
        beamCentres = actualData[s][4]
        tempImage = totDict[s][lims[2]:lims[3], lims[0]:lims[1]]
        if inAngles[0 if bXSide else 1] is not None:  # dataset only added to the phase space if goniometer coordinate is available
            # input angles
            x.append(coeffX * inAngles[0 if bXSide else 1] * np.ones(tempImage.shape[1 if bXSide else 0]))
            # output angles
            y0 = np.linspace(lims[0 if bXSide else 2], lims[1 if bXSide else 3], lims[1 if bXSide else 3]-lims[0 if bXSide else 2])
            y.append(coeffY * np.arctan(dPixel*1e-6 / dz) * (y0 - beamCentres[0 if bXSide else 1]))
            # spectrum -- normalisation: each biplot column is normalised to the sum of the events in it
            z.append(proj(tempImage, "y" if bXSide else "x") / sum(proj(tempImage, "y" if bXSide else "x")))
    x = np.array(x)
    y = np.array(y)
    z = np.array(z)
    zBounds = [min([min(z0) for z0 in z]), max([max(z0) for z0 in z])]

    # plot
    plt.close("phaseSpace")
    plt.figure("phaseSpace", figsize=[9, 7])
    plt.title("%s angular phase space" % ("horizontal" if bXSide else "vertical"))
    plt.xlabel("input angle [urad]")
    plt.ylabel("output angle [urad]")
    plt.contourf(x, y, z, np.logspace(np.log10(zBounds[0]), np.log10(zBounds[1]), nLevels) if bLog else nLevels, norm=LogNorm() if bLog else None, cmap=cmap)
    if xRange is not None:
        plt.xlim(xRange[0], xRange[1])
        plt.ylim(xRange[2], xRange[3])
    plt.colorbar(ticks=np.logspace(np.log10(zBounds[0]), np.log10(zBounds[1]), 10) if bLog else np.linspace(zBounds[0], zBounds[1], 10), format='%e')
    # dashed guides: zero output angle & the scanned input-angle interval
    plt.axhline(y=0, c="0.9", ls="--", lw=1.5)
    plt.axvline(x=min([coeffX*dataParams[s][3][0 if bXSide else 1] for s in totDict if dataParams[s][0] != s]), c="0.9", ls="--", lw=1)
    plt.axvline(x=max([coeffX*dataParams[s][3][0 if bXSide else 1] for s in totDict if dataParams[s][0] != s]), c="0.9", ls="--", lw=1)
    plt.tight_layout()
    plt.show()
# -

# ### **channeling effects**
# #### *settings here below:*

# + jupyter={"source_hidden": true}
bPlot = True  # angular phase space 1d projection done only if requested (subordinate to bCompute above)
bFit = True  # angular peak gaussian fit done only if requested (subordinate to bCompute & bPlot above)
bXSide = True  # if True (False), the projection of the phase space above to the y (x) axis is studied
bAverage = True  # if True (False), multiple slices of the phase space are included are averaged (plotted independently)
subrange = [49800, 50000, -100000, 100000]  # angular phase space subrange in which to perform 1d analysis
fitRange = [320, 1300]  # gaussian fit range
fitCentre = 320  # gaussian fit approx. centre
fitSigma = 200  # gaussian fit approx. sigma
nSigmasIntegrate = 20 # number of gaussian sigmas over which to perform peak integral (i.e.
efficiency computation) # + jupyter={"source_hidden": true} bFit = False if not bPlot else bFit if bCompute: xPlot, yPlot = projPhaseSpace((x, y, z), subrange, "x" if bXSide else "y", bAverage) if bFit: xFit0 = np.where((xPlot >= fitRange[0]) & (xPlot <= fitRange[1]), xPlot, np.nan) yFit0 = np.where((xPlot >= fitRange[0]) & (xPlot <= fitRange[1]), yPlot, np.nan) xFit1, yFit1 = xFit0[np.logical_not(np.isnan(xFit0))], yFit0[np.logical_not(np.isnan(yFit0))] p0 = [max(yFit1), fitCentre, fitSigma] par, cov = curve_fit(sl.fGaus, xFit1, yFit1, p0=p0) # fit output: par = (ampl, mean, sigma) xFit = np.linspace(np.min(xFit1), np.max(xFit1), 1000) yFit = sl.fGaus(xFit, *par) gaussIntegral = quad(lambda xx : sl.fGaus(xx, *par), par[1]-nSigmasIntegrate*par[2], par[1]+nSigmasIntegrate*par[2]) print("gaussian peak HEIGHT is %f" % par[0]) print("gaussian peak CENTRE is %f urad" % par[1]) print("gaussian peak SIGMA is %f urad" % par[2]) if bAverage: print("gaussian peak integral over (average) bin size i.e. EFFICIENCY is %f" % (gaussIntegral[0]/np.nanmean([xPlot[i+1]-xPlot[i] for i in range(len(xPlot[:-1]))]))) else: print("toggle average over same-abscissa points to get a value for the EFFICIENCY") if bPlot: plt.close("channelingFits") plt.figure("channelingFits", figsize=[8, 5]) plt.title("angular phase space projection to %s axis" % "y" if bXSide else "x") plt.xlabel("output angle [urad]") plt.plot(xPlot, yPlot, marker="*", ms=1, lw=0, c="C0") if bFit: plt.plot(xFit, yFit, c="r", lw=1.5) plt.axvline(x = par[1], c="r", lw=1) plt.axvline(x = par[1]-par[2], c="r", ls=":", lw=1) plt.axvline(x = par[1]+par[2], c="r", ls=":", lw=1) plt.tight_layout() plt.show() # - # --- # ## WHITEBOARD # + jupyter={"source_hidden": true} # + jupyter={"source_hidden": true}
anaMamiDefl.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: etk2_env # language: python # name: etk2_env # --- # # Excel Extractor # # ETK's Excel Extractor is a cell-based extractor for extracting data from compatible spreadsheets. # # ## Souce spreadsheet # # The example spreadsheet file named `alabama.xml` and it has a sheet named `16tbl08al`, in which row 1 to row 5 and row 60 to row 62 are metadata, 6A to M59 is a table (which has row and column headers). For this example, I'm going to extract data from C7 to M33 (see the picture attached below). # # ![screenshot.png](screenshot.png) # ## Define where and how to extract data # # Excel Extractor will scan cell-by-cell within a region that you specified and populate variables that you defined. # # # ### Define variable # # In this particular example, I want to extract value of all cells in region (C7, M33) and I defined a variable called `value`. Its value will be extracted from a cell located at `$col,$row` where `$col` and `$row` mean current column id and row id that the scanner is traversing at. The return is a list of object which contains user-defined variables. import pprint from etk.extractors.excel_extractor import ExcelExtractor ee = ExcelExtractor() variables = { 'value': '$col,$row' } raw_extractions = ee.extract('alabama.xls', '16tbl08al', ['C,7', 'M,33'], variables) pprint.pprint(raw_extractions[:10]) # print first 10 # ### Coordinate variable # # Excel Extractor allows you to define multiple variables. This is useful if you want to extract the data from other cells which are associated with current cell. In this example, I also need column header (category) and county name of every cell in the region. 
It supports constant coordinate like `($B,$1)` (which means the cell at column B row 1) or using `+` and `-` to caculate relative coordinate like `($B-1,$row+1)` (which means the cell at column A and its row id is current row id + 1). variables = { 'value': '$col,$row', 'county': '$B,$row', 'category': '$col,$6' } raw_extractions = ee.extract('alabama.xls', '16tbl08al', ['C,7', 'M,33'], variables) pprint.pprint(raw_extractions[:10]) # print first 10 # ### Single variable # # Besides the coordinate, the value of variables can also be a builtin variable (it only has `$row` and `$col` right now). This can be used for tracking provenance of extractions. Both row and column id here are presented in numeric form (base is 0). variables = { 'value': '$col,$row', 'county': '$B,$row', 'category': '$col,$6', 'from_row': '$row', 'from_col': '$col' } raw_extractions = ee.extract('alabama.xls', '16tbl08al', ['C,7', 'M,33'], variables) pprint.pprint(raw_extractions[:10]) # print first 10 # ## Wrap them up in ETK module and post processing # # The below example shows how to use this extractor in ETK module. The extractor's variable syntax only supports using a single builtin variable or a coordinate. All the post processings need to be done after extraction. 
# + import os, sys from etk.etk import ETK from etk.etk_module import ETKModule from etk.extractors.excel_extractor import ExcelExtractor from etk.utilities import Utility class ExampleETKModule(ETKModule): """ Abstract class for extraction module """ def __init__(self, etk): ETKModule.__init__(self, etk) self.ee = ExcelExtractor() def document_selector(self, doc): return 'file_path' in doc.cdr_document def process_document(self, doc): """ Add your code for processing the document """ variables = { 'value': '$col,$row', 'county': '$B,$row', 'category': '$col,$6', 'from_row': '$row', 'from_col': '$col' } raw_extractions = self.ee.extract(doc.cdr_document['file_path'], '16tbl08al', ['C,7', 'M,33'], variables) extracted_docs = [] for d in raw_extractions: # post processing d['category'] = d['category'].replace('\n', ' ').strip() d['county'] = d['county'].replace('\n', ' ').strip() d['from_row'] = int(d['from_row']) d['from_col'] = int(d['from_col']) # create sub document d['doc_id'] = Utility.create_doc_id_from_json(d) extracted_docs.append(etk.create_document(d)) return extracted_docs # if __name__ == "__main__": etk = ETK(modules=ExampleETKModule) doc = etk.create_document({'file_path': 'alabama.xls'}) docs = etk.process_ems(doc) for d in docs[1:11]: # print first 10 print(d.value)
examples/excel_extractor/excel extractor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import hypertools as hyp import seaborn as sns # %matplotlib inline # + #load in the data fname = 'At Risk Students For Dartmouth.xlsx' sheet1 = 'Year 16-17' sheet2 = 'Year 17-18 9.25.17' columns = ('id', 'grade', 'age', 'school', 'sex', 'homeless', 'disadvantaged', 'specialneeds', 'excused1', 'unexcused1', 'tardy1', 'excused2', 'unexcused2', 'tardy2', 'excused3', 'unexcused3', 'tardy3', 'excused4', 'unexcused4', 'tardy4') y1_data = pd.read_excel(fname, sheetname=sheet1, skiprows=[0], names=columns) y2_data = pd.read_excel(fname, sheetname=sheet2, skiprows=[0], names=columns) #use student IDs as the index y1_data.set_index('id', inplace=True) y2_data.set_index('id', inplace=True) # + #do some data cleaning #in "disadvantaged" column, replace "YES" with 1 and NaN with 0 y1_data['disadvantaged'] = y1_data['disadvantaged'].map({np.nan: 0, 'YES': 1}) y2_data['disadvantaged'] = y2_data['disadvantaged'].map({np.nan: 0, 'YES': 1}) #in "disadvantaged" column, replace "YES" with 1 and NaN with 0 #y1_data['specialneeds'] = y1_data['specialneeds'].map({np.nan: '', '504': 504, 'IEP': IEP }) #y1_data['specialneeds'] = y2_data['specialneeds'].map({np.nan: '', '504': 504, 'IEP': IEP }) #replace '---' with 0 (Fourth marking period columns) y1_data.replace('---', 0, inplace=True) y2_data.replace('---', 0, inplace=True) # - y1_data.head() pd.unique(y1_data['specialneeds']) #create a new column total_debauchery which has the total number of unexcused absences for all marking periods y1_data['total_debauchery'] = y1_data['unexcused1'] + y1_data['unexcused2']+ y1_data['unexcused3']+ y1_data['unexcused4'] y1_data['total_excused'] = y1_data['excused1'] + y1_data['excused2']+ y1_data['excused3']+ 
y1_data['excused4'] y1_data #create two different data frames based on economic status (advantaged vs. disadvantaged) df_adv = y1_data.drop(y1_data[(y1_data.disadvantaged == 1)].index) df_disadv = y1_data.drop(y1_data[(y1_data.disadvantaged == 0)].index) df_disadv # + #get two plots side by side, one for economically advantaged and one for economically disadvantaged #this is only for first marking period fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True) ax1.set_title('Economically Disadvantaged') ax2.set_title('Economically Advantaged') variables = ['age', 'total_debauchery', 'total_excused'] # Calculate correlations corr1 = df_disadv[variables].corr() corr2 = df_adv[variables].corr() # Heatmap sns.heatmap(corr1, ax = ax1) sns.heatmap(corr2, ax = ax2) # - sns.boxplot(data = y1_data, x='disadvantaged', y = 'total_debauchery', order=[0, 1]) plt.ylim(0,50) data = y1_data data['disadvantaged'] = data['disadvantaged'].map({0: 'A', 1: 'D'}) data # + #distribution plot of total absences sns.distplot(df_adv['total_debauchery'], bins = 20) sns.distplot(df_disadv['total_debauchery'], bins = 20) plt.xlim(0,100) #disadv is in green, adv is in blue print('mean advantaged: ' + str(np.mean(df_adv['total_debauchery']))) print('mean disadvantaged: ' + str(np.mean(df_disadv['total_debauchery']))) # - # # Things we've learned # - Problems with the data? Some students have > 365 total absences # - Significance of age differs according to economic status. To explore: is there some sort of interesting/non-linear trend? Positive or negative correlation? # - Box plots: disadvantaged students have more absences # - Distribution plots/histograms: # - proportionally more disadvantaged students have perfect attendance. # - proportionally more disadvantaged students have small numbers of absences (vs. advantaged students) # - of the students with large numbers of absences, nearly all are disadvantaged
data-stories/school-attendance/School-Attendance-Lil.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # RSTechTalk MFlix # ## Yes it is # # This is a movie report using Jupyter Notebook , Python, PyMongo , and MongoDB # ### A. Initializing the connection to MongoDB Atlas # + # import goes here from pymongo import MongoClient import pprint import pandas as pd import matplotlib.pyplot as plt pp = pprint.PrettyPrinter(indent=4) client = MongoClient("mongodb+srv://dev:<EMAIL>.<EMAIL>/<dbname>?retryWrites=true&w=majority") # - # ### B. Displaying the first movie in the database movie_db = client.sample_mflix.movies first_movie = movie_db.find_one() print(f"The first movie in the collection is {first_movie['title']}") print("The complete data is shown below") pp.pprint(first_movie) # ### C. Count all collections count = movie_db.count_documents({}) print(f"There are a total of {count} movies and series in the collection") # ### D. Get all series title (limit the output to 10) all_series_cursor = movie_db.find({ "type":"series" }, { "_id":0, "title":1, "type":1 } ).limit(10) all_series = list(all_series_cursor) pp.pprint(all_series) all_series_cursor = movie_db.find({ "type":"series" }, { "_id":0, "title":1, "type":1 } ).limit(10).skip(9) all_series = list(all_series_cursor) pp.pprint(all_series) # ### E.
Put mongodb cursor to a pandas dataframe movies_cursor = movie_db.find( { "type":"movie" }, { "title":1, "genres":1, "runtime":1, "year":1, "rated":1 } ) movies_list = list(movies_cursor) count = len(movies_list) print(f"There are {count} movies in the collection!") movie_df = pd.DataFrame(movies_list) movie_df.head() count_in_df = len(movie_df) print(f"There are {count_in_df} movies in the collections!") movie_df = movie_df.drop(["_id","genres","runtime"], axis=1) movie_df.head() grouped_by_year_df = movie_df.groupby('year') number_of_groups = len(grouped_by_year_df.groups) print(f"There are {number_of_groups} in the collection") for name, group in grouped_by_year_df: print(f" ---------- {name} -------------------") print(group) grouped_by_rate_df = movie_df.groupby('rated') number_of_groups = len(grouped_by_rate_df.groups) print(f"There are {number_of_groups} in the collection") for name, group in grouped_by_rate_df: print(f" ---------- {name} -------------------") print(group) # ### F. Visualization with Pandas for_count_rated_df = movie_df.drop(["year"],axis=1) for_count_rated_df.groupby('rated').count().plot(kind='bar') plt.show() group_by_rated=for_count_rated_df.groupby('rated').count() group_by_rated for_count_year_df = movie_df.drop(["rated"],axis=1) year_count = for_count_year_df.groupby('year').count().plot(kind='line') plt.show() for_count_year_df = movie_df.drop(["rated"],axis=1) year_count = for_count_year_df.groupby('year').count() #.plot(kind='bar') #plt.show() year_count #.plot(kind="line", x="title", y="year") #plt.show() # # End of Report
0 - Getting Started/CodeSamples/notebook/MovieReport.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Pdugovich/DS-Unit-2-Applied-Modeling/blob/master/module1/assignment_applied_modeling_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="AkysMJxfpFrX" colab_type="text" # Lambda School Data Science # # *Unit 2, Sprint 3, Module 1* # # --- # # # # Define ML problems # # You will use your portfolio project dataset for all assignments this sprint. # # ## Assignment # # Complete these tasks for your project, and document your decisions. # # - [ ] Choose your target. Which column in your tabular dataset will you predict? # - [ ] Choose which observations you will use to train, validate, and test your model. And which observations, if any, to exclude. # - [ ] Determine whether your problem is regression or classification. # - [ ] Choose your evaluation metric. # - [ ] Begin to clean and explore your data. # - [ ] Begin to choose which features, if any, to exclude. Would some features "leak" information from the future? 
# # + id="btnsh6mLpFrY" colab_type="code" colab={} # Import libraries import pandas as pd import os # + id="hw4oraDMpFrd" colab_type="code" colab={} df_original = pd.read_csv('oh_columbus_2019_08_13.csv') # + id="x1PciAmKpFrh" colab_type="code" colab={} outputId="06dc4b9c-6950-46df-89e7-4148565a6100" df_original.head() # + id="Iy2KOmbnpFrm" colab_type="code" colab={} #Making a duplicate of the dataframe to retain the original df = df_original.copy() # + [markdown] id="S7ZBLeTCpFrp" colab_type="text" # ## Selecting a target # + [markdown] id="-Vkbt7YcpFrq" colab_type="text" # I'll be trying to predict whether an action is taken against the person being pulled over, this includes 'arrest' or 'citation'. The alternative is 'warning' # + [markdown] id="l7Mdvpe3pFrr" colab_type="text" # ## Selecting how to train/test split # + [markdown] id="NYjCaFpJpFrs" colab_type="text" # The dates range from the start of 2012 to the end of 2016. I'll try a couple different splits, but first, I'm going to train on 2012-2015, validate on 2015, then test on 2016. I might also try randomized or time-series crossvalidation. 
# + id="l8EpKJ4VpFrt" colab_type="code" colab={} outputId="9e9e338b-08ea-4526-cc73-1c22d1eb7918" df.dtypes # + id="IuLd8ZZFpFrx" colab_type="code" colab={} # Converting date and time to datetime format to split the data # + id="L1YJJz7IpFr0" colab_type="code" colab={} df['date'] = pd.to_datetime(df['date']) # + id="alZmJ1fWpFr4" colab_type="code" colab={} outputId="93b83a37-4072-42e0-f0fe-d80c0f33435a" df['date'] # + id="aCBta6-hpFr7" colab_type="code" colab={} #Separating my data my_train = df[df['date'] <= '2014-12-31'] my_val = df[(df['date'] >= '2015-01-01') & (df['date'] <= '2015-12-31')] test = df[df['date'] >= '2016-01-01'] # + id="DkooD0Q7pFr_" colab_type="code" colab={} outputId="2d35b5c3-7000-474c-b5ce-278e2a66af01" my_train.head() # + id="uTmASSckpFsC" colab_type="code" colab={} outputId="d60c282e-c3ff-4802-fdc7-05b4abccb1b2" my_train.shape,my_val.shape,test.shape # + id="WXkAR5QGpFsG" colab_type="code" colab={} outputId="a992fb12-c3e3-4f6e-d38b-ee681ded521b" #Checking to make sure I grabbed all of the data my_train.shape[0] + my_val.shape[0] + test.shape[0] == df.shape[0] # + id="eM9ySMGbpFsK" colab_type="code" colab={} outputId="9b837701-c06f-4599-ca9d-495dfdc91cfc" df.shape # + [markdown] id="PHoBEwLOpFsN" colab_type="text" # ## Regression or Classification? # This will be a binary classification # + id="4UeS2YWQpFsO" colab_type="code" colab={} outputId="9daff77b-b243-4873-f6d6-615c76e83554" #Citation and arrest will be merged into a single value and be the my_train['outcome'].value_counts(normalize=True) # + [markdown] id="c2uU-asBpFsS" colab_type="text" # ## Evaluation Metric # I will be using roc/auc to evaluate. I might also use accuracy, since I believe I can improve greatly over baseline. 
# + [markdown] id="jYtwZQiBpFsT" colab_type="text" # ## Data Exploration / Cleaning # + id="OQgw_MyIpFsU" colab_type="code" colab={} outputId="1cf35cb6-c22e-4f30-d0d2-7ac4e39481d0" my_train.describe() # + id="XrGAjhwfpFsY" colab_type="code" colab={} outputId="87f9e5c9-e895-417e-9ee3-d2f55c778e99" my_train.describe(exclude='number') # + id="XVKOEK-opFsb" colab_type="code" colab={} outputId="aa25ae26-bf65-4b24-d907-3686c8dadcdc" my_train.isnull().sum() # + [markdown] id="ToPfeWE_pFsf" colab_type="text" # This is interesting. All of the entries where an outcome is missing, there were vehicle searches. # Still, there are only 35 instances. It's probably safe to drop all of the nan outcomes without losing any value # + id="0knO9KbipFsg" colab_type="code" colab={} outputId="a8510a28-a88b-43ea-e74e-8fb92ee54d49" my_train[my_train['outcome'].isnull() == True] # + id="Uc9x_KdLpFsm" colab_type="code" colab={} outputId="8d60e7c1-3b29-4fdf-c927-607dc901aadc" df['location'].value_counts() # + id="sTiGhP4xpFsr" colab_type="code" colab={} outputId="3190ed8a-e5b6-4e76-ba29-abcea1578f05" my_train['precinct'].value_counts(dropna=False ) # + [markdown] id="mxMakm6CpFsu" colab_type="text" # - 'raw_row_number' just seems to count each individual action against a person. Three actions were taken against the first person, so that is raw_row_number 1,2, and 3. That probably isn't useful, so I'll remove it # # # - 'location' is interesting in that is an intersection. I need to explore it more to see if it 'x and y' and also 'y and x' exist. I could strip out the ' and ' and separate the two crossroads into different columns. # # # - 'type' is a single value, so I can drop it # # # - 'arrest_made', 'citation_issued', and 'warning_issued' can be dropped because 'warning_issued' will be used as my binary classifier. I will need to invert the t/f and astype it to an integer. # # # - I need to do more exploration on 'search_conducted'. 
I'll likely drop that as data leakage since I feel like it would be highly correlated with action taken... After further exploration, the data is too dirty to deal with. There are multiple spellings, spelling errors, formatting differences. Also, this is covered in long/lat coordinates. # # # - 'raw_enforcement_taken' will be dropped as it includes my target # + id="bw3btESIpFsv" colab_type="code" colab={} # + id="bhTq9WKApFsy" colab_type="code" colab={} # + id="KEDV1HdIpFs1" colab_type="code" colab={} # + id="hgGCBOILpFs4" colab_type="code" colab={} # + [markdown] id="2AzsS6DRpFs6" colab_type="text" # ## Data Wrangling # + id="-HTWv8RipFs7" colab_type="code" colab={} def wrangle(X): #To prevent a warning. X = X.copy() # Pulling hour and minute from time column X['hour'] = pd.to_datetime(X['time']).dt.hour X['minute'] = pd.to_datetime(X['time']).dt.minute #X = X.drop(columns='time') #Dropping unecessary columns X = X.drop(columns=['raw_row_total','type','location']) #Dropping data leakage/time-traveling data X = X.drop(columns=['arrest_made','citation_issued','search_conducted', 'raw_enforcement_taken'] ) #Creating separate columns if a row has these values missing. 
col_has_missing = ['time','lat','lng','precinct','zone'] for column in col_has_missing: X[column+'_MISSING'] = x[column].isnull() return X # + id="9kC_BwiapFs-" colab_type="code" colab={} # + id="5kWEbF4DpFtB" colab_type="code" colab={} # + id="WDEVEtPHpFtF" colab_type="code" colab={} # + id="pNom5qeupFtH" colab_type="code" colab={} # + id="OnY0NhK3pFtM" colab_type="code" colab={} # + id="sD8B8SZipFtS" colab_type="code" colab={} # + id="vBwl4ewvpFtU" colab_type="code" colab={} # + id="v_HexT3DpFtX" colab_type="code" colab={} # + id="YHEMEvsBpFtZ" colab_type="code" colab={} # + id="WgBnudyTpFtb" colab_type="code" colab={} # + id="bLX0LAVBpFtd" colab_type="code" colab={} # + id="FtoHgC1YpFtg" colab_type="code" colab={} # + id="ThlTnj33pFtj" colab_type="code" colab={} # + id="2EyQc2TSpFtm" colab_type="code" colab={}
module1/assignment_applied_modeling_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="wxZo6dKGzElp" # # # 2. Exercícios Numpy - <NAME> # # Agora que aprendemos sobre o NumPy, vamos testar seu conhecimento. Começaremos com algumas tarefas simples e, em seguida, você terá algumas perguntas mais complicadas. # + [markdown] id="Dtnzdm45zElx" # #### Import NumPy as np # + id="tfnyOK5azEly" import numpy as np # + [markdown] id="EOnwIuZrzElz" # #### Create an array of 10 zeros # + colab={"base_uri": "https://localhost:8080/"} id="Pg2g-u3QzEl0" outputId="b26017b7-760a-471e-b7bb-3473101099eb" array = np.zeros((10)) array # + [markdown] id="0vKh8EWVzEl1" # #### Create an array of 10 ones # + colab={"base_uri": "https://localhost:8080/"} id="YcOpI8YvzEl2" outputId="53830f51-b2e4-4027-9436-7bcc9717bc1b" array= np.ones(10) array # + [markdown] id="Qnu69wQOzEl2" # #### Create an array of 10 fives # + colab={"base_uri": "https://localhost:8080/"} id="pv3yNvxkzEl3" outputId="eff5d7b9-0d0c-41fc-9e71-5fbd77693200" array = np.full(10, 5.) 
array # + [markdown] id="cCozmHW6zEl3" # #### Create an array of the integers from 10 to 50 # + colab={"base_uri": "https://localhost:8080/"} id="MTvubV_azEl4" outputId="e97c4054-65fa-4e0a-cd96-faed2a489fad" array = np.arange(10, 51) array # + [markdown] id="C6MbDsGMzEl5" # #### Create a 3x3 matrix with values ranging from 0 to 8 # + colab={"base_uri": "https://localhost:8080/"} id="F0rffg-FsaU4" outputId="702f311d-a5ec-41c0-bd98-e8bfe96e2c0a" array = np.arange(9).reshape((3,3)) #array = array.reshape((3,3)) array # + [markdown] id="-DEREkLlzEl5" # #### Create a 3x3 identity matrix # + colab={"base_uri": "https://localhost:8080/"} id="EL_PRTMMzEl6" outputId="87c3cd6c-8fb2-4e2f-8eae-9a8dbf6db3fb" array = np.eye(3,3) array # + [markdown] id="wcZrbYO6zEl6" # #### Use NumPy to generate a random number between 0 and 1 # + colab={"base_uri": "https://localhost:8080/"} id="FNFwv3yqzEl6" outputId="e9abcd79-6f9b-4d67-a490-c361fc78c21d" array = np.random.random(1) array # + [markdown] id="qC_S3r-lzEl7" # #### Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution # + colab={"base_uri": "https://localhost:8080/"} id="FkEfrZa0zEl7" outputId="362fe5a0-baac-4151-a351-871cb514eb51" array = np.random.randn(25) array # + [markdown] id="R5iFZnkZzEl7" # #### Create the following matrix: # + id="KJzGQI-ozEl8" colab={"base_uri": "https://localhost:8080/"} outputId="e350962f-91f6-4836-b813-f83bbce3f557" array = np.arange(0.01,1.01,0.01).reshape(10,10) array # + [markdown] id="q6byyjr4zEl8" # #### Create an array of 20 linearly spaced points between 0 and 1: # + colab={"base_uri": "https://localhost:8080/"} id="IWaEw1AszEl8" outputId="faf33b7e-a261-4c4d-c3d4-a103ae682916" array = np.linspace(0, 1, 20) array # + [markdown] id="KoURHuLZzEl8" # ## Numpy Indexing and Selection # # Now you will be given a few matrices, and be asked to replicate the resulting matrix outputs: # + colab={"base_uri": "https://localhost:8080/"} id="AF-0xbLUzEl9" 
outputId="62f4caf9-50bc-451d-f3b3-4b717734ad7b" mat = np.arange(1,26).reshape(5,5) mat # + id="p_cf5unizEl9" # WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW # BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T # BE ABLE TO SEE THE OUTPUT ANY MORE # + colab={"base_uri": "https://localhost:8080/"} id="ELgvYl3fzEl9" outputId="7ac89368-15f0-4a3d-df5e-6814d63f222b" a = mat[2:,1:] a # + id="5SBP-IXPzEl9" # WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW # BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T # BE ABLE TO SEE THE OUTPUT ANY MORE # + id="6XphVLBGzEl-" colab={"base_uri": "https://localhost:8080/"} outputId="0fea6315-8707-4ae5-e469-69db5bc200a7" b = mat[3,4] b # + id="NkAW9XOEzEl-" # WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW # BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T # BE ABLE TO SEE THE OUTPUT ANY MORE # + id="xFSes5nVzEl-" colab={"base_uri": "https://localhost:8080/"} outputId="6fa4041d-f765-49ce-80fb-e6192113c916" c = mat[:3:,1:2] c # + id="HPxxsEL7zEl_" # WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW # BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T # BE ABLE TO SEE THE OUTPUT ANY MORE # + id="316QHCtqzEl_" colab={"base_uri": "https://localhost:8080/"} outputId="b8b0e38a-d21c-4b66-a6ec-3ba2aef1af28" d = mat[4:,0:] d # + id="uUTX6VCDzEl_" # WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW # BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T # BE ABLE TO SEE THE OUTPUT ANY MORE # + id="0ZIoFi47zEmA" colab={"base_uri": "https://localhost:8080/"} outputId="5f4afd26-e432-4c93-89ab-a4da7fe2145e" e = mat[3:,0:] e # + [markdown] id="haNDcWSRzEmA" # ### Now do the following # + [markdown] id="fOllMRSwzEmA" # #### Get the sum of all the values in mat # + id="0QvH7VIDzEmA" colab={"base_uri": "https://localhost:8080/"} outputId="6ae9f96e-dcde-4f30-fecd-ae53b34fc762" somar = mat.sum() somar # + [markdown] id="FYfGhpIKzEmB" # #### Get the standard 
deviation of the values in mat # + id="xCJ-OQRizEmB" colab={"base_uri": "https://localhost:8080/"} outputId="f7ed3ff6-0c7a-483a-97a0-1d50f40f3ade" desviopadrao = np.std(mat) desviopadrao # + [markdown] id="y5h9UYbuzEmB" # #### Get the sum of all the columns in mat # + id="wOycsxt7zEmB" colab={"base_uri": "https://localhost:8080/"} outputId="63f86bf1-c9a3-4fa2-e0a6-bb0806b2632f" soma = mat.sum(axis=0) soma # + [markdown] id="APMNA8EwzEmC" # # Great Job!
22_NumpyExercises.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Demo: Externally Defined Layouts # This notebook computes a layout using NetworkX and sends it to Graphistry for subsequent interactive analysis. import graphistry import networkx as nx import pandas as pd # ## Make a NetworkX Graph num_nodes = 1000 G=nx.Graph() G.add_nodes_from(range(num_nodes)) Edges = [(x, (x * 2) % (num_nodes/5)) for x in range(num_nodes)] + [(x, (x + 1) % 20) for x in range(20)] G.add_edges_from(Edges) G # ## Use a NetworkX layout # %time pos=nx.fruchterman_reingold_layout(G) pos[0] # ## Combine into node, edge dataframes def pos2df (pos): nodes = pd.DataFrame({'key': pos.keys(), 'pos_0': [pos[k][0] for k in pos.keys()], 'pos_1': [pos[k][1] for k in pos.keys()]}) nodes.columns = ['key', 'x', 'y'] return nodes nodes = pos2df(pos) nodes[:3] edges = pd.DataFrame(Edges) edges.columns = ['src', 'dst'] edges[:3] # ## Autolayout mode bindings = graphistry.nodes(nodes).edges(edges).bind(source='src', destination='dst', node='key') bindings.plot() # ## Predefined layout mode # As the node table provides "x" and "y" columns, they will be automatically used as starting positions. To prevent the automatic layout algorithm from moving the nodes on load, we also set the URL parameter "play" to 0 seconds. (Both settings will likely change.) bindings2 = bindings.settings(url_params={'play':0}) bindings2.plot() # ## For fun, here's a circular layout pos2=nx.circular_layout(G, scale=100) nodes2 = pos2df(pos2) bindings2.nodes(nodes2).plot()
demos/more/external_layout/networkx_layout.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # ### *IPCC SR15 scenario assessment* # # <img style="float: right; height: 80px; padding-left: 20px;" src="../_static/IIASA_logo.png"> # <img style="float: right; height: 80px;" src="../_static/IAMC_logo.jpg"> # # # Descriptive statistics of electricity generation # # This notebook computes indicators and diagnostics of the primary-energy timeseries by fuel # for **Table 2.7** in the IPCC's _"Special Report on Global Warming of 1.5°C"_. # # The scenario data used in this analysis can be accessed and downloaded at [https://data.ene.iiasa.ac.at/iamc-1.5c-explorer](https://data.ene.iiasa.ac.at/iamc-1.5c-explorer). # ## Load `pyam` package and other dependencies import pandas as pd import numpy as np import warnings import io import itertools import yaml import math import matplotlib.pyplot as plt plt.style.use('style_sr15.mplstyle') # %matplotlib inline import pyam # ## Import scenario data, categorization and specifications files # # The metadata file must be generated from the notebook `sr15_2.0_categories_indicators` included in this repository. # If the snapshot file has been updated, make sure that you rerun the categorization notebook. # # The last cell of this section loads and assigns a number of auxiliary lists as defined in the categorization notebook. 
sr1p5 = pyam.IamDataFrame(data='../data/iamc15_scenario_data_world_r1.1.xlsx')

sr1p5.load_metadata('sr15_metadata_indicators.xlsx')

# +
with open("sr15_specs.yaml", 'r') as stream:
    specs = yaml.load(stream, Loader=yaml.FullLoader)

rc = pyam.run_control()
for item in specs.pop('run_control').items():
    rc.update({item[0]: item[1]})
cats = specs.pop('cats')
cats_15 = specs.pop('cats_15')
cats_15_no_lo = specs.pop('cats_15_no_lo')
marker = specs.pop('marker')
# -

# ## Downselect scenario ensemble to categories of interest for this assessment

years = [2020, 2030, 2050]

df = sr1p5.filter(category=cats_15, year=years)

# ## Initialize a `pyam.Statistics` instance

stats = pyam.Statistics(df=df,
                        filters=[('all 1.5', {}),
                                 ('no & lo os 1.5', {'category': cats_15_no_lo}),
                                 ('hi os 1.5', {'category': ['1.5C high overshoot']})
                                 ], rows=True)

# +
header = 'Electricity generation (EJ)'
header_share = 'Share in electricity generation (%)'
header_growth = 'Growth (factor)'

# CONSISTENCY FIX: reuse the header variables defined just above instead of
# re-spelling the string literals inside the dict (previously `header_share`
# and `header_growth` were duplicated inline, so editing one copy would
# silently desynchronize the summary-table columns).
statistics_settings = dict(
    header=header,
    header_share=header_share,
    header_growth=header_growth,
    growth_year=2050,
    base_year=2020
)
# -

def add_statistics(data, base, row, growth_year, base_year,
                   header, header_share, header_growth):
    """Add one indicator row to the module-level `stats` summary.

    data: timeseries DataFrame indexed by (model, scenario), columns = years.
    base: denominator timeseries for the share column, or None to skip it.
    row: row label in the summary table.
    Also adds the growth factor data[growth_year] / data[base_year] - 1.
    """
    stats.add(data, header=header, row=row)
    if base is not None:
        stats.add(data / base * 100, header=header_share, row=row)
    stats.add(data[growth_year] / data[base_year] - 1,
              header=header_growth, row=row,
              subheader='{}-{}'.format(base_year, growth_year))

# ## Extract total electricity generation timeseries and add summary statistics

se = df.filter(variable='Secondary Energy|Electricity').timeseries()
se.index = se.index.droplevel([2, 3, 4])

add_statistics(se, None, 'total generation', **statistics_settings)

# ## Compute share of renewables by various types in electricity generation
#
# Only use scenarios for this indicator that report both biomass and the aggregate non-biomass timeseries - otherwise, the share would be distorted.
# ### All renewables (biomass and non-biomass)

df_pe_res = df.filter()
df_pe_res.require_variable('Secondary Energy|Electricity|Non-Biomass Renewables', exclude_on_fail=True)
df_pe_res.require_variable('Secondary Energy|Electricity|Biomass', exclude_on_fail=True)
df_pe_res.filter(exclude=False, inplace=True)

res = (
    df_pe_res.filter(variable=['Secondary Energy|Electricity|Biomass',
                               'Secondary Energy|Electricity|Non-Biomass Renewables'])
    .timeseries()
    .groupby(['model', 'scenario']).sum()
)
add_statistics(res, se, 'renewables', **statistics_settings)

# ### Biomass

res_bio = (
    df_pe_res.filter(variable=['Secondary Energy|Electricity|Biomass'])
    .timeseries()
    .groupby(['model', 'scenario']).sum()
)
add_statistics(res_bio, se, 'biomass', **statistics_settings)

# ### Non-biomass renewables

res_non_bio = (
    df_pe_res.filter(variable=['Secondary Energy|Electricity|Non-Biomass Renewables'])
    .timeseries()
    .groupby(['model', 'scenario']).sum()
)
add_statistics(res_non_bio, se, 'non-biomass', **statistics_settings)

# ### Renewable energy from wind and solar
#
# As above, verify that scenarios report values for both 'Wind' and 'Solar'

df_win_sol = df.filter()
df_win_sol.require_variable('Secondary Energy|Electricity|Solar', exclude_on_fail=True)
df_win_sol.require_variable('Secondary Energy|Electricity|Wind', exclude_on_fail=True)
df_win_sol.filter(exclude=False, inplace=True)

win_sol = (
    # BUGFIX: the Wind variable name previously had a trailing space
    # ('Secondary Energy|Electricity|Wind '), which does not match the
    # variable required above, so wind generation was silently dropped from
    # the 'wind & solar' indicator.
    df_win_sol.filter(variable=['Secondary Energy|Electricity|Solar',
                                'Secondary Energy|Electricity|Wind'])
    .timeseries()
    .groupby(['model', 'scenario']).sum()
)
add_statistics(win_sol, se, 'wind & solar', **statistics_settings)

# ## Compute share of nuclear in electricity generation

nuc = (
    df.filter(variable=['Secondary Energy|Electricity|Nuclear'])
    .timeseries()
    .groupby(['model', 'scenario']).sum()
)
add_statistics(nuc, se, 'nuclear', **statistics_settings)

# ## Compute share of fossil in electricity generation

df_fossil = df.filter()
df_fossil.require_variable('Secondary Energy|Electricity|Coal', exclude_on_fail=True)
df_fossil.require_variable('Secondary Energy|Electricity|Gas', exclude_on_fail=True)
df_fossil.require_variable('Secondary Energy|Electricity|Oil', exclude_on_fail=True)
df_fossil.filter(exclude=False, inplace=True)

fossil = (
    # BUGFIX: this aggregate previously filtered on the full `df` instead of
    # `df_fossil`, bypassing the require_variable() downselection applied
    # just above (which the coal/gas/oil indicators below do use), so the
    # aggregate and its components could be computed over different scenario
    # sets.
    df_fossil.filter(variable=['Secondary Energy|Electricity|Coal',
                               'Secondary Energy|Electricity|Gas',
                               'Secondary Energy|Electricity|Oil'])
    .timeseries()
    .groupby(['model', 'scenario']).sum()
)
add_statistics(fossil, se, 'fossil', **statistics_settings)

coal = (
    df_fossil.filter(variable=['Secondary Energy|Electricity|Coal'])
    .timeseries()
    .groupby(['model', 'scenario']).sum()
)
add_statistics(coal, se, 'coal', **statistics_settings)

gas = (
    df_fossil.filter(variable=['Secondary Energy|Electricity|Gas'])
    .timeseries()
    .groupby(['model', 'scenario']).sum()
)
add_statistics(gas, se, 'gas', **statistics_settings)

oil = (
    df_fossil.filter(variable=['Secondary Energy|Electricity|Oil'])
    .timeseries()
    .groupby(['model', 'scenario']).sum()
)
add_statistics(oil, se, 'oil', **statistics_settings)

# ## Display and export summary statistics for all 1.5°C pathways to `xlsx`

summary = (
    stats.summarize(center='median', fullrange=True)
    .reindex(columns=['count', header, header_share, header_growth], level=0)
)
summary

summary.to_excel('output/table_2.7_electricity_generation.xlsx')
assessment/sr15_2.4.2.2_electricity_generation_statistics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Running custom model training on Vertex Pipelines # # In this lab, you will learn how to run a custom model training job using the Kubeflow Pipelines SDK on Vertex Pipelines. # # ## Learning objectives # # * Use the Kubeflow Pipelines SDK to build scalable ML pipelines. # * Create and containerize a custom Scikit-learn model training job that uses Vertex AI managed datasets. # * Run a batch prediction job within Vertex Pipelines. # * Use pre-built components for interacting with Vertex AI services, provided through the google_cloud_pipeline_components library. # # ## Vertex Pipelines setup # There are a few additional libraries you'll need to install in order to use Vertex Pipelines: # # * __Kubeflow Pipelines__: This is the SDK you'll be using to build your pipeline. Vertex Pipelines supports running pipelines built with both Kubeflow Pipelines or TFX. # * __Google Cloud Pipeline Components__: This library provides pre-built components that make it easier to interact with Vertex AI services from your pipeline steps. # # Each learning objective will correspond to a __#TODO__ in the [student lab notebook](../labs/custom_model_training.ipynb) -- try to complete that notebook first before reviewing this solution notebook. # # To install both of the services to be used in this notebook, first set the user flag in the notebook cell: USER_FLAG = "--user" # !pip3 install {USER_FLAG} google-cloud-aiplatform==1.7.0 --upgrade # !pip3 install {USER_FLAG} kfp==1.8.9 google-cloud-pipeline-components==0.2.0 # You may see some warning messages in the install output. 
# After installing these packages you'll need to restart the kernel: # + import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) # - # Finally, check that you have correctly installed the packages. The KFP SDK version should be >=1.8: # !python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))" # !python3 -c "import google_cloud_pipeline_components; print('google_cloud_pipeline_components version: {}'.format(google_cloud_pipeline_components.__version__))" # ### Set your project ID and bucket # Throughout this notebook, you'll reference your Cloud project ID and the bucket you created earlier. Next you'll create variables for each of those. # # If you don't know your project ID you may be able to get it by running the following: # + import os PROJECT_ID = "" # Get your Google Cloud project ID from gcloud if not os.getenv("IS_TESTING"): # shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID: ", PROJECT_ID) # - # Otherwise, set it here: if PROJECT_ID == "" or PROJECT_ID is None: PROJECT_ID = "your-project-id" # @param {type:"string"} # Then create a variable to store your bucket name. If you created it in this lab, the following will work. Otherwise, you'll need to set this manually: BUCKET_NAME="gs://" + PROJECT_ID + "-bucket" # !echo {BUCKET_NAME} # ### Import libraries # Add the following to import the libraries you'll be using throughout this notebook: # + from kfp.v2 import compiler, dsl from kfp.v2.dsl import pipeline from google.cloud import aiplatform from google_cloud_pipeline_components import aiplatform as gcc_aip # - # ### Define constants # The last thing you need to do before building your pipeline is define some constant variables. `PIPELINE_ROOT` is the Cloud Storage path where the artifacts created by your pipeline will be written. 
You're using `us-central1` as the region here, but if you used a different region when you created your bucket, update the `REGION` variable in the code below: # + # PATH=%env PATH # %env PATH={PATH}:/home/jupyter/.local/bin REGION="us-central1" PIPELINE_ROOT = f"{BUCKET_NAME}/pipeline_root/" PIPELINE_ROOT # - # After running the code above, you should see the root directory for your pipeline printed. This is the Cloud Storage location where the artifacts from your pipeline will be written. It will be in the format of `gs://YOUR-BUCKET-NAME/pipeline_root/` # ## Configuring a custom model training job # Before you set up your pipeline, you need to write the code for your custom model training job. To train the model, you'll use the UCI Machine Learning [Dry beans dataset](https://archive.ics.uci.edu/ml/datasets/Dry+Bean+Dataset), from: <NAME>. and <NAME>., (2020), "Multiclass Classification of Dry Beans Using Computer Vision and Machine Learning Techniques."In Computers and Electronics in Agriculture, 174, 105507. [DOI](https://www.sciencedirect.com/science/article/abs/pii/S0168169919311573?via%3Dihub). # # Your first pipeline step will create a managed dataset in Vertex AI using a BigQuery table that contains a version of this beans data. The dataset will be passed as input to your training job. In your training code, you'll have access to environment variable to access this managed dataset. # # Here's how you'll set up your custom training job: # # * Write a Scikit-learn `DecisionTreeClassifier` model to classify bean types in your data. # * Package the training code in a Docker container and push it to Container Registry # # From there, you'll be able to start a Vertex AI Training job directly from your pipeline. Let's get started! 
# ### Define your training code in a Docker container # Run the following to set up a directory where you'll add your containerized code: # !mkdir traincontainer # !touch traincontainer/Dockerfile # !mkdir traincontainer/trainer # !touch traincontainer/trainer/train.py # After running those commands, you should see a directory called traincontainer/ created on the left (you may need to click the refresh icon to see it). You'll see the following in your traincontainer/ directory: # # ``` # + Dockerfile # + trainer/ # + train.py # ``` # Your first step in containerizing your code is to create a Dockerfile. In your Dockerfile you'll include all the commands needed to run your image. It'll install all the libraries you're using and set up the entry point for your training code. Run the following to create a Dockerfile file locally in your notebook: # + # %%writefile traincontainer/Dockerfile FROM gcr.io/deeplearning-platform-release/sklearn-cpu.0-23 WORKDIR / # Copies the trainer code to the docker image. COPY trainer /trainer RUN pip install sklearn google-cloud-bigquery joblib pandas google-cloud-storage # Sets up the entry point to invoke the trainer. ENTRYPOINT ["python", "-m", "trainer.train"] # - # Run the following to create `train.py` file. 
This retrieves the data from your managed dataset, puts it into a Pandas DataFrame, trains a Scikit-learn model, and uploads the trained model to Cloud Storage: # + # %%writefile traincontainer/trainer/train.py from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import roc_curve from sklearn.model_selection import train_test_split from google.cloud import bigquery from google.cloud import storage from joblib import dump import os import pandas as pd bqclient = bigquery.Client() storage_client = storage.Client() def download_table(bq_table_uri: str): prefix = "bq://" if bq_table_uri.startswith(prefix): bq_table_uri = bq_table_uri[len(prefix):] table = bigquery.TableReference.from_string(bq_table_uri) rows = bqclient.list_rows( table, ) return rows.to_dataframe(create_bqstorage_client=False) # These environment variables are from Vertex AI managed datasets training_data_uri = os.environ["AIP_TRAINING_DATA_URI"] test_data_uri = os.environ["AIP_TEST_DATA_URI"] # Download data into Pandas DataFrames, split into train / test df = download_table(training_data_uri) test_df = download_table(test_data_uri) labels = df.pop("Class").tolist() data = df.values.tolist() test_labels = test_df.pop("Class").tolist() test_data = test_df.values.tolist() # Define and train the Scikit model skmodel = DecisionTreeClassifier() skmodel.fit(data, labels) score = skmodel.score(test_data, test_labels) print('accuracy is:',score) # Save the model to a local file dump(skmodel, "model.joblib") # Upload the saved model file to GCS bucket = storage_client.get_bucket("YOUR_GCS_BUCKET") model_directory = os.environ["AIP_MODEL_DIR"] storage_path = os.path.join(model_directory, "model.joblib") blob = storage.blob.Blob.from_string(storage_path, client=storage_client) blob.upload_from_filename("model.joblib") # - # Run the following to replace YOUR_GCS_BUCKET from the script above with the name of your Cloud Storage bucket: BUCKET = BUCKET_NAME[5:] # Trim the 'gs://' before adding to 
train script # !sed -i -r 's@YOUR_GCS_BUCKET@'"$BUCKET"'@' traincontainer/trainer/train.py # You can also do this manually if you'd prefer. If you do, make sure not to include the gs:// in your bucket name when you update the script. # # Now your training code is in a Docker container and you're ready to run training in the Cloud. # ### Push container to Container Registry # # With your training code complete, you're ready to push this to Google Container Registry. Later when you configure the training component of your pipeline, you'll point Vertex Pipelines at this container. # # Replace `YOUR_PROJECT_ID` with your PROJECT_ID in the IMAGE_URI. # !PROJECT_ID=$(gcloud config get-value project) # !IMAGE_URI="gcr.io/YOUR_PROJECT_ID/scikit:v1" # Again, replace `YOUR_PROJECT_ID` with your PROJECT_ID and build your container by running the following: # !docker build ./traincontainer -t gcr.io/YOUR_PROJECT_ID/scikit:v1 # Finally, push the container to Container Registry: # !docker push gcr.io/$PROJECT_ID/scikit:v1 # Navigate to the [Container Registry section](https://console.cloud.google.com/gcr/) of your Cloud console to verify your container is there. # ## Configuring a batch prediction job # The last step of your pipeline will run a batch prediction job. For this to work, you need to provide a CSV file in Cloud Storage that contains the examples you want to get predictions on. You'll create this CSV file in your notebook and copy it to Cloud Storage using the `gsutil` command line tool. # # ### Copying batch prediction examples to Cloud Storage # The following file contains 3 examples from each class in your beans dataset. The example below doesn't include the `Class` column since that is what your model will be predicting. 
Run the following to create this CSV file locally in your notebook: # %%writefile batch_examples.csv Area,Perimeter,MajorAxisLength,MinorAxisLength,AspectRation,Eccentricity,ConvexArea,EquivDiameter,Extent,Solidity,roundness,Compactness,ShapeFactor1,ShapeFactor2,ShapeFactor3,ShapeFactor4 23288,558.113,207.567738,143.085693,1.450653336,0.7244336162,23545,172.1952453,0.8045881703,0.9890847314,0.9395021523,0.8295857874,0.008913077034,0.002604069884,0.6882125787,0.9983578734 23689,575.638,205.9678003,146.7475015,1.403552348,0.7016945718,24018,173.6714472,0.7652721693,0.9863019402,0.8983750474,0.8431970773,0.00869465998,0.002711119968,0.7109813112,0.9978994889 23727,559.503,189.7993849,159.3717704,1.190922235,0.5430731512,24021,173.8106863,0.8037601626,0.9877607094,0.952462433,0.9157600082,0.007999299741,0.003470231343,0.8386163926,0.9987269085 31158,641.105,212.0669751,187.1929601,1.132879009,0.4699241567,31474,199.1773023,0.7813134733,0.989959967,0.9526231013,0.9392188582,0.0068061806,0.003267009878,0.8821320637,0.9993488983 32514,649.012,221.4454899,187.1344232,1.183349841,0.5346736437,32843,203.4652564,0.7849831,0.9899826447,0.9700068737,0.9188051492,0.00681077351,0.002994124691,0.8442029022,0.9989873701 33078,659.456,235.5600775,178.9312328,1.316483846,0.6503915309,33333,205.2223615,0.7877214708,0.9923499235,0.9558229607,0.8712102818,0.007121351881,0.002530662194,0.7590073551,0.9992209221 33680,683.09,256.203255,167.9334938,1.525623324,0.7552213942,34019,207.081404,0.80680321,0.9900349805,0.9070392732,0.8082699962,0.007606985006,0.002002710402,0.6533003868,0.9966903078 33954,716.75,277.3684803,156.3563259,1.773951126,0.825970469,34420,207.9220419,0.7994819873,0.9864613597,0.8305492781,0.7496238998,0.008168948587,0.001591181142,0.5619359911,0.996846984 36322,719.437,272.0582306,170.8914975,1.591993952,0.7780978465,36717,215.0502424,0.7718560075,0.9892420405,0.8818487005,0.7904566678,0.007490177594,0.001803782407,0.6248217437,0.9947124371 
36675,742.917,285.8908964,166.8819538,1.713132487,0.8119506999,37613,216.0927123,0.7788277766,0.9750618137,0.8350248381,0.7558572692,0.0077952528,0.001569528272,0.5713202115,0.9787472145 37454,772.679,297.6274753,162.1493177,1.835514817,0.8385619338,38113,218.3756257,0.8016695205,0.9827093118,0.7883332637,0.7337213257,0.007946480356,0.001420623993,0.5383469838,0.9881438654 37789,766.378,313.5680678,154.3409867,2.031657789,0.8704771226,38251,219.3500608,0.7805870567,0.9879218844,0.8085170916,0.6995293312,0.008297866252,0.001225659709,0.4893412853,0.9941740339 47883,873.536,327.9986493,186.5201272,1.758516115,0.822571799,48753,246.9140116,0.7584464543,0.9821549443,0.7885506623,0.7527897207,0.006850002074,0.00135695419,0.5666923636,0.9965376533 49777,861.277,300.7570338,211.6168613,1.42123379,0.7105823885,50590,251.7499649,0.8019106536,0.9839296304,0.843243269,0.8370542883,0.00604208839,0.001829706116,0.7006598815,0.9958014989 49882,891.505,357.1890036,179.8346914,1.986207449,0.8640114945,51042,252.0153467,0.7260210171,0.9772736178,0.7886896753,0.7055518063,0.007160679276,0.001094585314,0.4978033513,0.9887407248 53249,919.923,325.3866286,208.9174205,1.557489212,0.7666552108,54195,260.3818974,0.6966846347,0.9825445152,0.7907120655,0.8002231025,0.00611066177,0.001545654241,0.6403570138,0.9973491406 61129,964.969,369.3481688,210.9473449,1.750902193,0.8208567513,61796,278.9836198,0.7501135067,0.9892064211,0.8249553283,0.7553404711,0.006042110436,0.001213219664,0.5705392272,0.9989583843 61918,960.372,353.1381442,224.0962377,1.575832543,0.7728529173,62627,280.7782864,0.7539207091,0.9886790043,0.8436218213,0.7950947556,0.005703319619,0.00140599258,0.6321756704,0.9962029945 141953,1402.05,524.2311633,346.3974998,1.513380332,0.7505863011,143704,425.1354762,0.7147107987,0.9878152313,0.9074598849,0.8109694843,0.003692991084,0.0009853172185,0.6576715044,0.9953071199 
145285,1440.991,524.9567463,353.0769977,1.486805285,0.7400216694,146709,430.0960442,0.7860466375,0.9902937107,0.8792413513,0.8192980608,0.003613289371,0.001004269363,0.6712493125,0.9980170255
146153,1476.383,526.1933264,356.528288,1.475881001,0.7354662103,149267,431.3789276,0.7319360978,0.9791380546,0.8425962592,0.8198107159,0.003600290972,0.001003163512,0.6720896099,0.991924286

# Then, copy the file to your Cloud Storage bucket:

# !gsutil cp batch_examples.csv $BUCKET_NAME

# You'll reference this file in the next step when you define your pipeline.

# ### Building a pipeline with pre-built components

# Now that your training code is in the cloud, you're ready to call it from your pipeline. The pipeline you'll define will use three pre-built components from the `google_cloud_pipeline_components` library you installed earlier. These pre-defined components simplify the code you need to write to set up your pipeline, and will allow us to use Vertex AI services like model training and batch prediction.
#
# If you can't find a pre-built component for the task you want to accomplish, you can define your own Python-based custom components. To see an example, check out [this codelab](https://codelabs.developers.google.com/vertex-pipelines-intro#5).
#
# Here's what your three-step pipeline will do:
#
# * Create a managed dataset in Vertex AI.
# * Run a training job on Vertex AI using the custom container you set up.
# * Run a batch prediction job on your trained Scikit-learn classification model.

# ### Define your pipeline

# Because you're using pre-built components, you can set up your entire pipeline in the pipeline definition.
@pipeline(name="automl-beans-custom", pipeline_root=PIPELINE_ROOT)
def pipeline(
    bq_source: str = "bq://sara-vertex-demos.beans_demo.large_dataset",
    bucket: str = BUCKET_NAME,
    project: str = PROJECT_ID,
    gcp_region: str = REGION,
    bq_dest: str = "",
    container_uri: str = "",
    batch_destination: str = ""
):
    """Three-step Vertex AI pipeline: dataset creation, custom-container
    training, and batch prediction.

    Parameters
    ----------
    bq_source : BigQuery table URI used to create the managed tabular dataset.
    bucket : staging GCS bucket passed to the training job.
    project, gcp_region : GCP project and region all three steps run in.
    bq_dest : BigQuery destination passed to the training job.
    container_uri : URI of the training container built earlier in this notebook.
    batch_destination : GCS output prefix for the batch-prediction results.
    """
    # Step 1: create a managed tabular dataset from the BigQuery source table.
    dataset_create_op = gcc_aip.TabularDatasetCreateOp(
        display_name="tabular-beans-dataset",
        bq_source=bq_source,
        project=project,
        location=gcp_region
    )
    # Step 2: run the custom training container on that dataset with an
    # 80/10/10 train/validation/test split.
    training_op = gcc_aip.CustomContainerTrainingJobRunOp(
        display_name="pipeline-beans-custom-train",
        container_uri=container_uri,
        project=project,
        location=gcp_region,
        dataset=dataset_create_op.outputs["dataset"],
        staging_bucket=bucket,
        training_fraction_split=0.8,
        validation_fraction_split=0.1,
        test_fraction_split=0.1,
        bigquery_destination=bq_dest,
        model_serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-24:latest",
        model_display_name="scikit-beans-model-pipeline",
        machine_type="n1-standard-4",
    )
    # Step 3: batch-predict with the trained model on the batch_examples.csv
    # file copied to the bucket earlier in the notebook.
    batch_predict_op = gcc_aip.ModelBatchPredictOp(
        project=project,
        location=gcp_region,
        job_display_name="beans-batch-predict",
        model=training_op.outputs["model"],
        gcs_source_uris=["{0}/batch_examples.csv".format(BUCKET_NAME)],
        instances_format="csv",
        gcs_destination_output_uri_prefix=batch_destination,
        machine_type="n1-standard-4"
    )

# ### Compile and run the pipeline

# With your pipeline defined, you're ready to compile it. The following will generate a JSON file that you'll use to run the pipeline:

compiler.Compiler().compile(
    pipeline_func=pipeline, package_path="custom_train_pipeline.json"
)

# Next, create a `TIMESTAMP` variable.
You'll use this in your job ID: # + from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") # - # Then define your pipeline job, passing in a few project-specific parameters: pipeline_job = aiplatform.PipelineJob( display_name="custom-train-pipeline", template_path="custom_train_pipeline.json", job_id="custom-train-pipeline-{0}".format(TIMESTAMP), parameter_values={ "project": PROJECT_ID, "bucket": BUCKET_NAME, "bq_dest": "bq://{0}".format(PROJECT_ID), "container_uri": "gcr.io/{0}/scikit:v1".format(PROJECT_ID), "batch_destination": "{0}/batchpredresults".format(BUCKET_NAME) }, enable_caching=True, ) # Finally, run the job to create a new pipeline execution: pipeline_job.submit() # After running this cell, you should see logs with a link to view the pipeline run in your console. Navigate to that link. You can also access it by opening your [Pipelines dashboard](https://console.cloud.google.com/vertex-ai/pipelines). This pipeline will take __35-40 minutes__ to run, but you can continue to the next step before it completes. Next you'll learn more about what's happening in each of these pipeline steps. # For further instructions, please refer to the lab manual.
courses/machine_learning/deepdive2/production_ml/solutions/custom_model_training.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
from generators import generate_databases
from mechanisms import *
from estimator import *
import collections
import matplotlib.pyplot as plt
from tqdm import tqdm

# +
tasks = {noisy_max_v1a: {}, noisy_max_v1b: {}, noisy_max_v2a: {}, noisy_max_v2b: {},
         histogram: {}, histogram_eps: {},
         SVT: {'T': 0.5, 'N': 1},
         iSVT1: {'T': 1}, iSVT2: {'T': 1}, iSVT3: {'T': 1, 'N': 1},
         truncated_geometric: {'delta': 0.7}}

algorithm = noisy_max_v1a
test_estimator = "opt"  # "opt" or "mle"
kwargs = tasks[algorithm]
kwargs['epsilon'] = 0.3
epsilon_list = np.linspace(0, 1, 11)
num_input = (5, 10)
input_list = []
num_input = (int(num_input), ) if isinstance(num_input, (int, float)) else num_input
for num in num_input:
    input_list.extend(generate_databases(algorithm, num, default_kwargs=kwargs))
# -


def _draw(algorithm, d, n, kwargs, as_tuple):
    """Sample n outputs of algorithm(d, **kwargs).

    Converts each output to a tuple when requested so that list-valued
    outputs (the SVT variants) are hashable for counting.
    """
    if as_tuple:
        return [tuple(algorithm(d, **kwargs)) for _ in range(n)]
    return [algorithm(d, **kwargs) for _ in range(n)]


def test_results(algorithm, input_list, epsilon_list, n_mean=1000000):
    """Estimate delta(epsilon) in both directions for each database pair.

    For every (d1, d2, kwargs) in input_list, draws two independent
    Poisson(n_mean)-sized batches of outputs per database, builds empirical
    output distributions, and evaluates the estimator selected by the
    module-level `test_estimator` flag ("opt" or "mle") at each epsilon.

    Returns (delta_list12, delta_list21), each of shape
    (len(input_list), len(epsilon_list)).
    """
    delta_list12 = np.zeros([len(input_list), len(epsilon_list)])
    delta_list21 = np.zeros([len(input_list), len(epsilon_list)])
    # Counting strategy depends on the mechanism's output type.
    exact_algos = (noisy_max_v1a, noisy_max_v2a, laplace_mechanism, truncated_geometric)
    binned_algos = (noisy_max_v1b, noisy_max_v2b, histogram_eps, histogram)
    tuple_algos = (SVT, iSVT1, iSVT2, iSVT3)
    for i in tqdm(range(len(input_list))):
        (d1, d2, kwargs) = input_list[i]
        np.random.seed()
        # Poissonized sample sizes: two independent batches per database.
        n1 = np.random.poisson(n_mean)
        n2 = np.random.poisson(n_mean)
        n12 = np.random.poisson(n_mean)
        n22 = np.random.poisson(n_mean)

        if algorithm not in exact_algos + binned_algos + tuple_algos:
            raise ValueError('Unsupported algorithm')

        as_tuple = algorithm in tuple_algos
        result_d1 = _draw(algorithm, d1, n1, kwargs, as_tuple)
        result_d2 = _draw(algorithm, d2, n2, kwargs, as_tuple)
        result_d12 = _draw(algorithm, d1, n12, kwargs, as_tuple)
        result_d22 = _draw(algorithm, d2, n22, kwargs, as_tuple)

        if algorithm in exact_algos:
            # Discrete scalar outputs: count exact values.
            count_d1 = dict(zip(*np.unique(result_d1, return_counts=True)))
            count_d2 = dict(zip(*np.unique(result_d2, return_counts=True)))
            count_d12 = dict(zip(*np.unique(result_d12, return_counts=True)))
            count_d22 = dict(zip(*np.unique(result_d22, return_counts=True)))
        elif algorithm in binned_algos:
            # Continuous scalar outputs: bucket all four batches into a
            # shared 100-bin histogram over their common range.
            range_max = max(np.max(result_d1), np.max(result_d2),
                            np.max(result_d12), np.max(result_d22))
            range_min = min(np.min(result_d1), np.min(result_d2),
                            np.min(result_d12), np.min(result_d22))
            hist1, bin_edges = np.histogram(result_d1, bins=100, range=(range_min, range_max))
            hist2, bin_edges = np.histogram(result_d2, bins=100, range=(range_min, range_max))
            hist12, bin_edges = np.histogram(result_d12, bins=100, range=(range_min, range_max))
            hist22, bin_edges = np.histogram(result_d22, bins=100, range=(range_min, range_max))
            intervals = [(bin_edges[b], bin_edges[b + 1]) for b in range(len(bin_edges) - 1)]
            count_d1 = dict(zip(intervals, hist1))
            count_d2 = dict(zip(intervals, hist2))
            count_d12 = dict(zip(intervals, hist12))
            count_d22 = dict(zip(intervals, hist22))
        elif algorithm in (iSVT1, iSVT2):
            # Fixed-length vector outputs: count unique rows.
            value_1, count_1 = np.unique(result_d1, return_counts=True, axis=0)
            count_d1 = dict(zip([tuple(x) for x in value_1], count_1))
            value_2, count_2 = np.unique(result_d2, return_counts=True, axis=0)
            count_d2 = dict(zip([tuple(x) for x in value_2], count_2))
            value_12, count_12 = np.unique(result_d12, return_counts=True, axis=0)
            count_d12 = dict(zip([tuple(x) for x in value_12], count_12))
            value_22, count_22 = np.unique(result_d22, return_counts=True, axis=0)
            count_d22 = dict(zip([tuple(x) for x in value_22], count_22))
        else:  # SVT, iSVT3
            count_d1 = dict(zip(*np.unique(result_d1, return_counts=True)))
            count_d2 = dict(zip(*np.unique(result_d2, return_counts=True)))
            count_d12 = dict(zip(*np.unique(result_d12, return_counts=True)))
            count_d22 = dict(zip(*np.unique(result_d22, return_counts=True)))

        # Put every observed event into all four count dictionaries so the
        # estimators see a common support.
        support = list(set(list(count_d1.keys()) + list(count_d2.keys())
                           + list(count_d12.keys()) + list(count_d22.keys())))
        for event in support:
            count_d1.setdefault(event, 0)
            count_d2.setdefault(event, 0)
            count_d12.setdefault(event, 0)
            count_d22.setdefault(event, 0)

        for j in range(len(epsilon_list)):
            epsilon = epsilon_list[j]
            if test_estimator == "opt":
                delta12 = opt_estimator(count_d1, count_d2, count_d12, count_d22,
                                        n1, n2, n12, n22, n_mean, epsilon,
                                        c_1=4, c_2=0.1, c_3=0.9)
                delta21 = opt_estimator(count_d2, count_d1, count_d22, count_d12,
                                        n2, n1, n22, n12, n_mean, epsilon,
                                        c_1=4, c_2=0.1, c_3=0.9)
            elif test_estimator == "mle":
                delta12 = mle_estimator(count_d1, count_d2, n1, n2, epsilon)
                delta21 = mle_estimator(count_d2, count_d1, n2, n1, epsilon)
            delta_list12[i][j] = delta12
            delta_list21[i][j] = delta21
    return delta_list12, delta_list21


delta_list_list = []
num_runs = 10
for i in range(num_runs):
    delta_list12, delta_list21 = test_results(algorithm=algorithm, input_list=input_list,
                                              epsilon_list=epsilon_list, n_mean=100000)
    # Worst case over all database pairs and both directions for this run.
    delta_list = np.maximum(np.amax(delta_list12, axis=0), np.amax(delta_list21, axis=0))
    delta_list_list.append(delta_list)

mean_delta = np.mean(np.array(delta_list_list), axis=0)
ste_delta = np.std(np.array(delta_list_list), axis=0) / np.sqrt(num_runs)

# +
plt.rc('text', usetex=True)
# BUGFIX: plot the mean over the num_runs repetitions. Previously the curve
# showed only the last run's delta_list while the error bars were the
# standard error of the mean, and mean_delta was computed but never used.
plt.errorbar(epsilon_list, mean_delta, ste_delta, marker=".", label="region")
plt.xlabel(r"$\epsilon$")
plt.ylabel(r"$\hat{\delta}$")
plt.grid()
# -
DP_demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Demo of numpy `ndarray.astype` casts, plus two custom rounding helpers
# (np.round rounds half to even, which is often surprising).

import numpy as np

a = np.array([1, 2, 3])
print(a)
print(a.dtype)

# Cast to an explicit numpy dtype object.
a_float = a.astype(np.float32)
print(a_float)
print(a_float.dtype)

# `astype` returns a new array; the source array is untouched.
print(a)
print(a.dtype)

# Python's built-in `float` maps to numpy float64.
a_float = a.astype(float)
print(a_float)
print(a_float.dtype)

# The dtype may also be given as a string name.
a_str = a.astype('str')
print(a_str)
print(a_str.dtype)

a_int = a.astype('int32')
print(a_int)
print(a_int.dtype)

# A float array covering negative and positive values.
a = np.arange(50).reshape((5, 10)) / 10 - 2
print(a)
print(a.dtype)

# float -> int via astype truncates toward zero rather than rounding.
a_int = a.astype('int64')
print(a_int)
print(a_int.dtype)

# np.round uses round-half-to-even ("banker's rounding").
print(np.round(a).astype(int))


def my_round_int(x):
    """Round to the nearest integer, with exact .5 rounded up (toward +inf).

    Was a lambda bound to a name (PEP 8 / E731); a ``def`` keeps the exact
    expression while giving the function a real name and docstring.
    """
    return np.round((x * 2 + 1) // 2)


print(my_round_int(a).astype(int))


def my_round(x, digit=0):
    """Round `x` to `digit` decimal places, half away from zero.

    e.g. my_round(2.5) == 3.0 and my_round(-2.5) == -3.0, whereas
    np.round maps both 2.5 and -2.5 to +/-2.0 (half to even).
    """
    p = 10 ** digit
    s = np.copysign(1, x)
    return (s * x * p * 2 + 1) // 2 / p * s


print(my_round(a).astype(int))
notebook/numpy_astype.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Graph Examples - Protein Interaction dataset (minimized)

# ## Import cuxfilter

# +
import cuxfilter
import cudf
import cugraph
import numpy as np, pandas as pd

# ForceAtlas2 layout knobs. NOTE(review): ITERATIONS is defined but the
# force_atlas2 call below hard-codes max_iter=500 — confirm which is intended.
ITERATIONS=500
THETA=1.0
OPTIMIZE=True
# -

# ## Load required datasets

edges = cudf.read_csv('./data/edges.csv',)[['Source','Destination', 'edgeColor']]
nodes = cudf.read_csv('./data/nodes.csv',)[['x', 'y', 'SYMBOL', 'Color']]
# Shift the node colour codes so the smallest value maps to 0.
nodes.Color = nodes.Color - nodes.Color.min()

nodes.head()

edges.head()

# ## preprocess the data

# +
edges.columns=["source", "destination", 'color']

G = cugraph.Graph()
G.from_cudf_edgelist(edges)

# Compute 2-D node positions with cuGraph's ForceAtlas2 layout.
nodes_ = cugraph.layout.force_atlas2(G, max_iter=500,
                strong_gravity_mode=False,
                outbound_attraction_distribution=True,
                lin_log_mode=False,
                barnes_hut_optimize=OPTIMIZE, barnes_hut_theta=THETA,
                verbose=True)
# -

# Attach the original node attributes to the laid-out vertices, keeping
# only the columns from `nodes` (x/y/SYMBOL/Color).
nodes_1 = nodes_.merge(nodes, left_on='vertex', right_on='SYMBOL', suffixes=('', '_y'))[list(nodes.columns)]
nodes_1.head()

# ## Define charts

cux_df = cuxfilter.DataFrame.load_graph((nodes_1, edges))

# +
chart0 = cuxfilter.charts.graph(edge_target='destination',edge_color_palette=['gray', 'black'],
                                node_id='SYMBOL', timeout=200,
                                edge_aggregate_col='color',
                                node_aggregate_col='Color', node_aggregate_fn='mean',
                                node_pixel_shade_type='linear',
                                edge_render_type='direct',  # other option available -> 'curved'
                                edge_transparency=0.5
                               )

chart1 = cuxfilter.charts.number('Color', aggregate_fn="mean", widget=True, title="Mean Color")
# -

# ## Create a dashboard object

d = cux_df.dashboard([chart0, chart1], layout=cuxfilter.layouts.single_feature)

# execute below line for dashboard preview
# (top-level `await` is only valid inside an IPython/Jupyter cell)
await d.preview()

# ## Starting the dashboard
# 1. d.show('current_notebook_url:current_notebook_port') remote dashboard
#
# 2. d.app() inline within the notebook cell
#
# In case you need to stop the server:
#
# - d.stop()

# +
# d.show(notebook_url='')
# -

# ## Export the queried data into a dataframe

queried_df = d.export()
docs/source/examples/graphs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Hello World!

x=10
y=2
z=x+y
z

# This is just an exercise.

names=["Qing", "Françoise", "Raúl", "Bjork","Marie"]
ages=[32,33,28,30,29]
country=["China", "Senegal", "España", "Norway","Korea"]
education=["Bach", "Bach", "Master", "PhD","PhD"]

# Basic indexing and slicing.
ages[0]

ages[4]

ages[1:-1]

ages[:-2]

# BUG FIX: `itemgetter` was first used *before* the import below, which
# raises NameError when the cells run top-to-bottom. The import now
# precedes its first use; both original calls are kept.
from operator import itemgetter

# Select the elements at several positions at once.
list(itemgetter(0,2,3)(ages))

list(itemgetter(0,2,3)(ages))

# Every other element of the first four, plus the fourth element.
ages[0:4:2] + [ages[3]]

country[2]="Spain"
country
Code0.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Goal - convenience functions to parse HA data into useful format for data science. Consider creating a class to hold the data. # SQLAlchemy is used in this notebook but other suitable libraries are available. # # * http://www.sqlalchemy.org/ # * https://facebook.github.io/prophet/ # # Using Google cloud data # %matplotlib inline from datetime import datetime from sqlalchemy import create_engine, text import json from datetime import datetime, timedelta import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.dates as mdates import datetime as dt from fbprophet import Prophet def load_url(filename): """Convenience for loading a url from a json file.""" try: with open(filename, 'r') as fp: url = json.load(fp) except Exception as e: print('Failed to load url') url = None return url['url'] # Some helper functions for parsing HA data def time_category(dtObj): """Return a time category, bed, home, work, given a datetime object dtObj.""" if 9 <= dtObj.hour <= 17: return 'daytime' elif 5 <= dtObj.hour < 9: return 'morning' elif 17 < dtObj.hour < 23: return 'evening' else: return 'night' def is_weekday(dtObj): """Check a datetime object dtObj is a weekday""" if dtObj.weekday() < 5: return True else: return False # + def isfloat(value): """Check if string can be parsed to a float.""" try: float(value) return True except ValueError: return False isfloat('23.6') # + def parse_state(value): """Check if state can be parsed to a float.""" try: float(value) return float(value) except ValueError: return value val = parse_state('23.6') # - parse_state('foo') # + def binary_state(value): """Return a binary for the state of binary sensors""" if value == 'on': return True elif value == 'off': return False else: 
return float('nan') print(binary_state('on')) print(binary_state('off')) print(binary_state('foo')) # + def rename_entity(entity_id): """Takes an entity_if of form sensor.name and returns name.""" return entity_id.split('.')[1] rename_entity('sensor.bme680humidity') # - # Conveninece to query and filter list of entities def query_entities(*args, **kwargs): """Takes a query term or terms and returns a list of entities satisfying the term. Additionally apply a single filter term. Example usage = query_entities('temperature', 'light', filter='sensor)""" query_results = [entity for entity in entity_list if any(query in entity for query in args)] if kwargs['filter']: filtered_resaults = [entity for entity in query_results if kwargs['filter'] in entity] return filtered_resaults else: return query_results # Convenience function for checking a state. This is incredibly slow when applied to large data frames. Better approach is place data on time index and fill. def state_at_time(df, dtObj): """Check the last registered state at some time using asof. 
Passed df needs to be time indexed and should only include the entitiy_id of interest.""" found_index = df.index.asof(dtObj) # Find the closest index asof time test_time if pd.isnull(found_index): # If dtObj is before the first index of dt, will return NaT return float('nan') else: state = df.loc[found_index]['state'] # Get the state at the found_index return state # ## Fetch the states data filename = '/Users/robincole/Desktop/hass_db_url.json' DB_URL = load_url(filename) engine = create_engine(DB_URL) # Lets query all the data and put it in a dataframe # + # %%time stmt = text( """ SELECT domain, entity_id, state, last_changed FROM states WHERE NOT state='unknown' """) allquery = engine.execute(stmt) # get rows from query into a pandas dataframe allqueryDF = pd.DataFrame(allquery.fetchall()) master_df = allqueryDF.copy() # - master_df.shape print("The dataframse size on disk is {} MB".format(master_df.values.nbytes/1e6)) # Name the columns, need to look into the difference between last_changed & last_updated master_df.columns = ['domain', 'entity', 'state', 'last_changed'] master_df.head() master_df.iloc[0]['last_changed'] # Lets drop the timezone and work with naive timestamps master_df['last_changed'] = master_df['last_changed'].apply(lambda x: x.tz_localize(None)) # Lets create a column that indicates if a state is numerical data master_df['numerical'] = master_df['state'].apply(lambda x: isfloat(x)) # Convert to multi-index to allow easy querying master_df.set_index(['domain', 'entity', 'numerical', 'last_changed'], inplace=True) master_df.head() # ## Numerical data # lets do a query for numerical sensor sensors_num_df = master_df.query('domain == "sensor" & numerical == True') sensors_num_df['state'] = sensors_num_df['state'].astype('float') sensors_num_df.head() num_sensors_list = list(sensors_num_df.index.get_level_values('entity').unique()) print(len(num_sensors_list)) num_sensors_list # Lets do a pivot to get sensors in columns to make plotting 
convenient. Lets also forward fill the data sensors_num_df = sensors_num_df.pivot_table(index='last_changed', columns='entity', values='state') sensors_num_df = sensors_num_df.fillna(method='ffill') sensors_num_df = sensors_num_df.dropna() # drop any remaining nan sensors_num_df.index = pd.to_datetime(sensors_num_df.index) # + height=6 width=14 figsize=(width, height) df_plot = sensors_num_df['sensor.living_room_temperature'] last_time = df_plot.index[-1] hours_to_plot = 24 xfmt = mdates.DateFormatter('%m/%d') # %m- f, ax = plt.subplots(1, 1, figsize=(width, height)) ax.plot(df_plot) #ax.xaxis.set_major_formatter(xfmt) #ax.set_xlim(now-dt.timedelta(hours=hours_to_plot), now); # + sns_plot = sns.pairplot( sensors_num_df[['sensor.living_room_temperature', 'sensor.bedroom_temperature', 'sensor.hall_temperature', 'sensor.darksky_sensor_temperature']]); sns_plot.savefig("pairplot.png") # - # ## Single temperature sensor # + stmt = text(""" SELECT last_changed, state FROM states WHERE NOT state='unknown' AND states.entity_id = 'sensor.darksky_sensor_temperature' """) query = engine.execute(stmt) # get rows from query into a pandas dataframe darksky_sensor_temperature = pd.DataFrame(query.fetchall()) # - darksky_sensor_temperature.head() darksky_sensor_temperature.iloc[0][0] darksky_sensor_temperature[0] = darksky_sensor_temperature[0].apply(lambda x: x.tz_localize(None)) darksky_sensor_temperature.iloc[0][0] darksky_sensor_temperature.columns = ['last_changed', 'state'] darksky_sensor_temperature['last_changed']= pd.to_datetime(darksky_sensor_temperature['last_changed'], utc=True) darksky_sensor_temperature['state'] = darksky_sensor_temperature['state'].astype('float') darksky_sensor_temperature.head() # + height=6 width=18 figsize=(width, height) f, ax = plt.subplots(1, 1, figsize=(width, height)) ax.plot(darksky_sensor_temperature['last_changed'], darksky_sensor_temperature['state']) # - # ## Prophet # Lets try some prediction with prophet df = 
darksky_sensor_temperature.copy() df.columns = ['ds', 'y'] df.iloc[0]['ds'] df['ds'] = df['ds'].apply(lambda x: x.tz_localize(None)) df.iloc[0]['ds'] plt.plot(df['ds'], df['y']) % time m = Prophet() m.fit(df); future = m.make_future_dataframe(periods=365, freq='H') future.head() future.iloc[0]['ds'] # Daily forecasts forecast = m.predict(future) m.plot(forecast); forecast.head() # + # Steal from Stocker to make a nicer plot height=10 width=18 now = dt.datetime.now() hours_to_plot = 24*2 fig, ax = plt.subplots(1, 1, figsize=(width, height)) ax.plot(df['ds'], df['y'], 'ko-', linewidth = 1.4, alpha = 0.8, ms = 1.8, label = 'Observations') ax.plot(forecast['ds'], forecast['yhat'], 'forestgreen',linewidth = 2.4, label = 'Modeled'); # Plot the uncertainty interval as ribbon ax.fill_between(forecast['ds'].dt.to_pydatetime(), forecast['yhat_upper'], forecast['yhat_lower'], alpha = 0.3, facecolor = 'g', edgecolor = 'k', linewidth = 1.4, label = 'Confidence Interval') # Plot formatting ax.set_xlim(now-dt.timedelta(hours=hours_to_plot), now+dt.timedelta(hours=hours_to_plot)) ax.xaxis.set_major_formatter(mdates.DateFormatter('%A \n %H:%M')) ax.xaxis.set_major_locator(mdates.HourLocator(byhour=[0, 6, 12, 18])) ax.xaxis.set_minor_locator(mdates.HourLocator()) ax.set_ylim(-3.0, 15.0) plt.legend(loc = 2, prop={'size': 10}); plt.xlabel('Date'); plt.ylabel('Temperature ($^\circ$C)'); plt.grid(linewidth=0.6, alpha = 0.6) title = 'darksky_sensor_temperature' plt.title(title); plt.savefig('darksky_sensor_temperature_precition.jpg') fig.autofmt_xdate() plt.show() # - # ### Motion at home sensor # Lets focus on the binary_sensor.motion_at_home which is a sensor that indicates if there is any activity at home. In the pivot_table apply np.min to prevent np.mean (the default) from returning 0.5 in some cases (why is this?) 
binary_sensors_df = master_df.query('domain == "binary_sensor"') binary_sensors_list = list(binary_sensors_df.index.get_level_values('entity').unique()) print(len(binary_sensors_list)) binary_sensors_list binary_sensors_df['state'] = binary_sensors_df['state'].apply(lambda x: binary_state(x)) # Binarise binary_sensors_df = binary_sensors_df.pivot_table(index='last_changed', columns='entity', values='state') binary_sensors_df.head() binary_sensors_df = binary_sensors_df.fillna(method='ffill') binary_sensors_df = binary_sensors_df.dropna() # drop any remaining nan # Lets get only the motion sensor and put in a dataframe motion_df = binary_sensors_df["binary_sensor.motion_at_home"].to_frame() motion_df.index = pd.to_datetime(motion_df.index) # #### Create some features # Lets get the weekday motion_df['weekday'] = motion_df.index.weekday_name motion_df['time'] = motion_df.index.time motion_df['is_weekday'] = motion_df.index.map(lambda x: is_weekday(x)) motion_df = motion_df[motion_df['binary_sensor.motion_at_home'] == True] # Keep only true detection events motion_df['time_category'] = motion_df.index.map(lambda x: time_category(x)) motion_df.head() # #### Analysis of motion data # Try a facet grid to represent data https://seaborn.pydata.org/generated/seaborn.FacetGrid.html?highlight=facet#seaborn.FacetGrid # Lets see if there is more motion at home on weekends. motion_df['binary_sensor.motion_at_home'].groupby(motion_df['is_weekday']).describe()['count'] # OK, so there is more activity at home on weekdays, but there are also 5 days in the working week and 2 in the weekend. # # Now, how about activity by weekday. # And activity by weekday and time category. 
motion_df_gb = motion_df['binary_sensor.motion_at_home'].groupby([motion_df['weekday'], motion_df['time_category']]).sum().unstack() motion_df_gb.fillna(value=0, inplace=True) # Replace NaN with 0 motion_df_gb = motion_df_gb.astype('int') # Ints rather than floats motion_df_gb = motion_df_gb.T motion_df_gb fig, ax = plt.subplots(figsize=(8, 6)) days_list = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] times_list = ['morning', 'daytime', 'evening', 'night'] ax = sns.heatmap(motion_df_gb[days_list].loc[times_list], annot=True, linewidths=.5, fmt="d", ax=ax, cmap='Reds'); ax.set_title('Activity at home by day and time category') fig.savefig('heatmap.jpg') # ## Bayesian sensor query = 'entity == "binary_sensor.in_bed_bayesian"' in_bed_bayes_df = master_df.query(query) in_bed_bayes_df.head() in_bed_bayes_df['state'].unique() # Hmm where are my on.. in_bed_bayes_df.index = in_bed_bayes_df.index.get_level_values('last_changed') # Keep only last_changed level in_bed_bayes_df.index = pd.to_datetime(in_bed_bayes_df.index) # Convert to datetime f, ax = plt.subplots(figsize=(16, 6)) ax.step(in_bed_bayes_df, 'bo', where='post') # + now = dt.datetime.now() hours_to_plot = 240 f, ax = plt.subplots(figsize=(16, 6)) ax.step(in_bed_bayes_df, 'bo', where='post') xfmt = mdates.DateFormatter('%d %H:%M:%S') # %m- ax.xaxis.set_major_formatter(xfmt) ax.set_xlim(now-dt.timedelta(hours=hours_to_plot), now) ax.set_ylabel('In bed status', color='b') # -
HASS data science 26-1-2018.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Inspect NH3 (1,1) and (2,2) data cubes with pyspeckit.

# %matplotlib notebook

# BUG FIX: `np.nanmax` is used below but numpy was never imported
# (NameError at the mapplot call).
import numpy as np

import pylab as pl

# +
import pyspeckit

# BUG FIX: the two assignments were swapped — `twotwo` loaded
# gcnh3_11.fits and `oneone` loaded gcnh3_22.fits. Each variable now
# matches the transition named in its file.
oneone = pyspeckit.Cube('gcnh3_11.fits')
twotwo = pyspeckit.Cube('gcnh3_22.fits')

oneone.xarr.velocity_convention = 'radio'
twotwo.xarr.velocity_convention = 'radio'

# NOTE(review): stack order [oneone, twotwo] is kept from the original —
# confirm the fitnh3 plotter wrapper's expected cube ordering.
stack = pyspeckit.CubeStack([oneone,twotwo])
stack.plot_special = pyspeckit.wrappers.fitnh3.plotter_override
stack.plot_special_kwargs = {'fignum':3, 'vrange':[-30,135]}
# +
# Map of the per-pixel peak (nanmax ignores blanked/NaN pixels), then
# plot the spectrum at pixel (5, 5).
stack.mapplot(estimator=np.nanmax)
pl.figure(3)
stack.plot_spectrum(5,5)
pl.show()
examples/NH3 cube inspection with pyspeckit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:py38] * # language: python # name: conda-env-py38-py # --- # ## Erddap to Grid for Prawler # # Using erddap as the data source, obtain each profile, filter out calibration profiles and provide interpolated/gridded dataset. # # Gridding parameters: # Pressure - 1m # Time - 1hr # __pyversion__==3.6 # __author__==S.Bell import datetime print("Last run {0}".format(datetime.datetime.now())) # %matplotlib inline # ### connecting and basic information # + import warnings #remove the numpy/pandas/cython warnings warnings.filterwarnings(action='ignore', message="numpy.dtype size changed,") from erddapy import ERDDAP import pandas as pd import numpy as np from netCDF4 import date2num, num2date from scipy import interpolate # - # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.dates import YearLocator, WeekdayLocator, MonthLocator, DayLocator, HourLocator, DateFormatter import matplotlib.ticker as ticker from mpl_toolkits.axes_grid1 import make_axes_locatable import cmocean # + server_url = 'http://downdraft.pmel.noaa.gov:8080/erddap' e = ERDDAP(server=server_url) # - df = pd.read_csv(e.get_search_url(response='csv', search_for='PRAWLER')) 'We have {} tabledap, {} griddap, and {} wms endpoints.'.format( len(set(df['tabledap'].dropna())), len(set(df['griddap'].dropna())), len(set(df['wms'].dropna())) ) prawlers = df['Dataset ID'].values print(prawlers) variables = [e.get_var_by_attr(dataset_id=prawler, standard_name=lambda v: v is not None) for prawler in prawlers] print(variables) # ### retrieving and plotting data # + d = ERDDAP(server=server_url, protocol='tabledap', response='csv', ) d.dataset_id='erddap_17ckitaem2a_prawler' d.variables = [ 'profileid', #profileid 'Temperature', 'Salinity', 'Chlorophyll', 'Turbidity', 'latitude', 'longitude', 
'depth', 'time', 'Oxy_Conc', 'Oxy_Sat' ] d.constraints = { 'time>=': '2017-01-01T00:00:00Z', 'time<=': '2018-10-10T00:00:00Z', 'latitude>=': 45, 'latitude<=': 90, 'longitude>=': 180, 'longitude<=': 210 } # - d.get_download_url() # + df = d.to_pandas( index_col='time (UTC)', parse_dates=True, skiprows=(1,) # units information can be dropped. ).dropna() df.head() # - df.tail() # ### Interpolating and Gridding # #### Gridding Parameters # # Set pressure interval to 1m and build a grid from 0-50m ### vertically grid data to evenly space gridspoints # deployment depth has a maximum value - set at 50 generically interval = 1.0 #m press_grid = np.arange(0,50,interval) #1m # #### Temperature # # We need to isolate the calibration park&holds first and remove them from the gridded analysis for Temperature/Salinity/Chlorophyl/Turbidity # We may wish to use them for the oxygen. #groupby profile id dfsg = df.groupby('profileid') # + def profile(cast,press_grid=np.arange(0,50.25,0.25),fillgaps=True): """ For a single profile, take median values Linearly interpolate to fillgaps""" #skip profile if std of depth is less than 1 - likely a park and hold if np.std(dfsg.get_group(cast)['depth (m)']) <= 1.0: #calibration profile return else: # full profile #initialize profile parameters profiledata = {'Salinity (PSU)': [], 'Temperature (C)': [], 'Chlorophyll (ugrams l-1)': [], 'Turbidity (FNU)': [], 'Oxy_Sat (percent)': [], 'Oxy_Conc (umol kg-1)': []} profiledata = { 'Oxy_Conc (umol kg-1)': []} #fill variables for each profile, taking the median if multiple values within same depth bin # and making NaN if no values for pg in press_grid: """ Take the median value if multiple samples occur within same depth bin""" for parameter in profiledata.keys(): if not parameter in ['Statistics']: ireg_ind = np.where((dfsg.get_group(cast)['depth (m)'] > pg) & (dfsg.get_group(cast)['depth (m)'] <= pg+interval)) profiledata[parameter] = np.hstack((profiledata[parameter], 
dfsg.get_group(cast)[parameter][ireg_ind[0]].median())) else: profiledata['Statistics'] = np.hstack((profiledata['Statistics'], ireg_ind[0].size)) for parameter in profiledata.keys(): if fillgaps: mask = np.isnan(profiledata[parameter]) profiledata[parameter][mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), profiledata[parameter][~mask], right=-100000) profiledata['datetime'] = dfsg.get_group(cast).index[0].to_pydatetime() return(profiledata) def time_interp(date_min,date_max,dt=1.0/24.0): dt_min = date2num(date_min -datetime.timedelta(seconds=60*date_min.minute + date_min.second),'Days since 0001-1-1') time_grid = np.arange(dt_min,date2num(date_max,'Days since 0001-1-1'),dt) #grid limits -> set to top of hour return(time_grid) # + profiledata_filled = {} for profileid in list(dfsg.groups.keys()): if profileid.endswith('00'): print("{profileid} of {number}".format(profileid=profileid, number=list(dfsg.groups.keys())[-1])) tmp = profile(profileid,press_grid=press_grid) if tmp: profiledata_filled.update({profileid: tmp}) # - parameter='Oxy_Conc (umol kg-1)' np2d=[] np2d = [np2d + list(v[parameter]) for k,v in profiledata_filled.items()] # + time_grid = time_interp(profiledata_filled[list(profiledata_filled.keys())[0]]['datetime'], profiledata_filled[list(profiledata_filled.keys())[-1]]['datetime']) date_time = [date2num(v['datetime'],'Days since 0001-1-1') for k,v in profiledata_filled.items()] # - np.shape(press_grid) mesh_grid_func = interpolate.interp2d(press_grid,date_time,np.array(np2d),kind='linear',bounds_error=False,fill_value=-100000) mesh_grid = mesh_grid_func(press_grid,time_grid) # + extent = (time_grid.min(), time_grid.max(), press_grid.max(), press_grid.min()) # extent of the plots #plt.imshow(mesh_grid.T,extent=extent, vmin=31.7, vmax=32.3, cmap=cmocean.cm.haline) fig = plt.figure(figsize=(22,5.25)) ax = plt.subplot(1,1,1) cs = plt.imshow(mesh_grid.T,extent=extent, vmin=0, vmax=20, cmap=cmocean.cm.algae) 
ax.xaxis.set_major_locator(DayLocator(bymonthday=15)) ax.xaxis.set_minor_locator(DayLocator(bymonthday=[5,10,15,20,25,30])) ax.xaxis.set_major_formatter(plt.NullFormatter()) ax.xaxis.set_minor_formatter(DateFormatter('%d')) ax.xaxis.set_major_formatter(DateFormatter('%b %y')) ax.xaxis.set_tick_params(which='major', pad=15) # - # ### Save output to csv np.savetxt('17CKITAEPR2A_'+parameter.split()[0]+'.csv', mesh_grid,fmt='%.3f') np.savetxt('17CKITAEPR2A_coords_depth.csv',press_grid,fmt='%.2f') np.savetxt('17CKITAEPR2A_coords_time.csv',time_grid,fmt='%.8f') # + jupyter={"outputs_hidden": true} tags=[] x=[print(datetime.datetime.strftime(x,'%Y-%m-%dT%H:%M:%S')) for x in num2date(time_grid,'days since 0001-01-01')] # -
PrawlerDeployments/ERDDAP_Prawler2Grid.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # 함수 # ## Function definition # ```python # In [56]: def test(): # ....: print('in test function') # ....: # ....: # # In [57]: test() # in test function # ``` # **Warning** # # Function blocks must be indented as other control-flow blocks. # ## Return statement # Functions can *optionally* return values. # ```python # In [6]: def disk_area(radius): # ...: return 3.14 * radius * radius # ...: # # In [8]: disk_area(1.5) # Out[8]: 7.0649999999999995 # ``` # **Note** By default, functions return ``None``. # **Note** Note the syntax to define a function: # # * the `def` keyword; # * is followed by the function's **name**, then # * the arguments of the function are given between parentheses followed # by a colon. # * the function body; # * and `return object` for optionally returning values. # ## Parameters # Mandatory parameters (positional arguments) # # ```python # In [81]: def double_it(x): # ....: return x * 2 # ....: # # In [82]: double_it(3) # Out[82]: 6 # # In [83]: double_it() # --------------------------------------------------------------------------- # Traceback (most recent call last): # File "<stdin>", line 1, in <module> # TypeError: double_it() takes exactly 1 argument (0 given) # ``` # Optional parameters (keyword or named arguments) # # ```python # In [84]: def double_it(x=2): # ....: return x * 2 # ....: # # In [85]: double_it() # Out[85]: 4 # # In [86]: double_it(3) # Out[86]: 6 # ``` # Keyword arguments allow you to specify *default values*. # **Warning** # # Default values are evaluated when the function is defined, not when # it is called. This can be problematic when using mutable types (e.g. 
# dictionary or list) and modifying them in the function body, since the # modifications will be persistent across invocations of the function. # # Using an immutable type in a keyword argument: # # ```python # In [124]: bigx = 10 # # In [125]: def double_it(x=bigx): # .....: return x * 2 # .....: # # In [126]: bigx = 1e9 # Now really big # # In [128]: double_it() # Out[128]: 20 # ``` # # Using an mutable type in a keyword argument (and modifying it inside the # function body): # # ```python # In [2]: def add_to_dict(args={'a': 1, 'b': 2}): # ...: for i in args.keys(): # ...: args[i] += 1 # ...: print(args) # ...: # # In [3]: add_to_dict # Out[3]: <function __main__.add_to_dict> # # In [4]: add_to_dict() # {'a': 2, 'b': 3} # # In [5]: add_to_dict() # {'a': 3, 'b': 4} # # In [6]: add_to_dict() # {'a': 4, 'b': 5} # ``` # More involved example implementing python's slicing: # ```python # In [98]: def slicer(seq, start=None, stop=None, step=None): # ....: """Implement basic python slicing.""" # ....: return seq[start:stop:step] # ....: # # In [101]: rhyme = 'one fish, two fish, red fish, blue fish'.split() # # In [102]: rhyme # Out[102]: ['one', 'fish,', 'two', 'fish,', 'red', 'fish,', 'blue', 'fish'] # # In [103]: slicer(rhyme) # Out[103]: ['one', 'fish,', 'two', 'fish,', 'red', 'fish,', 'blue', 'fish'] # # In [104]: slicer(rhyme, step=2) # Out[104]: ['one', 'two', 'red', 'blue'] # # In [105]: slicer(rhyme, 1, step=2) # Out[105]: ['fish,', 'fish,', 'fish,', 'fish'] # # In [106]: slicer(rhyme, start=1, stop=4, step=2) # Out[106]: ['fish,', 'fish,'] # ``` # The order of the keyword arguments does not matter: # # ```python # In [107]: slicer(rhyme, step=2, start=1, stop=4) # Out[107]: ['fish,', 'fish,'] # ``` # # but it is good practice to use the same ordering as the function's # definition. 
# *Keyword arguments* are a very convenient feature for defining functions # with a variable number of arguments, especially when default values are # to be used in most calls to the function. # ## Passing by value # Can you modify the value of a variable inside a function? Most languages # (C, Java, ...) distinguish "passing by value" and "passing by reference". # In Python, such a distinction is somewhat artificial, and it is a bit # subtle whether your variables are going to be modified or not. # Fortunately, there exist clear rules. # # Parameters to functions are references to objects, which are passed by # value. When you pass a variable to a function, python passes the # reference to the object to which the variable refers (the **value**). # Not the variable itself. # If the **value** passed in a function is immutable, the function does not # modify the caller's variable. If the **value** is mutable, the function # may modify the caller's variable in-place:: # # ```python # >>> def try_to_modify(x, y, z): # ... x = 23 # ... y.append(42) # ... z = [99] # new reference # ... print(x) # ... print(y) # ... print(z) # ... # >>> a = 77 # immutable variable # >>> b = [99] # mutable variable # >>> c = [28] # >>> try_to_modify(a, b, c) # 23 # [99, 42] # [99] # >>> print(a) # 77 # >>> print(b) # [99, 42] # >>> print(c) # [28] # ``` # Functions have a local variable table called a *local namespace*. # # The variable `x` only exists within the function `try_to_modify`. # ## Global variables # Variables declared outside the function can be referenced within the # function: # # ```python # In [114]: x = 5 # # In [115]: def addx(y): # .....: return x + y # .....: # # In [116]: addx(10) # Out[116]: 15 # ``` # But these "global" variables cannot be modified within the function, # unless declared **global** in the function. 
# # This doesn't work: # # ```python # In [117]: def setx(y): # .....: x = y # .....: print('x is %d' % x) # .....: # .....: # # In [118]: setx(10) # x is 10 # # In [120]: x # Out[120]: 5 # ``` # This works: # # ```python # In [121]: def setx(y): # .....: global x # .....: x = y # .....: print('x is %d' % x) # .....: # .....: # # In [122]: setx(10) # x is 10 # # In [123]: x # Out[123]: 10 # ``` # ## Variable number of parameters # Special forms of parameters: # # * `*args`: any number of positional arguments packed into a tuple # * `**kwargs`: any number of keyword arguments packed into a dictionary # # ```python # In [35]: def variable_args(*args, **kwargs): # ....: print('args is', args) # ....: print('kwargs is', kwargs) # ....: # # In [36]: variable_args('one', 'two', x=1, y=2, z=3) # args is ('one', 'two') # kwargs is {'x': 1, 'y': 2, 'z': 3} # ``` # ## Docstrings # Documentation about what the function does and its parameters. General # convention: # # ```python # In [67]: def funcname(params): # ....: """Concise one-line sentence describing the function. # ....: # ....: Extended summary which can contain multiple paragraphs. # ....: """ # ....: # function body # ....: pass # ....: # # In [68]: funcname? # Type: function # Base Class: type 'function'> # String Form: <function funcname at 0xeaa0f0> # Namespace: Interactive # File: <ipython console> # Definition: funcname(params) # Docstring: # Concise one-line sentence describing the function. # # Extended summary which can contain multiple paragraphs. # ``` # **Note** **Docstring guidelines** # # For the sake of standardization, the `Docstring # Conventions <https://www.python.org/dev/peps/pep-0257>`_ webpage # documents the semantics and conventions associated with Python # docstrings. # # Also, the Numpy and Scipy modules have defined a precise standard # for documenting scientific functions, that you may want to follow for # your own functions, with a `Parameters` section, an `Examples` # section, etc. 
See # https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard # ## Functions are objects # Functions are first-class objects, which means they can be: # # * assigned to a variable # * an item in a list (or any collection) # * passed as an argument to another function. # # ```python # In [38]: va = variable_args # # In [39]: va('three', x=1, y=2) # args is ('three',) # kwargs is {'x': 1, 'y': 2} # ``` # ## Methods # Methods are functions attached to objects. You've seen these in our # examples on *lists*, *dictionaries*, *strings*, etc... # ## Exercises # 1. Fibonacci sequence # # Write a function that displays the `n` first terms of the Fibonacci # sequence, defined by: # # $$ # \left\{ # \begin{array}{ll} # U_{0} = 0 \\ # U_{1} = 1 \\ # U_{n+2} = U_{n+1} + U_{n} # \end{array} # \right. # $$ # # 1. Quicksort # # Implement the quicksort algorithm, as defined by wikipedia # # ```javascript # function quicksort(array) # var list less, greater # if length(array) < 2 # return array # select and remove a pivot value pivot from array # for each x in array # if x < pivot + 1 then append x to less # else append x to greater # return concatenate(quicksort(less), pivot, quicksort(greater)) # ```
notebooks/ch05.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# This script visualizes the contig length, coverage,
# and taxonomy of spades-assembled contigs.
# It filters contigs >1000bp, >10x coverage, and those matching
# the top genus sequenced into the filter_contigs folder.
# -

import pandas as pd
import os
import matplotlib.pyplot as plt
import sys
from Bio import SeqIO

# Isolate name is passed as the single command-line argument.
isolate = sys.argv[1]

# +
path = 'results/processing/assemblies/' + isolate + '/'

# inputs
centrifuge_file = path + 'spades_isolate_assembly/centrifuge_hits.tsv'
fasta_file = path + 'spades_isolate_assembly/contigs.fasta'

# outputs
centrifuge_summary_file = path + 'spades_isolate_assembly/centrifuge_summary.tsv'
LvC_plot = path + 'spades_isolate_assembly/contigs_length_vs_coverage.pdf'
filtered_contigs_dir = 'results/processing/filtered_contigs'
# os.makedirs replaces the previous shell call ("mkdir -pv"): no subprocess,
# portable, and exist_ok matches mkdir -p semantics.
os.makedirs(filtered_contigs_dir, exist_ok=True)
filtered_contigs_fasta_file = filtered_contigs_dir + '/' + isolate + '_contigs.fasta'

# +
# Read the tab file that links taxid info produced by centrifuge to usable
# taxonomic info, and turn it into a taxid -> taxonomy lookup dict.
centrifuge_taxonomy = pd.read_csv('bin/centrifuge_taxonomy/p_compressed+h+v_taxonomy.txt', sep='\t', header=None)
centrifuge_taxonomy.columns = ['taxid', 'taxonomy']
taxid_dict = centrifuge_taxonomy.set_index('taxid').to_dict()['taxonomy']


def get_taxonomy(taxid):
    """Return the taxonomy string for a centrifuge taxid.

    Falls back to 'Unassigned' when the taxid is missing from the lookup
    table (KeyError) or cannot be parsed as an integer (ValueError/TypeError).
    The previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    """
    try:
        return taxid_dict[int(taxid)]
    except (KeyError, ValueError, TypeError):
        return 'Unassigned'


# +
# Read in centrifuge output of the spades isolate assembly.
contigs = pd.read_csv(centrifuge_file, sep='\t')
print(len(contigs), 'contigs in spades isolate assembly')

# Extract data from contig readIDs and taxIDs.
# SPAdes readIDs look like NODE_<n>_length_<bp>_cov_<x>, so field 3 is the
# length and the last field is the coverage.
contigs['cov'] = pd.to_numeric(contigs['readID'].apply(lambda x: x.split('_')[-1]))
contigs['length'] = pd.to_numeric(contigs['readID'].apply(lambda x: x.split('_')[3]))
contigs['taxonomy'] = contigs['taxID'].apply(lambda x: get_taxonomy(x))
contigs['genus'] = contigs['taxonomy'].apply(lambda x: x.split(' ')[0])


# Define contig color based on taxonomy.
def color_scale(genus):
    """Map a genus name to a scatter-plot color; gray for anything unlisted."""
    if genus == 'Bacteroides':
        return 'blue'
    if genus == 'Parabacteroides':
        return 'green'
    if genus == 'Bifidobacterium':
        return 'red'
    return 'gray'


contigs['color'] = contigs['genus'].apply(color_scale)

# Filter contigs >1,000bp and >10x coverage.
# drop=True keeps the old index out of the columns; without it the first
# reset_index() inserts an 'index' column and the second call can fail with
# "cannot insert index, already exists".
contigs = contigs[contigs['length'] > 1000].reset_index(drop=True)
contigs = contigs[contigs['cov'] > 10].reset_index(drop=True)
print(len(contigs), 'contigs >1000bp and 10x coverage')
if len(contigs) == 0:
    sys.exit('no passing contigs')

# Summarize contigs by taxonomy: total assembled length per (genus, taxonomy),
# most abundant first.
contigs_summary = contigs.groupby(['genus', 'taxonomy'], as_index=False
                                  ).agg({"length": "sum"}
                                        ).sort_values(by=['length'], ascending=False
                                                      ).reset_index(drop=True)
contigs_summary['length_prop'] = 100 * contigs_summary['length'] / contigs_summary['length'].sum()
print(contigs_summary.head())
contigs_summary.to_csv(centrifuge_summary_file, sep='\t', index=False)

# Plot contig length vs. coverage.
plt.figure(1)
plt.scatter(contigs['length'], contigs['cov'], color=contigs['color'])
plt.xlabel('length')
plt.ylabel('coverage')
plt.title(isolate)
plt.savefig(LvC_plot)

# +
print('top genus')
top_genus = contigs_summary['genus'][0]
print(top_genus)

# Keep only contigs belonging to the top genus.
plt.figure(2)
contigs_TG = contigs[contigs['genus'] == top_genus]
print(len(contigs_TG), 'contigs after taxonomic filter')
plt.scatter(contigs_TG['length'], contigs_TG['cov'], color=contigs_TG['color'])
plt.xlabel('length')
plt.ylabel('coverage')
plt.title(isolate)

# Output the filtered contig fasta.
# Build the ID set once: O(1) membership per record instead of rebuilding an
# O(n) list inside the loop (previously accidental O(n^2)).
keep_ids = set(contigs_TG['readID'])
with open(fasta_file, "r") as fasta:
    with open(filtered_contigs_fasta_file, "w") as filtered_fasta:
        for record in SeqIO.parse(fasta, "fasta"):
            if record.id in keep_ids:
                SeqIO.write(record, filtered_fasta, "fasta")
scripts/isolate_genome_assembly/filter_contigs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data Preperation # ### In this notebook we will prepare our data for our search function to use. # ### Currently we have data stored in csv file. # # #### Mall_Customers.csv # ## It can be computationally expensive to produce analysis results.So we will prepare our data and save it in an easily searchable structure. # # Import the needed modules... import pandas as pd from collections import defaultdict from os import getcwd # # Define Paths to data files.¶ # PATH_Mall_Customers = f"{getcwd()}/Mall_Customers.csv" # # Data Engineering # # ### Get data in dataframes. # ### Convert data to a single dictionary. # """ Read data from Mall_Customers.csv """ df_Mall_Customers = pd.read_csv(PATH_Mall_Customers) Mall_Customers_table_columns = df_Mall_Customers.columns.tolist() print(f"COLUMNS : {Mall_Customers_table_columns}") # COLUMNS : ['CustomerID', 'Gender', 'Age', 'Annual Income', 'Spending Score'] # A user will always search a record by its id so we will create a Global secondary index to be able to perform search our datastore.it will obviously take some extra space but almost negligible as compared to the size of the original data.In addition, It will make our searching faster and efficient so it's a good deal. # print(f"It is {df_Mall_Customers['CustomerID'].is_unique} that the column 'CustomerID' has unique values for all entries in Mall_Customers dataframe.") # Sort Mall_Customers dataframe on the basis of CustomerID as CustomerID is unique for all entries... df_Mall_Customers_sorted = df_Mall_Customers.sort_values(by=['CustomerID']) # It is True that the column 'CustomerID' has unique values for all entries in Mall_Customers dataframe. # from Mall_Customers dataframe... 
# Build the per-customer record store and a Global Secondary Index (GSI)
# from the CustomerID-sorted dataframe.
#
# NOTE(review): the previous version of this cell did not run at all —
# it used identifiers containing spaces (`Annual Income = ...`), a broken
# name (`df_ Mall_Customers_sorted`), undefined variables (`appDict`,
# `apps`), and a loop variable that shadowed the list being iterated.
# It also mixed the sorted frame (for IDs) with the unsorted one (for the
# other columns), which would misalign records. All columns are now read
# from the same sorted frame. The `.split("|")` calls were dropped: Age,
# Annual Income and Spending Score are numeric columns with no "|"
# delimiter to split on.
customer_ids = df_Mall_Customers_sorted["CustomerID"].tolist()
genders = df_Mall_Customers_sorted["Gender"].tolist()
ages = df_Mall_Customers_sorted["Age"].tolist()
annual_incomes = df_Mall_Customers_sorted["Annual Income"].tolist()
spending_scores = df_Mall_Customers_sorted["Spending Score"].tolist()

mall_CustomersDict = {}
global_secondaryIndex = {}

for idx, customer_id in enumerate(customer_ids):
    mall_CustomersDict[customer_id] = {
        "Gender": genders[idx],
        "Age": ages[idx],
        "Annual Income": annual_incomes[idx],
        "Spending Score": spending_scores[idx],
    }
    # GSI: CustomerID -> row position in the sorted store, for O(1) lookup.
    global_secondaryIndex[customer_id] = idx

import json

print("[INFO] Writing CustomerID Data into the disk...")
with open('dataFinal.json', 'w') as fp:
    json.dump(mall_CustomersDict, fp, sort_keys=True, indent=4)

print("[INFO] Writing Global Secondary Index Data into the disk...")
with open('dataFinal_GIS.json', 'w') as fp:
    json.dump(global_secondaryIndex, fp, sort_keys=True, indent=4)

# [INFO] Writing CustomerID Data into the disk...

# [INFO] Writing Global Secondary Index Data into the disk...

# ##### At this point, our database is ready and it can handle a high inflow of requests.
dataPreperation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from estruturas.pilha import * from estruturas.fila import * from estruturas.deque import * from estruturas.pilha_dinamica import * from estruturas.fila_dinamica import * from estruturas.lista import * from estruturas.arvore import * from estruturas.arvore_binaria import * from cyjupyter import Cytoscape import json # - # # Árvores Balanceadas # <em>Fontes: Transparências de aula da professora <NAME> (UFF) e <NAME>.; <NAME>. Estruturas de Dados e seus Algoritmos, 3ª ed. LTC. Cap. 5</em> # # Conforme vimos na aula passada, uma árvore binária de busca (ABB) possui um ordenamento de seus nós de acordo com sua chave. Dada uma árvore binária com raiz R, ela será considerada uma ABB se a chave (informação) de cada nó da subárvore da esquerda de R é menor do que a chave do nó R, as chaves de cada nó da subárvore direita de R é maior do que a chave do nó R e as subárvores esquerda e direita também são ABBs. # # ABBs possuem operações de inserção, consulta e exclusão de nós e são importantes estruturas para buscas rápidas. Em uma árvore de altura A, visitam-se no máximo A nós para qualquer busca, ou seja, é possível armazenar grande quantidade de informação em poucos níveis. # # # | Nível | Quantos cabem | # |---------|-----------------| # | 1 | 1 | # | 2 | 3 | # | 3 | 7 | # | 4 | 15 | # | ... | ... | # | N | 2<sup>N</sup> - 1 | # | 10 | 1.024 | # | 13 | 8.192 | # | 16 | 65.536 | # | 18 | 262.144 | # | 20 | 1 milhão | # | 30 | 1 bilhão | # | | ... | # # Se os nós estiverem espalhados uniformemente (árvore balanceada), a consulta será rápida para grandes quantidades de dados: # # #### O(log N) # # Contudo, na medida em que inserimos novos nós, a ABB pode ir se tornando desbalanceada, ou seja, suas subárvores vão adquirindo alturas diferentes. 
Esta descompensação compromete a eficiência das buscas, e portanto deve ser contornada. Um bom contraexemplo de eficiência é o caso da inserção de valores em ordem decrescente. Neste caso, o desbalanceamento pode tornar a busca tão ineficiente quanto a busca sequencial no pior caso. # # Inserção: 1, 13, 24, 27, 56 # # <img src="./img/abb_desbalanceada.png" alt="Árvore de Busca Binária Desbalanceada" width="200"/> # # #### O(N) # # # ## Conceitos # # | <b>Árvore Estritamente Binária</b> | <b>Árvore Binária Cheia</b> | # |----------------------------------------------------|----------------------------------------------------| # |Neste caso, os nós têm 0 ou 2 filhos. Ou seja, todo nó interno tem 2 filhos e somente as folhas têm 0 filhos.|É um tipo de árvore estritamente binária no qual todos os nós folha estão no mesmo nível: | # |<img src="./img/arvore_estrit_binaria.png" alt="Árvore Estritamente Binária" width="100"/>|<img src="./img/arvore_binaria_completa.png" alt="Árvore Binária Completa" width="200"/>| # # # # ## Balanceamento de Árvores # Distribuição equilibrada dos nós para otimizar as operações de consulta e diminuir o número médio de comparações. # # ### Estratégias # * Uniforme # * Árvore balanceada por altura (distância entre as alturas dos nós não deve exceder um determinado valor # * Não uniforme, ou por frequência # * As chaves mais solicitadas ficam mais perto da raiz # # ## Árvores AVL # ### Adelson-Velskii e Landis (1962) # # Uma árvore binária de busca (ABB) é uma <b>AVL</b> quando, para qualquer um de seus nós, a <b>diferença</b> entre as <b>alturas de suas subárvore direita e esquerda</b> é no <b>máximo 1</b>. # # Verifique quais das ABB são AVL. 
# <img src="./img/exercicio_avl.png" alt="Exercício de Árvores AVLs" width="550"/> # ## Fator de Balanceamento (FB) # Fator de Balanceamento: diferença entre altura da subárvore direita e esquerda # FB(n) = altura(n->dir) – altura(n->esq) # # <img src="./img/fb.png" alt="Balanceando Árvores" width="550"/> # # #### Exercício # 1. Descreva abaixo um algoritmo para calcular o fator de balanceamento de uma árvore. # + # variavel para armazenar nós de uma arvore e seus respectivos fatores de balanceamento def is_avl(arvore): #enquanto houver no não visitado: #fb = altura(no, dir) - altura(no, esq) #se fb <> -1, 0 ou 1: # retorna não é avl #retorna é avl return None arvore = ArvoreAVL() arvore.adiciona(120) arvore.adiciona(100) arvore.adiciona(130) arvore.adiciona(80) arvore.adiciona(110) arvore.adiciona(200) arvore.adiciona(150) fator_bal = arvore.fb(130) print(str(fator_bal)) def mostrar_altura(self, node): alt_left = 0 alt_right = 0 if node.left: alt_left = self.mostrar_altura(node.esq) if node.right: alt_right = self.mostrar_altura(node.dir) if alt_right > alt_left: return alt_right + 1 return alt_left + 1 # - # 2. Instancie as seguintes árvores ABB e verifique quais são AVL: # # <img src="./img/exercicio_abb.png" alt="Exercício ABB" width="600"/> # # Como preservar uma árvore AVL após operações de inserção e exclusão? # <img src="./img/avl.png" alt="Exercício ABB" width="200"/> # # | 1ª inserção | 2ª inserção | # |-------------|-------------| # |<img src="./img/avl1.png" alt="Exercício ABB" width="200"/>| <img src="./img/avl2.png" alt="Exercício ABB" width="250"/>| # # <img src="./img/avl3.png" alt="Exercício ABB" width="350"/> # # # Rotação # Quando uma inserção ou exclusão faz com que a árvore perca as propriedades de árvore AVL, deve-se realizar uma operação de reestruturação chamada <b>Rotação</b>. # # Rotação preserva a ordem das chaves, de modo que a árvore resultante é uma árvore binária de busca válida e é uma árvore AVL válida. 
# # # Balanceamento de Árvores AVL por Rotação # # * Rotação Simples # * Direita # * Esquerda # # * Rotação Dupla # * Direita # * Esquerda # # ## Rotação Simples Direita # Aplicar toda vez que uma subárvore ficar com um <b>FB negativo</b> e sua <b>subárvore esquerda</b> também tem um nó com <b>FB negativo</b>. # # <img src="./img/rotacao_simples_direita.png" alt="Rotação" width="500"/> # # ### Exemplo # <img src="./img/arvore_desbalanceada_direita.png" alt="Rotação" width="200"> # # |1 |2 | # |---------|---------| # |<img src="./img/arvore_desbalanceada_direita1.png" alt="Rotação" width="200">| <img src="./img/arvore_desbalanceada_direita2.png" alt="Rotação" width="200">| # # # ## Rotação Simples Esquerda # Aplicar toda vez que uma subárvore ficar com um <b>FB positivo</b> e sua <b>subárvore direita</b> também tem um nó com <b>FB positivo</b>. # # <img src="./img/rotacao_simples_esquerda.png" alt="Rotação" width="500"/> # # ### Exemplo # <img src="./img/arvore_desbalanceada_esquerda.png" alt="Rotação" width="200"> # # |1 |2 | # |---------|---------| # |<img src="./img/arvore_desbalanceada_esquerda1.png" alt="Rotação" width="200"> | <img src="./img/arvore_desbalanceada_esquerda2.png" alt="Rotação" width="200"> | # # # ## Rotação Dupla Direita - (Esquerda-Direita) # # Aplicar toda vez que uma subárvore ficar com um <b>FB negativo</b> e sua <b>subárvore esquerda</b> tem com um <b>FB positivo</b>. 
# # <img src="./img/rotacao_dupla_direita.png" alt="Rotação" width="400"/> # # ### Passos: # <img src="./img/rotacao_dupla_direita_passos.png" alt="Rotação" width="450"/> # # ### Exemplo: # # <img src="./img/rot_dupla_dir1.png" alt="Rotação" width="250"/> # # | <img src="./img/rot_dupla_dir2.png" alt="Rotação" width="200"/> | <img src="./img/rot_dupla_dir3.png" alt="Rotação" width="400"/> | # |------|------| # | <img src="./img/rot_dupla_dir4.png" alt="Rotação" width="400"/> | <img src="./img/rot_dupla_dir5.png" alt="Rotação" width="400"/> | # # # # ## Rotação Dupla Esquerda - (Direita-Esquerda) # Aplicar toda vez que uma subárvore ficar com um <b>FB positivo</b> e sua <b>subárvore direita</b> tem com um <b>FB negativo</b>. # <img src="./img/rotacao_dupla_esquerda.png" alt="Rotação" width="400"/> # # ### Passos # <img src="./img/rotacao_dupla_esquerda_passos.png" alt="Rotação" width="450"/> # # ### Exemplo # # <img src="./img/rot_dupla_esq1.png" alt="Rotação" width="200"/> # <img src="./img/rot_dupla_esq2.png" alt="Rotação" width="650"/> # # ## Exercícios # 1. Implemente o algoritmo de rotação simples direita. def rightRotate(self, z): y = z.left T3 = y.right # Realiza a rotação y.right = z z.left = T3 # Atualiza as alturas z.height = 1 + max(self.getAltura(z.left), self.getAltura(z.right)) y.height = 1 + max(self.getAltura(y.left), self.getAltura(y.right)) # Retorna o node que é a nova raiz return y def getAltura(self, root): if not root: return 0 return root.height def getBalanceamento(self, root): if not root: return 0 return self.getAltura(root.left) - self.getAltura(root.right) # 2. Implemente o algoritmo de rotação simples esquerda. 
def leftRotate(self, z): y = z.right T2 = y.left # Realiza a rotação y.left = z z.right = T2 # Atualiza as alturas z.height = 1 + max(self.getAltura(z.left), self.getAltura(z.right)) y.height = 1 + max(self.getAltura(y.left), self.getAltura(y.right)) # Retorna o node que é a nova raiz return y def getAltura(self, root): if not root: return 0 return root.height def getBalanceamento(self, root): if not root: return 0 return self.getAltura(root.left) - self.getAltura(root.right) # 3. Implemente o algoritmo de rotação dupla direita. if balanco > 1 and self.getBalanceamento(root.left) < 0: root.left = self.leftRotate(root.left) return self.rightRotate(root) # 4. Implemente o algoritmo de rotação dupla esquerda. if balanco < -1 and self.getBalanceamento(root.right) > 0: root.right = self.rightRotate(root.right) return self.leftRotate(root) # # Inserção de nós em Árvores AVL # # Percorre-se a árvore verificando se a chave já existe ou não # * Em caso positivo, encerra a tentativa de inserção # * Caso contrário, a busca encontra o local correto de inserção do novo nó # # Verifica-se se a inclusão tornará a árvore desbalanceada # * Em caso negativo, o processo termina # * Caso contrário, deve-se efetuar o balanceamento da árvore # # Descobre-se qual a operação de rotação a ser executada # # Executa-se a rotação # # ## Rebalanceamento # # #### Nó com FB = -2 e filho com FB = -1 ou 0: # * rotação do nó com FB = -2 p/ direita # # #### Nó com FB = +2 e filho com FB = +1 ou 0: # * rotação do nó com FB = +2 p/ esquerda # # #### Nó com FB = -2 e filho com FB = +1: # * rotação do nó com FB = +1 p/ esquerda, e # * rotação do nó com FB = -2 p/ direita # # #### Nó com FB = +2 e filho com FB = -1: # * rotação do nó com FB = -1 p/ direita, e # * rotação do nó com FB = +2 p/ esquerda # # ## Exercício # 5. Implemente um algoritmo de inserção de nós na árvore AVL. 
https://github.com/VanessaSilva99/EstruturaDeDados2/blob/main/AVL/Tree_AVL # # Remoção de nós em Árvores AVL # Funciona analogamente à inserção; a estrutura precisa ser avaliada para saber se é preciso rebalancear. # # 6. Implemente um algoritmo de remoção de nós na árvore AVL. https://github.com/VanessaSilva99/EstruturaDeDados2/blob/main/AVL/Tree_AVL
lab04.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # If the model is in another framework, we can only load and assign the parameter values to our tensorflow model. # import tensorflow and reset default graph import tensorflow as tf tf.reset_default_graph() # Run next cell to define our tf model # + # n_inputs = 2 n_hidden1 = 3 original_w = [[1., 2., 3.], [4., 5., 6.]] # the weights from the other framework original_b = [7., 8., 9.] # the biases from the other framework X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X") hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1") # - # Assign original_w to the weight of hidden1, original_b to the bias of hidden1. Print out the weights and bias of hidden1 to prove that the assignment is successful. Also give an input X=[[10, 11]] and prove the output of hidden1 is [[61, 83, 105]] graph = tf.get_default_graph() assign_kernel = graph.get_operation_by_name("hidden1/kernel/Assign") assign_bias = graph.get_operation_by_name("hidden1/bias/Assign") init_kernel = assign_kernel.inputs[1] # the tensor of the intial value. In contrast, assign_kernel.inputs[0] is the current value init_bias = assign_bias.inputs[1] assign_bias.inputs[1] init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init, feed_dict = {init_kernel: original_w, init_bias: original_b}) print(hidden1.eval(feed_dict={X: [[10.0, 11.0]]})) print(sess.run(assign_kernel.inputs[0])) print(sess.run(assign_bias.inputs[0]))
Chapter11_4_ReuseModelsFromOtherFramework.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="8rWr2eU1CTWB" colab_type="code" outputId="376d8b42-f971-4af9-cdde-3ba845efc231" colab={"base_uri": "https://localhost:8080/", "height": 35} from keras.datasets import mnist from keras.layers import Input, Dense from keras.models import Model from keras import backend as K import numpy as np import matplotlib.pyplot as plt # + id="yuqLyXyfCoID" colab_type="code" outputId="9ac38963-effc-4ebf-8f5e-0d9ea744fd74" colab={"base_uri": "https://localhost:8080/", "height": 127} from google.colab import drive drive.mount('/content/drive/') # + id="qc62KvMsC4L9" colab_type="code" outputId="4984d92d-d57c-4621-e0fa-a06bf68d411b" colab={"base_uri": "https://localhost:8080/", "height": 35} # ls # + id="O2vWLAHqC7Md" colab_type="code" outputId="23f1feb2-0f83-48f0-f208-ab7a1d7cf4c7" colab={"base_uri": "https://localhost:8080/", "height": 35} # cd drive/My\ Drive # + id="elg_mQeuDAYx" colab_type="code" outputId="6d41f3c6-22c3-441e-ac3c-306d750bcf4a" colab={"base_uri": "https://localhost:8080/", "height": 107} # ls # + id="S7iJQWhxDDdi" colab_type="code" outputId="a946c7aa-5922-4f83-c08b-fdeedbebd42e" colab={"base_uri": "https://localhost:8080/", "height": 35} # cd CITREP_Data+Code/ # + id="9LFjJ7AiDEhb" colab_type="code" outputId="886e6ee5-ca35-45ac-ab1b-4d0bbe6fffa8" colab={"base_uri": "https://localhost:8080/", "height": 467} # ls # + id="HnN4aGW3CTWE" colab_type="code" outputId="7a9ab400-d865-4d5c-a781-3b73e8edaa2a" colab={"base_uri": "https://localhost:8080/", "height": 53} # Loading MNIST Data; only images no labels (x_train, _), (x_test, _) = mnist.load_data() # + id="xq8CUCgLCTWH" colab_type="code" outputId="2e0d0a84-6c1a-4027-854e-eb85f389659a" colab={"base_uri": "https://localhost:8080/", "height": 53} x_train = x_train.astype('float32')/255.0 x_test = 
x_test.astype('float32')/255.0 x_train_fcnn = np.reshape(x_train, (len(x_train), 28*28)) x_test_fcnn = np.reshape(x_test, (len(x_test), 28*28)) print(x_train.shape) print(x_train_fcnn.shape) # + id="Grct3XybCTWJ" colab_type="code" outputId="79ebb6fa-e0b3-452c-9e04-c97fdc5cccd9" colab={"base_uri": "https://localhost:8080/", "height": 287} plt.imshow(x_train_fcnn[10].reshape(28, 28)) # + [markdown] id="bxV3aM-gCTWL" colab_type="text" # ### Adding Noise to the Data # + id="-62t4GXoCTWM" colab_type="code" outputId="22593387-5265-43ea-c7c1-7c97ac503d56" colab={"base_uri": "https://localhost:8080/", "height": 89} #adding noise factor intentionlly #pick random pixels from the image and add them randomly noise_factor = 0.5 x_train_fcnn_noise = x_train_fcnn + noise_factor * np.random.normal(loc = 0.0, scale = 1.0, size=x_train_fcnn.shape) x_test_fcnn_noise = x_test_fcnn + noise_factor * np.random.normal(loc = 0.0, scale = 1.0, size=x_test_fcnn.shape) print(x_train_fcnn_noise.min()) print(x_train_fcnn_noise.max()) x_train_fcnn_noise = np.clip(x_train_fcnn_noise, 0., 1.) x_test_fcnn_noise = np.clip(x_test_fcnn_noise, 0., 1.) 
print(x_train_fcnn_noise.min()) print(x_train_fcnn_noise.max()) # + [markdown] id="ZaE7zSrRCTWO" colab_type="text" # ### Display Noise Data # + id="nAVSRVihCTWP" colab_type="code" outputId="c91412e0-fd18-4016-bd2a-1ef65b227bcd" colab={"base_uri": "https://localhost:8080/", "height": 125} n = 10 plt.figure(figsize=(20, 2)) for i in range(n): ax = plt.subplot(1, n, i+1) plt.imshow(x_test_fcnn_noise[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() # + id="i8hgKNghCTWR" colab_type="code" outputId="a0a8ef97-fa0d-4371-ce25-aed24f61ca9c" colab={"base_uri": "https://localhost:8080/", "height": 287} plt.imshow(x_train_fcnn_noise[10].reshape(28, 28)) # + [markdown] id="zDEPQP1qCTWT" colab_type="text" # ### Build Fc-AutoEncoder Model # + id="bpp43z4oCTWU" colab_type="code" outputId="e9f4ee0f-1186-42a2-d3a5-1ad0859560c7" colab={"base_uri": "https://localhost:8080/", "height": 145} #the auto-encoder that we want to build so that we can get back our input input_img = Input(shape=(784,)) encoded = Dense(128, activation='relu')(input_img) encoded = Dense(64, activation='relu')(encoded) encoded = Dense(32, activation='relu')(encoded) decoded = Dense(64, activation='relu')(encoded) decoded = Dense(128, activation='relu')(decoded) #we use sigmoid because the outputs returned are in between 0 to 1 decoded = Dense(784, activation='sigmoid')(decoded) # + [markdown] id="Zm4GvK9JCTWW" colab_type="text" # ### Train the Auto-Encoder # + id="flIAj2nUCTWX" colab_type="code" outputId="1374edab-ca3b-4a64-f35b-a1486a8d873e" colab={"base_uri": "https://localhost:8080/", "height": 125} #training portion autoencoder = Model(input_img, decoded) autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy') autoencoder.fit(x_train_fcnn_noise, x_train_fcnn, #pixel to pixel comparison (we are not using y-train this time) epochs=2, batch_size=256, shuffle=True, validation_data=(x_test_fcnn_noise, x_test_fcnn)) #the more times we run 
this code here, the clearer the image output # + id="3bruWulkCTWZ" colab_type="code" outputId="0694e864-8eb8-4fe7-f855-0199160ac5b7" colab={"base_uri": "https://localhost:8080/", "height": 244} decoded_imgs = autoencoder.predict(x_test_fcnn_noise) n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_test_fcnn_noise[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() # + id="1U_HV3b9CTWb" colab_type="code" colab={}
Edited1_3h_keras_FC_AutoEncoder.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Visualisation Examples # # This notebook shows some of the visulisation utility of our toolkit. # # The core packages for visualisation are: # ### `rasterization` # contains classes for getting visual data as multi-channel tensors and turning them into interpretable RGB images. # Every class has at least a `rasterize` method to get the tensor and a `to_rgb` method to convert it into an image. # A few examples are: # - `BoxRasterizer`: this object renders agents (e.g. vehicles or pedestrians) as oriented 2D boxes # - `SatelliteRasterizer`: this object renders an oriented crop from a satellite map # # ### `visualization` # contains utilities to draw additional information (e.g. trajectories) onto RGB images. These utilities are commonly used after a `to_rgb` call to add other information to the final visualisation. # One example is: # - `draw_trajectory`: this function draws 2D trajectories from coordinates and yaws offset on an image # # + import matplotlib.pyplot as plt import numpy as np from l5kit.data import ChunkedDataset, LocalDataManager from l5kit.dataset import EgoDataset, AgentDataset from l5kit.rasterization import build_rasterizer from l5kit.configs import load_config_data from l5kit.visualization import draw_trajectory, TARGET_POINTS_COLOR from l5kit.geometry import transform_points from tqdm import tqdm from collections import Counter from l5kit.data import PERCEPTION_LABELS from prettytable import PrettyTable import os # - # ### First, let's configure where our data lives! # The data is expected to live in a folder that can be configured using the `L5KIT_DATA_FOLDER` env variable. You data folder is expected to contain subfolders for the aerial and semantic maps as well as the scenes (`.zarr` files). 
# In this example, the env variable is set to the local data folder. You should make sure the path points to the correct location for you. # # We built our code to work with a human-readable `yaml` config. This config file holds much useful information, however, we will only focus on a few functionalities concerning loading and visualization here. # set env variable for data os.environ["L5KIT_DATA_FOLDER"] = "PATH_TO_DATA" # get config cfg = load_config_data("./visualisation_config.yaml") print(cfg) # ### We can look into our current configuration for interesting fields # # \- when loaded in python, the `yaml`file is converted into a python `dict`. # # `raster_params` contains all the information related to the transformation of the 3D world onto an image plane: # - `raster_size`: the image plane size # - `pixel_size`: how many meters correspond to a pixel # - `ego_center`: our raster is centered around an agent, we can move the agent in the image plane with this param # - `map_type`: the rasterizer to be employed. We currently support a satellite-based and a semantic-based one. We will look at the differences further down in this script print(f'current raster_param:\n') for k,v in cfg["raster_params"].items(): print(f"{k}:{v}") # ## Load the data # # The same config file is also used to load the data. Every split in the data has its own section, and multiple datasets can be used (as a whole or sliced). In this short example we will only use the first dataset from the `sample` set. You can change this by configuring the 'train_data_loader' variable in the config. # # You may also have noticed that we're building a `LocalDataManager` object. This will resolve relative paths from the config using the `L5KIT_DATA_FOLDER` env variable we have just set. 
dm = LocalDataManager() dataset_path = dm.require(cfg["val_data_loader"]["key"]) zarr_dataset = ChunkedDataset(dataset_path) zarr_dataset.open() print(zarr_dataset) # ## Working with the raw data # # `.zarr` files support most of the traditional numpy array operations. In the following cell we iterate over the frames to get a scatter plot of the AV locations: # + frames = zarr_dataset.frames coords = np.zeros((len(frames), 2)) for idx_coord, idx_data in enumerate(tqdm(range(len(frames)), desc="getting centroid to plot trajectory")): frame = zarr_dataset.frames[idx_data] coords[idx_coord] = frame["ego_translation"][:2] plt.scatter(coords[:, 0], coords[:, 1], marker='.') axes = plt.gca() axes.set_xlim([-2500, 1600]) axes.set_ylim([-2500, 1600]) # - # Another easy thing to try is to get an idea of the agents types distribution. # # We can get all the agents `label_probabilities` and get the argmax for each raw. because `.zarr` files map to numpy array we can use all the traditional numpy operations and functions. # + agents = zarr_dataset.agents probabilities = agents["label_probabilities"] labels_indexes = np.argmax(probabilities, axis=1) counts = [] for idx_label, label in enumerate(PERCEPTION_LABELS): counts.append(np.sum(labels_indexes == idx_label)) table = PrettyTable(field_names=["label", "counts"]) for count, label in zip(counts, PERCEPTION_LABELS): table.add_row([label, count]) print(table) # - # ## Working with data abstraction # # Even though it's absolutely fine to work with the raw data, we also provide classes that abstract data access to offer an easier way to generate inputs and targets. # # ### Core Objects # Along with the `rasterizer`, our toolkit contains other classes you may want to use while you build your solution. The `dataset` package, for example, already implements `PyTorch` ready datasets, so you can hit the ground running and start coding immediately. 
# # ### Dataset package # We will use two classes from the `dataset` package for this example. Both of them can be iterated and return multi-channel images from the rasterizer along with future trajectories offsets and other information. # - `EgoDataset`: this dataset iterates over the AV annotations # - `AgentDataset`: this dataset iterates over other agents annotations # # Both support multi-threading (through PyTorch DataLoader) OOB. rast = build_rasterizer(cfg, dm) dataset = EgoDataset(cfg, zarr_dataset, rast) # ## What if I want to visualise the Autonomous Vehicle (AV)? # # Let's get a sample from the dataset and use our `rasterizer` to get an RGB image we can plot. # # If we want to plot the ground truth trajectory, we can convert the dataset's `target_position` (displacements in meters in world coordinates) into pixel coordinates in the image space, and call our utility function `draw_trajectory` (note that you can use this function for the predicted trajectories, as well). # + data = dataset[50] im = data["image"].transpose(1, 2, 0) im = dataset.rasterizer.to_rgb(im) target_positions_pixels = transform_points(data["target_positions"] + data["centroid"][:2], data["world_to_image"]) draw_trajectory(im, target_positions_pixels, data["target_yaws"], TARGET_POINTS_COLOR) plt.imshow(im[::-1]) plt.show() # - # ## What if I want to change the rasterizer? # # We can do so easily by building a new rasterizer and new dataset for it. In this example, we change the value to `py_satellite` which renders boxes on an aerial image. 
# + cfg["raster_params"]["map_type"] = "py_satellite" rast = build_rasterizer(cfg, dm) dataset = EgoDataset(cfg, zarr_dataset, rast) data = dataset[50] im = data["image"].transpose(1, 2, 0) im = dataset.rasterizer.to_rgb(im) target_positions_pixels = transform_points(data["target_positions"] + data["centroid"][:2], data["world_to_image"]) draw_trajectory(im, target_positions_pixels, data["target_yaws"], TARGET_POINTS_COLOR) plt.imshow(im[::-1]) plt.show() # - # ## What if I want to visualise an agent? # # Glad you asked! We can just replace the `EgoDataset` with an `AgentDataset`. Now we're iterating over agents and not the AV anymore, and the first one happens to be the pace car (you will see this one around a lot in the dataset). # + dataset = AgentDataset(cfg, zarr_dataset, rast) data = dataset[0] im = data["image"].transpose(1, 2, 0) im = dataset.rasterizer.to_rgb(im) target_positions_pixels = transform_points(data["target_positions"] + data["centroid"][:2], data["world_to_image"]) draw_trajectory(im, target_positions_pixels, data["target_yaws"], TARGET_POINTS_COLOR) plt.imshow(im[::-1]) plt.show() # - # ## System Origin and Orientation # # At this point you may have noticed that we flip the image on the **Y-axis** before plotting it. # # When moving from 3D to 2D we stick to a right-hand system, where the origin is in the bottom-left corner with positive x-values going right and positive y-values going up the image plane. The camera is facing down the negative z axis. # # However, both `opencv` and `pyplot` place the origin in the top-left corner with positive x going right and positive y going down in the image plane. The camera is facing down the positive z-axis. # # The flip done on the resulting image is for visualisation purposes to accommodate the difference in the two coordinate frames. # # Further, all our rotations are counter-clockwise for positive value of the angle. # ## How does an entire scene look like? 
# # It's easy to visualise an individual scene using our toolkit. Both `EgoDataset` and `AgentDataset` provide 2 methods for getting interesting indices: # - `get_frame_indices` returns the indices for a given frame. For the `EgoDataset` this matches a single observation, while more than one index could be available for the `AgentDataset`, as that given frame may contain more than one valid agent # - `get_scene_indices` returns indices for a given scene. For both datasets, these might return more than one index # # In this example, we visualise the second scene from the ego's point of view: # + from IPython.display import display, clear_output import PIL cfg["raster_params"]["map_type"] = "py_semantic" rast = build_rasterizer(cfg, dm) dataset = EgoDataset(cfg, zarr_dataset, rast) scene_idx = 2 indexes = dataset.get_scene_indices(scene_idx) images = [] for idx in indexes: data = dataset[idx] im = data["image"].transpose(1, 2, 0) im = dataset.rasterizer.to_rgb(im) target_positions_pixels = transform_points(data["target_positions"] + data["centroid"][:2], data["world_to_image"]) center_in_pixels = np.asarray(cfg["raster_params"]["ego_center"]) * cfg["raster_params"]["raster_size"] draw_trajectory(im, target_positions_pixels, data["target_yaws"], TARGET_POINTS_COLOR) clear_output(wait=True) display(PIL.Image.fromarray(im[::-1]))
examples/visualisation/visualise_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %run ../../common/import_all.py from common.setup_notebook import set_css_style, setup_matplotlib, config_ipython config_ipython() setup_matplotlib() set_css_style() # - # # The perceptron # The perceptron is (one of?) the very first algorithms for artificial neural networks, it was developed in 1957 at a [Cornell lab](https://en.wikipedia.org/wiki/Calspan) funded by the US Navy, by Rosenblatt, a psychologist who was a pioneer in the field of Artificial Intelligence, and published in [[1]](#rosenblatt). It was immediately advertised with lots of fanfare and great expectations (the NY Times at that time published an article [[2]](#nyt), boldly describing it as a machine which could think and learn on its own), and then the interest in it declined due to the unfeasability of many tasks it was supposed to tackle. Have a look a [[3]](#article) for a nice outline of the history of this algorithm. Rosenblatt's book # ## How it works # # # <figure style="float:left;"> # <img src="../../imgs/perceptron.png" width="400" align="left" style="margin:20px 50px"/> # <figcaption>Image from [[the M Nielsen book]](#book), which inspired most of the writing here.</figcaption> # </figure> # # The way a perceptron works is rather simple, yet quite ingenious. You have the representation of a neuron as per figure, where a series of binary input values $(x_1, x_2, \ldots, x_n)$ come in and an output is out. Inputs can be weighted differently, so that weights $(w_1, w_2, \ldots, w_n)$ are given. The output will depend on inputs and weights, the inputs being the values which determine a decision, the weights the how important each value is. 
# # The neuron is equipped with a threshold value $t$, or alternatively a bias value $b=-t$, such that the output $o$ follows rules # # $$ # o = \begin{cases} # 0 \ \text{ if } \ w \cdot x + b \leq 0\\ # 1 \ \text{ if } \ w \cdot x + b > 0 # \end{cases} # $$ # # which is to say that the neuron "fires" when the dot product of input and weights plus its bias pass 0 (or aleternatively when the dot product of weights and inputs passes threshold), a representation of what happens to real neurons when stimulated and producing electrical activity. The bias values makes things such that the larger it is, the easier it will be for the neuron to fire. # # So the perceptron is pretty much an artificial neuron with the output function given by the [Heaviside step](../maths/functions.ipynb#Heaviside-step), displayed in figure. # + x = np.arange(-10, 10, 1) y = [] for item in x: value = 0 if item < 0 else 1 y.append(value) plt.step(x, y) plt.xlim(-10, 10) plt.xlabel('$x$') plt.ylabel('$y$') plt.title('The Heaviside step function') plt.show(); # - # ### An example: a NAND gate # # You can represent a NAND gate with a perceptron. A NAND gate has truth table ($X$ and $Y$ being the inputs, $o$ the output): # # | X | Y | o | # | :-------: |:-----------:| :-----:| # | 0 | 0 | 1 | # | 0 | 1 | 1 | # | 1 | 0 | 1 | # | 1 | 1 | 0 | # # You can quickly obtain it with a perceptron, for example (again from [[Nielsen's book]](#1)) with weights $w_1=w_2=-2$ and bias $b=3$. I've done it in [[this repo]](#3). # ## References # # 1. <a name="rosenblatt"></a> <NAME>, Frank, *Principles of neurodynamics, perceptrons and the theory of brain mechanisms*, No. VG-1196-G-8. **Cornell Aeronautical Lab**, 1961 # 2. <a name="nyt"></a> The New York Times [article](http://www.nytimes.com/1958/07/08/archives/new-navy-device-learns-by-doing-psychologist-shows-embryo-of.html) on the perceptron # 3. 
<a name="article"></a> A fab [article](http://fusion.net/story/54904/thinking-computer-perceptron/?curator=MediaREDEF) on Fusion TV outlining a bit of the history of this algorithm, very nice read # 4. <a name="book"></a> *Neural Networks and Deep Learning on perceptrons*, a brilliant [book](http://neuralnetworksanddeeplearning.com/chap1.html#perceptrons) by <NAME> # 5. <a name="repo"></a> My [repo](https://github.com/martinapugliese/neural-nets-compilation) on coding ANNs from scratch
neural-nets/types/perceptron.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline # # # Roget # # # Build a directed graph of 1022 categories and # 5075 cross-references as defined in the 1879 version of Roget's Thesaurus # contained in the datafile roget_dat.txt. This example is described in # Section 1.2 in Knuth's book (see [1]_ and [2]_). # # Note that one of the 5075 cross references is a self loop yet # it is included in the graph built here because # the standard networkx `DiGraph` class allows self loops. # (cf. 400pungency:400 401 403 405). # # References # ---------- # # .. [1] <NAME>, # "The Stanford GraphBase: A Platform for Combinatorial Computing", # ACM Press, New York, 1993. # .. [2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html # # # + from __future__ import print_function # Authors: <NAME>, <NAME> (<EMAIL>) # Date: 2005-04-01 07:56:22 -0700 (Fri, 01 Apr 2005) # Copyright (C) 2004-2018 by # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # All rights reserved. # BSD license. import gzip import re import sys import matplotlib.pyplot as plt from networkx import nx def roget_graph(): """ Return the thesaurus graph from the roget.dat example in the Stanford Graph Base. 
""" # open file roget_dat.txt.gz (or roget_dat.txt) fh = gzip.open('roget_dat.txt.gz', 'r') G = nx.DiGraph() for line in fh.readlines(): line = line.decode() if line.startswith("*"): # skip comments continue if line.startswith(" "): # this is a continuation line, append line = oldline + line if line.endswith("\\\n"): # continuation line, buffer, goto next oldline = line.strip("\\\n") continue (headname, tails) = line.split(":") # head numfind = re.compile("^\d+") # re to find the number of this word head = numfind.findall(headname)[0] # get the number G.add_node(head) for tail in tails.split(): if head == tail: print("skipping self loop", head, tail, file=sys.stderr) G.add_edge(head, tail) return G if __name__ == '__main__': G = roget_graph() print("Loaded roget_dat.txt containing 1022 categories.") print("digraph has %d nodes with %d edges" % (nx.number_of_nodes(G), nx.number_of_edges(G))) UG = G.to_undirected() print(nx.number_connected_components(UG), "connected components") options = { 'node_color': 'black', 'node_size': 1, 'line_color': 'grey', 'linewidths': 0, 'width': 0.1, } nx.draw_circular(UG, **options) plt.show()
_downloads/plot_roget.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # ## UFO Sightings Implementation and Operations Lab # # The goal of this notebook is to train and deploy our model into SageMaker online hosting with 1 variant. # # What we plan on accompishling is the following: # 1. [Load dataset onto Notebook instance memory from S3](#Step-1:-Load-the-data-from-Amazon-S3) # 1. [Cleaning, transforming and preparing the dataset](#Step-2:-Cleaning,-transforming-and-preparing-the-dataset) # 1. [Create and train our model (Linear Learner)](#Step-4:-Creating-and-training-our-model-(Linear-Learner)) # 1. [Deploying the model into SageMaker hosting](#Step-4:-Deploying-the-model-into-SageMaker-hosting) # First let's go ahead and import all the needed libraries. # + import pandas as pd import numpy as np from datetime import datetime import io import sagemaker.amazon.common as smac import boto3 from sagemaker import get_execution_role import sagemaker import matplotlib.pyplot as plt import seaborn as sns # - # ## Step 1: Loading the data from Amazon S3 # Let's get the UFO sightings data that is stored in S3 and load it into memory. # + role = get_execution_role() bucket='<INSERT_BUCKET_NAME_HERE>' sub_folder = 'ufo_dataset' data_key = 'ufo_fullset.csv' data_location = 's3://{}/{}/{}'.format(bucket, sub_folder, data_key) df = pd.read_csv(data_location, low_memory=False) df.head() # - # <hr> # ## Step 2: Cleaning, transforming and preparing the dataset # This step is so important. It's crucial that we clean and prepare our data before we do anything else. # Let's go ahead and start preparing our dataset by transforming some of the values into the correct data types. Here is what we are going to take care of. # 1. Convert the `reportedTimestamp` and `eventDate` to a datetime data types. # 1. 
Convert the `shape` and `weather` to a category data type. # 1. Map the `physicalEvidence` and `contact` from 'Y', 'N' to `0`, `1`. # 1. Convert the `researchOutcome` to a category data type (target attribute). # # Let's also drop the columns that are not important. # 1. We can drop `sighting` becuase it is always 'Y' or Yes. # 1. Let's drop the `firstName` and `lastName` becuase they are not important in determining the `researchOutcome`. # 1. Let's drop the `reportedTimestamp` becuase when the sighting was reporting isn't going to help us determine the legitimacy of the sighting. # 1. We would need to create some sort of buckets for the `eventDate` and `eventTime`, like seasons for example, but since the distribution of dates is pretty even, let's go ahead and drop them. # # Finally, let's apply one-hot encoding # 1. We need to one-hot both the `weather` attribute and the `shape` attribute. # 1. We also need to transform or map the researchOutcome (target) attribute into numeric values. This is what the alogrithm is expecting. We can do this by mapping unexplained, explained, and probable to 0, 1, 2. 
# + # Replace the missing values with the most common shape df['shape'] = df['shape'].fillna(df['shape'].value_counts().index[0]) df['reportedTimestamp'] = pd.to_datetime(df['reportedTimestamp']) df['eventDate'] = pd.to_datetime(df['eventDate']) df['shape'] = df['shape'].astype('category') df['weather'] = df['weather'].astype('category') df['physicalEvidence'] = df['physicalEvidence'].replace({'Y': 1, 'N': 0}) df['contact'] = df['contact'].replace({'Y': 1, 'N': 0}) df['researchOutcome'] = df['researchOutcome'].astype('category') df.drop(columns=['firstName', 'lastName', 'sighting', 'reportedTimestamp', 'eventDate', 'eventTime'], inplace=True) # Let's one-hot the weather and shape attribute df = pd.get_dummies(df, columns=['weather', 'shape']) # Let's replace the researchOutcome values with 0, 1, 2 for Unexplained, Explained, and Probable df['researchOutcome'] = df['researchOutcome'].replace({'unexplained': 0, 'explained': 1, 'probable': 2}) # - display(df.head()) display(df.shape) # <hr> # --- # # ## Step 3: Creating and training our model (Linear Learner) # # Let's evaluate the Linear Learner algorithm as well. Let's go ahead and randomize the data again and get it ready for the Linear Leaner algorithm. We will also rearrange the columns so it is ready for the algorithm (it expects the first column to be the target attribute) # + np.random.seed(0) rand_split = np.random.rand(len(df)) train_list = rand_split < 0.8 val_list = (rand_split >= 0.8) & (rand_split < 0.9) test_list = rand_split >= 0.9 # This dataset will be used to train the model. data_train = df[train_list] # This dataset will be used to validate the model. data_val = df[val_list] # This dataset will be used to test the model. data_test = df[test_list] # Breaks the datasets into attribute numpy.ndarray and the same for target attribute. 
train_X = data_train.drop(columns='researchOutcome').values train_y = data_train['researchOutcome'].values val_X = data_val.drop(columns='researchOutcome').values val_y = data_val['researchOutcome'].values test_X = data_test.drop(columns='researchOutcome').values test_y = data_test['researchOutcome'].values # - # Next, Let's create recordIO file for the training data and upload it to S3. # + train_file = 'ufo_sightings_train_recordIO_protobuf.data' f = io.BytesIO() smac.write_numpy_to_dense_tensor(f, train_X.astype('float32'), train_y.astype('float32')) f.seek(0) boto3.Session().resource('s3').Bucket(bucket).Object('implementation_operations_lab/linearlearner_train/{}'.format(train_file)).upload_fileobj(f) training_recordIO_protobuf_location = 's3://{}/implementation_operations_lab/linearlearner_train/{}'.format(bucket, train_file) print('The Pipe mode recordIO protobuf training data: {}'.format(training_recordIO_protobuf_location)) # - # Let's create recordIO file for the validation data and upload it to S3 # + validation_file = 'ufo_sightings_validatioin_recordIO_protobuf.data' f = io.BytesIO() smac.write_numpy_to_dense_tensor(f, val_X.astype('float32'), val_y.astype('float32')) f.seek(0) boto3.Session().resource('s3').Bucket(bucket).Object('implementation_operations_lab/linearlearner_validation/{}'.format(validation_file)).upload_fileobj(f) validate_recordIO_protobuf_location = 's3://{}/implementation_operations_lab/linearlearner_validation/{}'.format(bucket, validation_file) print('The Pipe mode recordIO protobuf validation data: {}'.format(validate_recordIO_protobuf_location)) # - # --- # # Alright we are good to go for the Linear Learner algorithm. Let's get everything we need from the ECR repository to call the Linear Learner algorithm. 
from sagemaker import image_uris container = image_uris.retrieve('linear-learner', boto3.Session().region_name, '1') # + # Create a training job name job_name = 'ufo-linear-learner-job-{}'.format(datetime.now().strftime("%Y%m%d%H%M%S")) print('Here is the job name {}'.format(job_name)) # Here is where the model-artifact will be stored output_location = 's3://{}/implementation_operations_lab/linearlearner_output'.format(bucket) # - # Next we start building out our model by using the SageMaker Python SDK and passing in everything that is required to create a Linear Learner model. # # First I like to always create a specific job name. Next, we'll need to specify training parameters. # # Finally, after everything is included and ready, then we can call the `.fit()` function which specifies the S3 location for training and validation data. print('The feature_dim hyperparameter needs to be set to {}.'.format(data_train.shape[1] - 1)) # + sess = sagemaker.Session() # Setup the LinearLeaner algorithm from the ECR container linear = sagemaker.estimator.Estimator(container, role, instance_count=1, instance_type='ml.c4.xlarge', output_path=output_location, sagemaker_session=sess, input_mode='Pipe') # Setup the hyperparameters linear.set_hyperparameters(feature_dim=22, predictor_type='multiclass_classifier', num_classes=3 # add optimized hyperparmeters here # add optimized hyperparmeters here # add optimized hyperparmeters here # add optimized hyperparmeters here # add optimized hyperparmeters here # add optimized hyperparmeters here ) # Launch a training job. This method calls the CreateTrainingJob API call data_channels = { 'train': training_recordIO_protobuf_location, 'validation': validate_recordIO_protobuf_location } linear.fit(data_channels, job_name=job_name) # - print('Here is the location of the trained Linear Learner model: {}/{}/output/model.tar.gz'.format(output_location, job_name)) # From here we have our trained model we can deploy into production! 
# ---
#
# ## Step 4: Deploying the model into SageMaker hosting
#
# Next, let's deploy the model into SageMaker hosting onto a single m4 instance. We can then use this instance to test the model with the test data that we held out at the beginning of the notebook. We can then evaluate things like accuracy, precision, recall, and f1 score.
#
# We can use some fancy libraries to build out a confusion matrix/heatmap to see how accurate our model is.

multiclass_predictor = linear.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')

# This next code is just setup code to allow us to draw out nice and pretty confusion matrix/heatmap.

# +
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels


def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=None):
    """
    This function prints and plots the confusion matrix.

    Parameters
    ----------
    y_true, y_pred : array-like of shape (n_samples,)
        Actual and predicted class labels.
    classes : ndarray
        Display names for the labels; filtered down to the labels that
        actually appear in the data.
    normalize : bool
        If True, show per-row proportions instead of raw counts.
    title : str, optional
        Plot title; a sensible default is chosen based on `normalize`.
    cmap : matplotlib colormap, optional
        Heatmap colormap.  Defaults to Greens for normalized matrices and
        Blues otherwise.

    Returns
    -------
    matplotlib.axes.Axes
        The axes holding the rendered heatmap.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # BUG FIX: the original evaluated `plt.cm.Greens` as a bare, no-op
    # expression inside the normalize branch and then passed cmap=None to
    # imshow; the intent was clearly a default colormap.
    if cmap is None:
        cmap = plt.cm.Greens if normalize else plt.cm.Blues

    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        # Row-normalize: each row sums to 1 (per-actual-class proportions).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='Actual',
           xlabel='Predicted')

    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")

    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # White text on dark cells, black on light ones, for contrast.
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax


np.set_printoptions(precision=2)

# +
# from sagemaker.predictor import json_deserializer, csv_serializer
# multiclass_predictor.content_type = 'text/csv'
multiclass_predictor.serializer = sagemaker.serializers.CSVSerializer()
multiclass_predictor.deserializer = sagemaker.deserializers.JSONDeserializer()

# Score the held-out test set against the live endpoint.
predictions = []
results = multiclass_predictor.predict(test_X)
predictions += [r['predicted_label'] for r in results['predictions']]
predictions = np.array(predictions)

# +
# %matplotlib inline
sns.set_context("paper", font_scale=1.4)

y_test = test_y
y_pred = predictions
class_names = np.array(['Unexplained', 'Explained', 'Probable'])

# Plot non-normalized confusion matrix
plot_confusion_matrix(y_test, y_pred, classes=class_names,
                      title='Confusion matrix',
                      cmap=plt.cm.Blues)
plt.grid(False)
plt.show()

# +
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score

y_test = data_test['researchOutcome']
y_pred = predictions
scores = precision_recall_fscore_support(y_test, y_pred, average='macro', labels=np.unique(y_pred))
acc = accuracy_score(y_test, y_pred)
print('Accuracy is: {}'.format(acc))
print('Precision is: {}'.format(scores[0]))
print('Recall is: {}'.format(scores[1]))
print('F1 score is: {}'.format(scores[2]))
# -
Chapter9/ufo-implementation-operations-lab.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Load the toy dataset and split it into feature columns and the target label.
music_data = pd.read_csv('music.csv')
features = music_data.drop(columns=['genre'])
labels = music_data['genre']

# Hold out an evaluation split.
# NOTE(review): test_size=0.8 trains on only 20% of the rows — confirm this
# is intentional before relying on the reported accuracy.
features_train, features_test, labels_train, labels_test = train_test_split(
    features, labels, test_size=0.8)

# Fit a decision tree on the training rows and score it on the held-out rows.
classifier = DecisionTreeClassifier()
classifier.fit(features_train, labels_train)
prediction = classifier.predict(features_test)
# prediction
score = accuracy_score(labels_test, prediction)
score
# -

music_data
.ipynb_checkpoints/HelloWorld-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd import numpy as np import matplotlib as mpl import pickle import itertools from sklearn.manifold import TSNE mpl.rcParams['pdf.fonttype'] = 42 mpl.rcParams['ps.fonttype'] = 42 mpl.rcParams['font.family'] = 'Arial' # + def seperateCNN(): input1 = keras.Input(shape=(10, 12, 1)) input2 = keras.Input(shape=(46, 12, 1)) x = layers.Conv2D(filters=16, kernel_size=(2, 12))(input1) # 9 x = layers.BatchNormalization()(x) x = keras.activations.relu(x) x = layers.Conv2D(filters=32, kernel_size=(2, 1))(x) # 8 x = layers.BatchNormalization()(x) x = keras.activations.relu(x) x = layers.MaxPool2D(pool_size=(2, 1), strides=(2, 1))(x) # 4 x = layers.Flatten()(x) x = keras.Model(inputs=input1, outputs=x) y = layers.Conv2D(filters=16, kernel_size=(15, 12))(input2) # 32 y = layers.BatchNormalization()(y) y = keras.activations.relu(y) y = layers.MaxPool2D(pool_size=(2, 1), strides=(2, 1))(y) # 16 y = layers.Conv2D(filters=32,kernel_size=(9,1))(y) # 8 y = layers.BatchNormalization()(y) y = keras.activations.relu(y) y = layers.MaxPool2D(pool_size=(2, 1),strides=(2,1))(y) # 4 y = layers.Flatten()(y) y = keras.Model(inputs=input2,outputs=y) combined = layers.concatenate([x.output,y.output]) z = layers.Dense(128,activation='relu')(combined) z = layers.Dropout(0.2)(z) z = layers.Dense(1,activation='sigmoid')(z) model = keras.Model(inputs=[input1,input2],outputs=z) return model def pull_peptide_aaindex(dataset): result = np.empty([len(dataset),10,12,1]) for i in range(len(dataset)): result[i,:,:,:] = dataset[i][0] return result def pull_hla_aaindex(dataset): result = np.empty([len(dataset),46,12,1]) for i in range(len(dataset)): result[i,:,:,:] = dataset[i][1] return result def 
pull_label_aaindex(dataset): col = [item[2] for item in dataset] result = [0 if item == 'Negative' else 1 for item in col] result = np.expand_dims(np.array(result),axis=1) return result def pull_label_aaindex(dataset): result = np.empty([len(dataset),1]) for i in range(len(dataset)): result[i,:] = dataset[i][2] return result def aaindex(peptide,after_pca): amino = 'ARNDCQEGHILKMFPSTWYV-' matrix = np.transpose(after_pca) # [12,21] encoded = np.empty([len(peptide), 12]) # (seq_len,12) for i in range(len(peptide)): query = peptide[i] if query == 'X': query = '-' query = query.upper() encoded[i, :] = matrix[:, amino.index(query)] return encoded def peptide_data_aaindex(peptide,after_pca): # return numpy array [10,12,1] length = len(peptide) if length == 10: encode = aaindex(peptide,after_pca) elif length == 9: peptide = peptide[:5] + '-' + peptide[5:] encode = aaindex(peptide,after_pca) encode = encode.reshape(encode.shape[0], encode.shape[1], -1) return encode def dict_inventory(inventory): dicA, dicB, dicC = {}, {}, {} dic = {'A': dicA, 'B': dicB, 'C': dicC} for hla in inventory: type_ = hla[4] # A,B,C first2 = hla[6:8] # 01 last2 = hla[8:] # 01 try: dic[type_][first2].append(last2) except KeyError: dic[type_][first2] = [] dic[type_][first2].append(last2) return dic def rescue_unknown_hla(hla, dic_inventory): type_ = hla[4] first2 = hla[6:8] last2 = hla[8:] big_category = dic_inventory[type_] #print(hla) if not big_category.get(first2) == None: small_category = big_category.get(first2) distance = [abs(int(last2) - int(i)) for i in small_category] optimal = min(zip(small_category, distance), key=lambda x: x[1])[0] return 'HLA-' + str(type_) + '*' + str(first2) + str(optimal) else: small_category = list(big_category.keys()) distance = [abs(int(first2) - int(i)) for i in small_category] optimal = min(zip(small_category, distance), key=lambda x: x[1])[0] return 'HLA-' + str(type_) + '*' + str(optimal) + str(big_category[optimal][0]) def 
hla_data_aaindex(hla_dic,hla_type,after_pca): # return numpy array [34,12,1] try: seq = hla_dic[hla_type] except KeyError: hla_type = rescue_unknown_hla(hla_type,dic_inventory) seq = hla_dic[hla_type] encode = aaindex(seq,after_pca) encode = encode.reshape(encode.shape[0], encode.shape[1], -1) return encode def construct_aaindex(ori,hla_dic,after_pca): series = [] for i in range(ori.shape[0]): peptide = ori['peptide'].iloc[i] hla_type = ori['HLA'].iloc[i] immuno = np.array(ori['immunogenicity'].iloc[i]).reshape(1,-1) # [1,1] encode_pep = peptide_data_aaindex(peptide,after_pca) # [10,12] encode_hla = hla_data_aaindex(hla_dic,hla_type,after_pca) # [46,12] series.append((encode_pep, encode_hla, immuno)) return series def hla_df_to_dic(hla): dic = {} for i in range(hla.shape[0]): col1 = hla['HLA'].iloc[i] # HLA allele col2 = hla['pseudo'].iloc[i] # pseudo sequence dic[col1] = col2 return dic def retain_910(ori): cond = [] for i in range(ori.shape[0]): peptide = ori['peptide'].iloc[i] if len(peptide) == 9 or len(peptide) == 10: cond.append(True) else: cond.append(False) data = ori.loc[cond] data = data.set_index(pd.Index(np.arange(data.shape[0]))) return data # - # some preparation after_pca = np.loadtxt('../data/after_pca.txt') hla = pd.read_csv('../data/hla2paratopeTable_aligned.txt',sep='\t') hla_dic = hla_df_to_dic(hla) inventory = list(hla_dic.keys()) dic_inventory = dict_inventory(inventory) data = pd.read_csv('../data/gan_a0201.csv') dataset = construct_aaindex(data, hla_dic, after_pca) X = np.empty((len(dataset), 12 * 10)) for i, (x, y, _) in enumerate(dataset): x = x.reshape(-1) X[i, :] = x ''' Let's first show the TSNE plot in supplementary Figure 6 ''' df = pd.read_csv('../data/df/df_noise.csv') test_dataset = construct_aaindex(df, hla_dic, after_pca) test_X = np.empty((len(test_dataset), 12 * 10)) for i, (x, y, _) in enumerate(test_dataset): x = x.reshape(-1) test_X[i, :] = x coalesed = np.concatenate([X, test_X], axis=0) coalesed_embedded = 
TSNE(n_components=2,random_state=42).fit_transform(coalesed) from itertools import repeat fig, ax = plt.subplots() ax.scatter(coalesed_embedded[:, 0], coalesed_embedded[:, 1], color=list(repeat('b', X.shape[0])) + list(repeat('r', test_X.shape[0])),s=3) h1 = [ax.plot([], [], color=i, marker='o', markersize=5, ls='')[0] for i in ['b','r']] leg = ax.legend(handles=h1,labels=['Real peptides','Pseudo peptides']) ax.add_artist(leg) ax.set_xlabel('t-sne1') ax.set_ylabel('t-sne2') df = pd.read_csv('../data/df/df_all_epoch20.csv') test_dataset = construct_aaindex(df, hla_dic, after_pca) test_X = np.empty((len(test_dataset), 12 * 10)) for i, (x, y, _) in enumerate(test_dataset): x = x.reshape(-1) test_X[i, :] = x coalesed = np.concatenate([X, test_X], axis=0) coalesed_embedded = TSNE(n_components=2,random_state=42).fit_transform(coalesed) from itertools import repeat fig, ax = plt.subplots() ax.scatter(coalesed_embedded[:, 0], coalesed_embedded[:, 1], color=list(repeat('b', X.shape[0])) + list(repeat('r', test_X.shape[0])),s=3) h1 = [ax.plot([], [], color=i, marker='o', markersize=5, ls='')[0] for i in ['b','r']] leg = ax.legend(handles=h1,labels=['Real peptides','Pseudo peptides']) ax.add_artist(leg) ax.set_xlabel('t-sne1') ax.set_ylabel('t-sne2') df = pd.read_csv('../data/df/df_all_epoch40.csv') test_dataset = construct_aaindex(df, hla_dic, after_pca) test_X = np.empty((len(test_dataset), 12 * 10)) for i, (x, y, _) in enumerate(test_dataset): x = x.reshape(-1) test_X[i, :] = x coalesed = np.concatenate([X, test_X], axis=0) coalesed_embedded = TSNE(n_components=2,random_state=42).fit_transform(coalesed) from itertools import repeat fig, ax = plt.subplots() ax.scatter(coalesed_embedded[:, 0], coalesed_embedded[:, 1], color=list(repeat('b', X.shape[0])) + list(repeat('r', test_X.shape[0])),s=3) h1 = [ax.plot([], [], color=i, marker='o', markersize=5, ls='')[0] for i in ['b','r']] leg = ax.legend(handles=h1,labels=['Real peptides','Pseudo peptides']) ax.add_artist(leg) 
ax.set_xlabel('t-sne1')
ax.set_ylabel('t-sne2')

from itertools import repeat
from sklearn.decomposition import PCA

# ---------------------------------------------------------------------------
# The notebook originally repeated the same "read csv -> encode -> embed ->
# scatter" cell once per checkpoint.  The two helpers below factor that out;
# the loops reproduce the original figures in the original order.
# ---------------------------------------------------------------------------

def _encode_pseudo(csv_path):
    """Read one generated-peptide csv and encode it into an (n, 120) matrix.

    Uses the same construct_aaindex / hla_dic / after_pca encoding as the
    real-peptide matrix X built earlier in this notebook.
    """
    df = pd.read_csv(csv_path)
    dataset = construct_aaindex(df, hla_dic, after_pca)
    mat = np.empty((len(dataset), 12 * 10))
    for i, (x, y, _) in enumerate(dataset):
        mat[i, :] = x.reshape(-1)
    return mat


def _scatter(embedded, n_real, n_pseudo, xlabel, ylabel):
    """Scatter real (blue) vs pseudo (red) samples with the shared legend."""
    fig, ax = plt.subplots()
    ax.scatter(embedded[:, 0], embedded[:, 1],
               color=list(repeat('b', n_real)) + list(repeat('r', n_pseudo)), s=3)
    h1 = [ax.plot([], [], color=c, marker='o', markersize=5, ls='')[0] for c in ['b', 'r']]
    leg = ax.legend(handles=h1, labels=['Real peptides', 'Pseudo peptides'])
    ax.add_artist(leg)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)


# t-SNE embeddings: one figure per remaining training checkpoint.
for epoch in (60, 80, 100):
    test_X = _encode_pseudo('../data/df/df_all_epoch{}.csv'.format(epoch))
    coalesed = np.concatenate([X, test_X], axis=0)
    coalesed_embedded = TSNE(n_components=2, random_state=42).fit_transform(coalesed)
    _scatter(coalesed_embedded, X.shape[0], test_X.shape[0], 't-sne1', 't-sne2')

# +
'''
Then let's show PCA
'''
# PCA embeddings: noise baseline plus every checkpoint.
for name in ('noise', 'all_epoch20', 'all_epoch40',
             'all_epoch60', 'all_epoch80', 'all_epoch100'):
    test_X = _encode_pseudo('../data/df/df_{}.csv'.format(name))
    coalesed = np.concatenate([X, test_X], axis=0)

    # Report how many components are needed to explain 90% of the variance
    # of the real-peptide matrix (printed i was PC54 in the original run).
    reducer = PCA()
    reducer.fit(X)
    total = 0
    for i, v in enumerate(reducer.explained_variance_ratio_):
        total += v
        if total > 0.9:
            print(i)  # PC54
            break

    coalesed_embedded = PCA(n_components=54).fit_transform(coalesed)
    _scatter(coalesed_embedded, X.shape[0], test_X.shape[0], 'PC1', 'PC2')
# -

# +
'''
where do these numbers come from? Just put ../data/df/df_all_epoch.csv file and subject them to either
web server (deepimmuno-cnn) or command line tool, then count how many of them have immunogenicity > 0.5,
which will be counted as immunogenic peptides
'''
fig, ax = plt.subplots()
counts = [414, 515, 622, 650, 659, 679]
ax.bar(np.arange(6), counts, color='orange', width=0.4)
ax.set_ylim([0, 800])
ax.plot(np.arange(6), counts, marker='o', linestyle='-', color='k')
# Annotate each bar with its count, slightly above the marker.
for i, c in enumerate(counts):
    ax.text(i - 0.1, c + 15, s=c)
ax.set_xticks(np.arange(6))
ax.set_xticklabels(['noise', 'epoch20', 'epoch40', 'epoch60', 'epoch80', 'epoch100'])
ax.set_ylabel('Amount of immunogenic peptides')
ax.grid(True, alpha=0.3)
# -
reproduce/fig/fig6_supp6.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # -*- coding: cp1252 -*- from mechanize import Browser import os import pandas as pd import html5lib from bs4 import BeautifulSoup #os.chdir(r"E:\Dropbox\Dropbox\0_DB\A_Elaborazioni\Coeweb") class coeweb(object): def __init__(self, link): self.pandas_db = pd.DataFrame() self.link = link br = Browser() br.open(self.link) br.select_form(nr=0) self.paesi = [] self.paesi_descizione = [] control = br.form.find_control("PAESE") if control.type == "select": for item in control.items: self.paesi.append(item.name) self.paesi_descizione.append((item.name, str([label.text for label in item.get_labels()]))) self.territori = [] self.territori_descrizione = [] control = br.form.find_control("CODTERRITORIO") if control.type == "select": for item in control.items: self.territori.append((item.name, str([label.text for label in item.get_labels()]))) self.territori_descrizione.append((item.name, str([label.text for label in item.get_labels()]))) self.anni = [] control = br.form.find_control("ANNO") if control.type == "select": for item in control.items: self.anni.append((item.name, str([label.text for label in item.get_labels()]))) self.mese = [] control = br.form.find_control("MESE") if control.type == "select": for item in control.items: self.mese.append((item.name, str([label.text for label in item.get_labels()]))) self.cumulato = [] control = br.form.find_control("TIPOVALORE") for item in control.items: self.cumulato.append((item.name, str([label.text for label in item.get_labels()]))) self.bric = ['508', '720', '664', '75'] #[('508', "['508 - Brasile']"), ('720', "['720 - Cina']"), ('664', "['664 - India']"), ('75', "['75 - Russia']")] def convert_string_euro(self,x): '''questa funzione rimuove la formattazione dei numeri di coeweb, trasformando il testo in 
numeri''' try: return float(x.replace(".", "")) except: pass def scarica_uno(self, territorio = ["309048"], paese = ["1033"], cumulato = ['M'], anno = ["2013"], mese = ["4"] ):# , paese, anno, trimestre, modalita) br = Browser() br.open(self.link) br.select_form(nr=0) control_territorio = br.form.find_control("CODTERRITORIO") br[control_territorio.name] = territorio #["309048"] control_paese = br.form.find_control("PAESE") br[control_paese.name] = paese #["309048"] control_anno = br.form.find_control("ANNO") br[control_anno.name] = anno #["309048"] control_mese = br.form.find_control("MESE") br[control_mese.name] = mese #["309048"] control_cumulato = br.form.find_control("TIPOVALORE") br[control_cumulato.name] = cumulato #["309048"] nome_stringa = territorio[0] + "_" + paese[0] + "_" + anno[0] + "_" + mese[0] + "_" + cumulato[0] response = br.submit() out_file = open(os.path.join(os.getcwd(), "scaricati", nome_stringa +".html"), "w") out_file.write(response.read()) out_file.close() #os.system("start " + nome_stringa +".html") self.dati = pd.read_html(os.path.join(os.getcwd(), "scaricati", nome_stringa +".html"), header= 0, index_col=0, encoding = "cp1252")#'utf8' #os.remove(territorio[0] +".html") print "download_" + nome_stringa br.close() return self.dati # i dati dovrebbero essere raccolti in un pandas def scarica_molti(self, territorio = ["309048"], lista_paesi = None, lista_anni = None, lista_mesi = None, lista_cumulato = ["M"]): ''' questa funzione permette di scaricare per uno o più territori uno o più settori, per uno o più periodi di tempo''' #self.pandas_db = pd.DataFrame() def nome_paese(dati, codice): for item in dati: if item[0] == codice: pulito1= item[1].split("'",)[1] pulito2 = pulito1.split(" - ",)[1] print pulito2 return pulito2 def nome_territorio(dati, codice): for item in dati: if item[0] == codice: try: pulito1= item[1].split("'")[1] pulito1 = pulito1.replace("-","") pulito1 = pulito1.replace("'","") return pulito1 except: pass for 
item_territori in territorio: for item_paesi in lista_paesi: for item_anni in lista_anni: for item_mesi in lista_mesi: dataset = self.scarica_uno([item_territori], paese = [str(item_paesi)], anno = [str(item_anni)], mese = [str(item_mesi)], cumulato = lista_cumulato) dataset = dataset[1]#.set_index(territorio, str(item_paesi), str(item_anni), str(item_mesi)]) dataset["territorio"] = nome_territorio(self.territori_descrizione, item_territori) #dataset["territorio"] = item_territori #nome_paese(self.paesi_descizione, str(item_paesi)) dataset["paese"] = nome_paese(self.paesi_descizione, str(item_paesi)) dataset["anno"] = str(item_anni) dataset["mese"] = str(item_mesi) dataset["cumulato"] = lista_cumulato[0] dataset.set_index(["territorio","paese","anno","mese","cumulato"], append = True, inplace = True) dataset["DATI EXPORT"] = dataset["EXP" + str(item_anni)] dataset = dataset["DATI EXPORT"].apply(lambda x: self.convert_string_euro(x)) # datasetcompleto self.pandas_db = pd.concat([self.pandas_db, pd.DataFrame(dataset)]) # settori ateco disponibili self.pandas_db.settori = set(self.pandas_db.reset_index()["Divisioni"]) #print "errore_" + territorio[0] + "_" + str(item_paesi) + "_" + str(item_anni) + "_" + str(item_mesi) + "_" + lista_cumulato[0] for scaricato in os.listdir(os.path.join(os.getcwd(), "scaricati")): os.remove(os.path.join(os.getcwd(), "scaricati", scaricato)) print "eseguito" def analizza_settori(self, settori, interpolazione = "no"): ''' questa funzione restituisce un dataframe e un dizionario contenente le serie storiche per i settori inseriti come argomento [lista]. 
Effettua anche l'interpolazione dei dati mancanti''' self.dict_settore = {} for item in settori: dati_settore = self.pandas_db.xs(item, level=0) if interpolazione == "yes": dati_settore["DATI EXPORT"] = dati_settore["DATI EXPORT"].apply(lambda x: convert_string_euro(x)).interpolate() else: dati_settore["DATI EXPORT"] = dati_settore["DATI EXPORT"].apply(lambda x: convert_string_euro(x)) self.dict_settore[item] = dati_settore["DATI EXPORT"] self.dataframe_settori = pd.DataFrame(self.dict_settore) return self.dataframe_settori # - def convert_string_euro(x): try: return float(x.replace(".", "")) except: pass os.getcwd() # + lista_anni = [str(i) for i in range(2009,2016)] # - lista_anni Firenze = coeweb("http://www.coeweb.istat.it/predefinite/tutto_paese_merce.asp?livello=ATE07_AT2&riga=MERCE&territorio=S&AG=S") Firenze.paesi_descizione metropolitana = [('309', "['---Toscana']"), ('309047', "['------Pistoia']"), ('309048', "['------Firenze']"), ('309100', "['------Prato']")] CodiciMetropolitana = [] for item in metropolitana: CodiciMetropolitana.append(item[0]) CodiciMetropolitana Firenze.scarica_molti(territorio = CodiciMetropolitana, lista_paesi = ['6'], lista_anni = lista_anni, lista_mesi = ["9"], lista_cumulato = ['C']) dati_toscana_completi = Firenze.pandas_db dati_toscana_completi = dati_toscana_completi.reset_index() dati_toscana_completi["anno"] = pd.to_datetime(dati_toscana_completi.anno, format="%Y") dati_toscana_completi.drop(["paese", "mese", "cumulato"], axis = 1, inplace = True) dati_toscana_completi.to_excel("dati_metropolitana_completi.xlsx") dati_toscana_completi Firenze dati_toscana_completi[dati_toscana_completi.index.get_level_values('Divisioni').isin(['Totale'])].unstack("anno") # + #["2015"] # + # %matplotlib inline (serie_storica["DATI EXPORT"].divide(serie_storica["DATI EXPORT"]["2009"], axis = 0) * 100).to_excel("andamento_export_totale_numero_indice.xlsx") # - serie_storica["DATI EXPORT"].to_excel("andamento_export_totale.xlsx") 
dati_toscana_completi # + moda = ["CB13-Prodotti tessili", u'CB14-Articoli di abbigliamento (anche in pelle e in pelliccia)', u'CB15-Articoli in pelle (escluso abbigliamento) e simili'] serie_storica_moda = dati_toscana_completi[dati_toscana_completi.index.get_level_values('Divisioni').isin(moda)] # - pd.options.display.float_format = '€{:,.2f}'.format serie_storica_moda = serie_storica_moda.unstack("anno") serie_storica_moda.to_excel("andamento_moda.xlsx") pd.options.display.float_format = '{:,.2f}'.format (serie_storica_moda["DATI EXPORT"].divide(serie_storica_moda["DATI EXPORT"]["2009"], axis = 0) * 100).to_excel("andamento_moda_numeri_indice.xlsx") Italia = coeweb("http://www.coeweb.istat.it/predefinite/tutto_paese_merce.asp?livello=ATE07_AT2&riga=MERCE&territorio=S&AG=S") italia_province = [] for item in Italia.territori: if len(item[0]) == 6: italia_province.append(item[0]) # + #italia_province # - Italia.scarica_molti(territorio = italia_province, lista_paesi = ['6'], lista_anni = ["2015"], lista_mesi = ["9"], lista_cumulato = ['C']) Italia_df = Italia.pandas_db Italia_df = Italia_df.reset_index() key = pd.read_excel("aggancio_nuts_ok.xlsx") key Italia_map = pd.merge(Italia_df, key, left_on = "territorio", right_on ="COEWEB") Italia_map.to_excel("Italia_map.xlsx") Italia_map.to_excel("classifica_provinciale.xlsx") Italia_df[Italia_df["Divisioni"] == "Totale"].sort_values("DATI EXPORT", ascending = False).to_excel("classifica.xlsx")#["territorio"].unique() classifica = Italia_df[Italia_df["Divisioni"] == "Totale"].sort_values("DATI EXPORT", ascending = False) classifica = pd.merge(classifica, key, left_on = "territorio", right_on ="COEWEB") classifica.to_excel("classifica.xlsx") # + #Italia_df[Italia_df["Divisioni"] == "Totale"].sort_values("DATI EXPORT", ascending = False) # - Divisioni = Italia_df.groupby("Divisioni").sum() Divisioni.drop("Totale",axis =0, inplace = True) Divisioni["%"] = Divisioni["DATI EXPORT"] / Divisioni["DATI EXPORT"].sum() * 100 # 
Italia_df Divisioni.sort_values("DATI EXPORT", ascending = False).to_excel("Totale_nazionale_Divisioni.xlsx") #Mondo = '1033' Italia.scarica_molti(territorio = italia_province, lista_paesi = ['6', '1033'], lista_anni = ["2015"], lista_mesi = ["9"], lista_cumulato = ['C']) Italia_Export_Regno_Unito_Totale = Italia.pandas_db Italia_Export_Regno_Unito_Totale # + #Italia_Export_Regno_Unito_Totale[{ 'paese' : ['Regno Unito'] }] # - Italia_Export_Regno_Unito_Totale UK = Italia_Export_Regno_Unito_Totale.xs('Regno Unito', level='paese') Mondo = Italia_Export_Regno_Unito_Totale.xs('[MONDO]', level='paese') UK UK / Mondo * 100
old/coeweb_brexit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc=true # <h1>Содержание<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Условный-оператор-if" data-toc-modified-id="Условный-оператор-if-1">Условный оператор if</a></span></li><li><span><a href="#Логический-тип-bool" data-toc-modified-id="Логический-тип-bool-2">Логический тип bool</a></span></li><li><span><a href="#Логические-операции-(&lt;,-==,-...)" data-toc-modified-id="Логические-операции-(<,-==,-...)-3">Логические операции (&lt;, ==, ...)</a></span></li><li><span><a href="#Логические-выражения" data-toc-modified-id="Логические-выражения-4">Логические выражения</a></span></li></ul></div> # - # # Условный оператор и логические выражения # ## Условный оператор if # Для проверки истинности выражений в Python используется оператор `if`. Если условие истинно, то выполниться блок кода, который стоит после `if`. Если ложно, то тот, который после `else`. print("Чему равно 2 * 2?") a = int(input()) if a == 4: print("Правда") else: print("Ложь") # Особенности синтаксиса: # # * После условия в `if` ставится `:` # * После `else` ставится `:` # * Принадлежащий `if`'у блок обозначается отступом в 4 пробела # * Скобки вокруг условия не нужны # > `if` переводиться с английского как `если`, а `else` — `иначе` # `else` можно пропускать. Тогда при ложном условии ничего не выполнится: # + if 2 * 2 == 5: print("2 * 2 = 5") # В выводе ничего нет # - # Блок кода внутри `if` или `else` может содержать сколько угодно кода, в том числе вложенные `if`: if 10 > 5: print("1") print("2") if 5 < 4: print("3") print("4") else: print("5") else: print("6") # По отступам очень удобно отслеживать, какой `else` к какому `if` относится. 
Если не ставить двоеточие или отступы, то программа будет выдавать ошибку и не запустится. if 6 > 5 print("Пропущено двоеточие!") # > `SyntaxError: invalid syntax` = `ОшибкаСинтаксиса: некорректный синтаксис` if 6 > 5: print("Пропущен отступ!") # > `IndentationError: expected an indented block` = `ОшибкаОтступа: ожидался блок с отступом` # Конструкция `elif` позволяет рассматривать множественные случаи без вложенных `if ... else` в ветке `else`. x = 3 if x < 2: print("0 or 1") else: if x < 4: print("2 or 3") else: print("4+") # Тот же код с использованием `elif` выглядит так: x = 3 if x < 2: print("0 or 1") elif x < 4: print("2 or 3") else: print("4+") # ## Логический тип bool # Для обозначения истинности в Python есть **логический тип** — `bool`. У него есть 2 возможных значения: `True` (истина) и `False` (ложь). if True: print("Ветка if") else: print("Ветка else") if False: print("Ветка if") else: print("Ветка else") # Переменные также могут быть логического типа: a = True b = False print(a, b) if a: print("a =", a) if b: print("b =", b) # Понятно, что после `if` может стоять не только значение `True` или `False`. Посмотрим, что делает Python, если после `if` стоит нечто более странное? # # Одни типы данных могут преобразовываться к другим, если это возможно (если Python позволяет). Такое преобразование называется **преобразованием типов** (или приведением типов). # # Например, `0` преобразуется в `False`: if 0: print("Ветка if") else: print("Ветка else") # Любое другое число (в том числе и нецелое), отличное от `0` преобразуется в `True`: if 3.5: print("Ветка if") else: print("Ветка else") if 0.0: print("Ветка if") else: print("Ветка else") if -1: print("Ветка if") else: print("Ветка else") # Пустая строка `""` преобразуется в `False`: if "": print("Ветка if") else: print("Ветка else") # Непустая строка — в `True`: if "abc": print("Ветка if") else: print("Ветка else") # ## Логические операции (<, ==, ...) 
# Посмотрим, как устроены логические операции и операции сравнения в Python. # Список основных операторов сравнения, которые вам понадобятся: # # | Действие | Обозначение в Python | Аналог в C++ | Аналог в Pascal | Приоритет | # | --- | --- | --- | --- | --- | # | Равенство | `a == b` | `a == b` | `a = b` | 4 | # | Неравенство | `a != b` | `a != b` | `a != b` | 4 | # | Меньше | `a < b` | `a < b` | `a < b` | 4 | # | Меньше либо равно | `a <= b` | `a <= b` | `a <= b` | 4 | # | Больше | `a > b` | `a > b` | `a > b` | 4 | # | Больше либо равно | `a >= b` | `a >= b` | `a >= b` | 4 | # Список основных логических операторов, которые вам понадобятся: # # <table> # <thead><tr> # <th>Действие</th> # <th>Обозначение в Python</th> # <th>Аналог в C++</th> # <th>Аналог в Pascal</th> # <th>Приоритет</th> # </tr> # </thead> # <tbody> # <tr> # <td>Логическое отрицание</td> # <td><code>not a</code></td> # <td><code>!a</code></td> # <td><code>not a</code></td> # <td>5</td> # </tr> # <tr> # <td>Логическое и</td> # <td><code>a and b</code></td> # <td><code>a &amp;&amp; b</code></td> # <td><code>a and b</code></td> # <td>6</td> # </tr> # <tr> # <td>Логическое или</td> # <td><code>a or b</code></td> # <td><code>a &vert;&vert; b</code></td> # <td><code>a or b</code></td> # <td>7</td> # </tr> # </tbody> # </table> # Примеры: print(5 > 2) print(not 5 > 2) a = 5 b = 2 print(b == a) print(b != a) a = True b = False print(not b) print(a and b) print(a or b) # ## Логические выражения # Как и во многих других языках программирования, вы можете составлять большие выражения из `True`, `False`, булевых переменных, операций сравнения, логических операций и скобок (для изменения приоритета). 
Например: # + a = 5 b = 2 print(True and a > 2) print(False or b >= 2) if a >= 2 and b >= 2: print("Обратите внимание, в if не нужны скобки вокруг условия") # - x = 4 print(1 < x and x <= 6) # Вообще говоря, сравнения можно соединять в цепочки, например предыдущий пример можно переписать так: x = 4 print(1 < x <= 6) # В таких случаях для читаемости кода полезно упорядочивать сравнения так, чтобы использовать только знаки `<` и `<=`. # Никогда не сравнивайте напрямую с `True` или `False`. Вместо этого используйте саму переменную или её отрицание. То же самое для сложных выражений. # + bool_var = True if bool_var == True: print("Ай-ай") if bool_var: print("Так хорошо") bool_var = False if not bool_var: print("Так тоже хорошо") # - # Конечно, в условии после `if` чаще всего приходится использовать логические операции. В качестве примера, давайте напишем программу, проверяющую является ли данный год високосным: # + year = int(input()) if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0: print("YES") else: print("NO")
crash-course/if-and-logical-expressions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:py3] * # language: python # name: conda-env-py3-py # --- # # Demo: Using VGG with Keras # Below, you'll be able to check out the predictions from an ImageNet pre-trained VGG network with Keras. # ### Load some example images # + # Load our images first, and we'll check what we have from glob import glob import matplotlib.image as mpimg import matplotlib.pyplot as plt image_paths = glob('images/*.jpg') # Print out the image paths print(image_paths) # View an example of an image example = mpimg.imread(image_paths[0]) plt.imshow(example) plt.show() # - # ### Pre-process an image # Note that the `image.load_img()` function will re-size our image to 224x224 as desired for input into this VGG16 model, so the images themselves don't have to be 224x224 to start. # + # Here, we'll load an image and pre-process it from keras.preprocessing import image from keras.applications.vgg16 import preprocess_input import numpy as np i = 0 # Can change this to your desired image to test img_path = image_paths[i] img = image.load_img(img_path, target_size=(224, 224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) # - # ### Load VGG16 pre-trained model # We won't throw out the top fully-connected layer this time when we load the model, as we actually want the true ImageNet-related output. However, you'll learn how to do this in a later lab. The inference will be a little slower than you might expect here as we are not using GPU just yet. # # Note also the use of `decode_predictions` which will map the prediction to the class name. 
# + # Note - this will likely need to download a new version of VGG16 from keras.applications.vgg16 import VGG16, decode_predictions # Load the pre-trained model model = VGG16(weights='imagenet') # Perform inference on our pre-processed image predictions = model.predict(x) # Check the top 3 predictions of the model print('Predicted:', decode_predictions(predictions, top=3)[0]) # - # You should mostly get the correct answers here. In our own run, it predicted a Tusker elephant with an African elephant in second place (the image is of an African elephant), correctly selected a labrador, and very confidently predicted a zebra. You can add some of your own images into the `images/` folder by clicking on the jupyter logo in the top left and see how it performs on your own examples!
Transfer Learning/.ipynb_checkpoints/VGG_Transfer_Learning-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: TensorFlow 2.3 on Python 3.6 (CUDA 10.1)
#     language: python
#     name: python3
# ---

# # Softmax

# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/the-deep-learners/deep-learning-illustrated/blob/master/notebooks/softmax_demo.ipynb)

from math import exp


def softmax(logits):
    """Return the softmax distribution of `logits` as a list summing to 1.

    Generalizes the manual three-element walkthrough below to any number
    of raw scores.
    """
    exps = [exp(v) for v in logits]
    denom = sum(exps)
    return [e / denom for e in exps]


z = [-1.0, 1.0, 5.0]

# Step by step: exponentiate each raw score...
exp(z[0])

exp(z[1])

exp(z[2])

# ...sum the exponentials...
total = exp(z[0]) + exp(z[1]) + exp(z[2])

total

# ...and normalize each exponential by the sum to obtain probabilities.
exp(z[0])/total

exp(z[1])/total

exp(z[2])/total

# The helper reproduces the same three probabilities in a single call.
softmax(z)
notebooks/7-1.softmax_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Import SQL Alchemy from sqlalchemy import create_engine # Import and establish Base for which classes will be constructed from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() # Import modules to declare columns and column data types from sqlalchemy import Column, Integer, String, Float # + deletable=false nbgrader={"checksum": "8af1cb904695d814b2f78e98ad1e76d6", "grade": false, "grade_id": "cell-55d75c21dbb2c337", "locked": false, "schema_version": 1, "solution": true} # Create the Garbage class # YOUR CODE HERE # + deletable=false nbgrader={"checksum": "59c29987b266a9c395699d181bb087a6", "grade": false, "grade_id": "cell-4230ec6995aad3fd", "locked": false, "schema_version": 1, "solution": true} # Create a connection to a SQLite database # YOUR CODE HERE # - # Create the garbage_collection table within the database Base.metadata.create_all(engine) # To push the objects made and query the server we use a Session object from sqlalchemy.orm import Session session = Session(bind=engine) # + deletable=false nbgrader={"checksum": "eea32595c61b7a5ae6d9d0e22b1d18df", "grade": false, "grade_id": "cell-544743e14f0f9a25", "locked": false, "schema_version": 1, "solution": true} # Create some instances of the Garbage class # YOUR CODE HERE # + deletable=false nbgrader={"checksum": "2e13e07533fce1b94f2693a49958066b", "grade": false, "grade_id": "cell-7ead20b8cbdbfb73", "locked": false, "schema_version": 1, "solution": true} # Add these objects to the session # YOUR CODE HERE # + deletable=false nbgrader={"checksum": "d4a7717476260b79dd53ffdcee0b352e", "grade": false, "grade_id": "cell-a66cda367a0b1515", "locked": false, "schema_version": 1, "solution": true} # Update two rows of data # YOUR CODE HERE # + deletable=false 
nbgrader={"checksum": "fc1a996c75d8d3b91c9f7d1c7a74feee", "grade": false, "grade_id": "cell-2cee82afd03d3679", "locked": false, "schema_version": 1, "solution": true} # Delete the row with the lowest weight # YOUR CODE HERE # + deletable=false nbgrader={"checksum": "ecc333c6906d189152b758d27e331e36", "grade": false, "grade_id": "cell-23672c755f55dd5d", "locked": false, "schema_version": 1, "solution": true} # Collect all of the items and print their information # YOUR CODE HERE
2/Activities/04-Par_CruddyDB/Unsolved/Par_CruddyDB.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Import libraries

import numpy as np
import cv2 as cv
import csv
import time
import tensorflow as tf
from tensorflow.python.keras.models import load_model

# ## Setting capture window parameters and loading labels

# +
# Window parameters.
frameWidth = 640
frameHeight = 480
brightness = 180

# Setting threshold limit (tunable): minimum confidence to display a class.
threshold = 0.95
font = cv.FONT_HERSHEY_SIMPLEX

# Loading class labels into dictionary (class-id string -> label text).
with open('labels.csv', mode='r') as infile:
    reader = csv.reader(infile)
    mydict = {rows[0]: rows[1] for rows in reader}
# -

# ## Loading our model

model = load_model("tsc_model.h5")


# ## Preprocessing of frame

# +
def preprocess(img):
    """Resize to 32x32, grayscale, equalize and normalize a BGR frame.

    Returns an array of shape (1, 32, 32, 1) ready for the classifier.
    """
    img = cv.resize(img, (32, 32))
    # Converting to grayscale.
    img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    img = cv.equalizeHist(img)
    # Normalize image to [0, 1].
    img = img / 255
    # Reshaping image to (1, 32, 32, 1).
    img = img[np.newaxis, :, :, np.newaxis]
    return img
# -

# ## Importing OpenVINO libraries for optimization of model

import keras2onnx
import onnx
import onnxruntime

# ## Converting model to ONNX format

# +
# Convert to onnx model.
onnx_model = keras2onnx.convert_keras(model, model.name)
temp_model_file = 'tsc.onnx'
# Saving the model.
keras2onnx.save_model(onnx_model, temp_model_file)
sess = onnxruntime.InferenceSession(temp_model_file)
# -

# ## Importing Inference Engine libraries

from openvino.inference_engine import IECore
from openvino.inference_engine import IENetwork

# ## Using optimized model from OpenVINO to generate output

# +
# Loading model for generating inference
def load_to_IE(model):
    """Load the optimized IR network onto the CPU plugin and return it.

    NOTE(review): the `model` argument is ignored — the IR paths below are
    hard-coded; consider deriving them from `model` instead.
    """
    ie = IECore()
    net = ie.read_network(model=r"C:\Program Files (x86)\Intel\openvino_2021.2.185\deployment_tools\model_optimizer\tsc.xml",
                          weights=r"C:\Program Files (x86)\Intel\openvino_2021.2.185\deployment_tools\model_optimizer\tsc.bin")
    exec_net = ie.load_network(network=net, device_name="CPU")
    return exec_net


def do_inference(exec_net, image):
    """Run one synchronous inference and return the raw output dict."""
    input_blob = next(iter(exec_net.inputs))
    return exec_net.infer({input_blob: image})


# load our model
tsc_net = load_to_IE("tsc_model.h5")

# We need dynamically generated key for fetching output tensor
tsc_outputs = list(tsc_net.outputs.keys())

# Taking video input from camera.
cap = cv.VideoCapture(0)
prev_frame_time = 0
new_frame_time = 0
cap.set(3, frameWidth)
cap.set(4, frameHeight)
cap.set(10, brightness)

while True:
    # If videocapture fails, exit else continue.
    ret, image = cap.read()
    if not ret:
        break

    # Taking video frame and converting into numpy array.
    img = np.asarray(image)
    # Preprocessing frame by calling method.
    img = preprocess(img)

    new_frame_time = time.time()
    fps = 1 / (new_frame_time - prev_frame_time)
    prev_frame_time = new_frame_time
    # converting the fps into integer (first frame's value is meaningless
    # since prev_frame_time starts at 0, as in the original).
    fps = int(fps)

    # Formatting for output display text.
    cv.putText(image, "CLASS: ", (20, 35), font, 0.75, (0, 0, 255), 2, cv.LINE_AA)
    cv.putText(image, "PROBABILITY: ", (20, 75), font, 0.75, (255, 0, 0), 2, cv.LINE_AA)
    cv.putText(image, "FPS: ", (20, 115), font, 0.75, (0, 255, 0), 2, cv.LINE_AA)

    # Inference
    output = do_inference(tsc_net, image=img)

    # Storing label with maximum probability and probability score.
    classIndex = np.argmax(output[tsc_outputs[0]], axis=1)
    confidence = np.amax(output[tsc_outputs[0]])
    print(classIndex, confidence)

    # If probability score satisfies threshold limit, show output.
    if confidence > threshold:
        cv.putText(image, str(classIndex) + " " + mydict[str(max(classIndex))],
                   (120, 35), font, 0.75, (0, 0, 255), 2, cv.LINE_AA)
        cv.putText(image, str(round(confidence, 2)), (180, 75), font, 0.75, (255, 0, 0), 2, cv.LINE_AA)
        cv.putText(image, str(fps), (180, 115), font, 0.75, (0, 255, 0), 2, cv.LINE_AA)

    cv.imshow("Result", image)

    # BUGFIX: the original stored `k = cv.waitKey(1) & 0xFF == ord('q')`
    # (a bool, since == binds tighter than &... the whole expression is a
    # comparison result) and then tested `k == 10`, which can never be true,
    # so pressing 'q' never exited the loop.
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
# BUGFIX: destroyAllWindows belongs to the cv module; the original called it
# on the VideoCapture object, which raises AttributeError on exit.
cv.destroyAllWindows()
# -
code/TSC_OpenVINO.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %load_ext autoreload
# %autoreload 2

import os
import jax
import timecast as tc
from timecast.utils.experiment import experiment
import pandas as pd
import jax.numpy as jnp
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model, Model
from tqdm.notebook import tqdm
import binpacking
import pickle

# Shot ids held out for evaluation.
test_keys = np.load('data/fusion/FRNN_1d_sample/test_list.npy')

# Mean squared error between two sequences (converted to jax arrays).
MSE = lambda true, pred: jnp.square(jnp.asarray(true) - jnp.asarray(pred)).mean()

# # PredictLast

@experiment("shot", test_keys)
def runner(shot):
    # Imports are repeated inside the function because @experiment runs it
    # in separate worker processes (see runner.run(processes=...) below).
    import jax
    import pickle
    import jax.numpy as jnp
    from timecast.modules import PredictLast

    MSE = lambda true, pred: jnp.square(jnp.asarray(true) - jnp.asarray(pred)).mean()
    pl = PredictLast()
    data = pickle.load(open(f"data/fusion/original/{shot}.pkl", "rb"))

    # Locked mode amplitude
    xs = data[:, 3]
    # xs: model inputs; ts: next-step values; s: 30-step-ahead ground truth.
    xs, ts, s = xs[:-30], xs[1:-29], xs[30:]

    def loop(module, xy):
        x, y = xy
        return module, module(x)

    ar, ys = jax.lax.scan(loop, pl, (xs, ts))
    return {
        "shot": shot,
        "mse": MSE(s.squeeze(), ys.squeeze())
    }

pl_results = runner.run(processes=50, tqdm=tqdm)

np.mean([shot["mse"] for shot in pl_results])

# Re-key the per-shot results as shot id -> mse.
pl_results = {shot["shot"]: shot["mse"] for shot in pl_results}

# # AR

@experiment("shot", test_keys)
@experiment("history_len", [200])
@experiment("learning_rate", [1e-5])
def runner(shot, history_len, learning_rate):
    import jax
    import pickle
    import jax.numpy as jnp
    from timecast.modules import AR
    from timecast.optim import SGD, NormThreshold

    MSE = lambda true, pred: jnp.square(jnp.asarray(true) - jnp.asarray(pred)).mean()
    ar = AR(history_len, 1, 1)
    data = pickle.load(open(f"data/fusion/original/{shot}.pkl", "rb"))

    # Locked mode amplitude
    xs, ts, s = data[:-30, 3], data[1:-29, 3], data[30:, 3]

    sgd = SGD(learning_rate=learning_rate)
    # Clip kernel and bias parameter norms after each update.
    nl_k = NormThreshold(0.03, filter=lambda x: "kernel" in x)
    nl_b = NormThreshold(1e-4, filter=lambda x: "bias" in x)

    def loop(module, xy):
        # Predict first, then update — so `pred` is an online (pre-update) prediction.
        x, y = xy
        pred = module(x)
        module = sgd(module, x, y)
        module = nl_k(module)
        module = nl_b(module)
        return module, pred

    ar, ys = jax.lax.scan(loop, ar, (xs, ts))
    return {
        "shot": shot,
        "history_len": history_len,
        "learning_rate": learning_rate,
        "mse": MSE(s.squeeze(), ys.squeeze()),
    }

ar_results = runner.run(processes=50, tqdm=tqdm)

ar_df = pd.DataFrame.from_dict(ar_results)
ar_df = ar_df.astype(float)
ar_df.pivot_table(values="mse", index=["history_len"], columns=["learning_rate"])

ar_results = {shot["shot"]: shot["mse"] for shot in ar_results}

# Shots where AR beats PredictLast.
less = [shot for shot in ar_results if ar_results[shot] < pl_results[shot]]

less

less[0]

# # AR on PL resid

# +
@experiment("shot", test_keys)
@experiment("history_len", [5, 10, 20])
@experiment("learning_rate", [1e-7])
@experiment("thresh", [0.1])
def runner(shot, history_len, learning_rate, thresh):
    import jax
    import pickle
    import jax.numpy as jnp
    from timecast.modules import AR
    from timecast.optim import SGD, NormThreshold

    MSE = lambda true, pred: jnp.square(jnp.asarray(true) - jnp.asarray(pred)).mean()
    ar = AR(history_len, 1, 1)
    data = pickle.load(open(f"data/fusion/original/{shot}.pkl", "rb"))

    # Locked mode amplitude; here ts is the one-step residual (delta), so the
    # AR model learns the residual on top of a PredictLast baseline.
    xs, ts, s = data[:-30, 3], data[1:-29, 3] - data[:-30, 3], data[30:, 3]

    sgd = SGD(learning_rate=learning_rate)
    nl_k = NormThreshold(thresh, filter=lambda x: "kernel" in x)
    nl_b = NormThreshold(1e-4, filter=lambda x: "bias" in x)

    def loop(module, xy):
        x, y = xy
        pred = module(x)
        module = sgd(module, x, y)
        module = nl_k(module)
        module = nl_b(module)
        return module, pred

    ar, ys = jax.lax.scan(loop, ar, (xs, ts))
    # Add the PredictLast baseline back onto the predicted residuals.
    ys = ys.squeeze()
    ys += data[:-30, 3].squeeze()

    # pickle.dump({
    #     "shot": shot,
    #     "history_len": history_len,
    #     "learning_rate": learning_rate,
    #     "mse": MSE(s.squeeze(), ys.squeeze()),
    #     "pl_mse": MSE(s.squeeze(), xs.squeeze()),
    #     "xs": xs.squeeze(),
    #     "ts": ts.squeeze(),
    #     "ys": ys.squeeze(),
    #     "s": s.squeeze()
    # }, open(f"data/fusion/ar/{shot}.pkl", "wb"))

    return {
        "shot": shot,
        "history_len": history_len,
        "learning_rate": learning_rate,
        "thresh": thresh,
        "mse": MSE(s.squeeze(), ys.squeeze()),
    }
# -

resid_results = runner.run(processes=50, tqdm=tqdm)

resid_df = pd.DataFrame.from_dict(resid_results)
resid_df = resid_df.astype(float)
resid_df.pivot_table(values="mse", index=["history_len"], columns=["learning_rate", "thresh"])

# (duplicate of the cell above — kept as in the original notebook)
resid_df = pd.DataFrame.from_dict(resid_results)
resid_df = resid_df.astype(float)
resid_df.pivot_table(values="mse", index=["history_len"], columns=["learning_rate", "thresh"])

# Keep only the (history_len=10, thresh=0.1) configuration, keyed by shot id.
resid_results = {shot["shot"]: shot["mse"] for shot in resid_results if (shot["history_len"] == 10 and shot["thresh"] == 0.1)}

shot = 156680
data = pickle.load(open(f"data/fusion/ar/{shot}.pkl", "rb"))
plt.plot(data["s"])
plt.plot(data["xs"])
# plt.plot(data["ys"])
print(MSE(data["s"], data["xs"]), MSE(data["s"], data["ys"]))

less = [shot for shot in resid_results if resid_results[shot] < pl_results[shot]]
more = [shot for shot in resid_results if resid_results[shot] >= pl_results[shot]]
thresh = [shot for shot in resid_results if (resid_results[shot] - pl_results[shot] > 0.1)]

diff = [resid_results[shot] - pl_results[shot] for shot in resid_results]
diff = np.sort(diff)

diff

len(less)

len(more)

less
notebooks/2020-08-03 Comparing PL and AR.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: pytorch
#     language: python
#     name: pytorch
# ---

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision

# # Model

class Autoencoder(nn.Module):
    """Convolutional autoencoder for 600x600 3-channel images.

    Encoder: 4 conv stages with 2x max-pooling, 600 -> 300 -> 150 -> 75 -> 38.
    Decoder: mirrors the encoder; the first upsample targets size 75 explicitly
    because doubling 38 would give 76 and break the mirror symmetry.
    """

    def __init__(self):
        super(Autoencoder, self).__init__()
        self.encoder1 = nn.Conv2d(3, 16, 3, padding=1)
        self.encoder2 = nn.Conv2d(16, 8, 3, padding=1)
        self.encoder3 = nn.Conv2d(8, 4, 3, padding=1)
        self.encoder4 = nn.Conv2d(4, 4, 3, padding=1)
        # ceil_mode so odd sizes round up (75 -> 38 rather than 37).
        self.pool = nn.MaxPool2d(2, ceil_mode=True)
        self.up1 = nn.Upsample(75, mode='nearest')
        self.up = nn.Upsample(scale_factor=2, mode='nearest')
        self.decoder1 = nn.Conv2d(4, 4, 3, padding=1)
        self.decoder2 = nn.Conv2d(4, 4, 3, padding=1)
        self.decoder3 = nn.Conv2d(4, 8, 3, padding=1)
        self.decoder4 = nn.Conv2d(8, 16, 3, padding=1)
        self.decoder5 = nn.Conv2d(16, 3, 3, padding=1)

    def forward(self, x):
        x = F.relu(self.encoder1(x))
        x = self.pool(x)
        x = F.relu(self.encoder2(x))
        x = self.pool(x)
        x = F.relu(self.encoder3(x))
        x = self.pool(x)
        x = F.relu(self.encoder4(x))
        encoder = self.pool(x)

        x = F.relu(self.decoder1(encoder))
        x = self.up1(x)
        x = F.relu(self.decoder2(x))
        x = self.up(x)
        x = F.relu(self.decoder3(x))
        x = self.up(x)
        x = F.relu(self.decoder4(x))
        x = self.up(x)
        decoder = F.relu(self.decoder5(x))
        return decoder

model = Autoencoder()
model

# Shape smoke test: the output must match the 600x600 input.
with torch.no_grad():
    model.eval()
    img = torch.rand(6,3,600,600)
    print(model(img).shape)

# # Dataset

# +
import os
import glob
from torch.utils.data import DataLoader, Dataset
from PIL import Image


class myDataset(Dataset):
    """Folder-of-images dataset returning (image, image) pairs for reconstruction."""

    def __init__(self, image_path, transform=None):
        self.image_path = image_path
        self.transform = transform
        self.image_list = glob.glob(image_path + '/*')

    def __len__(self):
        return len(self.image_list)

    def __getitem__(self, idx):
        file_name = self.image_list[idx]
        image = Image.open(file_name)
        if self.transform:
            image = self.transform(image)
        # Input and target are the same image (autoencoding task).
        return image, image


def collate_fn(batch):
    return tuple(zip(*batch))
# -

# +
import torchvision.transforms as transforms

transform = transforms.Compose([
    transforms.Resize((600,600)),
    transforms.Grayscale(3),
    transforms.ToTensor()
])

train_path = r'C:\Users\gjust\Documents\Github\data\fruit\apple'
test_path = r'C:\Users\gjust\Documents\Github\data\fruit\apple'

trainset = myDataset(train_path, transform=transform)
testset = myDataset(test_path, transform=transform)

trainloader = DataLoader(trainset, batch_size=4)
testloader = DataLoader(testset, batch_size=4)

# BUGFIX: Python 3 iterators have no .next() method — `iter(trainloader).next()`
# raised AttributeError. Use the built-in next().
inputs, outputs = next(iter(trainloader))
# -

# # GPU

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
print(device)

# # Optimizer

optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.MSELoss()
loss_func

# # Training

# +
EPOCH = 5

train_epoch_loss = []
# FIX: renamed from the misspelled `val_peoch_loss` (the name is not used
# anywhere else in this notebook).
val_epoch_loss = []

for e in range(EPOCH):
    # Train
    model.train()
    train_iter_loss = []
    for i, (images, labels) in enumerate(trainloader):
        images, labels = images.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(images)
        loss = loss_func(outputs, labels)
        loss.backward()
        optimizer.step()

        train_iter_loss.append(loss.item())
    train_epoch_loss.append(sum(train_iter_loss))
    print(f'Train Epoch[{e+1}/{EPOCH}] / Loss : {loss}')

    # Validation runs on the final epoch only.
    # FIX: compare against EPOCH instead of the hard-coded 5 so the check
    # still fires if EPOCH is changed; run under no_grad so evaluation does
    # not build autograd graphs.
    if e+1 == EPOCH:
        model.eval()
        val_iter_loss = []
        with torch.no_grad():
            for i, (images, labels) in enumerate(testloader):
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)
                loss = loss_func(outputs, labels)
                val_iter_loss.append(loss.item())
        val_epoch_loss.append(sum(val_iter_loss))
        print(f'Validation Epoch[{e+1}/{EPOCH}] / Loss : {loss}')
4. Project/Abnormal/Abnormal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Before you start # # 1. Please copy the code from the previous assignment (week 2) into a separate file `blocks.py`. Make sure it resides in the same folder as this notebook. It should contain the implementation of the building blocks. # 2. Download the files from [here](http://yann.lecun.com/exdb/mnist/) and place them next to this notebook. You should have 4 files: `t10k-images-idx3-ubyte`, `t10k-labels-idx1-ubyte`, `train-images-idx3-ubyte`, and `train-labels-idx1-ubyte`. # 3. All functions should be implemented using [**NumPy**](https://docs.scipy.org/doc/). # The goal of this assignment is to supply you with the **building blocks** of **neural networks** (NNs). In this notebook, we will cover the main aspects of NNs, such as **Backpropagation** and **Optimization Methods**. # You will understand how **Convolutional Neural Networks** and the basics of **image filtering** work. We will implement matrix convolution as well as the convolutional layer from scratch. # # # ### Note # Some of the concepts below have not (yet) been discussed during the lecture. These will be discussed further during the next lectures. # # Table of contents # # * [1. Fully-Connected Neural Networks](#1.-Fully-Connected-Neural-Networks) # * [1.1 Backpropagation](#1.1-Backpropagation) # * [1.2 Dense layer](#1.2-Dense-layer) # * [1.3 ReLU nonlinearity](#1.3-ReLU-nonlinearity) # * [1.4 Sigmoid nonlinearity](#1.4-Sigmoid-nonlinearity) # * [1.5 Sequential model](#1.5-Sequential-model) # * [1.6 NLL loss function](#1.6-NLL-loss-function) # * [1.7 $L_2$ regularization](#1.7-$L_2$-regularization) # * [1.8 SGD optimizer](#1.8-SGD-optimizer) # * [2. Experiments](#2.-Experiments) # * [3. 
Convolutions](#3.-Convolutions) # * [3.1 Matrix convolution](#3.1-Matrix-convolution) # * [3.2 Basic kernels](#3.2-Matrix-convolution) # * [3.3 Convolutional layer](#3.3-Convolutional-layer) # * [3.4 Pooling layer](#3.4-Pooling-layer) # * [3.5 Flatten](#3.5-Flatten) # * [4. Image Experiments](#4.-Image-Experiments) # + from __future__ import print_function, absolute_import, division import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import blocks import automark as am # fill in you student number as your username username = 'YOUR USERNMAE' # to check your progress, you can run this function am.get_progress(username) # - # # 1. Fully-Connected Neural Networks # ## 1.1 Backpropagation # # Neural networks consist of several layers. Each layer is a function of several parameters that we call weights: $h = f(x, w)$ where $h$ is the layer, $x$ is a vector of inputs and w is a vector of weights. # In the neural network, the output of one layer is the input for the next layer. This means we can chain the different functions. The whole neural network $F$ then becomes a composition of different functions. # $$ # F = f_k \circ f_{k-1} \circ \dots \ f_1\\ # h_1 = f_1(x, w_1)\\ # h_2 = f_2(h_1, w_2)\\ # \dots \\ # \dot{y} = f_k(h_{k-1}, w_k) # $$ # In the above functions, $w_1$ and $w_2$ are different **weight vectors** that apply to the different layers $h_1$ and $h_2$. The weights of a neural network basically determine the effect certain outputs have on the next layer. (Please note: When searching for these terms on the internet, be aware that **weights** are sometimes called **parameters**, and $w$ is sometimes denoted as $\theta$.) # # # At the end of every neural network, there is a loss function. A loss function calculates for the performance of the Neural Network. The calculation of this score depends on the task at hand. For classification tasks the loss function would calculate the difference between prediction and the correct value. 
In this case the function is a summation of this difference for each data point. Calculating this difference can, again, be done in different ways. One example that we have discussed in class is the squared-loss for linear regression. (Here, the difference between predicted and correct classification is squared so positive and negative differences don't cancel eachother.) # $$\mathcal{L} = \tfrac{1}{2}\sum_{n = 1}^N (y_n - \dot{y}_n)^2$$ # Here, $n$ denotes the different datapoints, $y_n$ and $\dot{y}$ represent the correct and the predicted value for that data point respectively. # # # # The smaller the outcome of this loss function, the better the Neural Network predicts the data. Therefore, we concentrate on **minimizing the loss function** as a means for **training** the neural network. # # # Training is done with [Gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). Another word for **gradient** is **derivative**. We use derivatives to update the weights of the neural network to make better predictions. # The weights of the $k$-th layer are updated according to the following scheme: # $$ # w_k \leftarrow w_k - \gamma \frac{\partial \mathcal{L}}{\partial w_k} # $$ # * $\partial f(x)/\partial x$ means the partial derivative of $f(x)$ with respect to $x$. # * Hyperparameter $\gamma$ is called the *learning rate* (You'll learn more about hyperparameters later. For now, the only thing you'll have to know is that the value of a hyperparameter is set by you.) # * Note that $k$ denotes a layer and $n$ denotes a data point. 
# # # The computation of $\partial \mathcal{L}/\partial w_k$ is done using the [chain rule](https://en.wikipedia.org/wiki/Chain_rule): # $$ # \frac{\partial \mathcal{L}}{\partial w_k} = # \frac{\partial \mathcal{L}}{\partial h_k} # \frac{\partial h_k}{\partial w_k} = # \frac{\partial \mathcal{L}}{\partial h_{k+1}} # \frac{\partial h_{k+1}}{\partial h_k} # \frac{\partial h_k}{\partial w_k} = \dots # $$ # # # Therefore, for each layer, we can calculate the following expressions: # * $h_k = f_k(h_{k-1}, w_k)$ - the forward pass # * $\partial h_{k}/\partial h_{k-1}$ - the partial derivative of the output with respect to the input # * $\partial h_{k}/\partial w_k$ - the partial derivative of the output with respect to the parameters # # # This whole process of updating weights by calculating the gradient is called [Backpropagation](https://www.iro.umontreal.ca/~vincentp/ift3395/lectures/backprop_old.pdf). Click [here](https://www.youtube.com/watch?v=Ilg3gGewQ5U) for a pretty good video explaining backpropagation. # ## 1.2 Dense layer # A dense Layer is the basic layer of a neural network. (Other terms for dense layer are fully-connected layer and multiplicative layer.) A dense layer transforms an input matrix of size `(n_objects, d_in)` to a matrix of size `(n_objects, d_out)` (where d stands for dimensions) by performing the following operation: # $$ # H = XW + b # $$ # Here $H$ represents the function of the dense layer, $X$ is the input matrix, $W$ is the weight matrix for this layer and $b$ is the bias. The bias $b$ is a vector. # # A more detailed version of this function is: # $$ # H_{nk} = \sum\limits_{i=1}^{d_{in}} X_{ni}W_{ik} + b_k # $$ # where $n$ denotes again a single data object and $k$ the $k^{th}$ layer. # # **Example**: # # You have a neural network of just 1 layer. The inputs are points in a 3D space and you want to classify this point as either $-1$ or $1$. # You have $75$ objects in your training set. # # Therefore, $X$ has shape $75 \times 3$. 
# $H$ has shape $75 \times 1$. Weight $W$ of the layer has shape $3 \times 1$.
#
# **NOTE: "Dense Layer" is linear. So its mapping is exactly the same as of "Linear" function from the previous week but `n_out` is not restricted to 1. We use name "Dense" in order to distinquish between these two functions**

def dense_forward(x_input, W, b):
    """Perform the mapping of the input
    # Arguments
        x_input: input of a dense layer - np.array of size `(n_objects, n_in)`
        W: np.array of size `(n_in, n_out)`
        b: np.array of size `(n_out,)`
    # Output
        the output of a dense layer
        np.array of size `(n_objects, n_out)`
    """
    # H = XW + b; b broadcasts over the batch (rows) dimension.
    output = np.dot(x_input, W) + b
    return output

am.test_student_function(username, dense_forward, ['x_input', 'W', 'b'])

# Now, you'll implement a backward pass. As described above, this is calculated with the gradient. To calculate the gradient, we'll use the chain rule:
# $$
# \frac{\partial \mathcal{L}}{\partial X} =
# \frac{\partial \mathcal{L}}{\partial H}
# \frac{\partial H}{\partial X}
# $$

def dense_grad_input(x_input, grad_output, W, b):
    """Calculate the partial derivative of the loss
    with respect to the input of the layer
    # Arguments
        x_input: input of a dense layer - np.array of size `(n_objects, n_in)`
        grad_output: partial derivative of the loss functions with
            respect to the ouput of the dense layer
            np.array of size `(n_objects, n_out)`
        W: np.array of size `(n_in, n_out)`
        b: np.array of size `(n_out,)`
    # Output
        the partial derivative of the loss
        with respect to the input of the layer
        np.array of size `(n_objects, n_in)`
    """
    # dL/dX = dL/dH * W^T, since H = XW + b.
    grad_input = np.dot(grad_output, W.T)
    return grad_input

am.test_student_function(username, dense_grad_input, ['x_input', 'grad_output', 'W', 'b'])

# Now, instead of computing the gradient with respect to the input, we'll calculate the gradient with respect to the weights and to the bias:
# $$
# \frac{\partial \mathcal{L}}{\partial W} =
# \frac{\partial \mathcal{L}}{\partial H}
# \frac{\partial H}{\partial W} \\
# \frac{\partial \mathcal{L}}{\partial b} =
# \frac{\partial \mathcal{L}}{\partial H}
# \frac{\partial H}{\partial b} \\
# $$

def dense_grad_W(x_input, grad_output, W, b):
    """Calculate the partial derivative of the loss
    with respect to W parameter of the layer
    # Arguments
        x_input: input of a dense layer - np.array of size `(n_objects, n_in)`
        grad_output: partial derivative of the loss functions with
            respect to the ouput of the dense layer
            np.array of size `(n_objects, n_out)`
        W: np.array of size `(n_in, n_out)`
        b: np.array of size `(n_out,)`
    # Output
        the partial derivative of the loss
        with respect to W parameter of the layer
        np.array of size `(n_in, n_out)`
    """
    # dL/dW = X^T * dL/dH, summing contributions over the batch.
    grad_W = np.dot(x_input.T, grad_output)
    return grad_W

am.test_student_function(username, dense_grad_W, ['x_input', 'grad_output', 'W', 'b'])

def dense_grad_b(x_input, grad_output, W, b):
    """Calculate the partial derivative of the loss
    with respect to b parameter of the layer
    # Arguments
        x_input: input of a dense layer - np.array of size `(n_objects, n_in)`
        grad_output: partial derivative of the loss functions with
            respect to the ouput of the dense layer
            np.array of size `(n_objects, n_out)`
        W: np.array of size `(n_in, n_out)`
        b: np.array of size `(n_out,)`
    # Output
        the partial derivative of the loss
        with respect to b parameter of the layer
        np.array of size `(n_out,)`
    """
    # dH/db is the identity per output unit, so sum dL/dH over the batch.
    grad_b = grad_output.sum(axis=0)
    return grad_b

am.test_student_function(username, dense_grad_b, ['x_input', 'grad_output', 'W', 'b'])

am.get_progress(username)

# ### Dense Layer Class
#
# Here, we define a basic class for the dense layer. You will use this in the Experiments sections below. You don't need to know how this works; we implement it for you, but it is based on the functions you've written above.
class Layer(object):
    # Base class: an identity layer. Subclasses override forward/backward
    # and expose their parameters via get_params / get_params_gradients.
    def __init__(self):
        self.training_phase = True
        self.output = 0.0

    def forward(self, x_input):
        self.output = x_input
        return self.output

    def backward(self, x_input, grad_output):
        return grad_output

    def get_params(self):
        return []

    def get_params_gradients(self):
        return []


class Dense(Layer):
    # Fully-connected layer: delegates to the dense_* functions defined above.
    def __init__(self, n_input, n_output):
        super(Dense, self).__init__()
        # Randomly initializing the weights from normal distribution
        self.W = np.random.normal(scale=0.01, size=(n_input, n_output))
        self.grad_W = np.zeros_like(self.W)
        # initializing the bias with zero
        self.b = np.zeros(n_output)
        self.grad_b = np.zeros_like(self.b)

    def forward(self, x_input):
        self.output = dense_forward(x_input, self.W, self.b)
        return self.output

    def backward(self, x_input, grad_output):
        # get gradients of weights
        self.grad_W = dense_grad_W(x_input, grad_output, self.W, self.b)
        self.grad_b = dense_grad_b(x_input, grad_output, self.W, self.b)
        # propagate the gradient backwards
        return dense_grad_input(x_input, grad_output, self.W, self.b)

    def get_params(self):
        return [self.W, self.b]

    def get_params_gradients(self):
        return [self.grad_W, self.grad_b]


# Quick smoke test of the Dense layer on random input.
dense_layer = Dense(2, 1)
x_input = np.random.random((3, 2))
y_output = dense_layer.forward(x_input)
print(x_input)
print(y_output)

# ## 1.3 ReLU nonlinearity
#
# The dense layer, from previous section, is linear. Combining several linear (dense) layers is always equivalent to a single dense layer. Here is the mathematical proof for this:
# $$
# H_1 = XW_1 + b_1\\
# H_2 = H_1W_2 + b_2\\
# H_2 = (XW_1 + b_1)W_2 + b_2 = X(W_1W_2) + (b_1W_2 + b_2) = XW^* + b^*
# $$
#
# For this reason, we also need non-linear layers. Non-linear layers ($f$ in the following) are mostly element-wise and hold the following:
# $$
# H_1 = XW_1 + b_1\\
# H_2 = f(H_1)W_2 + b_2\\
# H_2 = f(XW_1 + b_1)W_2 + b_2 \neq XW^* + b^*
# $$
#
# A popular example of a simple non-linear layer is **ReLU** (Rectified Linear Unit).
# ReLU doesn't have weights that can be optimized like a dense layer.
# $$
# \text{ReLU}(x) = \max(0, x)
# $$
#
# <img src="./src/relu.png" width="500">
#
# **Example**
#
# $$
# \text{ReLU} \Big(
# \begin{bmatrix}
# 1 & -0.5 \\
# 0.3 & 0.1
# \end{bmatrix}
# \Big) =
# \begin{bmatrix}
# 1 & 0 \\
# 0.3 & 0.1
# \end{bmatrix}
# $$
#
# Next, you will implement the forward pass and backward pass (gradient) for ReLU.

def relu_forward(x_input):
    """relu nonlinearity
    # Arguments
        x_input: np.array of size `(n_objects, n_in)`
    # Output
        the output of relu layer
        np.array of size `(n_objects, n_in)`
    """
    # Element-wise max(0, x).
    output = np.maximum(x_input, 0)
    return output

# +
#test forward pass for ReLU, see example above
x_input = np.array([[1, -0.5], [0.3, 0.1]])
print(relu_forward(x_input))
# -

am.test_student_function(username, relu_forward, ['x_input'])

def relu_grad_input(x_input, grad_output):
    """relu nonlinearity gradient.
    Calculate the partial derivative of the loss
    with respect to the input of the layer
    # Arguments
        x_input: np.array of size `(n_objects, n_in)`
        grad_output: np.array of size `(n_objects, n_in)`
    # Output
        the partial derivative of the loss
        with respect to the input of the layer
        np.array of size `(n_objects, n_in)`
    """
    # ReLU passes the gradient through where the input was positive and
    # blocks it elsewhere (derivative taken as 0 at x == 0).
    grad_input = grad_output * (x_input > 0)
    return grad_input

am.test_student_function(username, relu_grad_input, ['x_input', 'grad_output'])

class ReLU(Layer):
    def forward(self, x_input):
        self.output = relu_forward(x_input)
        return self.output

    def backward(self, x_input, grad_output):
        return relu_grad_input(x_input, grad_output)

# ## 1.4 Sigmoid nonlinearity

class Sigmoid(Layer):
    # Delegates to the student's blocks.py implementation from week 2.
    def forward(self, x_input):
        self.output = blocks.sigmoid_forward(x_input)
        return self.output

    def backward(self, x_input, grad_output):
        return blocks.sigmoid_grad_input(x_input, grad_output)

# ## 1.5 Sequential model
# In order to make the work with layers more comfortable, we create `SequentialNN` - a class, which stores all its layers and performs the basic
# manipulations. Again, this is for the experiments below and you don't need to know how this works.

class SequentialNN(object):
    # Container chaining layers: forward feeds each layer's output into the
    # next one; backward replays the cached inputs in reverse order.
    def __init__(self, *layers):
        self.layers = layers
        self.training_phase = True

    def set_training_phase(self, is_training=True):
        self.training_phase = is_training
        for layer in self.layers:
            layer.training_phase = is_training

    def forward(self, x_input):
        self.output = x_input
        for layer in self.layers:
            self.output = layer.forward(self.output)
        return self.output

    def backward(self, x_input, grad_output):
        # Each layer's backward needs the input it saw on the forward pass:
        # the previous layer's cached output (or x_input for the first layer).
        inputs = [x_input] + [l.output for l in self.layers[:-1]]
        for input_, layer_ in zip(inputs[::-1], self.layers[::-1]):
            grad_output = layer_.backward(input_, grad_output)

    def get_params(self):
        params = []
        for layer in self.layers:
            params.extend(layer.get_params())
        return params

    def get_params_gradients(self):
        grads = []
        for layer in self.layers:
            grads.extend(layer.get_params_gradients())
        return grads

# Here is the simple neural network. It takes an input of shape `(Any, 10)` and passes it through `Dense(10, 4)`, `ReLU` and `Dense(4, 1)`. The output is a batch of size `(Any, 1)`.
# ```
# INPUT
#   |
# Dense(10, 4)
#   |
# ReLU
#   |
# Dense(4, 1)
#   |
# OUTPUT
# ```

nn = SequentialNN(
    Dense(10, 4),
    ReLU(),
    Dense(4, 1),
    Sigmoid()
)

nn.forward(np.ones([2, 10]))

# ## 1.6 NLL loss function
# Here we will define the loss functions. Each loss should be able to compute its value and compute its gradient with respect to the input. We have implemented these functions (e.g. forward, backward) for you.

class NLL(object):
    # Negative log-likelihood loss; delegates to the student's blocks.py.
    def forward(self, target_pred, target_true):
        self.output = blocks.nll_forward(target_pred, target_true)
        return self.output

    def backward(self, target_pred, target_true):
        return blocks.nll_grad_input(target_pred, target_true)

# ## 1.7 $L_2$ regularization
#
# Loss functions update the weights of your model to improve your predictions. We do this by minimizing the loss function.
However, up until now this loss function did not take into account the complexity of your model. By complexity we mean the number of parameters that your model stores. We do want to take complexity into account because complex models can perform poorly on test data, while performing excellently on train data. # # To penalize the complexity of the model, we introduce a regularizer. You'll learn more about regularizers in the lectures, but the general idea is that we take the values of the weights into account with the loss function. High values for weights are indicators of complexity. # # There are several ways of adding regularization to a model. We will implement [$L_2$ regularization](http://www.deeplearningbook.org/contents/regularization.html) also known as weight decay: # # The key idea of $L_2$ regularization is to add an extra term to the loss functions: # $$ # \mathcal{L}^* = \mathcal{L} + \frac{\lambda}{2} \|w\|^2_2 # $$ # # The part we added to the loss function is called the regularization function. # * $\lambda$ is named weight decay. It is a hyperparameter that determines the influence of the regularization on the outcome of the loss function. # * $\|w\|^2_2$ is the squared [Euclidean norm](https://en.wikipedia.org/wiki/Euclidean_distance) where $\|w\|^2_2 = \|w_1\|^2_2 + \|w_2\|^2_2 + \dots + \|w_k\|^2_2$. 
# This function in more detail becomes:
#
# $$
# \mathcal{L}^* = \mathcal{L} + \frac{\lambda}{2} \sum\limits_{m=1}^k \|w_m\|^2_2
# $$
#
# Because we use a different loss function, the updating of the weights is also slightly changed:
#
# $$
# w_m \leftarrow w_m - \gamma \frac{\partial \mathcal{L}^*}{\partial w_m}\\
# \frac{\partial \mathcal{L}^*}{\partial w_m} = \frac{\partial \mathcal{L}}{\partial w_m} + \lambda w_m\\
# w_m \leftarrow w_m - \gamma \Big(\frac{\partial \mathcal{L}}{\partial w_m} + \lambda w_m\Big)
# $$
#
# Here, you'll implement the computation of $L_2$:
# $$
# L_2(\lambda, [w_1, w_2, \dots, w_k]) = \frac{\lambda}{2} \sum\limits_{m=1}^k \|w_m\|^2_2
# $$

def l2_regularizer(weight_decay, weights):
    """Compute the L2 regularization term
    # Arguments
        weight_decay: float
        weights: list of arrays of different shapes
    # Output
        sum of the L2 norms of the input weights
        scalar
    """
    # (lambda / 2) * sum_m ||w_m||^2. Works both for a list of weight arrays
    # and for a single flat array (the local test below passes one).
    output = 0.5 * weight_decay * np.sum([np.sum(np.square(w)) for w in weights])
    return output

# You can test your forward pass below. Your output should be: `108.25`

#test the L2 regularizer
weight_decay = 2
weights = np.array([5,3,7,5,0.5])
print(l2_regularizer(weight_decay, weights))

am.test_student_function(username, l2_regularizer, ['weight_decay', 'weights'])

# ## 1.8 SGD optimizer

class SGD(object):
    '''
    Stochastic gradient descent optimizer
    https://en.wikipedia.org/wiki/Stochastic_gradient_descent
    '''
    def __init__(self, model, lr=0.01, weight_decay=0.0):
        self.model = model
        self.lr = lr
        self.weight_decay = weight_decay

    def update_params(self):
        # One SGD step with weight decay: w <- w - lr * (dw + decay * w).
        weights = self.model.get_params()
        grads = self.model.get_params_gradients()
        for w, dw in zip(weights, grads):
            update = self.lr * (dw + self.weight_decay * w)
            # it writes the result to the previous variable instead of copying
            np.subtract(w, update, out=w)

# # 2. Experiments

# +
# some function from week 2
def generate_2_circles(N=100):
    """Generate two concentric circles of N points each; inner labelled 1, outer 0."""
    # BUGFIX: the original hard-coded 100 sample points here, ignoring N, so
    # any call with N != 100 produced X and Y of mismatched lengths.
    phi = np.linspace(0.0, np.pi * 2, N)
    X1 = 1.1 * np.array([np.sin(phi), np.cos(phi)])
    X2 = 3.0 * np.array([np.sin(phi), np.cos(phi)])
    Y = np.concatenate([np.ones(N), np.zeros(N)]).reshape((-1, 1))
    X = np.hstack([X1,X2]).T
    return X, Y


def split(X, Y, train_ratio=0.7):
    """Randomly split paired arrays row-wise into train and test parts."""
    size = len(X)
    train_size = int(size * train_ratio)
    indices = np.arange(size)
    np.random.shuffle(indices)
    train_indices = indices[:train_size]
    test_indices = indices[train_size:]
    return X[train_indices], Y[train_indices], X[test_indices], Y[test_indices]


def plot_model_prediction(prediction_func, X, Y, hard=True):
    """Plot a model's decision surface on a 100x100 grid covering the data."""
    u_min = X[:, 0].min() - 1
    u_max = X[:, 0].max() + 1
    v_min = X[:, 1].min() - 1
    v_max = X[:, 1].max() + 1

    U, V = np.meshgrid(np.linspace(u_min, u_max, 100), np.linspace(v_min, v_max, 100))
    UV = np.stack([U.ravel(), V.ravel()]).T
    c = prediction_func(UV).ravel()
    if hard:
        c = c > 0.5
    plt.scatter(UV[:,0], UV[:,1], c=c, edgecolors= 'none', alpha=0.15)
    plt.scatter(X[:,0], X[:,1], c=Y.ravel(), edgecolors= 'black')
    plt.xlim(left=u_min, right=u_max)
    plt.ylim(bottom=v_min, top=v_max)
    # NOTE(review): plt.axes() is deprecated in newer matplotlib; kept as-is
    # to preserve behavior on the course's pinned version.
    plt.axes().set_aspect('equal')
    plt.show()
# -

X_train, Y_train, X_test, Y_test = split(*generate_2_circles(), 0.7)

# +
##Training the network ##
# Network filled in per the sketch in the original placeholder comment:
# 2 -> 16 -> 1, with ReLU between the dense layers and a Sigmoid output
# for binary classification with the NLL loss.
model = SequentialNN(
    Dense(2, 16),
    ReLU(),
    Dense(16, 1),
    Sigmoid()
)

loss = NLL()
weight_decay = 1e-4
sgd = SGD(model, lr=0.1, weight_decay=weight_decay)

iters = 5000 # Number of times to iterate over all data objects

model.set_training_phase(True)
for i in range(iters):
    # get the predictions
    y_pred = model.forward(X_train)
    # compute the loss value + L_2 term
    loss_value = loss.forward(y_pred, Y_train) + l2_regularizer(weight_decay, model.get_params())
    if i % 500 == 0:
        # log the current loss value
        print('Step: {}, \tLoss = {:.2f}'.format(i, loss_value))
    # get the gradient of the loss functions
    loss_grad = loss.backward(y_pred, Y_train)
    # backprop the gradients
    model.backward(X_train, loss_grad)
    # perform the updates
    sgd.update_params()
# -

plot_model_prediction(lambda x: model.forward(x), X_test, Y_test)

# # 3. Convolutions
# ## 3.1 Matrix convolution
#
#
# There is a way to create a **locally connected** layer which will learn local correlations using a smaller amount of parameters.
# This layer is aptly called **Convolutional Layer** and is based on **matrix convolution**
# A picture is worth a thousand words which is especially true when learning about convolution:
# ![Image convolution](./src/conv.png)
# In image convolution, a **filter**, also called **kernel**, is applied to the source matrix.
# Each element from the kernel is multiplied by the corresponding element from the source matrix. The results are summed up and written to the target matrix.
#
# In this example, the output matrix has a smaller size than its source\*. This is because the kernel can not overlap the borders. **Zero padding** can be used to retain the original dimension. It is a simple solution which involves adding a border of zeros to the input.
#
# \* It may seem both matrices have the same size (both are shown with the same number of boxes. In the edges of the right matrix, however, no values are stored. The top-left corner of the right image starts where the $-3$ is placed.
#
# The source matrix $X$ is of size $N \times M$ and the kernel $K$ is of size $(2p+1) \times (2q +1 )$.
# We define $X_{ij} = 0$ for $i > N, i < 1$ and $j > M, j < 1$.
# In (other) words: If you try to access a pixel which is out of bounds assume that it is zero.
# This is called **zero padding**.
# # Therefore, the convolution of a matrix with a kernel is defined as follows: # $$ # Y = X \star K \\ # Y_{ij} = \sum\limits_{\alpha=0}^{2p} \sum\limits_{\beta=0}^{2q} # K_{\alpha \beta} X_{i + \alpha - p, j+\beta - q} # $$ # # This operation's name depends on the field: # * In machine learning: **convolution** # * In mathematics: **cross-correlation** # Finally, its time for you to implement matrix convolution. # You can use the example below this code block to test your implementation. def conv_matrix(matrix, kernel): """Perform the convolution of the matrix with the kernel using zero padding # Arguments matrix: input matrix np.array of size `(N, M)` kernel: kernel of the convolution np.array of size `(2p + 1, 2q + 1)` # Output the result of the convolution np.array of size `(N, M)` """ ################# ### YOUR CODE ### ################# return output # Let's test the function with the following data: # # $$ # X = \begin{bmatrix} # 1 & 2 & 3 \\ # 2 & 3 & 4 \\ # 3 & 4 & 5 \\ # \end{bmatrix} \quad # K = # \begin{bmatrix} # 1 & 0 & 0 \\ # 0 & 1 & 0 \\ # 0 & 0 & 2 \\ # \end{bmatrix} \quad # X \star K = # \begin{bmatrix} # 7 & 10 & 3 \\ # 10 & 14 & 6 \\ # 3 & 6 & 8 \\ # \end{bmatrix} # $$ # # We recreate the example data in Python to perform a local test run. # Don't be confused by [np.eye](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.eye.html). It fills our kernel matrix with ones on the diagonal from top-left to bottom-right. # + X = np.array([ [1, 2, 3], [2, 3, 4], [3, 4, 5] ]) K = np.eye(3) K[-1, -1] = 2 print(np.zeros(3)) print(K) # - # Run the following code block and compare the result with the example above. print(conv_matrix(X, K)) am.test_student_function(username, conv_matrix, ['matrix', 'kernel']) # ## 3.2 Basic kernels # # Matrix convolution can be used to process an image (think Instagram): blur, shift, detecting edges, and much more. 
# This [article](http://setosa.io/ev/image-kernels/) (**recommended read**) about image kernels should give you a better understanding of convolutions. It happens to be interactive as well. # # In convolutional layers, the kernels are learned by training on the dataset. However, there are predefined kernels, for example used on your Instagram photos. Some examples are: # # **Sharpen Kernel:** # $$ # \begin{equation*} # \begin{bmatrix} # 0 & -1 & 0 \\ # -1 & 5 & -1 \\ # 0 & -1 & 0 # \end {bmatrix} # \end{equation*} # $$ # # **Edge detection filter:** # $$ # \begin{equation*} # \begin{bmatrix} # -1 & -1 & -1 \\ # -1 & 8 & -1 \\ # -1 & -1 & -1 # \end {bmatrix} # \end{equation*} # $$ # # **Box blur of size 3:** # $$ \frac{1}{9} # \begin{equation*} # \begin{bmatrix} # 1 & 1 & 1 \\ # 1 & 1 & 1 \\ # 1 & 1 & 1 # \end {bmatrix} # \end{equation*} # $$ # # Let's play with convolutions by manipulating an image of a dog. rgb_img = plt.imread('./images/dog.png') plt.imshow(rgb_img) # Coloured images would require a 3-dimensional tensor to represent RGB (red, green, and blue). # Therefore, we will convert it to grayscale. This way it can be processed as a matrix. img = rgb_img.mean(axis=2) plt.imshow(img, cmap='gray') # First of all, let's blur the image with [box blur](https://en.wikipedia.org/wiki/Box_blur). It is just a convolution of a matrix with the kernel of size $N \times N$ of the following form: # # $$ # \frac{1}{N^2} # \begin{bmatrix} # 1 & \dots & 1\\ # \vdots & \ddots & \vdots\\ # 1 & \dots & 1\\ # \end{bmatrix} # $$ # # Every element of this filter is *one* and we divide the sum by the total amount of elements in the blur filter. You could understand it as taking the average of an image region. # # **Description:** # Perform the blur of the image. 
# # <u>Arguments:</u> # * `image` - Input matrix - [np.array](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.array.html) of size `(N, M)` # * `box_size` - Size of the blur kernel - `int > 0` the kernel is of size `(box_size, box_size)` # # <u>Output:</u> # The result of the blur [np.array](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.array.html) of size `(N, M)` def box_blur(image, box_size): """Perform the blur of the image # Arguments image: input matrix - np.array of size `(N, M)` box_size: the size of the blur kernel - int > 0 the kernel is of size `(box_size, box_size)` # Output the result of the blur np.array of size `(N, M)` """ ################# ### YOUR CODE ### ################# return output # You can test your solution before submitting it. Running the following code block should yield this result: # $$ # \begin{equation*} # \begin{bmatrix} # 1 & 2 & 1 \\ # 2 & 4 & 2 \\ # 1 & 2 & 1 # \end {bmatrix} # \end{equation*} # $$ # + test_image = np.array([ [9, 0, 9], [0, 0, 0], [9, 0, 9] ]) print(box_blur(test_image, 3)) # - am.test_student_function(username, box_blur, ['image', 'box_size']) # Let's blur the dog blur_dog = box_blur(img, box_size=3) plt.imshow(blur_dog, cmap='gray') # Now, we will get the vertical and horizontal gradients. To perform it we calculate the convolution of the image with the following kernels: # # $$ # K_h = # \begin{bmatrix} # -1 & 0 & 1\\ # \end{bmatrix} \quad # K_v = # \begin{bmatrix} # 1 \\ # 0 \\ # -1\\ # \end{bmatrix} \\ # X_h = X \star K_h \quad X_v = X \star K_v\\ # $$ # # And then we calculate the amplitude of the gradient: # # $$ # X_\text{grad} = \sqrt{X_h^2 + X_v^2} # $$ dog_h = conv_matrix(blur_dog, np.array([[-1, 0, 1]])) dog_v = conv_matrix(blur_dog, np.array([[-1, 0, 1]]).T) dog_grad = np.sqrt(dog_h ** 2 + dog_v ** 2) plt.imshow(dog_grad, cmap='gray') # This yields the edges of our blurred dog. 
It is not the only way to obtain edges though, there are plenty more: # * [Canny edge detection](https://en.wikipedia.org/wiki/Canny_edge_detector) # * [Sobel operator](https://en.wikipedia.org/wiki/Sobel_operator) # * [Prewitt operator](https://en.wikipedia.org/wiki/Prewitt_operator) # When you convolve an image with a kernel you obtain a map of responses. The more correlated the patch of an image is with the kernel, the higher the response. Let's take a closer look: # + pattern = np.array([ [0, 1, 0], [1, 1, 1], [0, 1, 0] ]) # Create the image image = np.pad(pattern, [(12, 12), (10, 14)], mode='constant', constant_values=0) plt.imshow(image, cmap='gray') plt.title('original image') plt.show() # Add some noise image = 0.5 * image + 0.5 * np.random.random(image.shape) plt.imshow(image, cmap='gray') plt.title('noisy image') plt.show() # Let's find the cross response = conv_matrix(image, pattern) plt.imshow(response, cmap='gray') plt.title('local response') plt.show() plt.imshow(response == response.max(), cmap='gray') plt.title('detected position') plt.show() # - # The brightest pixel highlights where the cross is located. We can find the area where the image is locally close to the kernel. This is especially useful for finding different patterns in images such as: eyes, legs, dogs, cats, etc. # # We defined kernels and applied them to images. But we can also **learn** them by minimizing loss and making the processing as effective as possible. In order to do this, we have to define the **Convolutional Layer** in the next chapter. # ## 3.3 Convolutional layer # # A **Convolutional Layer** works with images. Each image is a 3-dimensional object $N_{\text{channels}} \times H \times W$. # Here index *"channels"* refers to the 3 colors (or 1 for black & white images), $H$ to height, and $W$ to width. # And therefore, the collection of images is 4-dimensional tensor of shape $N_{\text{objects}} \times N_{\text{channels}} \times H \times W$. 
#
# For example, 32 RGB images of size $224 \times 224$ are represented as a tensor of shape $32 \times 3 \times 224 \times 224$

# A convolutional layer receives an image as its input. Here is how it works:

# The layer has `n_in * n_out` kernels. It is a tensor of size `(n_in, n_out, kernel_h, kernel_w)`

# It takes a 4-dimensional tensor of size `n_objects, n_in, H, W` as its input.
# * `n_objects` is the collection of images.
# * Each of them has `n_in` channels.
# * The resolution of the images is `(H, W)`
#
# For each of the images the following operation is performed:
# * In order to get the 1st output channel, all inputs are convolved with their corresponding kernels.
# * Then the results are summed and written to the output channel.

# This is our implementation:
# ```python
# for i in range(n_out):
#     out_channel = 0.0
#     for j in range(n_in):
#         kernel_2d = K[i, j]  # Retrieve kernel from the collection of kernels
#         input_channel = input_image[j]  # Get one channel of the input image
#         out_channel += conv_matrix(input_channel, kernel_2d)  # Perform convolution
#     output_image.append(out_channel)  # Append the calculated channel to the output
# ```
#
# We implemented the convolutional layer for you. The implementation of `backward` is based on the idea that convolution could be represented as matrix multiplication.

class ConvLayer(Layer):
    """
    Convolutional Layer.
    The implementation is based on
    the representation of the convolution
    as matrix multiplication.

    Weights W have shape (n_out, n_in, filter_size, filter_size);
    bias b has one entry per output channel.
    """
    def __init__(self, n_in, n_out, filter_size):
        super(ConvLayer, self).__init__()
        # Kernels drawn from a standard normal; biases start at zero.
        self.W = np.random.normal(size=(n_out, n_in, filter_size, filter_size))
        self.b = np.zeros(n_out)

    def forward(self, x_input):
        # x_input: (n_obj, n_in, h, w) -> output: (n_obj, n_out, h, w)
        # (conv_matrix zero-pads, so spatial size is preserved).
        n_obj, n_in, h, w = x_input.shape
        n_out = len(self.W)
        self.output = []
        for image in x_input:
            output_image = []
            for i in range(n_out):
                # Each output channel is the sum of the per-input-channel
                # convolutions with the matching 2-D kernel.
                out_channel = 0.0
                for j in range(n_in):
                    out_channel += conv_matrix(image[j], self.W[i, j])
                output_image.append(out_channel)
            self.output.append(np.stack(output_image, 0))
        self.output = np.stack(self.output, 0)
        return self.output

    def backward(self, x_input, grad_output):
        # Shapes: x_input (N, C, H, W); self.W (F, C, HH, WW);
        # grad_output (N, F, H, W). Square odd-sized kernels assumed,
        # matching the zero padding used by the forward pass.
        N, C, H, W = x_input.shape
        F, C, HH, WW = self.W.shape
        pad = int((HH - 1) / 2)
        # Bias gradient: sum over every axis except the output channel.
        self.grad_b = np.sum(grad_output, (0, 2, 3))
        # pad input array
        x_padded = np.pad(x_input, ((0,0), (0,0), (pad, pad), (pad, pad)), 'constant')
        H_padded, W_padded = x_padded.shape[2], x_padded.shape[3]
        # naive implementation of im2col: every receptive field becomes one
        # row; after the transpose below, one column per spatial position.
        x_cols = None
        for i in range(HH, H_padded + 1):
            for j in range(WW, W_padded+1):
                for n in range(N):
                    field = x_padded[n, :, i-HH:i, j-WW:j].reshape((1,-1))
                    if x_cols is None:
                        x_cols = field
                    else:
                        x_cols = np.vstack((x_cols, field))
        x_cols = x_cols.T
        # Flatten the output gradient to (F, H*W*N), matching x_cols' column
        # order, so both gradients reduce to plain matrix products.
        d_out = grad_output.transpose(1, 2, 3, 0)
        dout_cols = d_out.reshape(F, -1)
        # Weight gradient: dL/dW = dout_cols . x_cols^T, reshaped back.
        dw_cols = np.dot(dout_cols, x_cols.T)
        self.grad_W = dw_cols.reshape(F, C, HH, WW)
        # Input gradient: scatter W^T . dout back onto the padded image,
        # accumulating the overlapping receptive fields.
        w_cols = self.W.reshape(F, -1)
        dx_cols = np.dot(w_cols.T, dout_cols)
        dx_padded = np.zeros((N, C, H_padded, W_padded))
        idx = 0
        # Same traversal order as the im2col loop above, so column idx
        # corresponds to the same (i, j, n) receptive field.
        for i in range(HH, H_padded + 1):
            for j in range(WW, W_padded + 1):
                for n in range(N):
                    dx_padded[n:n+1, :, i-HH:i, j-WW:j] += dx_cols[:, idx].reshape((1, C, HH, WW))
                    idx += 1
        # Strip the padding to recover the gradient w.r.t. the real input.
        dx = dx_padded[:, :, pad:-pad, pad:-pad]
        grad_input = dx
        return grad_input

    def get_params(self):
        return [self.W, self.b]

    def get_params_gradients(self):
        return [self.grad_W, self.grad_b]

# This layer transforms images with 3 channels into images with 8 channels by
convolving them with kernels of size `(3, 3)` conv_layer = ConvLayer(3, 8, filter_size=3) # ## 3.4 Pooling layer # # The pooling layer **reduces the size of an image**. # # In the following figure $2 \times 2$ pooling is applied on the image which effectively reduces the size by half. # If you look closely, pooling operations have no effect on the depth of an image. # ![pool](./src/pool.png) # # There are several types of pooling operations but the most common one is **max pooling**. # # During a max pooling operation, the image is split into **windows** (or **filters**) and then the maximum of each window is used as the output. # # ![maxpool](./src/maxpool.png) def maxpool_forward(x_input): """Perform max pooling operation with 2x2 window # Arguments x_input: np.array of size (2 * W, 2 * H) # Output output: np.array of size (W, H) """ ################# ### YOUR CODE ### ################# return output # Once again, you can use example data to test your solution: # **Image:** # $$ # \begin{equation*} # \begin{bmatrix} # 1 & 1 & 2 & 4 \\ # 5 & 6 & 7 & 8 \\ # 3 & 2 & 1 & 0 \\ # 1 & 2 & 3 & 4 # \end {bmatrix} # \end{equation*} # $$ # # **Output:** # $$ # \begin{equation*} # \begin{bmatrix} # 6 & 8 \\ # 3 & 4 # \end {bmatrix} # \end{equation*} # $$ # + test_image = np.array([ [1, 1, 2, 4], [5, 6, 7, 8], [3, 2, 1, 0], [1, 2, 3, 4] ]) print(maxpool_forward(test_image)) # - am.test_student_function(username, maxpool_forward, ['x_input']) # We already implemented the gradient calculation. # It is not overly complicated; reading the code should help you to understand the concept. 
def maxpool_grad_input(x_input, grad_output): """Calculate partial derivative of the loss with respect to the input # Arguments x_input: np.array of size (2 * W, 2 * H) grad_output: partial derivative of the loss with respect to the output np.array of size (W, H) # Output output: partial derivative of the loss with respect to the input np.array of size (2 * W, 2 * H) """ height, width = x_input.shape # create the array of zeros of the required size grad_input = np.zeros(x_input.shape) # let's put 1 if the element with this position # is maximal in the window for i in range(0, height, 2): for j in range(0, width, 2): window = x_input[i:i+2, j:j+2] i_max, j_max = np.unravel_index(np.argmax(window), (2, 2)) grad_input[i + i_max, j + j_max] = 1 # put corresponding gradient instead of 1 grad_input = grad_input.ravel() grad_input[grad_input == 1] = grad_output.ravel() grad_input = grad_input.reshape(x_input.shape) return grad_input # Following up is the full implementation of the **MaxPool Layer**. class MaxPool2x2(Layer): def forward(self, x_input): n_obj, n_ch, h, w = x_input.shape self.output = np.zeros((n_obj, n_ch, h // 2, w // 2)) for i in range(n_obj): for j in range(n_ch): self.output[i, j] = maxpool_forward(x_input[i, j]) return self.output def backward(self, x_input, grad_output): n_obj, n_ch, _, _ = x_input.shape grad_input = np.zeros_like(x_input) for i in range(n_obj): for j in range(n_ch): grad_input[i, j] = maxpool_grad_input(x_input[i, j], grad_output[i, j]) return grad_input # ## 3.5 Flatten # # Convolutional neural networks are better at image processing than fully connected neural networks (dense networks). We will combine convolutional layers, which deal with 4-dimensional tensors, with dense layers, which work with matrices. # In order to bridge the gap between convolutional layers and dense layers we will implement the **Flatten Layer**. 
# # The Flatten layer receives a 4-dimensional tensor of size `(n_obj, n_channels, h, w)` as its input and reshapes it into a 2-dimensional tensor (matrix) of size `(n_obj, n_channels * h * w)`. # # The backward pass of this layer is pretty straightforward. Remember that we don't actually change any values; we merely reshape inputs. # # **Please implement `flatten_forward` and `flatten_grad_input` functions using [np.reshape](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.reshape.html)**. def flatten_forward(x_input): """Perform the reshaping of the tensor of size `(K, L, M, N)` to the tensor of size `(K, L*M*N)` # Arguments x_input: np.array of size `(K, L, M, N)` # Output output: np.array of size `(K, L*M*N)` """ ################# ### YOUR CODE ### ################# return output # You can use test data and compare the final shape. It should be `(100, 768)` for the following example. # Please ignore the use of [np.zeros](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.zeros.html) in this case. We are just interested in transforming shapes. # **Be aware:** This test will fail if you do not return an array like object! # + test_input = np.zeros((100, 3, 16, 16)) print(flatten_forward(test_input).shape) # - am.test_student_function(username, flatten_forward, ['x_input']) def flatten_grad_input(x_input, grad_output): """Calculate partial derivative of the loss with respect to the input # Arguments x_input: np.array of size `(K, L, M, N)` grad_output: partial derivative of the loss with respect to the output np.array of size `(K, L*M*N)` # Output output: partial derivative of the loss with respect to the input np.array of size `(K, L, M, N)` """ ################# ### YOUR CODE ### ################# return grad_input am.test_student_function(username, flatten_grad_input, ['x_input', 'grad_output']) # This is the, pretty self-explanatory, implemention of the **Flatten Layer**. 
class FlattenLayer(Layer): def forward(self, x_input): self.output = flatten_forward(x_input) return self.output def backward(self, x_input, grad_output): output = flatten_grad_input(x_input, grad_output) return output # # 4. Image Experiments # # This chapter focuses on conducting several experiments. We will train our neural networks with **mini-batches**. Mini-batches are small portions of our dataset, all mini-batches together should form the original dataset again. With our mini-batches in place we will feed these one-by-one to our neural network. # + import sys def iterate_minibatches(x, y, batch_size=16, verbose=True): assert len(x) == len(y) indices = np.arange(len(x)) np.random.shuffle(indices) for i, start_idx in enumerate(range(0, len(x) - batch_size + 1, batch_size)): if verbose: print('\rBatch: {}/{}'.format(i + 1, len(x) // batch_size), end='') sys.stdout.flush() excerpt = indices[start_idx:start_idx + batch_size] yield x[excerpt], y[excerpt] # - # Let's import the data. Please [download](http://yann.lecun.com/exdb/mnist/) it first. # # If you get an error with loading the data, chances are you'll need to unpack the downloaded files. from dataset_utils import load_mnist # + train = list(load_mnist(dataset='training', path='.')) train train_images = np.array([im[1] for im in train]) train_targets = np.array([im[0] for im in train]) # We will train a 0 vs. 1 classifier x_train = train_images[train_targets < 2][:1000] y_train = train_targets[train_targets < 2][:1000] y_train = y_train y_train = y_train.reshape((-1, 1)) # - # You just loaded the MNIST dataset. This dataset consists of gray-scale (so a single channel) images of size `28x28`. These images are represented by the RGB color model. This color model representes a color by 3 integers that range from 0 to 255, or in the case of gray-scale images this is a single integer. This means that each picture in the MNIST dataset is represented by 784 pixels with a value ranging from 0 to 255. 
This is how a single image looks like: plt.imshow(x_train[0].reshape(28, 28), cmap='gray_r') # To make divergence to an optimum easier, we will normalize the images to have values between 0 and 1. Then, by reshaping, we will add the dimensions for the channel which, for simplicity, was removed by the creators of this dataset. As you can see, this doesn't change anything in how the image looks like. x_train = x_train.astype('float32') / 255.0 x_train = x_train.reshape((-1, 1, 28, 28)) plt.imshow(x_train[0].reshape(28, 28), cmap='gray_r') # Now we will train a simple convolutional neural network: def get_cnn(): nn = SequentialNN( ConvLayer(1, 2, filter_size=3), # The output is of size [N_obj 2 28 28] ReLU(), # The output is of size [N_obj 2 28 28] MaxPool2x2(), # The output is of size [N_obj 2 14 14] ConvLayer(2, 4, filter_size=3), # The output is of size [N_obj 4 14 14] ReLU(), # The output is of size [N_obj 4 14 14] MaxPool2x2(), # The output is of size [N_obj 4 7 7] FlattenLayer(), # The output is of size [N_obj 196] Dense(4 * 7 * 7, 8), ReLU(), Dense(8, 1), Sigmoid() ) return nn nn = get_cnn() loss = NLL() optimizer = SGD(nn, weight_decay=0.0) # + # It will train for about 5 minutes num_epochs = 5 batch_size = 32 # We will store the results here history = {'loss': [], 'accuracy': []} # `num_epochs` represents the number of iterations for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch + 1, num_epochs)) # We perform iteration a one-by-one iteration of the mini-batches for x_batch, y_batch in iterate_minibatches(x_train, y_train, batch_size): # Predict the target value y_pred = nn.forward(x_batch) # Compute the gradient of the loss loss_grad = loss.backward(y_pred, y_batch) # Perform backwards pass nn.backward(x_batch, loss_grad) # Update the params optimizer.update_params() # Save loss and accuracy values history['loss'].append(loss.forward(y_pred, y_batch)) prediction_is_correct = (y_pred > 0.5) == (y_batch > 0.5) 
history['accuracy'].append(np.mean(prediction_is_correct)) print() # + # Let's plot the results to get a better insight plt.figure(figsize=(8, 5)) ax_1 = plt.subplot() ax_1.plot(history['loss'], c='g', lw=2, label='train loss') ax_1.set_ylabel('loss', fontsize=16) ax_1.set_xlabel('#batches', fontsize=16) ax_2 = plt.twinx(ax_1) ax_2.plot(history['accuracy'], lw=3, label='train accuracy') ax_2.set_ylabel('accuracy', fontsize=16) plt.show() # - # **Things you could try:** # Train the model with a different `batch_size`: # * What would happen with `batch_size=1`? # * What would happen with `batch_size=1000`? # * Does the speed of the computation depend on this parameter? If so, why? # # Train the model with a different number of `num_epochs`: # * What would happen with `num_epochs=1`? # * What would happen with `num_epochs=1000`? # * How does it affect computation time, resource strain, and accuracy? # Let's visualize the activations of the intermediate layers: # + viz_images = x_batch[:2] _ = nn.forward(viz_images) activations = { 'conv_1': nn.layers[0].output, 'relu_1': nn.layers[1].output, 'pool_1': nn.layers[2].output, 'conv_2': nn.layers[3].output, 'relu_2': nn.layers[4].output, 'pool_2': nn.layers[5].output, } # - # ### Input Images # + # Input f, (ax1, ax2) = plt.subplots(2, 1, figsize=(4, 8)) ax1.imshow(viz_images[0, 0], cmap=plt.cm.gray_r) ax1.set_xticks([]) ax1.set_yticks([]) ax2.imshow(viz_images[1, 0], cmap=plt.cm.gray_r) ax2.set_xticks([]) ax2.set_yticks([]) plt.show() # - # ### Activations of Conv 1 # + # Conv 1 f, axes = plt.subplots(2, 2, figsize=(8, 8)) for i in range(2): for j in range(2): ax = axes[i, j] ax.imshow(activations['conv_1'][i, j], cmap=plt.cm.gray_r) ax.set_xticks([]) ax.set_yticks([]) ax.set_title('Channel {}'.format(j + 1)) plt.show() # - # ### Activations of ReLU 1 # + # ReLU 1 f, axes = plt.subplots(2, 2, figsize=(8, 8)) for i in range(2): for j in range(2): ax = axes[i, j] ax.imshow(activations['relu_1'][i, j], cmap=plt.cm.gray_r) 
ax.set_xticks([]) ax.set_yticks([]) ax.set_title('Channel {}'.format(j + 1)) plt.show() # - # ### Activations of MaxPooling 1 # + # Max Pooling 1 f, axes = plt.subplots(2, 2, figsize=(8, 8)) for i in range(2): for j in range(2): ax = axes[i, j] ax.imshow(activations['pool_1'][i, j], cmap=plt.cm.gray_r) ax.set_xticks([]) ax.set_yticks([]) ax.set_title('Channel {}'.format(j + 1)) plt.show() # - # ### Activations of Conv 2 # + # Conv 2 f, axes = plt.subplots(2, 4, figsize=(16, 8)) for i in range(2): for j in range(4): ax = axes[i, j] ax.imshow(activations['conv_2'][i, j], cmap=plt.cm.gray_r) ax.set_xticks([]) ax.set_yticks([]) ax.set_title('Channel {}'.format(j + 1)) plt.show() # - # ### Activations of ReLU 2 # + # ReLU 2 f, axes = plt.subplots(2, 4, figsize=(16, 8)) for i in range(2): for j in range(4): ax = axes[i, j] ax.imshow(activations['relu_2'][i, j], cmap=plt.cm.gray_r) ax.set_xticks([]) ax.set_yticks([]) ax.set_title('Channel {}'.format(j + 1)) plt.show() # - # ### Activations of MaxPooling 2 # + # Max Pooling 2 f, axes = plt.subplots(2, 4, figsize=(16, 8)) for i in range(2): for j in range(4): ax = axes[i, j] ax.imshow(activations['pool_2'][i, j], cmap=plt.cm.gray_r) ax.set_xticks([]) ax.set_yticks([]) ax.set_title('Channel {}'.format(j + 1)) plt.show() # - # As we go deeper and deeper, images become less locally-correlated (the dependance between two neighbours decreases) and more semantically loaded. # Each convoluted pixel stores more and more useful information about the object. # In the end, this will be anaylzed using several **Dense Layers**. # # **Things you could try:** # * Change the architecture of the neural network # * Vary the number of kernels # * Vary the size of the kernels
week_3/Neural_Nets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import rabbitpy # The URL to connect to url = 'amqp://guest:guest@localhost:5672/%2F' # Open a connection to RabbitMQ connection = rabbitpy.Connection(url) # Open a channel to communicate with RabbitMQ on channel = connection.channel() # Create an object for interacting with the queue queue = rabbitpy.Queue(channel, 'example') # While there are messages in the queue, fetch them using Basic.Get while len(queue) > 0: message = queue.get() print('Message:') print(' ID: %s' % message.properties['message_id']) print(' Time: %s' % message.properties['timestamp'].isoformat()) print(' Body: %s' % message.body) message.ack()
notebooks/2.5 Basic.Get Example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <!--NAVIGATION--> # < [A Preview of Data Science Tools](15-Preview-of-Data-Science-Tools.ipynb) | [Contents](Index.ipynb) | [Appendix: Figure Code](17-Figures.ipynb) > # # Resources for Further Learning # This concludes our whirlwind tour of the Python language. # My hope is that if you read this far, you have an idea of the essential syntax, semantics, operations, and functionality offered by the Python language, as well as some idea of the range of tools and code constructs that you can explore further. # # I have tried to cover the pieces and patterns in the Python language that will be most useful to a data scientist using Python, but this has by no means been a complete introduction. # If you'd like to go deeper in understanding the Python language itself and how to use it effectively, here are a handful of resources I'd recommend: # # - [*Fluent Python*](http://shop.oreilly.com/product/0636920032519.do) by <NAME>. This is an excellent OReilly book that explores best practices and idioms for Python, including getting the most out of the standard library. # - [*Dive Into Python*](http://www.diveintopython.net/) by <NAME>. This is a free online book that provides a ground-up introduction to the Python language. # - [*Learn Python the Hard Way*](http://learnpythonthehardway.org/) by <NAME>. This book follows a "learn by trying" approach, and deliberately emphasizes developing what may be the most useful skill a programmer can learn: Googling things you don't understand. # - [*Python Essential Reference*](http://www.dabeaz.com/per.html) by <NAME>. This 700-page monster is well-written, and covers virtually everything there is to know about the Python language and its built-in libraries. 
For a more application-focused Python walk-through, see Beazley's [*Python Cookbook*](http://shop.oreilly.com/product/0636920027072.do).
#
# To dig more into Python tools for data science and scientific computing, I recommend the following books:
#
# - [*The Python Data Science Handbook*](http://shop.oreilly.com/product/0636920034919.do) by yours truly. This book starts precisely where this report leaves off, and provides a comprehensive guide to the essential tools in Python's data science stack, from data munging and manipulation to machine learning.
# - [*Effective Computation in Physics*](http://shop.oreilly.com/product/0636920033424.do) by <NAME> and <NAME>, which is applicable to people far beyond the world of Physics research. It is a step-by-step, ground-up introduction to scientific computing, including an excellent introduction to many of the tools mentioned in this report.
# - [*Python for Data Analysis*](http://shop.oreilly.com/product/0636920023784.do) by <NAME>, creator of the Pandas package. This book covers the Pandas library in detail, as well as giving useful information on some of the other tools that enable it.
#
# Finally, for an even broader look at what's out there, I recommend the following:
#
# - [*O'Reilly Python Resources*](http://shop.oreilly.com/category/browse-subjects/programming/python.do) O'Reilly features a number of excellent books on Python itself and specialized topics in the Python world.
# - *PyCon*, *SciPy*, and *PyData*. The PyCon, SciPy, and PyData conferences draw thousands of attendees each year, and archive the bulk of their programs each year as free online videos. These have turned into an incredible set of resources for learning about Python itself, Python packages, and related topics. Search online for videos of both talks and tutorials: the former tend to be shorter, covering new packages or fresh looks at old ones. The tutorials tend to be several hours, covering the use of the tools mentioned here as well as others.
# <!--NAVIGATION--> # < [A Preview of Data Science Tools](15-Preview-of-Data-Science-Tools.ipynb) | [Contents](Index.ipynb) | [Appendix: Figure Code](17-Figures.ipynb) >
16-Further-Resources.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/xinformatics/DeepLearningLifeSciences/blob/master/chapter5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="q0sqfTukcUz7" colab_type="code" colab={} # Chapter5 Biophysical ML # we will explore in depth the problem of predicting how # small drug-like molecules bind to a protein of interest in the human body. # + id="Vcdq4UzmdjB2" colab_type="code" colab={} # Our goal therefore is to design learning algorithms that can effectively predict when a # given molecule is going to interact with a given protein. How can we do this? # + id="sG1iluzIfWtQ" colab_type="code" colab={} # # + id="Z54cBeqklsMg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1014b814-c2d4-4827-d42e-90267ef78ffa" ##setup tensorflow v1 # %tensorflow_version 1.x # + id="ow8NXQwMvjAv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="54df901b-e5c8-43d1-c9a6-90d31f03dae4" ## this will install anaconda and deepchem, will add path, execution will take sometime # !wget -c https://repo.anaconda.com/archive/Anaconda3-2019.10-Linux-x86_64.sh # !chmod +x Anaconda3-2019.10-Linux-x86_64.sh # !bash ./Anaconda3-2019.10-Linux-x86_64.sh -b -f -p /usr/local # !conda install -y -c deepchem -c rdkit -c conda-forge -c omnia deepchem-gpu=2.3.0 import sys sys.path.append('/usr/local/lib/python3.7/site-packages/') # + id="VLFzIFn9vnlu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 225} outputId="88e92d02-f897-4b67-e119-c2a05ba2282e" ############## check deepchem installation by import deepchem as dc dc.__version__ #should match with the 
installed # + id="cf843_mm2NhB" colab_type="code" colab={} #need to reinstall deepchem bcz of an openmm error # + id="kvxKVn6k2So5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="cfbadea5-6319-4af4-9ed8-b60c38bac545" # !curl -Lo deepchem_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py # + id="fJn06u7P3Vrp" colab_type="code" colab={} import deepchem_installer # + id="IajD8Yjr3Vpc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="3c0a8e81-5d5b-464f-c24a-fd0278a8d22c" # %time deepchem_installer.install(additional_packages=['mdtraj'], version='2.3.0') # + id="1ifVwJnr3Vm5" colab_type="code" colab={} import deepchem as dc # + id="zFlqASlp3Vja" colab_type="code" colab={} grid_featurizer = dc.feat.RdkitGridFeaturizer(voxel_width=2.0,feature_types=['hbond', 'salt_bridge', 'pi_stack', 'cation_pi', 'ecfp', 'splif'], sanitize=True, flatten=True) # + id="hD5Tic3Xx9dB" colab_type="code" colab={} # do not really need to use this, feature parameter # in the next function will take care of it # + id="5Qozh7AI0B_y" colab_type="code" colab={} #pdbbind dataset is large ~ 2GB, takes time # + id="xX7t_sJFz6NS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="efa2203c-a2ca-4266-ae41-0d2b5b7146a8" # bcz of an openmm error, I have to manually download import deepchem as dc from deepchem.utils import download_url import os data_dir = dc.utils.get_data_dir() dataset_file = os.path.join(data_dir, "pdbbind_core_df.csv.gz") if not os.path.exists(dataset_file): print('File does not exist. 
Downloading file...') download_url("https://s3-us-west-1.amazonaws.com/deepchem.io/datasets/pdbbind_core_df.csv.gz") print('File downloaded...') raw_dataset = dc.utils.save.load_from_disk(dataset_file) # + id="0XPuq65K4WMX" colab_type="code" colab={} #raw_dataset # + id="lhjFUW0E4udy" colab_type="code" colab={} grid_featurizer = dc.feat.RdkitGridFeaturizer( voxel_width=16.0, feature_types=["ecfp", "splif", "hbond", "pi_stack", "cation_pi", "salt_bridge"], ecfp_power=5, splif_power=5, parallel=True, flatten=True, sanitize=True) # + id="xhSX_10s5LHE" colab_type="code" colab={} compound_featurizer = dc.feat.CircularFingerprint(size=128) # + id="O2pZscQ-5QcJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 408} outputId="77b4f5d5-42e6-4633-9eac-0effa8578742" #load data pdbbind_tasks, (train_dataset, valid_dataset, test_dataset), transformers = dc.molnet.load_pdbbind_grid( featurizer="ECFP", subset="refined") # + id="9zrceJU75YKk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="240e2745-781c-4e01-bb08-bc55d51e5871" train_dataset.get_data_shape(),valid_dataset.get_data_shape(),test_dataset.get_data_shape() # + id="DAIJeS8b5vlc" colab_type="code" colab={} # now comes model from sklearn.ensemble import RandomForestRegressor # + id="qAQdeC366M8a" colab_type="code" colab={} #RSF model sklearn_model = RandomForestRegressor(n_estimators=100) model = dc.models.SklearnModel(sklearn_model) model.fit(train_dataset) # + id="28oYbsn58vtL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 411} outputId="09340da5-9a0c-437c-9dcd-842dc61ad96d" #MLP model n_features = train_dataset.X.shape[1] model_mlp = dc.models.MultitaskRegressor(n_tasks=len(pdbbind_tasks),n_features=n_features, layer_sizes=[2000, 1000],dropouts=0.5,learning_rate=0.0003) model_mlp.fit(train_dataset, nb_epoch=250) # + id="RIOTJOOr9yxC" colab_type="code" colab={} metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) # + 
id="w1lxYB8e-sBj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="18d5f378-d3f9-4457-a12b-e0cf8b4600da" #checking the RF model print("Evaluating model") train_scores = model.evaluate(train_dataset, [metric], transformers) test_scores = model.evaluate(test_dataset, [metric], transformers) print("Train scores ", train_scores) print("Test scores ", test_scores) # + id="bZ07tghD-8f5" colab_type="code" colab={} #RF train r2: 0.89423 #RF test r2: 0.5099 #overfitting evident # + id="nTTY9LSu_GLO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="53b99b68-5a2d-43f4-c23f-ae3e334147ac" #checking the MLP model print("Evaluating model") train_scores = model_mlp.evaluate(train_dataset, [metric], transformers) test_scores = model_mlp.evaluate(test_dataset, [metric], transformers) print("Train scores ", train_scores) print("Test scores ", test_scores) # + id="-skJY6Wb_M_M" colab_type="code" colab={} #MLP train r2: 0.918 #MLP test r2: 0.369 #overfitting more than RF # + id="X_rIaWEa_WMw" colab_type="code" colab={} #This is different than what the book says, in book RF is more ovefit # + id="_8wnfaODAmUF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="423fada9-a01f-430c-afc2-8771cb6c92da" test_dataset.y.shape # + id="8G5GJ9mlAmOf" colab_type="code" colab={} rf_predicted_test = model.predict(test_dataset) # + id="xWcsJgZGAmJa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bb68d55e-f732-4dd0-c966-a06bea10dfc9" rf_predicted_test.shape # + id="-0HVqiOz_wcV" colab_type="code" colab={} #plots # %matplotlib inline import matplotlib import numpy as np import matplotlib.pyplot as plt # + id="YpEnt3vMBAej" colab_type="code" colab={} rf_predicted_test = model.predict(test_dataset) mlp_predicted_test = model_mlp.predict(test_dataset) rf_true_test = test_dataset.y # + id="o_pgqbFRAV4k" colab_type="code" colab={"base_uri": 
"https://localhost:8080/", "height": 350} outputId="fd912327-4b7e-46a9-918e-cba9c5be20a1" plt.figure(figsize=(10,5)) plt.subplot(1,2,1) plt.scatter(rf_predicted_test, rf_true_test) plt.xlabel('Predicted pIC50s') plt.ylabel('True IC50') plt.title(r'RF predicted IC50 vs. True IC50') plt.xlim([-2.5, 2.5]) plt.ylim([-2.5, 2.5]) plt.plot([-2.5, 2.5], [-2.5, 2.5], color='k') plt.text(-2.5, 2, 'RF R-squared = %0.2f' % 0.5099) plt.subplot(1,2,2) plt.scatter(mlp_predicted_test, rf_true_test) plt.xlabel('Predicted pIC50s') plt.ylabel('True IC50') plt.title(r'MLP predicted IC50 vs. True IC50') plt.xlim([-2.5, 2.5]) plt.ylim([-2.5, 2.5]) plt.plot([-2.5, 2.5], [-2.5, 2.5], color='k') plt.text(-2.5, 2, 'MLP R-squared = %0.2f' % 0.369) plt.show() # + id="RRJCW1xTBhKc" colab_type="code" colab={} #chapter 5 completed
chapter5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/PGM-Lab/probai-2021-pyro/blob/main/Day1/notebooks/students_PPLs_Intro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # TO AVOID KERNEL DYING WHEN PLOTTING (THIS IS A WINDOWS PARTICULAR PROBLEM) import os os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" # + [markdown] id="a75mGrGgL-0g" # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # # # Setup # Let's begin by installing and importing the modules we'll need. # + id="7Owp2eKrL-0j" import pyro import torch import pyro.distributions as dist import numpy as np import scipy.stats as stats import matplotlib.pyplot as plt # - pyro.set_rng_seed(54321) # + [markdown] id="I-EvHtlvL-0k" # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # # # 1. **Pyro’s distributions** (http://docs.pyro.ai/en/stable/distributions.html) : # # # --- # * Pyro provides a wide range of distributions: **Normal, Beta, Cauchy, Dirichlet, Gumbel, Poisson, Pareto, etc.** # # --- # # + colab={"base_uri": "https://localhost:8080/"} id="_w52OifSL-0l" outputId="aeffad3d-994f-4142-b4d2-6fbd633fdd78" normal = dist.Normal(0,1) normal # + [markdown] id="yOSdcF4_L-0m" # # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # --- # # * Samples from the distributions are [Pytorch’s Tensor objects](https://pytorch.org/cppdocs/notes/tensor_creation.html) (i.e. multidimensional arrays). 
# # --- # # + colab={"base_uri": "https://localhost:8080/"} id="wloQxYMXL-0m" outputId="6db4407c-f91c-4096-fbdf-e1b764900e07" sample = normal.sample() sample # + colab={"base_uri": "https://localhost:8080/"} id="2h2iOp-qL-0o" outputId="c653e9d1-7591-49ec-f493-afc2fc9f5a78" sample = normal.sample(sample_shape=[3,4,5]) sample # + [markdown] id="r-0pNd9tL-0o" # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # --- # * We can query the **dimensionlity** of a tensor with the ``shape`` property # # --- # + colab={"base_uri": "https://localhost:8080/"} id="Dk2ebGoYL-0p" outputId="8f6e9d2a-d1c5-4844-80d4-c5d7a76e969c" sample = normal.sample(sample_shape=[3,4,5]) sample.shape # + [markdown] id="DsVjombqL-0p" # # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # # --- # * Operations, like **log-likelihood**, are defined over tensors. # # --- # # # + colab={"base_uri": "https://localhost:8080/"} id="06G293JdL-0p" outputId="2c3a86a4-d687-4d5f-bee7-b8ca3977994d" normal.log_prob(sample) # + colab={"base_uri": "https://localhost:8080/"} id="R3v2rpRAL-0q" outputId="a4ef0280-b623-40c3-f9ba-0cabc25d253f" torch.sum(normal.log_prob(sample)) # + [markdown] id="zErtSTzYL-0q" # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # --- # # * **Multiple distributions** can be embedded in single object. # # * Below we define **three Normal distributions with different means but the same scale** in a single object. # # --- # + colab={"base_uri": "https://localhost:8080/"} id="VEcgGuTqL-0q" outputId="3089e379-80c2-4005-afae-a4b0d443842d" normal = dist.Normal(torch.tensor([1.,2.,3.]),1.) 
normal # + colab={"base_uri": "https://localhost:8080/"} id="1cRRDgTbL-0r" outputId="74c26db3-fff8-4a2f-b403-4f6958af1af2" normal.sample() # + colab={"base_uri": "https://localhost:8080/"} id="hji5mTnhL-0r" outputId="ebd68b3d-49d5-4ea4-d793-3676b50f8a6f" normal.log_prob(normal.sample()) # + [markdown] id="drChKHsWJ--U" # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # + [markdown] id="T7drVBrtzNgC" # ### **<span style="color:red">Exercise: Open the notebook and play around</span>** # # * Test that everything works. # * Play a bit with the code in Section 1 of the notebook. # # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # + [markdown] id="WV-iBl4xL-0r" # # 2. **Pyro’s models** (http://pyro.ai/examples/intro_part_i.html) : # # --- # * In Pyro, a probabilistic model is defined as a **stochastic function** (i.e. every time it is run, it returns a new sample). # # * Each random variable is associated with a **primitive stochastic function** using the construct ``pyro.sample(...)``. # --- # # # ### 2.1 A Temperature Model # # # # As initial running example, we consider the problem of **modelling the temperature**. We first start with a simple model where temperture is modeled using a random Normal variable. # + colab={"base_uri": "https://localhost:8080/"} id="kcU2EmBYL-0s" outputId="6a7c377c-37ea-4c75-862f-619ca5928619" # the function `model` is a stochastic function (its returned value is non-deterministic) # `temp` is a primitive stochastic function (uses pyro.sample) def model(): # `temp` is a tensor whose value is a random sample # from a Normal(15.0, 2.0) distribution temp = pyro.sample('temp', dist.Normal(15.0, 2.0)) return temp print(model()) print(model()) # + [markdown] id="N0pVeOVpL-0s" # See how the model is a stochastic function which **returns a different value everytime it is invoked**. 
# # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # + [markdown] id="Z0biYXEVL-0s" # ### 2.2 A Temperature-Sensor Model # + [markdown] id="Jp6wefVaLCnx" # --- # * In Pyro, a stochastic method is defined as a **composition of primitive stochastic functions**. # * The temperature Model: we consider the presence of a **temperature sensor**. # * The temperature sensor gives **noisy observations** about the real temperature. # * The **error** of the sensor's measurements **is known**. # * A graphical representation of this model: # # <center> # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/PGM-Tem-Sensor.png?raw=1" alt="Drawing" width="150"> # </center> # --- # # + colab={"base_uri": "https://localhost:8080/"} id="VpxheEx-L-0t" outputId="f02dab39-d93d-4fe7-ca69-7d18069f7a72" # Again `model` is a stochastic function # `temp` a primitive stochastic function # `sensor` is a stochastic method (composition of primitive stochastic functions) def model(): temp = pyro.sample('temp', dist.Normal(15.0, 2.0)) sensor = pyro.sample('sensor', dist.Normal(temp, 1.0)) return (temp, sensor) out1 = model() out1 # + [markdown] id="XSXTeNtqL-0t" # --- # * The above method defines a joint probability distribution: # $$p(sensor, temp) = p(sensor|temp)p(temp)$$ # # # # # * In this case, we have a simple dependency between the variables. But, as we are in a PPL, dependencies can be expressed in terms of complex deterministic functions (more examples later). # # --- # # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # + [markdown] id="CCayVyTsL-0t" # # 3. 
# **Pyro’s inference** (http://pyro.ai/examples/intro_part_ii.html) :

# + [markdown] id="QyC3v0cyL-0u"
# ### Auxiliary inference functions (more details on Day 3)
#
# To make inference on Pyro, we will use a **variational inference** method,
# which performs gradient-based optimization to solve the inference problem.
# More details will be given on Day 3.

# + id="arZOTC7gL-0u"
from torch.distributions import constraints
from pyro.optim import SGD
from pyro.infer import Trace_ELBO
import matplotlib.pyplot as plt
from pyro.contrib.autoguide import AutoDiagonalNormal


# Notice that there's no return statement
def svi(temperature_model, guide, obs, num_steps=5000, plot=False):
    """Fit `guide` to `temperature_model` with stochastic variational inference.

    Parameters
    ----------
    temperature_model : callable
        Pyro model taking `obs` as its single argument.
    guide : callable
        Pyro guide with the same signature as the model.
    obs : dict
        Observations forwarded to model and guide on every SVI step.
    num_steps : int
        Number of gradient steps to run (default 5000).
    plot : bool
        If True, plot the ELBO loss trace after optimization.

    Returns nothing: the learned parameters are left in Pyro's global
    parameter store (query them afterwards with ``pyro.param``).
    """
    pyro.clear_param_store()
    # `engine` was previously named `svi`, shadowing this function's own name.
    engine = pyro.infer.SVI(model=temperature_model,
                            guide=guide,
                            optim=SGD({"lr": 0.001, "momentum": 0.1}),
                            loss=Trace_ELBO())
    losses = []  # the never-used accumulators `a` and `b` were removed
    for t in range(num_steps):
        losses.append(engine.step(obs))
        if t % 250 == 0:
            print('Step: ' + str(t) + '. Loss: ' + str(losses[-1]))
    if plot:
        plt.plot(losses)
        plt.title("ELBO")
        plt.xlabel("step")
        plt.ylabel("loss")
        plt.show()


# + [markdown] id="ovLJGQtcPlBt"
# ---
# * To make inference in Pyro over a given model we need to define a **guide**;
#   this **guide** has the **same signature** as its counterpart model.
#
# * The guide must provide **samples for those variables of the model which are
#   not observed**, using again the ``pyro.sample`` construct.
#
# * Guides are also parametrized using Pyro's parameters (``pyro.param``), so
#   the variational inference algorithm will optimize these parameters.
#
# * All of that will be explained in detail on Day 3.
# ---

# + id="ubF0fUp8PlB-"
#The guide (NOTICE THAT THERE'S NO RETURN STATEMENT)
def guide(obs):
    """Variational guide: a learnable Normal posterior over the latent `temp`.

    Has the same signature as the model; registers learnable parameters
    `mean` and `scale` and samples the unobserved site 'temp'.
    """
    a = pyro.param("mean", torch.tensor(0.0))
    b = pyro.param("scale", torch.tensor(1.), constraint=constraints.positive)
    # This represents a parametrized version of the posterior distribution for
    # `temp`; `a` and `b` are learnable parameters.  The assignment to `temp`
    # is unused on purpose: the `pyro.sample` call itself registers the site.
    temp = pyro.sample('temp', dist.Normal(a, b))


# + [markdown] id="FhjFntF7Ts40"
# <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20>

# + [markdown] id="mkkHJOFBL-0u"
# <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20>
#
# ### 3.1 Conditioning on a single observation
#
# Now, we continue with the last model defined in section 2.2, and assume we
# have a sensor reading and we want to compute the **posterior distribution**
# over the real temperature.
# <center>
# <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/PGM-Tem-Sensor.png?raw=1" alt="Drawing" width="150">
# </center>
#
# ---
# * This can be achieved by introducing **observations in the random variable**
#   with the keyword ``obs=``.
#
# ---

# + id="TSzNRm7wL-0v"
#The observations
obs = {'sensor': torch.tensor(18.0)}


# Notice that `model` and `guide` have the same signature specified in the
# above paragraph.  `model` has no return statement.
def model(obs):
    """Joint model p(sensor, temp), with the 'sensor' site conditioned on data."""
    # For `temp`, notice the match of the name in the guide and the model.
    # Here `temp` represents a realization from the prior distribution of
    # the temperature.
    temp = pyro.sample('temp', dist.Normal(15.0, 2.0))
    sensor = pyro.sample('sensor', dist.Normal(temp, 1.0), obs=obs['sensor'])
# # * We can query the **posterior probability distribution**: # # # $$p(temp | sensor=18)=\frac{p(sensor=18|temp)p(temp)}{\int p(sensor=18|temp)p(temp) dtemp}$$ # # --- # + colab={"base_uri": "https://localhost:8080/", "height": 717} id="VObMyZQ_L-0w" outputId="bb273c4a-01e5-4e35-ed9f-7a8ae9b76a90" #Run inference svi(model, guide, obs, plot=True) #Print results print("P(Temperature|Sensor=18.0) = ") print(dist.Normal(pyro.param("mean").item(), pyro.param("scale").item())) print("") # + [markdown] id="ZglqMEmSL-0w" # --- # * Inference is an **optimization procedure**. # # * The **ELBO function is minimized** during the variational inference process. # # --- # # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # + [markdown] id="WAbW6UoTL-0x" # ### 3.2 Learning from a bunch of observations # # --- # * Let us assume we have a **set of observations** about the temperature at different time steps. # # * In this case, and following a probabilistic modelling approach, we define a **set of random variables**. # # * One random variable for each **observation**, using a standard ``for-loop``. # # --- # + id="w2bPy-D4L-0x" #The observatons obs = {'sensor': torch.tensor([18., 18.7, 19.2, 17.8, 20.3, 22.4, 20.3, 21.2, 19.5, 20.1])} def model(obs): for i in range(obs['sensor'].shape[0]): temp = pyro.sample(f'temp_{i}', dist.Normal(15.0, 2.0)) sensor = pyro.sample(f'sensor_{i}', dist.Normal(temp, 1.0), obs=obs['sensor'][i]) # + [markdown] id="fjcPTM1PL-0x" # # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # --- # # * What if we do **not know the mean temperature**. 
# # * We can **infer it from the data** by, e.g., using a **maximum likelihood** approach, # # $$ \mu_{t} = \arg\max_\mu \ln p(s_1,\ldots,s_n|\mu) = \arg\max_\mu \prod_i \int_{t_i} p(s_i|t_i)p(t_i|\mu) dt_i $$ # where $s_i$ and $t_i$ denote the sensor reading and the real temperature at time $i$. # # * The graphical model: # <center> # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/PGM-Tem_sensor4.png?raw=1" alt="Drawing" width="150"> # </center> # * With PPLs, we do not have to care about the **underlying inference problem** We just define the model and let the **PPL's engine** make the work for us. # # * We use Pyro's parameters (defined as ``pyro.param``), which are free variables we can optimize. # # # # --- # + id="yXEFGMkzL-0x" #The observatons obs = {'sensor': torch.tensor([18., 18.7, 19.2, 17.8, 20.3, 22.4, 20.3, 21.2, 19.5, 20.1])} def model(obs): # `mean_temp` is learnable mean_temp = pyro.param('mean_temp', torch.tensor(15.0)) for i in range(obs['sensor'].shape[0]): temp = pyro.sample(f'temp_{i}', dist.Normal(mean_temp, 2.0)) sensor = pyro.sample(f'sensor_{i}', dist.Normal(temp, 1.0), obs = obs['sensor'][i]) # + cellView="form" id="9zTG-8UVL-0x" #@title #Define the guide def guide(obs): for i in range(obs['sensor'].shape[0]): mean_i = pyro.param(f'mean_{i}', obs['sensor'][i]) scale_i = pyro.param(f'scale_{i}', torch.tensor(1.), constraint = constraints.positive) temp = pyro.sample(f'temp_{i}', dist.Normal(mean_i, scale_i)) # + colab={"base_uri": "https://localhost:8080/"} id="GEfOM8zWL-0y" outputId="9a530a66-b954-40aa-aa75-a71ea565ce26" #@title #Run inference svi(model, guide, obs, num_steps=5000, plot = True) # - #Print results print("Estimated Mean Temperature (Maximum Likelihood Estimate)") mean_temp_ml = pyro.param("mean_temp").item() print(mean_temp_ml) # + # To get the name of all the parameters we can use the parameter store param_store = pyro.get_param_store() #Basically works as a dictionary 
print(param_store.get_all_param_names()) print("-" * 10) print(param_store.get_param("scale_0")) # Easier print("-" * 10) print(pyro.param("scale_0")) # + [markdown] id="tiCwbfJwL-0y" # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # --- # * Instead of performing *maximum likelihood* learning, we can perform **Bayesian learning**. # # * We treat the unknown quantity as a **random variable**. # # * This model can be graphically represented as follows: # # <center> # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/PGM-Tem-Sensor2.png?raw=1" alt="Drawing" width="150"> # </center> # --- # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # --- # + id="CaTdLm6ML-0y" #The observatons obs = {'sensor': torch.tensor([18., 18.7, 19.2, 17.8, 20.3, 22.4, 20.3, 21.2, 19.5, 20.1])} def model(obs): mean_temp = pyro.sample('mean_temp', dist.Normal(15.0, 2.0)) for i in range(obs['sensor'].shape[0]): temp = pyro.sample(f'temp_{i}', dist.Normal(mean_temp, 2.0)) sensor = pyro.sample(f'sensor_{i}', dist.Normal(temp, 1.0), obs = obs['sensor'][i]) # + [markdown] id="jJfVNZFBL-0y" # --- # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # --- # * We perform inference over this model: # # $$ p(\mu_t | s_1,\ldots, s_n)=\frac{p(\mu_t)\prod_{i=1}^n \int p(s_i|t_i)p(t_i|\mu_t)dt_i }{\int \prod_{i=1}^n p(s_i|\mu_t)p(\mu_t) d\mu} $$ # --- # # + cellView="form" id="0MKYgw54L-0z" #@title #Define the guide def guide(obs): mean = pyro.param("mean", torch.mean(obs['sensor'])) scale = pyro.param("scale", torch.tensor(1.), constraint = constraints.positive) mean_temp = pyro.sample('mean_temp', dist.Normal(mean, scale)) for i in range(obs['sensor'].shape[0]): mean_i = pyro.param(f'mean_{i}', obs['sensor'][i]) scale_i = 
pyro.param(f'scale_{i}', torch.tensor(1.), constraint = constraints.positive) temp = pyro.sample(f'temp_{i}', dist.Normal(mean_i, scale_i)) # + colab={"base_uri": "https://localhost:8080/"} id="W0YiqM36L-0z" outputId="add3aa7f-0fc6-479f-9e1c-16adfe4465c7" import time #Run inference start = time.time() svi(model, guide, obs, num_steps=5000, plot = True) #Print results print("P(mean_temp|Sensor=[18., 18.7, 19.2, 17.8, 20.3, 22.4, 20.3, 21.2, 19.5, 20.1]) =") print(dist.Normal(pyro.param("mean").item(), pyro.param("scale").item())) print("") end = time.time() print(f"{(end - start)} seconds") # + [markdown] id="YFKoGLcnL-0z" # --- # * The result of the learning is **not a point estimate**. # # * We have a **posterior distribution** which captures **uncertainty** about the estimation. # # --- # + colab={"base_uri": "https://localhost:8080/", "height": 265} id="ES_yHXNKL-0z" outputId="94fa44f1-20a8-42c5-be5b-62e02f715194" mu = pyro.param("mean").item() scale = pyro.param("scale").item() x = np.linspace(mu - 3*scale, mu + 3*scale, 100) plt.plot(x, stats.norm.pdf(x, mu, scale), label='Posterior') point = mean_temp_ml plt.plot([point, point],[0., 1.], label='Point Estimate (MLE)') plt.legend() plt.show() # + [markdown] id="1SXUdMtUL-00" # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # # ### 3.3 The use of ``plate`` construct # # --- # # * Pyro can exploit **conditional independencies and vectorization** to make inference much faster. # # * This can be done with the construct **``plate``**. # # * With this construct, we can indicate that the variables $s_i$ and $t_i$ are **conditionally indepedendent** from another variables $s_j$ and $t_j$ given $\mu_t$. 
# # <center> # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/PGM-Tem-Sensor2.png?raw=1" alt="Drawing" width="150"> # </center> # --- # # # + id="4LlQKqp4L-00" #The observatons obs = {'sensor': torch.tensor([18., 18.7, 19.2, 17.8, 20.3, 22.4, 20.3, 21.2, 19.5, 20.1])} def model(obs): mean_temp = pyro.sample('mean_temp', dist.Normal(15.0, 2.0)) # plate construct with pyro.plate('a', obs['sensor'].shape[0]): temp = pyro.sample('temp', dist.Normal(mean_temp, 2.0)) sensor = pyro.sample('sensor', dist.Normal(temp, 1.0), obs=obs['sensor']) # + [markdown] id="yAh8vLjLL-00" # --- # * The ``plate`` construct reflects the standard notational use in graphical models denoting the **repetition of some parts of of the graph**. # <center> # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/PGM-Tem-Sensor3.png?raw=1" alt="Drawing" width="250"> # </center> # * We can here make a distinction between **local** and **global** random variables: # # >* **Local random variables** caputure **specific information** about the $i$-th data sample (i.e. the real temperature at this moment in time). # # >* **Global random variables** capture **common information** about all the data samples (i.e. the average temperature of all data samples). # # --- # # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # # Observe how inference in this model is much **faster**. 
# + cellView="form" id="vNC2WW9tL-00" #@title #Define the guide def guide(obs_sensor): mean = pyro.param("mean", torch.mean(obs['sensor'])) scale = pyro.param("scale", torch.tensor(1.), constraint = constraints.positive) mean_temp = pyro.sample('mean_temp', dist.Normal(mean, scale)) # Notice here the part `as i` with pyro.plate('a', obs['sensor'].shape[0]) as i: mean_i = pyro.param('mean_i', obs['sensor'][i]) scale_i = pyro.param('scale_i', torch.tensor(1.), constraint = constraints.positive) temp = pyro.sample('temp', dist.Normal(mean_i, scale_i)) # + colab={"base_uri": "https://localhost:8080/"} id="ASYtP3j0L-01" outputId="663c4e44-f3be-47c4-ccdb-8db9a2384e79" #Run inference start = time.time() svi(model, guide, obs, num_steps=1000) #Print results print("P(mean_temp|Sensor=[18., 18.7, 19.2, 17.8, 20.3, 22.4, 20.3, 21.2, 19.5, 20.1]) =") print(dist.Normal(pyro.param("mean").item(), pyro.param("scale").item())) print("") end = time.time() print(f"{(end - start)} seconds") # + [markdown] id="dUDwSWFsL-01" # # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # # ### **<span style="color:red">Exercise 1: </span>The role of *prior distributions* in learning** # # In this case we just want to llustrate how the output of learning depends of the particular **prior** we introduce in the model. Play with different options and extract conclusions: # # 1. What happens if we change the mean of the prior? # 2. What happens if we change the scale of the prior? # 3. What happens to the posterior if the number of data samples deacreases and increases? 
# + colab={"base_uri": "https://localhost:8080/", "height": 367} id="5OWLg5tvL-01" outputId="c6e80f76-72af-41ec-a581-de6c2421fb59" #The observatons sample_size = 500 obs = {'sensor': torch.tensor(np.random.normal(18, 2, sample_size))} def model(obs): # Mean of the prior mean_temp = pyro.sample('mean_temp', dist.Normal(15.0, 5.0)) with pyro.plate('a', obs['sensor'].shape[0]): #Prior temp = pyro.sample('temp', dist.Normal(mean_temp, 2.0)) sensor = pyro.sample('sensor', dist.Normal(temp, 1.0), obs=obs['sensor']) #Run inference svi(model, guide, obs, num_steps=1000) #Print results print("P(Temperature|Sensor=18.0) = ") print(dist.Normal(pyro.param("mean").item(), pyro.param("scale").item())) x = np.linspace(16, 20, 100) plt.plot(x, stats.norm.pdf(x, pyro.param("mean").item(), pyro.param("scale").item()), label='Posterior') point = 18 plt.plot([point, point],[0., 1.], label='Point Estimate') plt.xlim(16,20) plt.legend() plt.show() # + [markdown] id="uf7kVC-hL-03" # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # # # **4. Icecream Shop** # # # * We have an ice-cream shop and we **record the ice-cream sales and the average temperature of the day** (using a temperature sensor). # # * We know **temperature affects the sales** of ice-creams. # # * We want to **precisely model** how temperature affects ice-cream sales. # # <center> # <img src="https://github.com/PGM-Lab/probai-2021-pyro/raw/main/Day1/Figures/Ice-cream_shop_-_Florida.jpg" alt="Drawing" width=300 > # </center> # # # + [markdown] id="uv2fWAHxDRxO" # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # # --- # * We have **observations** from temperature and sales. # # * Sales are modeled with a **Poisson** distribution: # # >- The rate of the Poisson **linearly depends of the real temperature**. 
# ---
#
# Next figure provides a graphical and a probabilistic description of the model:
# <center>
# <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/Ice-Cream-Shop-Model.png?raw=1" alt="Drawing" width=700>
# </center>

# + id="HR8bu27OL-03"
#The observations
obs = {'sensor': torch.tensor([18., 18.7, 19.2, 17.8, 20.3, 22.4, 20.3, 21.2, 19.5, 20.1]),
       'sales': torch.tensor([46., 47., 49., 44., 50., 54., 51., 52., 49., 53.])}


def model(obs):
    """Ice-cream shop model: sales ~ Poisson(alpha + beta * temp)."""
    # Global random variables
    mean_temp = pyro.sample('mean_temp', dist.Normal(15.0, 2.0))
    alpha = pyro.sample('alpha', dist.Normal(0.0, 100.0))
    beta = pyro.sample('beta', dist.Normal(0.0, 100.0))
    # Local random variables
    with pyro.plate('a', obs['sensor'].shape[0]):
        temp = pyro.sample('temp', dist.Normal(mean_temp, 2.0))
        sensor = pyro.sample('sensor', dist.Normal(temp, 1.0), obs=obs['sensor'])
        # `torch.max` function is used to avoid nearly zero rates
        rate = torch.max(torch.tensor(0.001), alpha + beta * temp)
        sales = pyro.sample('sales', dist.Poisson(rate), obs=obs['sales'])


# + [markdown] id="gT34C5CpDmPZ"
# <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20>

# + cellView="form" id="nkOSF-WlL-03"
#@title
#Define the guide
def guide(obs):
    """Mean-field guide: an independent Normal posterior for every latent site."""
    # `mean` and `scale` are learnable parameters to parametrize the posterior for `mean_temp`
    mean = pyro.param("mean", torch.mean(obs['sensor']))
    scale = pyro.param("scale", torch.tensor(1.), constraint=constraints.positive)
    mean_temp = pyro.sample('mean_temp', dist.Normal(mean, scale))
    # `alpha_mean` and `alpha_scale` are learnable parameters to parametrize the posterior for `alpha`
    alpha_mean = pyro.param("alpha_mean", torch.mean(obs['sensor']))
    alpha_scale = pyro.param("alpha_scale", torch.tensor(1.), constraint=constraints.positive)
    alpha = pyro.sample('alpha', dist.Normal(alpha_mean, alpha_scale))
    # `beta_mean` and `beta_scale` are learnable parameters to parametrize the posterior for `beta`
    beta_mean = pyro.param("beta_mean", torch.tensor(1.0))
    beta_scale = pyro.param("beta_scale", torch.tensor(1.), constraint=constraints.positive)
    beta = pyro.sample('beta', dist.Normal(beta_mean, beta_scale))
    with pyro.plate('a', obs['sensor'].shape[0]) as i:
        mean_i = pyro.param('mean_i', obs['sensor'][i])
        scale_i = pyro.param('scale_i', torch.tensor(1.), constraint=constraints.positive)
        temp = pyro.sample('temp', dist.Normal(mean_i, scale_i))


# + [markdown] id="6jqw09FND19b"
# ---
# * We run the **(variational) inference engine** and get the results.
#
# * With PPLs, we only care about modeling, **not about the low-level details**
#   of the machine-learning solver.
#
# ---

# + colab={"base_uri": "https://localhost:8080/"} id="gmaOTBYAL-03" outputId="7a6bd327-e423-4a9b-8cc0-96fa95cdaf9c"
#Run inference
svi(model, guide, obs, num_steps=1000)

#Print results
print("Posterior temperature mean")
print(dist.Normal(pyro.param("mean").item(), pyro.param("scale").item()))
print("")
print("Posterior alpha")
print(dist.Normal(pyro.param("alpha_mean").item(), pyro.param("alpha_scale").item()))
print("")
print("Posterior beta")  # fixed: previously printed the typo "Posterior aeta"
print(dist.Normal(pyro.param("beta_mean").item(), pyro.param("beta_scale").item()))

# + [markdown] id="4ulfx96AL-04"
# <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20>
#
#
# ### <span style="color:red">Exercise 2: Introduce Humidity in the Icecream shop model </span>
# ---
# * Assume we also have a bunch of **humidity sensor measurements**.
# * Assume the **sales are also linearly influenced by the humidity**.
# * **Extend the above model** in order to integrate all of that.
# --- # # Next figure provides a graphical and a probabilistic description of the model: # <center> # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/Ice-Cream-Shop-Model-Humidity.png?raw=1" alt="Drawing" width=700> # </center> # # # # + id="8UxOUxuTL-04" #The observatons obs = {'sensor': torch.tensor([18., 18.7, 19.2, 17.8, 20.3, 22.4, 20.3, 21.2, 19.5, 20.1]), 'sales': torch.tensor([46., 47., 49., 44., 50., 54., 51., 52., 49., 53.]), 'sensor_humidity': torch.tensor([82.8, 87.6, 69.1, 74.2, 80.3, 94.2, 91.2, 92.2, 99.1, 93.2])} def model(obs): mean_temp = pyro.sample('mean_temp', dist.Normal(15.0, 2.0)) ## Introduce a random variable "mean_humidity" mean_humidity = pyro.sample("mean_humidity", dist.Normal(80.0, 5.0)) alpha = pyro.sample('alpha', dist.Normal(0.0, 100.0)) beta = pyro.sample('beta', dist.Normal(0.0, 100.0)) ## Introduce a coefficient for the humidity "gamma" gamma = pyro.sample("gamma", dist.Normal(0.0, 100.0)) with pyro.plate('a', obs['sensor'].shape[0]): temp = pyro.sample('temp', dist.Normal(mean_temp, 2.0)) sensor = pyro.sample('sensor', dist.Normal(temp, 1.0), obs=obs['sensor']) #Add the 'humidity' variable and the 'sensor_humidity' variable humidity = pyro.sample("humidity", dist.Normal(mean_humidity, 2.0)) sensor_humidity = pyro.sample("sensor_humidity", dist.Normal(humidity, 1.0), obs = obs["sensor_humidity"]) #Add the linear dependency for the rate with respect to temp and humidity (keep torch.max to avoid numerical stability issues) rate = torch.max(torch.tensor(0.001), alpha + beta * temp + gamma * humidity) sales = pyro.sample('sales', dist.Poisson(rate), obs=obs['sales']) # + [markdown] id="wC7ceQTkHrrg" # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # --- # * We run the **(variational) inference engine** and get the results. 
#
# * With PPLs, we only care about modeling, **not about the low-level details** of the machine-learning solver.
#
# ---

# + cellView="form" id="zyR9EXz2L-04"
#@title
# Auxiliary Guide Code
def guide(obs):
    """Mean-field guide for the humidity-extended model.

    NOTE (fix): `alpha_mean`, `beta_mean` and `gamma_mean` are *location*
    parameters of Normal posteriors whose priors are Normal(0, 100); they
    must remain unconstrained (as in the Exercise-1 guide).  The previous
    `constraint=constraints.positive` on them prevented the posterior means
    from becoming negative.
    """
    # Posterior for `mean_temp`
    mean = pyro.param("mean", torch.mean(obs['sensor']))
    scale = pyro.param("scale", torch.tensor(1.), constraint=constraints.positive)
    mean_temp = pyro.sample('mean_temp', dist.Normal(mean, scale))

    # Posterior for `mean_humidity`
    meanH = pyro.param("meanH", torch.mean(obs['sensor_humidity']))
    scaleH = pyro.param("scaleH", torch.tensor(1.), constraint=constraints.positive)
    mean_humidity = pyro.sample('mean_humidity', dist.Normal(meanH, scaleH))

    # Posterior for `alpha` (unconstrained location)
    alpha_mean = pyro.param("alpha_mean", torch.mean(obs['sensor']))
    alpha_scale = pyro.param("alpha_scale", torch.tensor(1.), constraint=constraints.positive)
    alpha = pyro.sample('alpha', dist.Normal(alpha_mean, alpha_scale))

    # Posterior for `beta` (unconstrained location)
    beta_mean = pyro.param("beta_mean", torch.tensor(1.0))
    beta_scale = pyro.param("beta_scale", torch.tensor(1.), constraint=constraints.positive)
    beta = pyro.sample('beta', dist.Normal(beta_mean, beta_scale))

    # Posterior for `gamma` (unconstrained location)
    gamma_mean = pyro.param("gamma_mean", torch.tensor(1.0))
    gamma_scale = pyro.param("gamma_scale", torch.tensor(1.), constraint=constraints.positive)
    gamma = pyro.sample('gamma', dist.Normal(gamma_mean, gamma_scale))

    with pyro.plate('a', obs['sensor'].shape[0]) as i:
        # Posterior for `temp` (advanced indexing by the index tensor `i` copies)
        mean_i = pyro.param('mean_i', obs['sensor'][i])
        scale_i = pyro.param('scale_i', torch.tensor(1.), constraint=constraints.positive)
        temp = pyro.sample('temp', dist.Normal(mean_i, scale_i))

        # Posterior for `humidity`
        meanH_i = pyro.param('meanH_i', obs['sensor_humidity'][i])
        scaleH_i = pyro.param('scaleH_i', torch.tensor(1.), constraint=constraints.positive)
        humidity = pyro.sample('humidity', dist.Normal(meanH_i, scaleH_i))


# + colab={"base_uri": "https://localhost:8080/"} id="NvIXRoVeL-05" outputId="d8287eee-ad25-4cf7-dcf8-ef7917a920ab"
# Run inference
svi(model, guide, obs, num_steps=1000)

# Print results
print("Posterior Temperature Mean")
print(dist.Normal(pyro.param("mean").item(), pyro.param("scale").item()))
print("")
print("Posterior Humidity Mean")
print(dist.Normal(pyro.param("meanH").item(), pyro.param("scaleH").item()))
print("")
print("Posterior Alpha")
print(dist.Normal(pyro.param("alpha_mean").item(), pyro.param("alpha_scale").item()))
print("")
print("Posterior Beta")
print(dist.Normal(pyro.param("beta_mean").item(), pyro.param("beta_scale").item()))
print("")
print("Posterior Gamma")
print(dist.Normal(pyro.param("gamma_mean").item(), pyro.param("gamma_scale").item()))

# + [markdown] id="NCld6GJGL-05"
# <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20>
#
# # 5. **Temporal Models**
#
# If we think there is a temporal dependency between the variables, we can easily encode that with PPLs.
#
# ---
# * Let us assume that there is a **temporal dependency** between the variables.
#
# * E.g. the current **real temperature must be similar to the real temperature in the previous time step**.
#
# * This temporal dependency can **be modeled** using a **for-loop** in Pyro
#
# * Consider the **graphical representation**.
# ---
#
#
# <img src="https://github.com/PGM-Lab/probai-2021-pyro/raw/main/Day1/Figures/tempmodel-temporal-III.png" alt="Drawing" style="width: 350px;" >
#

# + id="dSzU0cWeL-05"
# The observations
obs = {'sensor': torch.tensor([18., 18.7, 19.2, 17.8, 20.3, 22.4, 20.3, 21.2, 19.5, 20.1])}


# Even when a random variable `theta` appears in the picture
# in this model is not considered
def model(obs):
    """Temporal model: each day's latent temperature is centred on the
    previous day's latent temperature (a first-order Markov chain)."""
    mean_temp = pyro.sample('mean_temp', dist.Normal(15.0, 2.0))
    for i in range(obs['sensor'].shape[0]):
        if i == 0:
            # First step: no previous temperature, condition on the global mean.
            temp = pyro.sample(f'temp_{i}', dist.Normal(mean_temp, 2.0))
        else:
            temp = pyro.sample(f'temp_{i}', dist.Normal(prev_temp, 2.0))
        sensor = pyro.sample(f'sensor_{i}', dist.Normal(temp, 1.0), obs=obs['sensor'][i])
        prev_temp = temp


# + [markdown] id="EAnXvUqpJCm1"
# <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20>
#
# ---
# * We run the **(variational) inference engine** and get the results.
#
# * With PPLs, we only care about modeling, **not about the low-level details** of the machine-learning solver.
#
# ---

# + cellView="form" id="7XaW4rKXL-05"
#@title
# Define the guide
def guide(obs):
    """Guide for the temporal model: one Normal posterior per time step."""
    # Posterior for `mean_temp`
    mean = pyro.param("mean", torch.mean(obs['sensor']))
    scale = pyro.param("scale", torch.tensor(1.), constraint=constraints.positive)
    mean_temp = pyro.sample('mean_temp', dist.Normal(mean, scale))

    for i in range(obs['sensor'].shape[0]):
        # BUG FIX: `obs['sensor'][i]` with an integer index returns a 0-dim
        # *view* sharing storage with `obs['sensor']`.  `pyro.param` keeps that
        # tensor, so the optimizer's in-place updates used to overwrite the
        # observations themselves (the "HUGE WARNING" below).  `.clone()`
        # gives the parameter its own storage.
        mean_i = pyro.param(f'mean_{i}', obs['sensor'][i].clone())
        scale_i = pyro.param(f'scale_{i}', torch.tensor(1.), constraint=constraints.positive)
        temp = pyro.sample(f'temp_{i}', dist.Normal(mean_i, scale_i))


# + colab={"base_uri": "https://localhost:8080/"} id="XWBFc6w3L-06" outputId="95a134d4-da55-4aa6-9cab-3e17a03f8ad5"
import time

# Run inference
svi(model, guide, obs, num_steps=2000)

smooth_temp = []
for i in range(obs['sensor'].shape[0]):
    smooth_temp.append(pyro.param(f'mean_{i}').item())

print('Finished')
# -

# # A HUGE WARNING
# dictionary `obs` used to be modified after inference: the per-step
# `pyro.param` initial values aliased `obs['sensor']`'s storage.
# Fixed above by cloning the initial values in the guide.

# AND NOW LET'S PRINT THE DICTIONARY `obs` defined above
print(obs)
print('With `.clone()` in the guide, `obs` is no longer modified by inference')

# + [markdown] id="OJHqOj3VL-06"
# ---
# * Plot the **observed measurements** of the temperature **against** the inferred **real temperature**.
#
# * By querying the **local hidden** we can **smooth** the temperature.
#
# * The **recovered temperature** is much less noisy than the measured one.
# --- # # + colab={"base_uri": "https://localhost:8080/", "height": 283} id="cesOfH6UL-06" outputId="6c29c1dc-7b66-4476-f572-fcab827cb85d" # HARD-CODED DUE TO THE MODIFICATION OF `obs` AFTER INFERENCE plt.plot([18., 18.7, 19.2, 17.8, 20.3, 22.4, 20.3, 21.2, 19.5, 20.1], label = 'Sensor Temp (Observed)') plt.plot(smooth_temp, label = 'Smooth Temp (Inferred)') plt.legend() # + [markdown] id="yRDQWC-vL-06" # <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20> # # ### <span style="color:red">Exercise 3: Temporal Extension of the Iceacream shop model </span> # # --- # * **Extends** Excersise 2. # * Assume temperature depends of the **temperature in the previous day**. # * Assume humidity depends of the **humidity in the previous day**. # * Assume sales depends on the **current temperature and humidity**. # * Use the following **graphical representation for reference**. # * Consider here that the plate representation has to be coded in Pyro using a **``for-loop``**. 
#
# ---
#
# <img src="https://github.com/PGM-Lab/probai-2021-pyro/raw/main/Day1/Figures/icecream-model-temporal.png" alt="Drawing" width=500 >
#
#

# + id="GevI9bcjL-07"
# The observations
obs = {'sensor': torch.tensor([18., 18.7, 19.2, 17.8, 20.3, 22.4, 20.3, 21.2, 19.5, 20.1]),
       'sales': torch.tensor([46., 47., 49., 44., 50., 54., 51., 52., 49., 53.]),
       'sensor_humidity': torch.tensor([82.8, 87.6, 69.1, 74.2, 80.3, 94.2, 91.2, 92.2, 99.1, 93.2])}


def model(obs):
    """Temporal ice-cream model (Exercise 3): temperature and humidity each
    follow a first-order Markov chain; sales depend on both at each step."""
    mean_temp = pyro.sample('mean_temp', dist.Normal(15.0, 2.0))
    ## Introduce a random variable "mean_humidity"
    mean_humidity = pyro.sample('mean_humidity', dist.Normal(80.0, 5.0))
    alpha = pyro.sample('alpha', dist.Normal(0.0, 100.0))
    beta = pyro.sample('beta', dist.Normal(0.0, 100.0))
    ## Introduce a coefficient for the humidity "gamma"
    gamma = pyro.sample('gamma', dist.Normal(0.0, 100.0))

    for i in range(obs['sensor'].shape[0]):
        if i == 0:
            temp = pyro.sample(f'temp_{i}', dist.Normal(mean_temp, 2.0))
            # Introduce the 'humidity' variable at time 0.
            humidity = pyro.sample(f'humidity_{i}', dist.Normal(mean_humidity, 2.0))
        else:
            temp = pyro.sample(f'temp_{i}', dist.Normal(prev_temp, 2.0))
            # Introduce the f'humidity_{i}' variable defining the transition
            humidity = pyro.sample(f'humidity_{i}', dist.Normal(prev_humidity, 2.0))

        sensor = pyro.sample(f'sensor_{i}', dist.Normal(temp, 1.0), obs=obs['sensor'][i])
        # Introduce the f'sensor_humidity_{i}' variable.
        sensor_humidity = pyro.sample(f'sensor_humidity_{i}', dist.Normal(humidity, 1.0),
                                      obs=obs['sensor_humidity'][i])

        # Add the linear dependency for the rate with respect to temp and humidity
        # (keep torch.max to avoid numerical stability issues)
        rate = torch.max(torch.tensor(0.01), alpha + beta * temp + gamma * humidity)
        sales = pyro.sample(f'sales_{i}', dist.Poisson(rate), obs=obs['sales'][i])

        prev_temp = temp
        # Keep humidity for the next time step.
        prev_humidity = humidity


# + [markdown] id="BOo487l4L3L1"
# <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20>
#
# ---
# * We run the **(variational) inference engine** and get the results.
#
# * With PPLs, we only care about modeling, **not about the low-level details** of the machine-learning solver.
#
# ---

# + id="izRFQ0T3L-07"
#@title
# Define the guide
def guide(obs):
    """Guide for the temporal ice-cream model: Normal posteriors for the
    globals and one Normal posterior per step for temperature and humidity."""
    mean = pyro.param("mean", torch.mean(obs['sensor']))
    scale = pyro.param("scale", torch.tensor(1.), constraint=constraints.greater_than(0.01))
    mean_temp = pyro.sample('mean_temp', dist.Normal(mean, scale))

    meanH = pyro.param("meanH", torch.mean(obs['sensor_humidity']), constraint=constraints.positive)
    scaleH = pyro.param("scaleH", torch.tensor(1.), constraint=constraints.greater_than(0.01))
    humidity_mean = pyro.sample('mean_humidity', dist.Normal(meanH, scaleH))

    alpha_mean = pyro.param("alpha_mean", torch.mean(obs['sensor']))
    alpha_scale = pyro.param("alpha_scale", torch.tensor(1.), constraint=constraints.greater_than(0.01))
    alpha = pyro.sample('alpha', dist.Normal(alpha_mean, alpha_scale))

    beta_mean = pyro.param("beta_mean", torch.tensor(0.0))
    beta_scale = pyro.param("beta_scale", torch.tensor(1.), constraint=constraints.greater_than(0.01))
    beta = pyro.sample('beta', dist.Normal(beta_mean, beta_scale))

    gamma_mean = pyro.param("gamma_mean", torch.tensor(0.0))
    gamma_scale = pyro.param("gamma_scale", torch.tensor(1.), constraint=constraints.greater_than(0.01))
    gamma = pyro.sample('gamma', dist.Normal(gamma_mean, gamma_scale))

    for i in range(obs['sensor'].shape[0]):
        # BUG FIX: integer indexing returns a 0-dim *view* sharing storage with
        # the observation tensors; without `.clone()` the optimizer's in-place
        # parameter updates would mutate `obs` itself during inference.
        mean_i = pyro.param(f'mean_{i}', obs['sensor'][i].clone())
        scale_i = pyro.param(f'scale_{i}', torch.tensor(1.), constraint=constraints.greater_than(0.01))
        temp = pyro.sample(f'temp_{i}', dist.Normal(mean_i, scale_i))

        meanH_i = pyro.param(f'meanH_{i}', obs['sensor_humidity'][i].clone())
        scaleH_i = pyro.param(f'scaleH_{i}', torch.tensor(1.),
                              constraint=constraints.greater_than(0.01))
        humidity_i = pyro.sample(f'humidity_{i}', dist.Normal(meanH_i, scaleH_i))


# + colab={"base_uri": "https://localhost:8080/"} id="tMWwy2UNL-07" outputId="ea74d3c1-275a-453f-8397-3830e3158d65"
import time

# Run inference
svi(model, guide, obs, num_steps=2000)

smooth_temp = []
smooth_humidity = []
for i in range(obs['sensor'].shape[0]):
    smooth_temp.append(pyro.param(f'mean_{i}').item())
    smooth_humidity.append(pyro.param(f'meanH_{i}').item())

print('Finished')

# + [markdown] id="LosM2wXBMAFX"
# <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20>
#
# ---
# * We can plot the observered measurements of the temperature against the **inferred real temperature** by our model.
#
# * The **recovered temperature** is much less noisy than the real one.
#
# ---
#

# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="7kr1dbx-L-07" outputId="7e4e5df6-d9da-4a94-a87e-18f4899b1119"
plt.plot([18., 18.7, 19.2, 17.8, 20.3, 22.4, 20.3, 21.2, 19.5, 20.1], label='Sensor Temp')
plt.plot(smooth_temp, label='Smooth Temp')
plt.legend()

# + [markdown] id="QStTTd9nN56M"
# ---
# * We can plot the observered measurements of the humidity against the **inferred real humidity** by our model.
#
# * The **recovered humidity** is much less noisy than the real one.
#
# ---
#

# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="WD7n3v9oL-08" outputId="552faaeb-89bd-4766-e80e-cb330d78e51d"
humidity = torch.tensor([82.8, 87.6, 69.1, 74.2, 80.3, 94.2, 91.2, 92.2, 99.1, 93.2])
plt.plot(humidity.detach().numpy(), label='Sensor Humidity')
plt.plot(smooth_humidity, label='Smooth Humidity')
plt.legend()

# + [markdown] id="858lPsjJOTJd"
# <img src="https://github.com/PGM-Lab/probai-2021-pyro/blob/main/Day1/Figures/blue.png?raw=1" alt="Drawing" width=2000 height=20>
#
Day1/notebooks/students_PPLs_Intro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: epfl_ann # language: python # name: epfl_ann # --- # + [markdown] id="CJzeB-IUSMIZ" # # Miniproject: Landing on the Moon # # ## Introduction # # ### Description # # Traditionally, reinforcement learning has operated on "tabular" state spaces, e.g. "State 1", "State 2", "State 3" etc. However, many important and interesting reinforcement learning problems (like moving robot arms or playing Atari games) are based on either continuous or very high-dimensional state spaces (like robot joint angles or pixels). Deep neural networks constitute one method for learning a value function or policy from continuous and high-dimensional observations. # # In this miniproject, you will teach an agent to play the Lunar Lander game from [OpenAI Gym](https://gym.openai.com/envs/LunarLander-v2/). The agent needs to learn how to land a lunar module safely on the surface of the moon. The state space is 8-dimensional and (mostly) continuous, consisting of the X and Y coordinates, the X and Y velocity, the angle, and the angular velocity of the lander, and two booleans indicating whether the left and right leg of the lander have landed on the moon. # # The agent gets a reward of +100 for landing safely and -100 for crashing. In addition, it receives "shaping" rewards at every step. It receives positive rewards for moving closer to [0,0], decreasing in velocity, shifting to an upright angle and touching the lander legs on the moon. It receives negative rewards for moving away from the landing site, increasing in velocity, turning sideways, taking the lander legs off the moon and for using fuel (firing the thrusters). The best score an agent can achieve in an episode is about +250. 
# # There are two versions of the task: one with discrete controls and one with continuous controls but we'll only work with the discrete version. In the discrete version, the agent can take one of four actions at each time step: [do nothing, fire engines left, fire engines right, fire engines down]. # # We will use Policy Gradient approaches (using the REINFORCE rule) to learn the task. As you remember, in standard supervised learning tasks (e.g. image classification), the network generates a probability distribution over the outputs, and is trained to maximize the probability of a specific target output given an observation (input). In Policy Gradient methods, the network generates a probability distribution over actions, and is trained to maximize expected future rewards given an observation. # # ### Questions # **Question 1**. Suppose that you are designing the environment rewards yourself. Why do you think it is a good idea to have rewards in addition to the +100 reward for safe landing (e.g. for moving closer / further from [0, 0], for touching the lander legs on the moon)? One might say that if we only have a final reward, the agent will still be able to learn how to reach it. What will be the problem here? # # **Answer**: # # # **Question 2**. Now suppose you decide to give the agent a small reward if it moves closer to the landing point but you forget to penalize it when it moves away from it. What kinds of strange behaviour you may observe from the trained agent? # # **Answer**: # # # ### Prerequisites # # - Since you are using Colab, everything is pre-installed, and you just need to run the cells. However, be careful with computational limits: any kernel in an *open* browser window is killed after 12 hours, and after 90 minutes if the window is **closed**. # # - You can work in the same notebook within your team, but make sure to copy (this) notebook provided by TAs into your Google Drive and to use the proper sharing permissions. 
# # - You should know the concepts of "policy", "policy gradient", "REINFORCE", "REINFORCE with baseline". If you want to start and haven't seen these yet in class, read Sutton & Barto (2018) Chapter 13 (13.1-13.4). # # ### What you will learn # # - You will learn how to implement a policy gradient neural network using the REINFORCE algorithm. # - You will learn how to implement baselines, including a learned value network. # - You will learn how to analyze the performance of an RL algorithm. # # ### Notes # - Reinforcement learning is noisy! Normally one should average over multiple random seeds with the same parameters to really see the impact of a change to the model, but we won't do this due to time constraints. However, you should be able to see learning over time with every approach. If you don't see any improvement, or very unstable learning, double-check your model and try adjusting the learning rate. # # - You may sometimes see `AssertionError: IsLocked() = False` after restarting your code. To fix this, reinitialize the environments by running the Gym Setup code below. # # - You will not be marked on the episode movies. Please delete these movies before uploading your code. # # ### Evaluation criteria # # The miniproject is marked out of 18, with a further mark breakdown in each question: # - Exercise 1: 7 points # - Exercise 2: 3 points # - Exercise 3: 3 points # - Exercise 4: 5 points # # We may perform random tests of your code but will not rerun the whole notebook. # + [markdown] id="u0MWTkgBlShb" # ## Dependencies Setup # Please run the following cell to install the required packages. Note that you may be asked to restart the notebook. After restarting and reruning the cells, everything should work. 
# + id="Z2CD0wA7GV0N" #remove " > /dev/null 2>&1" to see what is going on under the hood # !apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1 # !pip3 install box2d-py > /dev/null 2>&1 # !pip3 install gym[Box_2D] pyvirtualdisplay > /dev/null 2>&1 # !apt-get update > /dev/null 2>&1 # !apt-get install cmake > /dev/null 2>&1 # !pip install --upgrade setuptools 2>&1 # !pip install ez_setup > /dev/null 2>&1 # + id="HIJH-Ns0SMIp" language="javascript" # IPython.OutputArea.prototype._should_scroll = function(lines) { # return false; # } # + [markdown] id="JvVREM-USMIr" # ### Your Names # # Before you start, please enter your sciper number(s) in the field below; they are used to load the data. # + id="IrgMhC4bSMIr" sciper = {'student_1': 0, 'student_2': 0} seed = sciper['student_1']+sciper['student_2'] # + [markdown] id="seB6IjU1SMIr" # ## Setup # # ### Dependencies and constants # + id="SCLi0Uz9iDXX" import gym from gym import logger as gymlogger from gym.wrappers import Monitor gymlogger.set_level(40) #error only import numpy as np import random import matplotlib.pyplot as plt # %matplotlib inline import math import glob import logging import io import base64 from IPython import display as ipythondisplay from IPython.display import HTML, clear_output from matplotlib.animation import FuncAnimation from pyvirtualdisplay import Display from gym.envs.box2d.lunar_lander import heuristic display = Display(visible=0, size=(1400, 900)) display.start() # + id="8PVWg8MBGcYe" import keras import tensorflow as tf # from tensorflow_probability.distributions import Beta from keras.models import Sequential from keras.layers import Dense, Lambda from keras.optimizers import Adam from keras import backend as K np.random.seed(seed) tf.random.set_seed(seed*2) # + [markdown] id="d0tAYCl4SMIs" # ### Gym Setup # # Here we load the Reinforcement Learning environments from Gym. # # We limit each episode to 500 steps so that we can train faster. 
# + id="3RarYKAOSMIs"
# Silence gym while building the environment, then restore warnings.
gym.logger.setLevel(logging.ERROR)
discrete_env = gym.make('LunarLander-v2')
discrete_env._max_episode_steps = 500
discrete_env.seed(seed*3)
gym.logger.setLevel(logging.WARN)

# %matplotlib inline
plt.rcParams['figure.figsize'] = 12, 8
plt.rcParams["animation.html"] = "jshtml"

# + [markdown] id="3bee0anSSMIt"
# ### Utilities
#
# We include a function that lets you visualize an "episode" (i.e. a series of observations resulting from the actions that the agent took in the environment).
#
# As well, we will use the `Results` class (a wrapper around a python dictionary) to store, save, load and plot your results. You can save your results to disk with `results.save('filename')` and reload them with `Results(filename='filename')`. Use `results.pop(experiment_name)` to delete an old experiment.

# + id="e2aJVaASSMIt"
def AddValue(output_size, value):
    """Return a Keras Lambda layer that adds the constant `value` elementwise."""
    return Lambda(lambda x: x + value, output_shape=(output_size,))


def render(episode, env):
    """Build an inline HTML animation from a list of rendered episode frames."""
    fig = plt.figure()
    img = plt.imshow(env.render(mode='rgb_array'))
    plt.axis('off')

    def animate(i):
        # Swap in frame i of the recorded episode.
        img.set_data(episode[i])
        return img,

    anim = FuncAnimation(fig, animate, frames=len(episode), interval=24, blit=True)
    html = HTML(anim.to_jshtml())
    plt.close(fig)
    # Clean up the stray frame file matplotlib's jshtml export leaves behind.
    # !rm None0000000.png
    return html


class Results(dict):
    """Dict of experiment_name -> reward array, with smoothed plotting and
    save/load helpers (backed by numpy .npz files)."""

    def __init__(self, *args, **kwargs):
        # Passing filename=... loads previously saved results from disk.
        if 'filename' in kwargs:
            data = np.load(kwargs['filename'])
            super().__init__(data)
        else:
            super().__init__(*args, **kwargs)
        self.new_key = None
        self.plot_keys = None
        self.ylim = None

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Remember the most recently updated experiment so plot() draws it last.
        self.new_key = key

    def plot(self, window):
        """Redraw all experiments as reward curves smoothed over `window` episodes."""
        clear_output(wait=True)
        for key in self:
            # Ensure latest results are plotted on top
            if self.plot_keys is not None and key not in self.plot_keys:
                continue
            elif key == self.new_key:
                continue
            self.plot_smooth(key, window)
        if self.new_key is not None:
            self.plot_smooth(self.new_key, window)
        plt.xlabel('Episode')
        plt.ylabel('Reward')
        plt.legend(loc='lower right')
        if self.ylim is not None:
            plt.ylim(self.ylim)
        plt.show()

    def plot_smooth(self, key, window):
        """Plot one experiment's rewards smoothed with a moving average of width `window`."""
        if len(self[key]) == 0:
            plt.plot([], [], label=key)
            return None
        y = np.convolve(self[key], np.ones((window,))/window, mode='valid')
        # x-axis centred so the smoothed curve aligns with the raw episodes.
        x = np.linspace(window/2, len(self[key]) - window/2, len(y))
        plt.plot(x, y, label=key)

    def save(self, filename='results'):
        """Save all experiments to `<filename>.npz`."""
        np.savez(filename, **self)


# + [markdown] id="d3Cqp4T3SMIt"
# ### Test runs
#
# To get an idea of how the environment works, we'll plot an episode resulting from random actions at each point in time, and a "perfect" episode using a specially-designed function to land safely within the yellow flags.
#
# Please remove these plots before submitting the miniproject to reduce the file size.

# + id="SXRlD9qFSMIu"
def run_fixed_episode(env, policy):
    """Roll out one episode with `policy(env, observation)` and return the frames."""
    frames = []
    observation = env.reset()
    done = False
    while not done:
        frames.append(env.render(mode='rgb_array'))
        action = policy(env, observation)
        observation, reward, done, info = env.step(action)
    return frames


def random_policy(env, observation):
    """Baseline policy: uniformly random action, ignores the observation."""
    return env.action_space.sample()


def heuristic_policy(env, observation):
    """Reference policy: gym's hand-crafted LunarLander landing heuristic."""
    return heuristic(env.unwrapped, observation)


# + id="gaFRxXOFSMIu"
episode = run_fixed_episode(discrete_env, random_policy)
render(episode, discrete_env)

# + id="kBdD4gsfSMIv"
episode = run_fixed_episode(discrete_env, heuristic_policy)
render(episode, discrete_env)

# + [markdown] id="L-26a5CoSMIy"
# ## Experiment Loop
#
# This is the method we will call to setup an experiment. Reinforcement learning usually operates on an Observe-Decide-Act cycle, as you can see below.
#
# You don't need to add anything here; you will be working directly on the RL agent.
# + id="NnBvklKsSMIy" num_episodes = 3000 def run_experiment(experiment_name, env, num_episodes, policy_learning_rate=0.001, value_learning_rate=0.001, baseline=None, entropy_cost=0, max_ent_cost=0, num_layers=3): #Initiate the learning agent agent = RLAgent(n_obs=env.observation_space.shape[0], action_space=env.action_space, policy_learning_rate=policy_learning_rate, value_learning_rate=value_learning_rate, discount=0.99, baseline=baseline, entropy_cost=entropy_cost, max_ent_cost=max_ent_cost, num_layers=num_layers) rewards = [] all_episode_frames = [] step = 0 for episode in range(1, num_episodes+1): #Update results plot and occasionally store an episode movie episode_frames = None if episode % 10 == 0: results[experiment_name] = np.array(rewards) results.plot(10) if episode % 500 == 0: episode_frames = [] #Reset the environment to a new episode observation = env.reset() episode_reward = 0 while True: if episode_frames is not None: episode_frames.append(env.render(mode='rgb_array')) # 1. Decide on an action based on the observations action = agent.decide(observation) # 2. Take action in the environment next_observation, reward, done, info = env.step(action) episode_reward += reward # 3. Store the information returned from the environment for training agent.observe(observation, action, reward) # 4. When we reach a terminal state ("done"), use the observed episode to train the network if done: rewards.append(episode_reward) if episode_frames is not None: all_episode_frames.append(episode_frames) agent.train() break # Reset for next step observation = next_observation step += 1 return all_episode_frames, agent # + [markdown] id="eKjbSbixSMI1" # ## The Agent # # Here we give the outline of a python class that will represent the reinforcement learning agent (along with its decision-making network). We'll modify this class to add additional methods and functionality throughout the course of the miniproject. 
# # + id="sJk4sMGuSMI4" class RLAgent(object): def __init__(self, n_obs, action_space, policy_learning_rate, value_learning_rate, discount, baseline = None, entropy_cost = 0, max_ent_cost = 0, num_layers=3): #We need the state and action dimensions to build the network self.n_obs = n_obs self.n_act = action_space.n self.plr = policy_learning_rate self.vlr = value_learning_rate self.gamma = discount self.entropy_cost = entropy_cost self.max_ent_cost = max_ent_cost self.num_layers = num_layers <add code here> #These lists stores the cumulative observations for this episode self.episode_observations, self.episode_actions, self.episode_rewards = [], [], [] #Build the keras network self._build_network() def observe(self, state, action, reward): """ This function takes the observations the agent received from the environment and stores them in the lists above.""" pass def decide(self, state): """ This function feeds the observed state to the network, which returns a distribution over possible actions. Sample an action from the distribution and return it.""" pass def train(self): """ When this function is called, the accumulated episode observations, actions and discounted rewards should be fed into the network and used for training. Use the _get_returns function to first turn the episode rewards into discounted returns. Apply simple or adaptive baselines if needed, depending on parameters.""" pass def _get_returns(self): """ This function should process self.episode_rewards and return the discounted episode returns at each step in the episode. Hint: work backwards.""" pass def _build_network(self): """ This function should build the network that can then be called by decide and train. 
The network takes observations as inputs and has a policy distribution as output.""" pass # + [markdown] id="7n-UmwhaSMI6" # ## Exercise 1: REINFORCE with simple baseline # # ### Description # # Implement the REINFORCE Policy Gradient algorithm using a deep neural network as a function approximator. # # 1. Implement the `observe` method of the RLAgent above. # 2. Implement the `_build_network` method. Your network should take the 8-dimensional state space as input and output a softmax distribution over the 4 discrete actions. It should have 3 hidden layers with 16 units each with ReLU activations. Use the REINFORCE loss function. HINT: Keras has a built-in "categorical cross-entropy" loss, and a `sample_weight` argument in fit/train_on_batch. Consider how these could be used together. # 3. Implement the `decide`, `train` and `_get_returns` methods using the inputs and outputs of your network. In `train`, implement a baseline based on a moving average (over episodes) of the mean returns (over trials of one episode); it should only be in effect when the agent is constructed with the `use_simple_baseline` keyword. Also, use `train_on_batch` to form one minibatch from all the experiences in an episode. Hint: see Question 2) below. # 4. Try a few learning rates and pick the best one (the default for Adam is a good place to start). Run the functions below and include the resulting plots, with and without the baseline, for your chosen learning rate. # 5. Answer the questions below in max. 1-2 sentence(s). # # WARNING: Running any experiments with the same names (first argument in run_experiment) will cause your results to be overwritten. # # **Mark breakdown: 7 points total** # - 5 points for implementing and plotting basic REINFORCE with reasonable performance (i.e. a positive score) and answering the questions below. # - 2 points for implementing and plotting the simple baseline with reasonable performance. 
# + [markdown] id="H_tbHOSJSMI8" # ### Solution # + id="YgBnDR_1SMI8" #Supply a filename here to load results from disk results = Results() policy_learning_rate = 0.002 _, _ = run_experiment("REINFORCE", discrete_env, num_episodes, policy_learning_rate) episodes, _ = run_experiment("REINFORCE (with baseline)", discrete_env, num_episodes, policy_learning_rate, baseline='simple') # + id="gD1LNjY7SMI9" render(episodes[-1], discrete_env) # + [markdown] id="VQoNVTl0SMI-" # **Question 1**: We have at least three posibilities of picking the action: i) sample an action according to the softmax distribution, ii) select action with max action probability and iii) use an epsilon-greedy strategy. What is the difference between these strategies and which one(s) is(are) preferable during training and which one(s) is(are) preferable during testing?. # # # **Answer**: # + [markdown] id="j_VgNlZsSMI-" # **Question 2**: In the train method above we throw away the data from an episode after we use it to train the network (make sure that you do that). Why is it not a good idea to keep the old episodes and train the policy network on both old and new data? (Note: Reusing data can still be possible but requires modifications to the REINFORCE algorithm that we are using). # # **Answer**: # + [markdown] id="ViA-XGNvSMI_" # ## Exercise 2: Adaptive baseline # ### Description # # Add a second neural network to your model that learns an observations-dependent adaptive baseline and subtracts it from your discounted returns. # # 1. Modify the `_build_network` function of RLAgent to create a second "value network" when `adaptive` is passed for the baseline argument. The value network should have the same or similar structure as the policy network, without the softmax at the output. # 3. In addition to training your policy network, train the value network on the Mean-Squared Error compared to the discounted returns. # 4. Train your policy network on $R - b(s)$, i.e. 
the returns minus the adaptive baseline (the output of the value network). Your implementation should allow for a different learning rate for the value and policy network. # 5. Try a few learning rates and plot all your best results together (without baseline, with simple baseline, with adaptive baseline). You may or may not be able to improve on the simple baseline! Return the trained model to use it in the next exercise. # # TECHNICAL NOTE: Some textbooks may refer to this approach as "Actor-Critic", where the policy network is the "Actor" and the value network is the "Critic". Sutton and Barto (2018) suggest that Actor-Critic only applies when the discounted returns are bootstrapped from the value network output, as you saw in class. This can introduce instability in learning that needs to be addressed with more advanced techniques, so we won't use it for this miniproject. You can read more about state-of-the-art Actor-Critic approaches here: https://arxiv.org/pdf/1602.01783.pdf # # **Mark breakdown: 3 points total** # - 3 points for implementing and plotting the adaptive baseline with the other two conditions, with reasonable performance (i.e. at least similar to the performance in Exercise 1). # + [markdown] id="jw4J1tWtSMI_" # ### Solution # + id="NyQu57nfSMI_" value_learning_rate = 0.002 episodes, d_model = run_experiment("REINFORCE (adaptive baseline)", discrete_env, num_episodes, policy_learning_rate, value_learning_rate, baseline='adaptive') # + id="1OMufH8KSMI_" render(episodes[-1], discrete_env) # + [markdown] id="thlnQsckSMJC" # ## Exercise 3: Visualizing the Value Function # # ### Description # # Ideally, our value network should have learned to predict the relative values across the input space. We can test this by plotting the value prediction for different observations. # # 1. Write a function to plot the value network prediction across [x,y] space for given (constant) values of the other state variables. 
X is always in [-1,1], and Y generally lies in [-0.2,1], where the landing pad is at [0,0]. (`plt.imshow`, `plt.title`, and `plt.colorbar` can be useful) # 2. Plot (with titles specifying the state variable combinations) the values for 5-6 combinations of the other 6 state variables, including [0,0,0,0,0,0]. The X and Y velocity are generally within [-1,1], the angle is in [-pi,pi] and the angular velocity lies roughly within [-3,3]. The last two inputs indicating whether the legs have touched the ground are 0 (False) or 1 (True). Include two combinations with (one of the) state variables out of these ranges. Use the same color bar limits across the graphs so that they can be compared easily. # 3. Answer the question below in max. 2-3 sentence(s). # # **Mark breakdown: 3 points total** # - 3 points for the plots of the value function and answering the question below. # + [markdown] id="vZqhrWmLSMJD" # ### Solution # + [markdown] id="Vhg0znkOSMJE" # **Question**: Does your value map for the state variables combination [0,0,0,0,0,0] make sense? What about the value maps for the combinations with state variables out of the ranges above? # # **Answer**: # # + [markdown] id="wlxYhDW0Fysj" # ## Exercise 4: Comparing Architectures # ### Descrition # # Choosing a good neural network architecture is always a tricky question - on one hand, you want a complex architecture that is flexible enough to be able to solve the task, and on the other hand, you want to train your network as fast as possible and to not overuse your computational power. In the previous sections, we asked you to create a network with 3 hidden layers which you saw that is able to successfully solve the task and play the game. What happens if we do the same with 1 or 2 hidden layers? In this exercise, we ask you to look into the effect of the architecture and to compare different models with each other. # # 1. Include an extra parameter `num_layers` in the RLAgent class (by default it is equal to 3). # 2. 
Change the `_build_network` function so that it creates a policy and value networks with the required number of layers. # 3. Compare (on the same axes) the resulting plots for num_layers = 1, 2, 3. # + [markdown] id="DIaeIPwISMJF" # ## For your Interest.. # + [markdown] id="fxGtxNzPSMJF" # The code you've written above can be easily adapted for other environments in Gym. If you like, try playing around with different environments and network structures!
Project-01/index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # Name | SEC |B.N.| # -----|---|----| # <NAME> | 2 | 15 # + import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split # - # # Loading & Preprocessing the Data uni_data = pd.read_csv('univariateData.dat', names=['x1', 'y']) multi_data = pd.read_csv('multivariateData.dat', names=['x1', 'x2', 'y']) multi_data.head() # + def scale(data): ''' Scale data to have zero mean and standerd deviation of one. ''' data = np.array(data, dtype=float) # make sure that the data is a numpy array of floats mean_list = [] std_list = [] for i in range(data.shape[1]): mean, std = np.mean(data[:,i]), np.std(data[:,i]) data[:,i] = (data[:,i] - mean)/std mean_list.append(mean); std_list.append(std) return mean_list, std_list, data # Scale all features and targets to have mean of '0' and std of '1' and save the mean and std for each them, # to be used later to unscale certain feature or the target. mean_uni, std_uni, uni_data = scale(uni_data) mean_multi, std_multi, multi_data = scale(multi_data) # - # Adding the interception term 'x0' for both datasets uni_data = np.c_[np.ones((uni_data.shape[0], 1)), uni_data] multi_data = np.c_[np.ones((multi_data.shape[0], 1)), multi_data] multi_data[:5,:] # + # Split to train and test sets for both datasets by 80:20 ratio. 
y_uni = uni_data[:,-1].reshape(-1, 1)  # Convert y_uni to an (m, 1) matrix, where m is the number of samples
X_uni_train, X_uni_test, y_uni_train, y_uni_test = train_test_split(
    uni_data[:,:-1], y_uni, test_size=0.2, random_state=0)

y_multi = multi_data[:,-1].reshape(-1, 1)
X_multi_train, X_multi_test, y_multi_train, y_multi_test = train_test_split(
    multi_data[:,:-1], y_multi, test_size=0.2, random_state=0)
# -

y_uni_train.shape

# # Linear Regression Class

class LinearRegressor():
    """Ordinary least-squares linear regression fitted with batch gradient descent.

    Parameters
    ----------
    model_type : str
        'uni' -> 2 weights (intercept + 1 feature), 'multi' -> 3 weights.
    n_iterations : int
        Number of full-batch gradient-descent steps performed by fit().
    lr : float
        Learning rate (gradient-descent step size).
    """

    def __init__(self, model_type="uni", n_iterations=1000, lr=0.01):
        # Randomly initialize the weight column vector (w[0] is the intercept).
        if model_type == 'uni':
            self.w = np.random.randn(2, 1)
        elif model_type == 'multi':
            self.w = np.random.randn(3, 1)
        else:
            # Fail fast instead of surfacing an AttributeError later in fit().
            raise ValueError("model_type must be 'uni' or 'multi'")
        self.lr = lr
        self.n_iterations = n_iterations

    def fit(self, X, y):
        """Fit the weights on X of shape (m, n+1) and y of shape (m, 1)."""
        self.X = X
        self.y = y
        self.m = X.shape[0]
        for i in range(self.n_iterations):
            self.gradient_descent()

    def predict(self, X_test):
        """Return predictions X_test @ w as an (m, 1) array."""
        return np.dot(X_test, self.w)

    def compute_cost(self):
        """Training-set cost (1/m) * sum((Xw - y)^2), i.e. the mean squared error."""
        # np.mean over the (m, 1) residual array equals the original
        # (1/m) * sum(...) but avoids the Python-level row iteration.
        return float(np.mean((np.dot(self.X, self.w) - self.y) ** 2))

    def gradient_descent(self):
        """One batch gradient-descent step: w <- w - lr * dJ/dw."""
        gradients = (2 / self.m) * np.dot(self.X.T, (np.dot(self.X, self.w) - self.y))
        self.w = self.w - self.lr * gradients

    def evaluate_performance(self, y, y_predicted):
        """Mean Absolute Error (MAE) between y and y_predicted (vectorized)."""
        return float(np.mean(np.abs(y - y_predicted)))


# # Univariate Linear Regression

# +
model_1 = LinearRegressor()
model_1.fit(X_uni_train, y_uni_train)
y_uni_predicted = model_1.predict(X_uni_test)

print(f"The total training cost is: {model_1.compute_cost():0.2f}")
print(f"The MAE for the train set is: {model_1.evaluate_performance(y_uni_train, model_1.predict(X_uni_train)):0.2f}")
print(f"The MAE for the test set is: {model_1.evaluate_performance(y_uni_test, y_uni_predicted):0.2f}")
# -

# Visualize the training data
plt.plot(X_uni_train[:,1], y_uni_train, 'bo');
plt.plot(X_uni_train[:,1], model_1.predict(X_uni_train), '-r');

# # Multivariate Linear Regression

# +
model_2 = LinearRegressor(model_type='multi', n_iterations=10000, lr=0.001)
model_2.fit(X_multi_train, y_multi_train)
y_multi_predicted = model_2.predict(X_multi_test)

print(f"The total training cost is: {model_2.compute_cost():0.2f}")
print(f"The MAE for the train set is: {model_2.evaluate_performance(y_multi_train, model_2.predict(X_multi_train)):0.2f}")
print(f"The MAE for the test set is: {model_2.evaluate_performance(y_multi_test, y_multi_predicted):0.2f}")
# -

# Visualize the training data
fig = plt.figure(figsize=(15,10))
ax1 = fig.add_subplot(111, projection='3d')
ax1.scatter(X_multi_train[:,1], X_multi_train[:,2], y_multi_train, 'bo');
# ax1.plot_surface(X_multi_train[:,1],X_multi_train[:,2], model_2.predict(X_multi_train));

model_2.w

# # BONUS: Univariate Lasso regression
# > **Lasso regression** is a modification of linear regression, where the model is penalized
# > for the sum of absolute values of the weights. Thus, the absolute values of the weights
# > will (in general) be reduced, and many will tend to be zeros.
# During training, the objective function becomes:
#
# <img src="lasso_costfn.png" width="60%">
#
# > So the hyperparameter alpha is used to penalize higher weights by increasing the cost by
# > adding the regularization term hence, reduces the model's overfitting.

class LassoRegressor():
    """Linear regression with an L1 (lasso) penalty, fitted by (sub)gradient descent.

    Parameters
    ----------
    model_type : str
        'uni' -> 2 weights (intercept + 1 feature), 'multi' -> 3 weights.
    n_iterations : int
        Number of full-batch (sub)gradient-descent steps performed by fit().
    lr : float
        Learning rate.
    alpha : float
        L1 regularization strength.  The intercept w[0] is NOT penalized,
        matching compute_cost, which only sums |w[1:]|.
    """

    def __init__(self, model_type="uni", n_iterations=100, lr=0.01, alpha=1):
        # Randomly initialize the weight column vector (w[0] is the intercept).
        if model_type == 'uni':
            self.w = np.random.randn(2,1)
        elif model_type == 'multi':
            self.w = np.random.randn(3, 1)
        self.lr = lr
        self.n_iterations = n_iterations
        self.alpha = alpha

    def fit(self, X, y):
        """Fit the weights on X of shape (m, n+1) and y of shape (m, 1)."""
        self.X = X
        self.y = y
        self.m = X.shape[0]
        for i in range(self.n_iterations):
            self.gradient_descent()

    def predict(self, X_test):
        """Return predictions X_test @ w as an (m, 1) array."""
        return np.dot(X_test, self.w)

    def compute_cost(self):
        """MSE plus the L1 penalty alpha * sum(|w[1:]|) (intercept excluded)."""
        cost = (1/self.m) * sum( (np.dot(self.X, self.w) - self.y)**2 ) \
               + self.alpha * sum(np.abs(self.w[1:]))
        return float(cost)

    def gradient_descent(self):
        """One (sub)gradient-descent step on the penalized objective."""
        # Gradient of the MSE term.
        gradients = (2 / self.m) * np.dot(self.X.T, (np.dot(self.X,self.w) - self.y))
        # BUG FIX: the L1 penalty appeared in compute_cost but was ignored here,
        # so this class silently optimized plain least squares.  Add the L1
        # subgradient alpha * sign(w), excluding the unpenalized intercept w[0].
        l1_subgrad = self.alpha * np.sign(self.w)
        l1_subgrad[0] = 0.0
        self.w = self.w - self.lr * (gradients + l1_subgrad)

    def evaluate_performance(self, y, y_predicted):
        """Mean Absolute Error (MAE) between y and y_predicted."""
        MAE = sum(np.abs(i-j) for i,j in zip(y,y_predicted) ) / float(len(y))
        return float(MAE)


# +
model_3 = LassoRegressor()
model_3.fit(X_uni_train, y_uni_train)
y_uni_predicted = model_3.predict(X_uni_test)

print(f"The total training cost is: {model_3.compute_cost():0.2f}")
print(f"The MAE for the train set is: {model_3.evaluate_performance(y_uni_train, model_3.predict(X_uni_train)):0.2f}")
print(f"The MAE for the test set is: {model_3.evaluate_performance(y_uni_test, y_uni_predicted):0.2f}")
# -

# Visualize the training data
plt.plot(X_uni_train[:,1], y_uni_train, 'bo');
plt.plot(X_uni_train[:,1], model_3.predict(X_uni_train), '-r');
Task 2 - Linear Regression/Task 2 - Linear Regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Here we can't use recursion because whenever we call a left sub tree
# then it completes the work of the entire left sub tree and then returns.
# You can't get the control to stop at 1. So, we use an iterative approach
# and use a queue, say 1 2 3. First we ask for the children of 1; 2 and 3 are
# given, and then 2's children are asked for and added to the queue.

# Algorithm (NOTE: these lines were bare, un-commented text in the original
# file — a syntax error in a .py script — so they are kept here as comments):
# 1) Take root input
# 2) Add it to queue
# 3) While queue is not empty:
#      i)  Take out the front of queue --> a
#      ii) Ask for its children.
#          If valid children, attach them to 'a' & add them to the queue.

class BinaryTreeNode:
    """A binary-tree node holding a value plus left/right child links."""

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def printTreeDetailed(root):
    """Pre-order print of the tree: each node as 'data:' followed by its
    'L <left>' and/or 'R <right>' children, one node per line."""
    if root is None:
        return
    print(root.data, end = ":")
    if root.left is not None:
        print("L", root.left.data, end = ",")
    if root.right is not None:
        print("R", root.right.data, end = " ")
    print()
    printTreeDetailed(root.left)
    printTreeDetailed(root.right)


import queue


def takeLevelWiseTreeInput():
    """Interactively build a binary tree level by level (BFS order).

    Prompts for each node's value on stdin; entering -1 means "no node
    here".  Returns the root node, or None if the root itself is -1.
    """
    q = queue.Queue()
    print("Enter root")
    rootData = int(input())
    if rootData == -1:
        return None
    root = BinaryTreeNode(rootData)
    q.put(root)
    while not q.empty():
        current_node = q.get()
        print("Enter left child of ", current_node.data)
        leftChildData = int(input())
        if leftChildData != -1:
            leftChild = BinaryTreeNode(leftChildData)
            current_node.left = leftChild
            q.put(leftChild)
        print("Enter right child of ", current_node.data)
        rightChildData = int(input())
        if rightChildData != -1:
            rightChild = BinaryTreeNode(rightChildData)
            current_node.right = rightChild
            q.put(rightChild)
    return root


# BUG FIX: the interactive driver previously ran at import time, so importing
# this module blocked on input() (or raised EOFError with no stdin).  Guard it
# so the classes/functions are importable while script behavior is unchanged.
if __name__ == "__main__":
    root = takeLevelWiseTreeInput()
    printTreeDetailed(root)
14 Binary Trees - 2/14.07 Level wise Input Binary Tree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # - Goal: provide comsol a 3-argument (T, xWF6, xH2) table to interpolate the thermal diffusion coefficients D_T(k) from IPython.core.display import display, HTML import pygments display(HTML("<style>.container { width:266mm !important; }</style>")) # to set cell widths #to get this into the dissertation, it was #1. exported with jupyter notebook as html (space gets wider), #2. converted to a pdf with https://www.sejda.com/de/html-to-pdf (space gets wider again), #3. placed as a Verknüpfung into Adobe illustrator, where for each page separate Zeichenflächen # have been chosen. For their placement the coordinate origin should be changed from "centered" # to "left". #4. Each Zeichenfläche was saved separately and loaded in LaTeX formatter = pygments.formatters.get_formatter_by_name('html', linenos='inline') #Version 1.4 import numpy as np import pandas as pd import time # + ###=== INPUT DATA =======### dfAllPolys = pd.read_excel('C:/Daten/Modeling/Stoffdaten/Polynomals_for_T_star_related_quantities.xlsx') R_const = 8.31446 #[J mol^-1 K^-1] speciesData={0:{'species':'H_2', 'molar mass [kg/mol]':0.00201588, '(epsilon/k_B)[K]':lambda T:59.7, 'sigma[m]':lambda T:2.827E-10, }, 1:{'species':'WF_6', 'molar mass [kg/mol]':0.297830419, '(epsilon/k_B)[K]': lambda T:(819.92 -1.0619*T + 0.000619*T**2 - (6.62E-8)*T**3), 'sigma[m]': lambda T:(4.9734 + 0.0009284*T + 6.5815E-7*T**2 - (7.315E-10)*T**3)*10**-10, }, 2:{'species':'HF', 'molar mass [kg/mol]':0.020006, '(epsilon/k_B)[K]':lambda T:330, 'sigma[m]':lambda T:3.148E-10, },} ''' 2:{'species':'SiH_4', 'molar mass [kg/mol]':0.03212, '(epsilon/k_B)[K]':lambda T:207.6, 'sigma[m]':lambda T:4.084E-10, }, ''' #references: polynomals Kleijn1991, p. 
37; other data as in Table 8.1 in present dissertation ###======================### #number of species: N = len(speciesData) #zip molar masses: m_={} for key in speciesData: m_[key] = speciesData[key]['molar mass [kg/mol]'] def create_T_dep_Dicts(T): '''returns T dependent literature data as compact indexed dictionaries for use in formulas; this has to be executed in the beginning of the T loop, which will fill the look up table, to provide the function calc_Md_and_detMd with needed T dependent local variables''' sigma_, epsilonDivKb_ = {}, {} for k in range(N): sigma_[k]=speciesData[k]['sigma[m]'](T) for k in range(N): epsilonDivKb_[k]=speciesData[k]['(epsilon/k_B)[K]'](T) return sigma_, epsilonDivKb_ def getQuantityValue(Quantity,T_star): ''' returns value for requested Quantity (Omega_i or A*-C*) looks up the polynomals in table 2.5 in [Kleijn1993] depending on T_star. ''' dfPolys = dfAllPolys.query('Quantity == @Quantity and T_star_low <= @T_star < T_star_high') return float(dfPolys['a0']+dfPolys['a1']*T_star+dfPolys['a2']*T_star**2+dfPolys['a3']*T_star**3) def calc_Md_and_detMd(T,f_): ''' returns the matrix in the denominator in the equation for D_T [Kleijn1993, Eq. 2.89] and its determinant needs the for loop variables T, f_ as input needs m_, sigma_, epsilonDivKb_, c_p_ as lokal variables ''' #following lambda is an unnamed python function not to be confused with thermal conductivity sigma = lambda i,j: 0.5*(sigma_[i]+sigma_[j]) #[m] [Eq. 2.69] epsilonDivKb = lambda i,j: (epsilonDivKb_[i]*epsilonDivKb_[j])**0.5 #[K] [Eq. 2.70] T_star = lambda i,j: T/epsilonDivKb(i,j) #[-] [Eq. 2.71] Omega_mu = lambda i,j: getQuantityValue('Omega_mu',T_star(i,j)) Omega_D = lambda i,j: getQuantityValue('Omega_D',T_star(i,j)) A_star = lambda i,j: getQuantityValue('A_star',T_star(i,j)) B_star = lambda i,j: getQuantityValue('B_star',T_star(i,j)) C_star = lambda i,j: getQuantityValue('C_star',T_star(i,j)) #to obtain lamb (thermal heat conductivity in W/(m*K) [p.43, Eq. 
2.80 = 2.81 for i = j)]: lamb = lambda i,j: (0.00263*(T*(m_[i]+m_[j])/(2*m_[i]*m_[j]))**0.5 /((sigma(i,j)*1E10)**2*Omega_mu(i,j))) #(K/kg*mol)**0.5/m²...units does not fit, #but final result fit to Fig 2.5 with this def L_00(i,j): #[Eq. 2.90 and 2.91] result=0. if i!=j: result=2.*f_[i]*f_[j]/(A_star(i,j)*lamb(i,j)) for n in range(N): if n!=i: result+=2.*f_[j]*f_[n]*m_[j]/(m_[i]*A_star(i,n)*lamb(i,n)) # the f_[j] is correct compared to Hirschfelder1967 # with f_[i] as in the book of Kleijn, its not possible to reproduce Fig 2.5 return result def L_01(i,j): result=0. if i==j: for n in range(N): if n!=i: result+=(5.*f_[i]*f_[n]*m_[n]*(6./5.*C_star(i,n)-1) /((m_[i]+m_[n])*A_star(i,n)*lamb(i,n))) if i!=j: result=(-5.*f_[i]*f_[j]*m_[i]*(6./5.*C_star(i,j)-1.) /((m_[i]+m_[j])*A_star(i,j)*lamb(i,j))) return result def L_10(i,j): return m_[j]/m_[i]*L_01(i,j) def L_11(i,j): result=0. if i==j: result=-4.*f_[i]**2/lamb(i,j) for n in range(N): if n!=i: result-=(2.*f_[i]*f_[n]*(15./2.*m_[i]**2+25./4.*m_[n]**2 -3.*m_[n]**2*B_star(i,n) +4.*m_[i]*m_[n]*A_star(i,n)) /((m_[i]+m_[n])**2*A_star(i,n)*lamb(i,n))) if i!=j: result=(2*f_[i]*f_[j]*m_[i]*m_[j]/((m_[i]+m_[j])**2*A_star(i,j)*lamb(i,j)) *(55./4.-3.*B_star(i,j)-4.*A_star(i,j))) return result #filling of the matrix according to gas species i,j, #which start here at zero and not with one as in the book, #as matrix indizes and range(...) also starts at zero Md = np.zeros((2*N,2*N)) # matrix in denominator in Eq. 2.89 for i in range(N): #i,j=0,1,2,... #i=row, j=column for j in range(N): Md[i,j] = L_00(i,j) Md[i,j+N] = L_01(i,j) Md[i+N,j] = L_10(i,j) Md[i+N,j+N] = L_11(i,j) return Md, np.linalg.det(Md) #so that det(Md) needs to be calculated only once for all k def kroneckerDelta(i,j): #carefull: np.kron(i,j) is something else if i == j: return 1. else: return 0. def calc_D_T(Md, detMd, k, f_): ''' Returns thermal diffusion coefficients for different species k, which do share the same Md and detMd. 
''' Mc = np.zeros((2*N+1, 2*N+1))# matrix in counter Mc[:-1,:-1] = Md for j in range(N): Mc[N+j, 2*N] = f_[j] #last column (index start at 0, thus column 2*N e.g.=6 is the 7. column) Mc[2*N, j] = f_[j]*kroneckerDelta(j,k) #last row return -8*m_[k]/5/R_const*np.linalg.det(Mc)/detMd ######## build look-up-dataframe for D_T(k): ####### df_D = pd.DataFrame() ###=== T & molefractions loop CONFIG =======### T_loop = np.linspace(300,1100,18) f_0_loop = np.linspace(1e-6,1-1e-6,11) f_0 = f_0_loop[0] #will be replaced by other is elements of f_0_loop during following nested for-loops f_1_loop = [1e-6,1e-2]+np.logspace(-1.6,-0.07,12).tolist()+[1-f_0] t0 = time.clock() for T in T_loop: sigma_,epsilonDivKb_= create_T_dep_Dicts(T) for f_0 in f_0_loop: for f_1 in f_1_loop: if f_0 > 0 and f_1 > 0 and f_0+f_1 < 1: f_=[f_0, f_1, 1-f_0-f_1] Md,detMd = calc_Md_and_detMd(T,f_) single_row = {'$T$ [K]':T} for k in range(N): single_row['$x('+speciesData[k]['species']+')$'] = f_[k] single_row['$D_T('+speciesData[k]['species']+')$ [kg/(m*s)]'] = calc_D_T(Md,detMd,k,f_) df_D = df_D.append(pd.DataFrame.from_records([single_row]),sort = False) neededTime = time.clock() - t0 df_D.head() # - print('needed time: ' + '%.1f'%(neededTime/60) + ' min') # + # ==== EXPORT for Comsol ============== fillingUpSpecies = 2 #the species that fills up the mole fraction to 1 #and is thus not needed as argument myColumnOrder = ['$T$ [K]'] for k in range (N): if k != fillingUpSpecies: myColumnOrder += df_D.filter(like='x')\ .filter(like=speciesData[k]['species']).columns.tolist() for k in range (N): myColumnOrder += df_D.filter(like='D_T')\ .filter(like=speciesData[k]['species']).columns.tolist() destiFile = r'P:\WILMA\Leonard Raumann\Comsol\single fiber tube\input data\D_T.txt' df_D[myColumnOrder].to_csv(destiFile,index=False, sep='\t', float_format='%1.2e',) df_D[myColumnOrder].head() # -
Python scripts/thermal_diffusion_coefficient.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 関連性 # # 統計での興味の一つは,取得した標本群の間にどういう関係があるか,ということだろう.この節では関連性を中心に説明していく. # ## 相関係数 (Correlation Coefficient) # # # 屋久島の降雨量と,お隣の種子島の降雨量には関係があるだろうか? 鹿屋と志布志ではどうだろう.もっと離れた札幌では? # # kagoshima-rain-month.txt の11列目が屋久島,3列目が種子島,8列目が鹿屋,10列目が志布志の降雨量になっている. # 札幌の降水量は sapporo-rain-month.txt にある. # とりあえずグラフを描いてみよう. # # + import numpy as np from matplotlib import pyplot # %matplotlib inline yakushima = np.loadtxt("./dat/kagoshima-rain-month.txt", usecols=11) tanegashima = np.loadtxt("./dat/kagoshima-rain-month.txt", usecols=2) kagoshima = np.loadtxt("./dat/kagoshima-rain-month.txt", usecols=6) sapporo = np.loadtxt("./dat/sapporo-rain-month.txt", usecols=1) pyplot.plot(yakushima, label="yakushima") pyplot.plot(tanegashima, label="tanegashima") pyplot.plot(kagoshima, label="kagoshima") pyplot.plot(sapporo, label="sapporo") pyplot.legend() pyplot.show() # - # なんだかよく分からない.関連があるような気もするし,しない気もする.相関があるかどうかを見るときはグラフなら散布図,数値的に評価するには相関係数を使う.相関係数はNumpyの`corrcoef()`で計算できる. # + pyplot.axes().set_aspect('equal') pyplot.xlabel("yakushima") pyplot.xlim([0, 1500]) pyplot.ylabel("tanegashima") pyplot.ylim([0, 1500]) pyplot.scatter(yakushima, tanegashima) pyplot.show() print(np.corrcoef(yakushima, tanegashima)[0][1]) pyplot.axes().set_aspect('equal') pyplot.xlabel("yakushima") pyplot.xlim([0, 1500]) pyplot.ylabel("kagoshima") pyplot.ylim([0, 1500]) pyplot.scatter(yakushima, kagoshima) pyplot.show() print(np.corrcoef(yakushima, kagoshima)[0][1]) pyplot.axes().set_aspect('equal') pyplot.xlabel("yakushima") pyplot.xlim([0, 1500]) pyplot.ylabel("sapporo") pyplot.ylim([0, 1500]) pyplot.scatter(yakushima, sapporo) pyplot.show() print(np.corrcoef(yakushima, sapporo)[0][1]) # - # 相関係数 (ピアソンの積率相関係数) $r$は次の式で定義される. 
# $$r=\frac{\sum_{i=1}^N\left(x_i - \bar{x}\right)\left(y_i - \bar{y}\right)}{\sqrt{\sum_{i=1}^N\left(x_i - \bar{x}\right)^2\sum_{i=1}^N\left(y_i - \bar{y}\right)^2}}$$ # 別の書き方をすると # $$r = \frac{\sum_{i=1}^N\left(Z_{x,i}Z_{y,i}\right)}{N-1}$$ # $Z$はZスコア. # # 他にスピアマンの順位相関係数,ケンドールの順位相関係数などもある. # # 相関に関する議論で注意する必要があるのは,非線形な相関がある場合である. # 「相関」という用語は,確率変数同士の線形関係に限定した用語ではない.JISでは「相関」は「二つの確率変数の分布法則の関係。**多くの場合**,線形関係の程度を指す。」とされている.これに対して,相関係数は**線形関係の強さ**の指標になっている. # # つまり,相関係数は比例関係にあるかないかしか判定できない. # 例えば $\theta$ と $\sin(\theta)$,$\cos(\theta)$ の相関係数を計算するとどうなるだろうか? # # + import math import random x = [] cos = [] sin = [] for loop in range(10): for t in range(0,360): x.append(t) cos.append(math.cos( math.radians(t)) + random.gauss(0, 0.1) ) sin.append(math.sin( math.radians(t)) + random.gauss(0, 0.1) ) pyplot.scatter(x,cos) pyplot.scatter(x,sin) pyplot.show() print(np.corrcoef(x, cos)[0][1]) print(np.corrcoef(x, sin)[0][1]) # - # $\theta$と$\cos(\theta)$の相関係数は0だが$\sin(\theta)$との相関係数は $-0.78$になる. # # また,次のような例でも相関係数は0になる. # + import random x = [] y = [] for i in range(20000): x.append(random.gauss(0,0.1) + random.randint(0,1) + 1) y.append(random.gauss(0,0.1) + random.randint(0,1) + 1) pyplot.axes().set_aspect('equal') pyplot.xlim([0, 3]) pyplot.ylim([0, 3]) pyplot.scatter(x,y) pyplot.show() print(np.corrcoef(x, y)[0][1]) # - # $x$と$y$に何らかの関連がありそうだが,相関係数は0なのである. # ### 相関係数とサンプル数 # # 線形の関係に話を戻して,相関係数はどのぐらいの値であれば統計的に有意な相関があると言えるだろうか. # 相関係数がどの程度の大きさであればよいかは,サンプル数に依存する. # サンプル数が少ないときは,たまたま相関があるように見えてしまうことがあるので,大きな相関係数でなければ有意とは言えない. # 相関のない2つの変数でサンプル数を変えながら相関係数を計算してみよう. 
# + samples = [5, 10, 15, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100, 500, 1000] #samples = [1000, 5000, 10000] s = [] r = [] Trial = 100 for Ns in samples: for t in range(Trial): x = [] y = [] for i in range(Ns): x.append(random.randint(0,100)) y.append(random.randint(0,100)) s.append(Ns) r.append(np.corrcoef(x,y)[0][1]) pyplot.ylim([-1,1]) pyplot.grid() pyplot.scatter(s, r) pyplot.show() # - # xとyは乱数なので当然ながら本来の相関係数は0である.しかし,サンプル数が小さいときには相関係数が$-1$から$1$まで様々な値を取る.サンプル数を増やしていくとこの偶然による相関係数は小さくなっていくが,サンプル数100でも 0.25程度の相関係数は残る.サンプル数と,有意と見なしてよい相関係数の関係は下の表のようになる (出典:Statistics Hacks). # # |サンプルサイズ|有意と見なせる最小の相関係数| # |:------------:|:---------------------:| # |5|0.88| # |10|0.63| # |15|0.51| # |20|0.44| # |25|0.40| # |30|0.38| # |60|0.26| # |100|0.20| # # 屋久島の降水量と種子島の降水量はサンプル数482で相関係数 0.83だから,これは高い相関がある.屋久島と鹿児島の相関係数は0.66 で種子島よりは低いが,この相関は偶然というわけではなくちゃんと意味のある相関がある,ということだ. # 屋久島と札幌は一見すると相関がないように見えるが,サンプル数482で相関係数$-0.22$というのは何らかの有意な相関があるということを示しており, # 全くの無相関ではないということが分かる. # ## 回帰直線 # # 変数の間に相関があるとき,片方が分かれば他の変数を予測することができる. # # #### メモ:「回帰」ってどこに帰るのか? # # 回帰 (regression) は「元の状態に戻る」 # 平均への回帰 (regression toward the mean) # ### 単純回帰 (Simple Linear Regression) # # $$基準変数 = 定数 + (予測変数 \times 重み付け)$$ # $$重み付け = 相関係数 \times \frac{基準変数の標準偏差}{予測変数の標準偏差}$$ # $$定数 = 基準変数の平均値 - (重み付け \times 予測変数の平均値)$$ # # ### 最小二乗法 (Least Squares Method) # # 自分で書くのは面倒なので,`scipy` の `optimize` を使う. # `statsmodels`の `OLS()` (Ordinary Least Squares) を使うという手もある. # + ### Simple Linear Regression mean_yaku = yakushima.mean() mean_tane = tanegashima.mean() sigma_yaku = yakushima.std() sigma_tane = tanegashima.std() r = np.corrcoef(yakushima, kagoshima)[0][1] weight = r * sigma_tane / sigma_yaku offset = mean_tane - (weight * mean_yaku) print("y = %.2f + %.2f x" % (offset, weight)) xs = range(1500) ys = offset + weight * xs ### Least Squares import scipy.optimize def fit_func(param,x,y): a = param[0] b = param[1] residual = y-(a*x+b) return residual init_param = [0.,0.] 
result = scipy.optimize.leastsq(fit_func, init_param, args=(yakushima, tanegashima)) a_fit = result[0][0] b_fit = result[0][1] print("y = %.2f + %.2f x" % (b_fit, a_fit)) xls = range(1500) yls = b_fit + a_fit * xls ### plot pyplot.xlabel("yakushima") pyplot.ylabel("tanegashima") pyplot.scatter(yakushima, tanegashima) pyplot.plot(xs,ys,color="red", label="Simple Regression") pyplot.plot(xls,yls,color="orange", label="Least Square") pyplot.legend() pyplot.show() # - # 単純回帰と最小二乗法は実はけっこうずれる.相関の傾向から大きくはずれた点がある場合に,それらの点が近似式に与える影響が違うことなどが原因である. # ### 重回帰 # # ## 偶然かどうかの判定 ー カイ2乗検定 # # カイ2乗検定は,以下のカイ2乗値を計算して,その値の大きさで仮説を棄却すべきかどうか判定する方法である. # $$\chi^2 = \sum\frac{\left(Observed - Expected\right)^2}{Expected}$$ # Observed は観測された値,Expected は期待度数.期待値ではなく,「仮説通りであればこうなるだろう」の値にすることが多い. # # サイコロで$\chi^2$値を計算してみよう.正しいサイコロではなく1の出る確率を操作できるサイコロにしておく.prob1 が1の出る確率で,それ以外の目は均等に出るものとする. # + import random prob1 = 0.17 probo = (1 - prob1) / 5.0 Ns = 10000 res = [0,0,0,0,0,0] chi2 = 0 for i in range(Ns): df = random.random() for d in range(6): if (df < prob1 + probo * (d)): res[d] += 1 break for d in range(6): chi2 += (res[d] - Ns/6)**2 / (Ns/6) print(res) print(chi2) # - # prob1 や試行回数 $Ns$ を変えると $\chi^2$値は変わる.ではこの値をどう使えばよいかというと,$\chi^2$は p値に変換ができる.面倒なので `scipy.stats.chisquare` を使おう. # + import scipy.stats print(scipy.stats.chisquare(res)) # - # 実際には p値に変換するのではなく,自由度とp値から求められる棄却限界値を使って偶発的なノイズなのか有意な偏りなのかを判定する. # # 例) 滋賀大学 中川雅央による表: # https://www.biwako.shiga-u.ac.jp/sensei/mnaka/ut/chi2disttab.html # # 自由度6で有意水準 0.05 のとき,$\chi^2$の棄却水準は 12.6 となる.つまり,上で計算した $\chi^2$の値が 12.6 を超えていれば # 95%の確からしさでサイコロに偏りがある (サイコロの目が均等に出るという仮説は棄却される.) # # prob1 の値を変えると,0.16~0.17 ぐらいでは $\chi^2$値は小さいが,この範囲を超えると急速に大きな値になることが分かる. # # # ### より正確な方法 ー G検定 # # カイ2乗検定は計算が簡単だが,実は近似を基にした簡易手法である.対数尤度を近似せずに # # $$G = 2\sum Observed\ln\left(\frac{Observed}{Expected}\right)$$ # # を用いる方がより正確である.ただし標本数が十分であればG検定と$\chi^2$検定は同じ結果になる. # # ### 二元のカイ2乗検定 (独立性検定) # # サイコロの例は一元のカイ2乗検定だったが,2つの項目の間に関連性があるかどうか,を判定するのが二元のカイ2乗検定である. 
# これは下のような2x2分割表で,2つの項目間の関連性を検定する. # # |好み |男性|女性|合計| # |---- |----|----|----| # |いちご|34|41|75| # |りんご|45|30|75| # |合計|79|71|150| # # 一元のカイ2乗検定では「男性はいちごよりりんごを好むと言えるかどうか」が検定できる.二元ではこの2x2の表から # 「性別と果物の好みに関連性はあるか」を検定することができる.計算自体は一元と同じで,各マスの期待度数を計算して # $\chi^2$を計算すればよい.期待度数は # # |好み |男性|女性|合計| # |---- |----|----|----| # |いちご|39.5|35.5|75| # |りんご|39.5|35.5|75| # |合計|79|71|150| # # で,$\chi^2$は 3.24 になる.p=0.05 で棄却限界は 3.84 で,3.24 よりも大きい. # つまりこの分布の偏りは偶然によるものである確率が 5%以上あるので,帰無仮説を棄却しないのが妥当ということになる. # # # ### より正確な方法 ー Fischerの正確確率検定 # # カイ2乗検定は,期待度数が小さいときに結果が不正確になることが知られている,そのような場合にはより正確な Fischerの正確確率検定を使うべきだ. # Wikipedia によると,おおむね期待度数が 10以下になるような場合にはカイ2乗検定は使わない方がよいらしい. # ### シンプソンのパラドックス # ## 2つの標本群の比較 ー t検定 # # ある2つの標本群があったとき,それらの母集団の性質は同じかどうかを知りたいときに使うのが t検定である. # # 同じ正規分布からサンプリングした場合でも,標本群の分布が等しくなるとは限らない.標本群の平均や標準偏差を求めたとしても, # 特にサンプル数があまり大きくないときには偶然によってずれてしまうことがありうる.例えば同じガウス分布の母集団から2つの標本群を作って # その平均値と標準偏差を求めてみると, # + sample1 = np.array([]) sample2 = np.array([]) Ns1 = 50 Ns2 = 15 for i in range(Ns1): sample1 = np.append(sample1, random.gauss(100,10)) for j in range(Ns2): sample2 = np.append(sample2, random.gauss(100,10)) print("sample1: mu = %.2f / sigma = %.3f" % (sample1.mean(), sample1.std())) print("sample2: mu = %.2f / sigma = %.3f" % (sample2.mean(), sample2.std())) # - # ずれるときはずれる.この標本群の差が母集団にもあるのか,ただの偶然なのかは以下の t値を計算して判定できる.(この式は分散が等しくない場合でも使える改良版で,ウェルチのt検定と呼ばれる.分散が等しいことを前提とした方は区別するときはスチューデントのt検定と呼ばれる.) # $$t = \frac{\mu_1 - \mu_2}{\sqrt{\frac{\sigma_1^2}{Ns_1} + \frac{\sigma_2^2}{Ns_2}}}$$ # 実際に計算してみると, # # + import math m1 = sample1.mean() m2 = sample2.mean() v1 = sample1.var() v2 = sample2.var() tvalue = (m1 - m2) / math.sqrt( v1 / Ns1 + v2 / Ns2) print(tvalue) # - # t値が2より小さければ,95%の確率で母集団は同じ平均値をもっていると言える. # (厳密にはサンプルサイズによって変わるが,サンプルサイズが10以下の場合を除いておおむね2が境界値になる.) #
04_Relationship.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import pandas as pd import numpy as np print pd.__version__ print np.__version__ fname = 'sample.txt' # !head sample.txt df = pd.read_csv('sample.txt', sep='\s+') df.head() df.ix[0,0] df.dtypes df = pd.read_csv('sample.txt', sep='\s+', dtype={'SourceID': np.int64}) df.ix[0,0] arr = np.loadtxt('sample.txt', skiprows=1) arr[0,0] arr.dtype
checkproblem.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Test on the example image # # # For each block of code, hit [RUN] in the top left of the screen, or [SHITF] + [RETURN] on your keyboard. While the action is being carried out you will see ln [*] and when each step is done you will see the * replaced with a number (ln [1]). Most steps will also plot out an image as well when the step is done running. # # # Upload your image # # * In the top left-hand corner click "File" # * Click "Open..." from the drop down menu # * Click the folder named "images" # * In the top right-hand corner click "Upload" # * Choose your image from your computer # * Click the blue "Upload" button # # How to read code # * Everything after a hashtag (light green and italic) is a comment. Read through these instructions to help understand what each step is doing. The computer doesn't read these comments; these are notes for the humans reading the code! # * `pcv.`____ are functions from the PlantCV software library. These functions DO something (i.e. read in an image, resize the image, ...) # * Stuff inside parentheses are parameters. These are the ingredients where your function is the recipe. # * Stuff to the left of equals sign are the things "returned" by a function. Generally, a function will spit out one or more objects, usually image objects, that get saved to variable names. These output objects are usually the input for the next step. # # Update your code to read your new image # * In the block of code that starts `class options:` change # the line where `self.image` gets assigned # Import software needed from plantcv import plantcv as pcv import numpy as np # + class options: def __init__(self): self.image = "images/maize.JPG" # ^ # | # Replace "maize.JPG" with your image name. # NOTE: this is case sensitive! 
self.debug = "plot" self.writeimg= False self.result = "./g2p_results" self.outdir = "." # Get options args = options() # Set debug to the global parameter pcv.params.debug = args.debug # + # Read image (sometimes you need to run this line twice to see the image) # Inputs: # filename - Image file to be read in # mode - Return mode of image; either 'native' (default), 'rgb', 'gray', or 'csv' img, path, filename = pcv.readimage(filename=args.image, mode='rgb') # + # The image is quite large which can slow down computation, so resize. The image should look the same but the scale # (the x- and y-axis numbers) will be smaller. # Inputs: # img - RGB or grayscale image # resize_x - How much to resize in the x axis # resize_y - How much to resize in the y axis img = pcv.resize(img=img, resize_x=.5, resize_y=.5) # + # Convert RGB to LAB and extract the green-magenta channel ('a') # Input: # rgb_img - RGB image data # channel- Split by 'l' (lightness), 'a' (green-magenta), or 'b' (blue-yellow) channel a_img = pcv.rgb2gray_lab(rgb_img=img, channel='a') # ^ # | # Try changing the channel. We want the plant to be either lighter or darker # than the background. # + # Threshold can be on either light or dark objects in the image. # Inputs: # gray_img - Grayscale image data # threshold- Threshold value (between 0-255) # max_value - Value to apply above threshold (255 = white) # object_type - 'light' (default) or 'dark'. If the object is lighter than the background then standard threshold is done. # If the object is darker than the background then inverse thresholding is done. a_thresh_img = pcv.threshold.binary(gray_img=a_img, threshold=125, max_value=255, object_type='dark') # ^ ^ # | | # Adjust the threshold until | # the plant is completely | # white but not much background | # is white. Change to 'light' # if the plant from the # rgb2gray step is lighter # than the background. # + # Filter out dark noise from an image. 
# Inputs: # gray_img - Grayscale or binary image data # kernel - Optional neighborhood, expressed as an array of 1's and 0's. If None (default), # uses cross-shaped structuring element. closed = pcv.closing(gray_img=a_thresh_img) # + # Fill small objects (reduce image noise) # Inputs: # bin_img - Binary image data # size - Minimum object area size in pixels (must be an integer), and smaller objects will be filled filled = pcv.fill(bin_img=closed, size=40) # ^ # | # Increase the size to remove extra things in the background # but try not to lose any pieces of plant. # + # Dilate the mask to avoid losing leaf tips # Inputs: # gray_img = input image # ksize = kernel size, integer # i = iterations, i.e. number of consecutive filtering passes dilated = pcv.dilate(gray_img=filled, ksize=5, i=1) # + # Use a lowpass (blurring) filter to smooth sobel image # Inputs: # gray_img - Grayscale image data # ksize - Kernel size (integer or tuple), (ksize, ksize) box if integer input, # (n, m) box if tuple input m_blur = pcv.median_blur(gray_img=dilated, ksize=12) # + # Fill in any holes in the plant mask # Inputs: # bin_img - Binary image data filled_mask = pcv.fill_holes(bin_img=m_blur) # + # Identify objects # Inputs: # img - RGB or grayscale image data for plotting # mask - Binary mask used for detecting contours obj_cnt, obj_hierarchy = pcv.find_objects(img=img, mask=filled_mask) # + # Define region of interest (ROI) # Inputs: # img - RGB or grayscale image to plot the ROI on # x - The x-coordinate of the upper left corner of the rectangle # y - The y-coordinate of the upper left corner of the rectangle # h - The height of the rectangle # w - The width of the rectangle roi_cnt, roi_hierarchy = pcv.roi.rectangle(img=img, x=500, y=500, h=1000, w=2000) # ^ ^ # | | # _____________________________ # Update these # Depending on the resolution of an image, this will likely need updating. 
# We want the rectangle to at least partially contain the plant but avoids # any large background objects that have yet to be filtered out. # + # Decide which objects to keep # Inputs: # img = img to display kept objects # roi_contour = contour of roi, output from any ROI function # roi_hierarchy = contour of roi, output from any ROI function # object_contour = contours of objects, output from pcv.find_objects function # obj_hierarchy = hierarchy of objects, output from pcv.find_objects function # roi_type = 'partial' (default, for partially inside), 'cutto', or # 'largest' (keep only largest contour) plant_obj, plant_hier, plant_mask, obj_area = pcv.roi_objects(img=img, roi_contour=roi_cnt, roi_hierarchy=roi_hierarchy, object_contour=obj_cnt, obj_hierarchy=obj_hierarchy, roi_type='partial') # + # Skeletonize the plant mask (one-pixel wide representation) # Inputs: # mask - Binary mask skeleton = pcv.morphology.skeletonize(mask=plant_mask) # The output of this step can look like it's almost totally black since the lines are # so thin and the image is large. # + # Adjust line thickness with the global line thickness parameter (default = 5), # and provide binary mask of the plant for debugging. NOTE: the objects and # hierarchies returned will be exactly the same but the debugging image (segmented_img) # will look different. pcv.params.line_thickness = 10 # Prune the skeleton # Inputs: # skel_img = Skeletonized image # size = Pieces of skeleton smaller than `size` should get removed. (Optional) Default `size=0`. # mask = Binary mask for debugging (optional). If provided, debug images will be overlaid on the mask. pruned, seg_img, edge_objects = pcv.morphology.prune(skel_img=skeleton, size=100, mask=plant_mask) # ^ # | # Increase size to remove the extra, small spikes on # the skeletonized plant but don't increase to the # point where actual leaves get removed. 
# + # Sort segments into leaf objects and stem objects # Inputs: # skel_img = Skeletonized image # objects = List of contours # mask = (Optional) binary mask for debugging. If provided, debug image # will be overlaid on the mask. leaf_obj, stem_obj = pcv.morphology.segment_sort(skel_img=pruned, objects=edge_objects, mask=plant_mask) # + # Similar to line thickness, there are optional text size and text thickness parameters # that can be adjusted to better suit images or varying sizes. pcv.params.text_size=3 # (default text_size=.55) pcv.params.text_thickness=8 # (defaul text_thickness=2) # Identify segments # Inputs: # skel_img = Skeletonized image # objects = List of contours # mask = (Optional) binary mask for debugging. If provided, debug image # will be overlaid on the mask. segmented_img, labeled_img = pcv.morphology.segment_id(skel_img=skeleton, objects=leaf_obj, mask=plant_mask) # + # Measure path lengths of segments # Inputs: # segmented_img = Segmented image to plot lengths on # objects = List of contours labeled_img = pcv.morphology.segment_path_length(segmented_img=segmented_img, objects=leaf_obj) # + # Measure euclidean distance of segments # Inputs: # segmented_img = Segmented image to plot lengths on # objects = List of contours labeled_img = pcv.morphology.segment_euclidean_length(segmented_img=segmented_img, objects=leaf_obj) # + # Measure the angle of segments # Inputs: # segmented_img = Segmented image to plot angles on # objects = List of contours labeled_img = pcv.morphology.segment_angle(segmented_img=segmented_img, objects=leaf_obj) # + # Measure the leaf insertion angles # NOTE: This function is slow and will likely take up to 2 minutes to run # Inputs: # skel_img = Skeletonize image # segmented_img = Segmented image to plot insertion angles on # leaf_objects = List of leaf contours # stem_objects = List of stem objects # size = Size of the inner portion of each leaf to find a linear regression line labeled_img = 
pcv.morphology.segment_insertion_angle(skel_img=skeleton, segmented_img=segmented_img, leaf_objects=leaf_obj, stem_objects=stem_obj, size=90) # + # Format data collected into a table leaf_ids = np.vstack(pcv.outputs.observations['segment_path_length']['label']) segment_path_length = np.vstack(pcv.outputs.observations['segment_path_length']['value']) segment_eu_length = np.vstack(pcv.outputs.observations['segment_eu_length']['value']) seg_angles = np.vstack(pcv.outputs.observations['segment_angle']['value']) segment_insertion_angle = np.vstack(pcv.outputs.observations['segment_insertion_angle']['value']) data_table = np.hstack((leaf_ids, segment_path_length, segment_eu_length, seg_angles, segment_insertion_angle)) # + # Print data out to a text file that can be imported into Excel np.savetxt("leaf_phenotype_data.txt", data_table, delimiter=',', fmt='%10.5f', header='leaf_id, path_length, eu_length, angle, insertion_angle') # To see the text file saved out, click 'File' tab in top left corner, click 'Open' # Download this file to your computer by checking the box directly to the # left of the file named "leaf_phenotype_data.txt" and then click "Download" # in the top left corner.
notebooks/Eveland_July_2019.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # MNIST End-to-End Example with Kubeflow Components
#
# This pipeline contains 5 steps: it finds the best hyperparameters using Katib, creates a PVC for storing models, processes the hyperparameter results, trains the model in a distributed fashion on TFJob with the best hyperparameters using more iterations, and finally serves the model using KFServing. You can visit this [medium blog](https://medium.com/@liuhgxa/an-end-to-end-use-case-by-kubeflow-b2f72b0b587) for more details on this pipeline.

# ### Define the model name, Kubeflow user namespace, and storage class for running this pipeline
#
# In the cell below, set the name you want to use for this pipeline, the namespace you want to execute in on Kubeflow, and the [storageclass](https://kubernetes.io/docs/concepts/storage/storage-classes/) used to run and store the trained model. **Make sure the storage class below supports ReadWriteMany in order to run distributed training.**
#
# The default storageclass value below will work on IBM Cloud. For other cloud providers such as AWS and GCP, find the default storage class by running the bash command `kubectl get storageclass`, and replace the storageclass variable's default value below. If the Kubernetes cluster doesn't have any storage class, simply assign the storageclass variable below as `''` to use the generic Kubernetes persistent volume claim.

# ibmc-file-gold is the recommended ReadWriteMany storageclass for IBM Cloud.
storageclass = 'ibmc-file-gold' model_name = "mnist-demo" user_namespace = "anonymous" # Import the DSL package and define the Kubeflow pipeline # + import json from string import Template import kfp from kfp import components from kfp.components import func_to_container_op import kfp.dsl as dsl # + def convert_mnist_experiment_result(experiment_result) -> str: import json r = json.loads(experiment_result) args = [] for hp in r: print(hp) args.append("%s=%s" % (hp["name"], hp["value"])) return " ".join(args) def add_istio_annotation(op): op.add_pod_annotation(name='sidecar.istio.io/inject', value='false') return op @dsl.pipeline( name="End to end pipeline", description="An end to end example including hyperparameter tuning, train and inference." ) def mnist_pipeline( name=model_name, namespace=user_namespace, storageclass=storageclass, step=4000): # step 1: create a Katib experiment to tune hyperparameters objectiveConfig = { "type": "minimize", "goal": 0.001, "objectiveMetricName": "loss", } algorithmConfig = {"algorithmName" : "random"} parameters = [ {"name": "--tf-learning-rate", "parameterType": "double", "feasibleSpace": {"min": "0.01","max": "0.03"}}, {"name": "--tf-batch-size", "parameterType": "discrete", "feasibleSpace": {"list": ["16", "32", "64"]}}, ] rawTemplate = { "apiVersion": "kubeflow.org/v1", "kind": "TFJob", "metadata": { "name": "{{.Trial}}", "namespace": "{{.NameSpace}}" }, "spec": { "tfReplicaSpecs": { "Chief": { "replicas": 1, "restartPolicy": "OnFailure", "template": { "spec": { "containers": [ { "command": [ "sh", "-c" ], "args": [ "python /opt/model.py --tf-train-steps=2000 {{- with .HyperParameters}} {{- range .}} {{.Name}}={{.Value}} {{- end}} {{- end}}" ], "image": "liuhougangxa/tf-estimator-mnist", "name": "tensorflow" } ] } } }, "Worker": { "replicas": 3, "restartPolicy": "OnFailure", "template": { "spec": { "containers": [ { "command": [ "sh", "-c" ], "args": [ "python /opt/model.py --tf-train-steps=2000 {{- with .HyperParameters}} 
{{- range .}} {{.Name}}={{.Value}} {{- end}} {{- end}}" ], "image": "liuhougangxa/tf-estimator-mnist", "name": "tensorflow" } ] } } } } } } trialTemplate = { "goTemplate": { "rawTemplate": json.dumps(rawTemplate) } } metricsCollectorSpec = { "source": { "fileSystemPath": { "path": "/tmp/tf", "kind": "Directory" } }, "collector": { "kind": "TensorFlowEvent" } } katib_experiment_launcher_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/katib-launcher/component.yaml') op1 = katib_experiment_launcher_op( experiment_name=name, experiment_namespace=namespace, parallel_trial_count=3, max_trial_count=12, objective=str(objectiveConfig), algorithm=str(algorithmConfig), trial_template=str(trialTemplate), parameters=str(parameters), metrics_collector=str(metricsCollectorSpec), # experiment_timeout_minutes=experimentTimeoutMinutes, delete_finished_experiment=False) # step2: create a TFJob to train your model with best hyperparameter tuned by Katib tfjobjson_template = Template(""" { "apiVersion": "kubeflow.org/v1", "kind": "TFJob", "metadata": { "name": "$name", "namespace": "$namespace", "annotations": { "sidecar.istio.io/inject": "false" } }, "spec": { "tfReplicaSpecs": { "Chief": { "replicas": 1, "restartPolicy": "OnFailure", "template": { "metadata": { "annotations": { "sidecar.istio.io/inject": "false" } }, "spec": { "volumes": [ { "name": "export-model", "persistentVolumeClaim": { "claimName": "$modelpvc" } } ], "containers": [ { "command": [ "sh", "-c" ], "args": [ "python /opt/model.py --tf-train-steps=$step --tf-export-dir=/mnt/export $args" ], "image": "liuhougangxa/tf-estimator-mnist", "name": "tensorflow", "volumeMounts": [ { "mountPath": "/mnt/export", "name": "export-model" } ] } ] } } }, "Worker": { "replicas": 3, "restartPolicy": "OnFailure", "template": { "metadata": { "annotations": { "sidecar.istio.io/inject": "false" } }, "spec": { "volumes": [ { "name": "export-model", 
"persistentVolumeClaim": { "claimName": "$modelpvc" } } ], "containers": [ { "command": [ "sh", "-c" ], "args": [ "python /opt/model.py --tf-train-steps=$step --tf-export-dir=/mnt/export $args" ], "image": "liuhougangxa/tf-estimator-mnist", "name": "tensorflow", "volumeMounts": [ { "mountPath": "/mnt/export", "name": "export-model" } ] } ] } } } } } } """) convert_op = func_to_container_op(convert_mnist_experiment_result) op2 = convert_op(op1.output) volume_template = Template(""" { "apiVersion": "v1", "kind": "PersistentVolumeClaim", "metadata": { "name": "{{workflow.name}}-modelpvc", "namespace": "$namespace" }, "spec": { "accessModes": ["ReadWriteMany"], "resources": { "requests": { "storage": "1Gi" } }, "storageClassName": "$storageclass" } } """) volopjson = volume_template.substitute({'namespace': namespace, 'storageclass': storageclass}) volop = json.loads(volopjson) modelvolop = dsl.ResourceOp( name="modelpvc", k8s_resource=volop ) tfjobjson = tfjobjson_template.substitute( {'args': op2.output, 'name': name, 'namespace': namespace, 'step': step, 'modelpvc': modelvolop.outputs["name"] }) tfjob = json.loads(tfjobjson) train = dsl.ResourceOp( name="train", k8s_resource=tfjob, success_condition='status.replicaStatuses.Worker.succeeded==3,status.replicaStatuses.Chief.succeeded==1' ) # step 3: model inferencese by KFServing Inferenceservice inferenceservice_template = Template(""" { "apiVersion": "serving.kubeflow.org/v1alpha2", "kind": "InferenceService", "metadata": { "name": "$name", "namespace": "$namespace" }, "spec": { "default": { "predictor": { "tensorflow": { "storageUri": "pvc://$modelpvc/" } } } } } """) inferenceservicejson = inferenceservice_template.substitute({'modelpvc': modelvolop.outputs["name"], 'name': name, 'namespace': namespace}) inferenceservice = json.loads(inferenceservicejson) inference = dsl.ResourceOp( name="inference", k8s_resource=inferenceservice, success_condition='status.url').after(train) 
dsl.get_pipeline_conf().add_op_transformer(add_istio_annotation) # - # Assign permission to Kubeflow pipeline service account and run this pipeline using the kfp-tekton SDK. You can skip the below command if you are runnning with multi-user mode. # !kubectl create clusterrolebinding $user_namespace-admin --clusterrole cluster-admin --serviceaccount=kubeflow:pipeline-runner # Submit the Kubeflow pipeline # + # Specify Kubeflow Pipeline Host host=None # Submit a pipeline run from kfp_tekton import TektonClient TektonClient(host=host).create_run_from_pipeline_func(mnist_pipeline, arguments={}) # - # When the pipeline done, you can get `inferenceservice` name using the below command, for example in this case in my cluster, the `inference-name` is `mnist-demo` # !kubectl get inferenceservice -n $user_namespace # Download a mnist picture for inference test if it's not in this directory, such as 9.bmp from [here](https://raw.githubusercontent.com/hougangliu/pipelines/e2e-pipeline-sample/samples/contrib/e2e-mnist/9.bmp). Then upload it to the notebook. # # Update the **istio_ingress_gateway** below with your kfserving ingress endpoint. Then, execute the below cell to send a sample payload to the deployed model. 
# +
import numpy as np
from PIL import Image
import requests

# Get istio_ingress_gateway endpoint by "kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}'"
istio_ingress_gateway = "xxx.xx.xx.xxx"

# Get inference_name as above step 1
inference_name = model_name
inference_namespace = user_namespace

# image_file is the mnist picture uploaded as above step 2
image_file = '9.bmp'

# Load the picture as 28x28 grayscale and shape it (batch, 28, 28, 1) for the model.
# BUGFIX: `np.float` was a deprecated alias for the builtin `float` and was
# removed in NumPy 1.20+; use the builtin directly.
data = np.array(Image.open(image_file).convert('L').resize((28, 28))).astype(float).reshape(-1, 28, 28, 1)
np.set_printoptions(threshold=np.inf)  # print the full array, not a truncated summary
json_request = '{{ "instances" : {} }}'.format(np.array2string(data, separator=',', formatter={'float': lambda x: "%.1f" % x}))
# KFServing routes on the Host header: <inference-name>.<namespace>.example.com
headers = {"Host": "%s.%s.example.com" % (inference_name, inference_namespace)}
response = requests.post("http://%s/v1/models/%s:predict" % (istio_ingress_gateway, inference_name), data=json_request, headers=headers)
print(response.json())
# -

# # Clean up
#
# Because Tekton lacks exit-handler support, we need to run the below commands to clean up the resources created by this pipeline.

# !kubectl delete inferenceservice -n $user_namespace $model_name
# !kubectl delete experiment -n $user_namespace $model_name
# !kubectl delete tfjob -n $user_namespace $model_name
samples/e2e-mnist/mnist.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# slow down a bit when hacking something together, e.g. I forgot to add a simple function call
# tuple unpacking is nice, but cannot be done in a nested list comprehension
# don't forget .items in `for k, v in dict.items()`
# use hashlib for md5 encodings
# multiline list comprehensions don't need extra parentheses, but multiline if statements do
# np.clip min and max can be omitted by specifying None
# try/except looks nice until it obscures your real error
# parsing ints to ints instead of strings is really important
# checking whether something is an int should be done with isinstance, not with isalpha() (fails on int)
# removing from a list while iterating can be done safely by iterating over a slice(?)
# with re make sure to use r'' literal strings
# read the assignment before tinkering with networkx and discovering it's not necessary
# sometimes a simple for loop works better than a list comprehension when parsing the input, and just add to concept variables
# for incrementing a string, you can use chr(ord(inp)+1)
# find repeating characters: re.findall(r'([a-z])\1', password)
# regex: make an operator non-greedy by appending ?
# ok so sometimes you can bruteforce a problem....
# while manually modifying the input, make sure to change exactly the right line
# before doing bfs, check if the search space is not going to explode
# top 10 score by using a factors function!
# itertools.chain is nice for iterating over multiple iterables
# 1!=2!=1 == True, to check all different use len(set([a,b,c]))==3
# -

from dataclasses import dataclass
from math import gcd, ceil
import re
from collections import Counter, defaultdict, namedtuple, deque
import itertools
import numpy as np
from matplotlib import pyplot as plt
from aoc_utils import *
import networkx as nx
from collections import defaultdict
from itertools import permutations
from aoc_utils import *
import re

# +
# Advent of Code 2015, day 25: codes live on an infinite grid filled along
# anti-diagonals; each code is the previous one times `multi`, mod `div`.
cur = 20151125        # first code, given by the puzzle
multi = 252533        # multiplier
div = 33554393        # modulus
r = 1
c = 1
seekrow = 2947
seekcol = 3029
# seekrow = 3
# seekcol = 3


def nextone(prev):
    """Return the code that follows `prev` in the sequence."""
    return (prev * multi) % div


# Brute force: walk cells in anti-diagonal order until we reach the target.
for i in range(5000000000):
    cur = nextone(cur)
    # advance to the next cell: the top of a diagonal (r == 1) wraps to the
    # bottom-left of the next diagonal
    if r == 1:
        r = c + 1
        c = 1
    else:
        r -= 1
        c += 1
    if r == seekrow and c == seekcol:
        print(cur)
        break  # BUGFIX: stop once found instead of spinning through 5e9 iterations
# -

# Closed form via modular exponentiation:
# https://en.wikipedia.org/wiki/Modular_exponentiation#Right-to-left_binary_method
cur = 20151125
multi = 252533
div = 33554393
r = 1
c = 1
seekrow = 2947
seekcol = 3029
# seekrow = 3
# seekcol = 3


def x(r, c):
    """0-based index of cell (r, c) in anti-diagonal order (x(1, 1) == 0)."""
    # BUGFIX: moved above its first use -- run top-to-bottom as a script, the
    # call below would otherwise raise NameError.
    return ((r + c) ** 2 - 3 * r - c) // 2


# Exponent of the multiplier for the target cell.
# BUGFIX: the original `sum(range(seekrow + seekcol - 2)) + seekcol` disagrees
# with both x() and the brute force (e.g. for (3, 3) it gives 9 instead of 12).
# The diagonals before ours hold sum(1 .. seekrow+seekcol-2) cells, we are
# `seekcol` cells into our diagonal, and the very first cell needs zero
# multiplications, hence the trailing -1.
count = sum(range(1, seekrow + seekcol - 1)) + seekcol - 1

count

x(seekrow, seekcol)

pow(multi, count, div) * cur % div

# +
v = 20151125
e = 18168396
a = 252533
m = 33554393


def f(x, e):
    """Compute x**e mod m by recursive squaring (right-to-left binary method)."""
    if e == 0:
        return 1
    if e == 1:
        return x
    r = f(x, e // 2)  # BUGFIX: floor division; `e / 2` is a float under Python 3
    r *= r
    if e % 2:
        r *= x
    return r % m


print(v * f(a, e) % m)


# +
def egcd(a, b):
    """Extended Euclid: return (g, x, y) with g == gcd(a, b) and a*x + b*y == g."""
    if a == 0:
        return (b, 0, 1)
    else:
        g, y, x = egcd(b % a, a)
        return (g, x - (b // a) * y, y)


def modinv(a, m):
    """Modular inverse of `a` mod `m`; raises if gcd(a, m) != 1."""
    g, x, y = egcd(a, m)
    if g != 1:
        raise Exception('modular inverse does not exist')
    else:
        return x % m
# -

# NOTE(review): the block below looks like scratch work pasted from a different
# puzzle (AoC 2019 day 22 shuffle inversion); kept as-is.
numtimes = 101741582076661
decksize = 119315717514047
cardnum = 2020

# a⋅x=b(modn)
# x = 2020(mod decksize)
amin = modinv(101741582076661, decksize)

pow(amin, numtimes, decksize) * 2020 % decksize

62416301438548
advent_of_code_2015/day 25/solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="eYd84wbrjTx4" # # Modeling: K-Nearest Neighbors & Credit Card Fraud, Refined Dataset # # Refer to our Exploratory or Cleaning Notebooks for details on the dataset: # 1. ./clean_CC-fraud.ipynb # 2. ./EDA-FS_CC-fraud.ipynb # # In this notebook, we will explore how well we can predict credit card fraud using K-Nearest Neighbors on a subset of credit card data that is balanced in order to have an equal number of fraud cases and legitimate cases. We will fit two models: # 1. A model that uses all available features # 2. A model using selected features that demonstrate several thresholds of correlation to our classifications # + [markdown] id="aOu-0A-XjTx8" # ## **1.** Imports & Settings # + id="zEj9rZkqjTx9" import pandas as pd import numpy as np # model and score from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.metrics import * # plot import matplotlib.pyplot as plt import seaborn as sns pd.set_option('display.max_rows', 200) # + [markdown] id="wmHaZzTojTx9" # --- # + [markdown] id="zD8o72nZjTx9" # ## **2.** Load Data # Here we load the data from Google Drive, but the data can also be accessed from a realtive path for use with Jupyter Notebooks/Lab. 
# + [markdown] id="UMwT-McSN1rL" # ### **2.1.** Load Data w/ Jupyter Notebooks/Lab # ``` # # Run this code if using Jupyter Notebooks/Lab # # file_name = 'power_sub.csv' # # file_name = 'robust_sub.csv' # file_name = 'minmax_sub.csv' # # df = pd.read_csv(f'./data/{file_name') # ``` # + [markdown] id="XxKRW38Lj9pJ" # ### **2.2.** Access & Load Data w/ Google Drive # + colab={"base_uri": "https://localhost:8080/"} id="q8dkL3vRkPa-" executionInfo={"status": "ok", "timestamp": 1611557833705, "user_tz": -540, "elapsed": 24798, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="a2282552-6004-4ac1-af6f-4b8d9b285d13" # Run this cell if using Google Colab from google.colab import drive drive.mount('/content/drive') # + [markdown] id="O3mHPB1kVYs6" # In order to test each of our scaling methods, we will uncomment our target method and run the notebook. The results of each run will be saved to a CSV file for review. # + id="SpwW8cvljTx9" # file_name = 'power_sub.csv' # file_name = 'robust_sub.csv' file_name = 'minmax_sub.csv' path = f'/content/drive/MyDrive/Colab Notebooks/KNN_creditCardFraud/data/{file_name}' df = pd.read_csv(path) # + [markdown] id="2oI1PbOvjTx-" # ## **3.** Detail and View Data # + colab={"base_uri": "https://localhost:8080/", "height": 224} id="87ZmJEbMjTx-" executionInfo={"status": "ok", "timestamp": 1611538643760, "user_tz": -540, "elapsed": 533, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="494acadb-b9fb-4357-cf33-5c0fac56ec8b" df.head() # + [markdown] id="adrbm03-jTx_" # --- # + [markdown] id="B_VaxSabjTx_" # ## **4.** Train Model # + [markdown] id="Xpxq_AJPYsGG" # First, let's define the class DataFrame that we'll use for both of our models. 
# + id="YAn6EEtLYzOT" # declare our target variables # ('Class' is the response variable w/ val 1 in case of fraud and 0 otherwise.) classes_df = df['Class'] # + [markdown] id="kd9xiYbePzEz" # ### **4.1.** Model I: All Features # + [markdown] id="k664T-7pjTx_" # #### **4.1.1.** Split Data # # Define our features DataFrame as all columns except for 'Class'. # + id="VZuiySozjTx_" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611538645108, "user_tz": -540, "elapsed": 489, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="755bf3ab-b0da-43d3-ec70-36b51eb0fde9" # features_df is our features matrix features_df = df.drop('Class', axis=1) print(f'Features \n{features_df.head().to_string()}\n') print(f'Classifications \n{classes_df.head().to_string()}') # + [markdown] id="m0wiEmaww_Nl" # Split data into our training and testing sets. # + id="TUkVTCbwsren" # split data into training and testing sets X_train, X_test, y_train, y_test = train_test_split(features_df, classes_df, test_size=0.2, random_state=65, stratify=classes_df) # + [markdown] id="-oHX_KdpBRlt" # #### **4.1.2.** Find Optimal Hyperparamaters # Below we use GridSearchCV to test combinations of our target parameters. Although we can adjust and even use multiple options for the scoring metric, we will use the default scoring along with two cross-validation folds to find our best parameters. This is essentially a brute-force option but as our dataset is relatively small, it does not take a significant amount of time. 
# + id="QfwmGicxjTyA" # define the hyperparameters and the possible values to test # n_neighbors only tests odd numbers as an even number of neighbors # may result in an equal count of fraud and non-fraud neighbors n_neighbors = np.arange(1, 51, 2) weights = ['uniform', 'distance'] # minkowski with p=1 is equivalent to manhattan_distance # minkowski with p=2 is equivalent to euclidean_distance metric = ['minkowski'] p = np.arange(1, 4, 1) algorithm = ['auto', 'ball_tree', 'kd_tree', 'brute'] leaf_size = np.arange(1, 51, 1) # we can pass multiple scoring options to GridSearchCV # scoring = {'AUC': 'roc_auc', 'Accuracy': make_scorer(accuracy_score)} # define paramgrid as a dict of the above values and the model's # designation for the hyperparameters paramgrid = dict(n_neighbors=n_neighbors, weights=weights, metric=metric, p=p) # instantiate our model knn = KNeighborsClassifier() # instantiate GridSearchCV grid = GridSearchCV(estimator=knn, param_grid=paramgrid, cv=2, n_jobs=-1, verbose=1) # + colab={"base_uri": "https://localhost:8080/"} id="bweDzgxsUZN_" executionInfo={"status": "ok", "timestamp": 1611538675135, "user_tz": -540, "elapsed": 29805, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="348b0bdd-904e-46b3-8823-e3e15aea4004" grid.fit(X_train, y_train) # + [markdown] id="6O-Mly1XJlr8" # Below we visualize the results of our GridSearchCV, providing some comparative insights on the combinations of parameters we tested. 
# + colab={"base_uri": "https://localhost:8080/", "height": 384} id="AEwWejRrPACm" executionInfo={"status": "ok", "timestamp": 1611538676126, "user_tz": -540, "elapsed": 29377, "user": {"displayName": "<NAME>", "photoUrl": "<KEY>", "userId": "01748275738753925060"}} outputId="bbd94970-4d20-407c-edf9-b65b9b7631b1" # we'll use this again for our second model, define as function def plot_search_results(grid): """ Params: grid: A trained GridSearchCV object. """ # results from grid search means_test = results['mean_test_score'] stds_test = results['std_test_score'] # means_train = results['mean_train_score'] # stds_train = results['std_train_score'] # get indexes of values per hyper-parameter masks=[] masks_names= list(grid.best_params_.keys()) for p_k, p_v in grid.best_params_.items(): masks.append(list(results['param_'+p_k].data==p_v)) params=grid.param_grid plt.rcParams['figure.facecolor'] = 'white' # Plotting results fig, ax = plt.subplots(1,len(params),sharex='none', sharey='all', figsize=(25,8)) fig.suptitle('Score per parameter') fig.text(0.04, 0.5, 'MEAN SCORE', va='center', rotation='vertical') pram_preformace_in_best = {} for i, p in enumerate(masks_names): m = np.stack(masks[:i] + masks[i+1:]) pram_preformace_in_best best_parms_mask = m.all(axis=0) best_index = np.where(best_parms_mask)[0] x = np.array(params[p]) y_1 = np.array(means_test[best_index]) e_1 = np.array(stds_test[best_index]) # y_2 = np.array(means_train[best_index]) # e_2 = np.array(stds_train[best_index]) ax[i].errorbar(x, y_1, e_1, linestyle='--', marker='o', label='test') # ax[i].errorbar(x, y_2, e_2, linestyle='-', marker='^',label='train' ) ax[i].set_xlabel(p.upper()) plt.legend() plt.tight_layout(rect=[.05, 0.03, 1, 0.95]) plt.show() plot_search_results(grid) # + [markdown] id="072Yzc9AJMBd" # Next, we assign our best parameters to variables so that we can easily pass them to our final model. 
# + colab={"base_uri": "https://localhost:8080/"} id="V4_myp_6UcHg" executionInfo={"status": "ok", "timestamp": 1611538675136, "user_tz": -540, "elapsed": 29470, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="9512c199-2302-49e5-93eb-60db52a62cb7" best_params = grid.best_params_ best_n_neighbors = best_params['n_neighbors'] best_weight = best_params['weights'] best_metric = best_params['metric'] best_p = best_params['p'] print(f'Best Params: {best_params} {grid.best_score_}') results = grid.cv_results_ # + [markdown] id="Q8DFapAJBinA" # #### **4.1.3.** Instantiate w/ Optimal Hyperparameters # Passing our optimal hyperparameters to fit our final model. # + colab={"base_uri": "https://localhost:8080/"} id="QPKupAA-nfyy" executionInfo={"status": "ok", "timestamp": 1611538676127, "user_tz": -540, "elapsed": 29028, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="29867990-1933-434d-9ce4-59fdcbb8c8ee" # %%time # instantiate the model with optimal neighbors knn = KNeighborsClassifier(n_neighbors=best_n_neighbors, weights=best_weight, metric=best_metric, p=best_p) # fit the model on the training data knn.fit(X_train, y_train) # make predictions with our test features guesses = knn.predict(X_test) guesses # + [markdown] id="3fswjXB5Bl7h" # #### **4.1.4** Evaluate # As part of our evaluation, we collect several scoring metrics and save them for review in our conclusion. 
# + [markdown] id="xxAYv5ApO11t"
# ##### **4.1.4.1.** Scores

# + colab={"base_uri": "https://localhost:8080/"} id="pGUJHk5ZrQSL" executionInfo={"status": "ok", "timestamp": 1611538676127, "user_tz": -540, "elapsed": 27981, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="26b31663-76a0-46f8-d45d-5c4aee0e0166"
# score our predictions against the actual classifications from our test set
accuracy = accuracy_score(y_test, guesses)
recall = recall_score(y_test, guesses)
precision = precision_score(y_test, guesses)
f1 = f1_score(y_test, guesses)

print(f'Accuracy: {accuracy}')
print(f'Recall: {recall}')
print(f'Precision: {precision}')
print(f'F1: {f1}')

# + [markdown] id="4-yctZfOO8Ll"
# ##### **4.1.4.2.** Confusion Matrix
# A visual representation of our True and False Positives/Negatives.

# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="IXv71Zj7OKvU" executionInfo={"status": "ok", "timestamp": 1611538676128, "user_tz": -540, "elapsed": 27318, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPX<KEY>", "userId": "01748275738753925060"}} outputId="39570ebc-2e5c-4c8b-e659-e0c88a6b552b"
cnf_matrix = confusion_matrix(y_test, guesses)

# create labels: one "name / count / percent" annotation per matrix cell,
# in row-major order (TN, FP, FN, TP for a binary confusion matrix)
group_names = ['True Neg','False Pos','False Neg','True Pos']
group_counts = ['{0:0.0f}'.format(value) for value in cnf_matrix.flatten()]
group_percentages = ['{0:.2%}'.format(value) for value in cnf_matrix.flatten()/np.sum(cnf_matrix)]
labels = [f'{v1}\n{v2}\n{v3}' for v1, v2, v3 in zip(group_names,group_counts,group_percentages)]
labels = np.asarray(labels).reshape(2,2)

# create matrix
p = sns.heatmap(pd.DataFrame(cnf_matrix), annot=labels, cmap="binary" , fmt='')
plt.title('Confusion Matrix: All Features, Balanced Subset', y=1.1)
plt.ylabel('Actual label')
plt.xlabel('Predicted label')

# + [markdown] id="Uc68M7kHPNkG"
# ##### **4.1.4.3.** ROC-AUC Curve
# The ROC-AUC curve is a performance measurement for our classifications at various threshold settings. KNN, however, does not have a threshold setting as some classification models do. In the case of KNN, the ROC is a curve representing the percentage of n-neighbors that agree with the final classification.
# <br />
# <br />
# The AUC score represents the degree of separability—how well the model can distinguish between our classes.
# <br />
# <br />
# Although we are not attempting to tune our model according to something like a threshold parameter, ROC-AUC is a good comparative measure of separability.

# + id="yRa3A3VyO0us" colab={"base_uri": "https://localhost:8080/", "height": 513} executionInfo={"status": "ok", "timestamp": 1611538676437, "user_tz": -540, "elapsed": 24961, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="94fea39d-e457-49cf-9c75-0a90279c6b7a"
# probability of the positive class (column 1) drives the ROC curve
y_scores = knn.predict_proba(X_test)
fpr, tpr, threshold = roc_curve(y_test, y_scores[:, 1])
roc_auc = auc(fpr, tpr)

f, (ax) = plt.subplots(figsize=(8,8))
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
# diagonal = no-skill baseline
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([-.01, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
# NOTE: this second plt.title overrides the generic one set above
plt.title('ROC Curve of KNN: All Features, Balanced Subset')
plt.show()

# + [markdown] id="3mJ7BQaVhJul"
# ##### **4.1.4.4.** Save Results

# + colab={"base_uri": "https://localhost:8080/", "height": 100} id="GcU-rhMUdAeh" executionInfo={"status": "ok", "timestamp": 1611461807366, "user_tz": -540, "elapsed": 32948, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="621132b2-ecb0-43be-f968-bb18e601e446"
# define columns and values — one row summarizing this run's configuration
# and scores, for the results CSV appended to below
results_data = {
    'transformer': file_name,
    'features': 'all',
    'features_note': 'n/a',
    'best_k': best_n_neighbors,
    'best_weight': best_weight,
    'best_metric': best_metric,
    'best_p': best_p,
    'accuracy': accuracy,
    'recall': recall,
    'precision': precision,
    'f1': f1,
    'auc': roc_auc
}

# create and view dataframe
results_df = pd.DataFrame(results_data, index=[0])
results_df

# + id="U0cgwMCkdTeS"
# append (mode='a', no header) so repeated runs accumulate rows in the CSV
results_df.to_csv('/content/drive/MyDrive/Colab Notebooks/KNN_creditCardFraud/data/results.csv', mode='a', header=False, index=False)

# + [markdown] id="6rUlhcx2P93u"
# ### **4.2.** Model II: Select Features
# We've defined several feature sets to test by commenting/uncommenting the target code block. All results are saved to CSV below.
# <br />
# <br />
# In order to keep track of our results, feature notes are included alongside each set. A correlation coefficient >= magnitude .20, for example, refers to our Correlation Matrix and any value less than -.20 or greater than .20.
# + id="z12XzOFS3Gy8" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611461807367, "user_tz": -540, "elapsed": 32933, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="11760f36-280f-4d18-f484-fadf5f922fcc"
# Feature-set selection: exactly ONE of the following blocks should be
# uncommented per run; `features_note` is recorded in the results CSV so each
# run's feature set can be identified later.
features_note = 'corr_coef >= mag .20'
select_features_df = df[['V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V9', 'V10',
                         'V11', 'V12', 'V14', 'V16', 'V17', 'V18', 'V19']]

# features_note = 'corr_coef >= mag .30'
# select_features_df = df[['V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V9', 'V10',
#                          'V11', 'V12', 'V14', 'V16', 'V17', 'V18']]

# features_note = 'corr_coef >= mag .40'
# select_features_df = df[['V1', 'V2', 'V3', 'V4', 'V7', 'V9', 'V10', 'V11',
#                          'V12', 'V14', 'V16', 'V17', 'V18']]

# features_note = 'corr_coef >= mag .50'
# select_features_df = df[['V3', 'V4', 'V9', 'V10', 'V11', 'V12', 'V14', 'V16',
#                          'V17']]

# features_note = 'neg corr_coef >= mag .30'
# select_features_df = df[['V1', 'V3', 'V5', 'V6', 'V7', 'V9', 'V10','V12', 'V14',
#                          'V16', 'V17', 'V18']]

print(f'Features \n{select_features_df.head().to_string()}\n')
print(f'Classifications \n{classes_df.head().to_string()}')

# + [markdown] id="axBHyt6vAtXj"
# #### **4.2.1** Split Data
# Split our data in to training and test sets.

# + id="pCzaI7jQ4Cd-"
## Split data into training and testing sets
# stratify keeps the fraud/non-fraud ratio identical in train and test
X_cur_train, X_cur_test, y_cur_train, y_cur_test = train_test_split(select_features_df,
                                                                    classes_df,
                                                                    test_size=0.2,
                                                                    random_state=65,
                                                                    stratify=classes_df)

# + [markdown] id="59JTMp0IA0qa"
# #### **4.2.2.** Find Optimal Hyperparameters
# Below we use GridSearchCV to test combinations of our target parameters. Although we can adjust the scoring method and even use multiple options, we will use the default scoring along with two cross-validation folds to find our best parameters. This is essentially a brute-force option but as our dataset is relatively small, it does not take a significant amount of time.

# + id="qMejo5MM37ba"
# define the hyperparameters and the possible values to test
# n_neighbors only tests odd numbers as an even number of neighbors
# may result in an equal count of fraud and non-fraud neighbors
n_neighbors = np.arange(1, 51, 2)
weights = ['uniform', 'distance']
# minkowski with p=1 is equivalent to manhattan_distance
# minkowski with p=2 is equivalent to euclidean_distance
metric = ['minkowski']
p = np.arange(1, 4, 1)
# NOTE(review): `algorithm` and `leaf_size` are defined here but never added
# to `paramgrid` below, so GridSearchCV does not tune them — confirm whether
# they were meant to be included or can be removed.
algorithm = ['auto', 'ball_tree', 'kd_tree', 'brute']
leaf_size = np.arange(1, 51, 1)

# we can pass multiple scoring options to GridSearchCV
# scoring = {'AUC': 'roc_auc', 'Accuracy': make_scorer(accuracy_score)}

# define paramgrid as a dict of the above values and the model's
# designation for the hyperparameters
paramgrid = dict(n_neighbors=n_neighbors,
                 weights=weights,
                 metric=metric,
                 p=p)

# instantiate our model
knn = KNeighborsClassifier()

# instantiate GridSearchCV
sub_grid = GridSearchCV(estimator=knn, param_grid=paramgrid, cv=2, n_jobs=-1, verbose=1)

# + id="Jk9XqTuNQPQr" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611461818273, "user_tz": -540, "elapsed": 43820, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="5414c7df-1dc8-467c-83f7-687604375759"
sub_grid.fit(X_cur_train, y_cur_train)

# + [markdown] id="gXzrsYG1ZEIs"
# We will assign our best parameters to variables so that we can easily pass them to our final model.
# + id="yP3YUxI9WAGG" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611461818274, "user_tz": -540, "elapsed": 43812, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="8517ec19-0338-41f1-ebd6-14a2b284c8c0" best_params = sub_grid.best_params_ best_n_neighbors = best_params['n_neighbors'] best_weight = best_params['weights'] best_metric = best_params['metric'] best_p = best_params['p'] print(f'The Best Params: {best_params} {sub_grid.best_score_}') results = sub_grid.cv_results_ # + [markdown] id="qdeB8dRiZJ0C" # Below we visualize the results of our GridSearchCV, providing some comparative insights on the combinations of parameters we tested. # + id="ZZ8pjx7zWAeW" colab={"base_uri": "https://localhost:8080/", "height": 366} executionInfo={"status": "ok", "timestamp": 1611461818857, "user_tz": -540, "elapsed": 44387, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="6764a598-1213-43fa-ac5a-b788d70b634d" # use function defined in 4.1.2. plot_search_results(sub_grid) # + [markdown] id="J26GxN-bA9oQ" # #### **4.2.3.** Instantiate w/ Optimal Hyperparameters # + id="755Rm7gRCDr1" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611461819236, "user_tz": -540, "elapsed": 44755, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="4324da4f-2728-43c7-c8aa-8afb8e36b2ca" # %%time # Instantiate the model with optimal neighbors. knn = KNeighborsClassifier(n_neighbors=best_n_neighbors, weights=best_weight, metric=best_metric, p=best_p) # Fit the model on the training data. 
knn.fit(X_cur_train, y_cur_train) # make predictions with our test features cur_guesses = knn.predict(X_cur_test) cur_guesses # + [markdown] id="FpaYvywvBHyU" # #### 4.2.4. Evaluate # As part of our evaluation, we collect several scoring metrics and save them for review in our conclusion. # + [markdown] id="w6xqoNEMUN3G" # ##### 4.2.4.1. Scores # + id="qLWdUpk5CD5h" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611461819236, "user_tz": -540, "elapsed": 44745, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="ad140d35-105e-4790-c7db-0022874997c8" # score our predictions against the actual classifications from our test set accuracy = accuracy_score(y_cur_test, cur_guesses) recall = recall_score(y_cur_test, cur_guesses) precision = precision_score(y_cur_test, cur_guesses) f1 = f1_score(y_cur_test, cur_guesses) print(f'Accuracy: {accuracy}') print(f'Recall: {recall}') print(f'Precision: {precision}') print(f'F1: {f1}') # + [markdown] id="xybO7e2gUGdj" # ##### 4.2.4.2. Confusion Matrix # A visual representation of our True and False Positives/Negatives. 
# + id="JkxEGestyasI" colab={"base_uri": "https://localhost:8080/", "height": 312} executionInfo={"status": "ok", "timestamp": 1611461819237, "user_tz": -540, "elapsed": 44736, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="5b20c9e1-80c2-4cfd-e42d-1676f606ead7"
cnf_matrix = confusion_matrix(y_cur_test, cur_guesses)

# create labels: one "name / count / percent" annotation per matrix cell,
# in row-major order (TN, FP, FN, TP for a binary confusion matrix)
group_names = ['True Neg','False Pos','False Neg','True Pos']
group_counts = ['{0:0.0f}'.format(value) for value in cnf_matrix.flatten()]
group_percentages = ['{0:.2%}'.format(value) for value in cnf_matrix.flatten()/np.sum(cnf_matrix)]
labels = [f'{v1}\n{v2}\n{v3}' for v1, v2, v3 in zip(group_names,group_counts,group_percentages)]
labels = np.asarray(labels).reshape(2,2)

# create matrix
p = sns.heatmap(pd.DataFrame(cnf_matrix), annot=labels, cmap="binary" , fmt='')
# Fixed copy-paste error: this is the feature-selected model (section 4.2),
# not the all-features model, matching the ROC title in 4.2.4.3.
plt.title('Confusion Matrix: Select Features, Balanced Subset', y=1.1)
plt.ylabel('Actual label')
plt.xlabel('Predicted label')

# + [markdown] id="-H3AS9B2URj6"
# ##### 4.2.4.3. ROC-AUC Curve
# The ROC-AUC curve is a performance measurement for our classifications at various threshold settings. KNN, however, does not have a threshold setting as some classification models do. In the case of KNN, the ROC is a curve representing the percentage of n-neighbors that agree with the final classification.
# <br />
# <br />
# The AUC score represents the degree of separability—how well the model can distinguish between our classes.
# <br />
# <br />
# Although we are not attempting to tune our model according to something like a threshold parameter, ROC-AUC is a good comparative measure of performance.
# + id="kRMeSKcNbdo0" colab={"base_uri": "https://localhost:8080/", "height": 513} executionInfo={"status": "ok", "timestamp": 1611461819704, "user_tz": -540, "elapsed": 45194, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/<KEY>", "userId": "01748275738753925060"}} outputId="228b1b0a-08a6-4945-a550-6cef2dc5c5b6" y_cur_scores = knn.predict_proba(X_cur_test) fpr, tpr, threshold = roc_curve(y_cur_test, y_cur_scores[:, 1]) roc_auc = auc(fpr, tpr) f, (ax) = plt.subplots(figsize=(8,8)) plt.title('Receiver Operating Characteristic') plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc) plt.legend(loc = 'lower right') plt.plot([0, 1], [0, 1],'r--') plt.xlim([-.01, 1]) plt.ylim([0, 1]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.title('ROC Curve of KNN: Select Features, Balanced Subset') plt.show() # + [markdown] id="ty_iRw_1hWeC" # ##### **4.2.4.4.** Save Results # + colab={"base_uri": "https://localhost:8080/", "height": 151} id="6e9IsYGMEYbo" executionInfo={"status": "ok", "timestamp": 1611461819705, "user_tz": -540, "elapsed": 45177, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="c321c37a-d13a-476e-9813-a5dd24c95154" # define columns and values sub_results_data = { 'transformer': file_name, 'features': str(select_features_df.columns.to_list()), 'features_note': features_note, 'best_k': best_n_neighbors, 'best_weight': best_weight, 'best_metric': best_metric, 'best_p': best_p, 'accuracy': accuracy, 'recall': recall, 'precision': precision, 'f1': f1, 'auc': roc_auc } # create and view dataframe sub_results_df = pd.DataFrame(sub_results_data, index=[0]) sub_results_df # + id="6Cw-DzTSRirq" # append results to csv sub_results_df.to_csv('/content/drive/MyDrive/Colab Notebooks/KNN_creditCardFraud/data/sub_results.csv', mode='a', header=False, index=False) # + [markdown] 
id="mM5R60AvjTyA" # --- # + [markdown] id="LhgkqHXgjTyA" # ## **5.** Conclusion # + [markdown] id="RDN5KYa5TxeW" # ### 5.1. Results # Load and view the results for the full-featured tests: # + colab={"base_uri": "https://localhost:8080/", "height": 142} id="5ROKNVFuNAkd" executionInfo={"status": "ok", "timestamp": 1611539835046, "user_tz": -540, "elapsed": 499, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="e886c374-7464-428a-f32c-374f2dd244c6" results_path = '/content/drive/MyDrive/Colab Notebooks/KNN_creditCardFraud/data/results.csv' results_df = pd.read_csv(results_path) # sorted by recall, then F1 results_df.drop_duplicates().sort_values(['recall', 'f1'], ascending=False) # + [markdown] id="MU9kbuQXgoJW" # Load and view the results for the feature-selected tests: # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="nWLqfIctN8KX" executionInfo={"status": "ok", "timestamp": 1611557836883, "user_tz": -540, "elapsed": 916, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj5HYPXhJXM0cgj51rkso2aUfnOzt_0neDu7J4y2rc=s64", "userId": "01748275738753925060"}} outputId="c42f4058-b35c-49d1-c447-3ddc9d58d8ce" sub_results_path = '/content/drive/MyDrive/Colab Notebooks/KNN_creditCardFraud/data/sub_results.csv' sub_results_df = pd.read_csv(sub_results_path) # sorted by recall, then F1 sub_results_df.sort_values(['recall', 'f1'], ascending=False) # + [markdown] id="zXT1i_5rUHkk" # ### **5.2.** Results Analysis # Above, we have a number of models with their own strengths and weaknesses to assess. We've recorded the results of several scoring methods, defined below, not just to compare but also as an exercise in considering their individual values in the context of our goal: identify instances of fraud. 
# # > **Accuracy** is a simple ratio of correctly predicted observations to total observations or the total number of predictions our model guessed correctly <br /><br /> # >$$ Accuracy = \frac{True Positives + True Negatives}{True Positives + False Positives + False Negatives + True Negatives}$$ # # <br /> # <br /> # # >**Precision** evaluates how precise a model is in predicting positive labels. It is the ratio of correctly predicted positive observations to the total predicted positive observations. High precision is relative to a low false-positive rate.<br /><br /> # >$$ Precision = \frac{True Positives}{True Positives + False Positives}$$ # # <br /> # <br /> # # >**Recall** (Sensitivity) represents the percentages of true positives our model correctly identified. The ratio of correctly predicted positive observations to all observations in the actual class.<br /><br /> # >$$ Recall = \frac{True Positives}{True Positives + False Negatives}$$ # # <br /> # <br /> # # >**F1 Score** is the weighted average of Precision and Recall. This score takes both false positives and false negatives into account.<br /><br /> # >$$ F1 = \frac{2*(Recall * Precision)}{Recall + Precision}$$ # # <br /> # <br /> # # The highest value for these metrics is 1, representing a perfect score. # <br /> # <br /> # When considering what is important in identifying credit card fraud, we pay special attention to Recall, as false negatives represent instances of fraud that we were unable to identify. In a practical sense, if we have cases of false positives, the cost is, potentially, an employee auditing a particular transaction whereas a false negative is an instance of successful theft. # <br /> # <br /> # We must also consider the results of our tests with the dataset using all features. 
While, in this instance, the best-performing model's scores are nearly identical to our feature-selected model's performance, we do run the risk of noise and less separability when we include features that do not contribute significantly to the final result. We would also see a greater appreciation for dimensionality reduction if working with a larger dataset. A trimmed dataset would see significantly faster training times by comparison to a dataset with more dimensions. # <br /> # <br /> # With that in mind, we would move forward with the feature-selected model at index #11 above. This particular model took data that was scaled with the MinMax scaler and used only features with a correlation coefficient of magnitude .30 or greater. It not only scored the highest in Recall but also in F1 which also takes false negatives into account. # # + [markdown] id="KYdJJcuHT4rW" # ### 5.3. Final Thoughts # # + [markdown] id="sn7wMhVvUQrg" # #### **5.3.1.** Size of Dataset # # After creating our balanced subset, we were left with 984 total observations. The small size of the dataset proved to be forgiving in terms of the time it took to fit each model. It allowed us to test a wider range of scaling methods as well as a breadth of parameters with GridSearchCV without much thought for efficiency. # <br /> # <br /> # However, we must also consider that, although we used every observation of fraud, a different slice of legitimate transactions used alongside those observations of fraud would yield different scores. Though, I do not believe the results would be significantly different. One way to continue this experiment could involve changing our random_state values in the subsetting process, recording those results, repeating, and comparing. # # + [markdown] id="hN_2cbXEGPjh" # #### **5.3.2.** Reflecting on Methods # If I were to approach this project again, I would likely separate each model into individual notebooks.
Currently, a single notebook with both an all-features and a feature-selected model is quite large. # <br /> # <br /> # Additionally, I would consider a more efficient method for testing our alternatively scaled data sets as well as our different sets of selected features. # <br /> # <br /> # Due to the small size of the dataset, we had the opportunity to use methods and explore variables that we may not consider for a significantly larger dataset for concerns of time. GridSearchCV, for example, may be optimized to test fewer, strategically selected parameters. # + [markdown] id="08zCculoUT8g" # #### **5.3.3.** Alternatives to K-Nearest Neighbors # # I chose to use K-Nearest Neighbors for this project because it was the particular model that I wanted to explore and learn more about at the time. If I were to pursue a more effective model, I would consider logistic regression. Although KNN performs well in the context of credit card fraud, we should place a higher value on the ability to eliminate false negatives. Using a classification model such as logistic regression allows us to easily adjust the probability threshold, effectively raising the bar for what is considered a legitimate transaction. Whereas we see a recall score of .93 with KNN, we could use log regression, consult the ROC-AUC curve and find a threshold that would lower or eliminate false negatives with the least possible cost to other scoring metrics.
notebooks/model-KNN-subset_CC-fraud.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import scipy.cluster.hierarchy as sch from sklearn.metrics import accuracy_score train = pd.read_csv('train.csv') test = pd.read_csv('test.csv') train.head() train.dtypes train.isnull().sum() # + # train.drop(['ID','Claim','Gender'],axis=1,inplace=True) # - train.head() train['Claim'].value_counts() train.drop(['Gender'],axis=1,inplace= True) train = pd.get_dummies(train) X = train.drop(['Claim'],axis=1) cols = X.columns y= train['Claim'] from imblearn.under_sampling import RandomUnderSampler rus = RandomUnderSampler(random_state=42) X_res, y_res = rus.fit_resample(X, y) X_res = pd.DataFrame(X_res,columns=cols) X_res.drop(['ID'],axis=1,inplace= True) pd.Series(y_res).value_counts() X_res # X_res = pd.get_dummies(X_res, columns=['Agency','Agency Type','Distribution Channel','Product Name','Destination']) X_res.dtypes X_res.shape plt.figure(figsize=(15,15)) dendrogram = sch.dendrogram(sch.linkage(X_res, method = 'ward')) plt.title('Dendrogram') plt.xlabel('Customers') plt.ylabel('Euclidean Distance') plt.show() from sklearn.cluster import KMeans kmeans = KMeans(n_clusters=2, init='k-means++', random_state=42) y_kmeans = kmeans.fit_predict(X_res) y_kmeans accuracy_score(y_res,y_kmeans) from sklearn.cluster import AgglomerativeClustering agg = AgglomerativeClustering(n_clusters=2, affinity='euclidean',linkage='ward') y_agg = agg.fit_predict(X_res) accuracy_score(y_res,y_agg)
C19_Clustering-Kmeans/Travel Insurance Dataset clustering.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Plot uncertainty standard deviations logged for several action-recognition
# datasets. Plot helpers (ipypthon_plot_sd, ipypthon_plot_sd_agains_acc) come
# from the local plot / plot_utils modules.
import os
import matplotlib.pyplot as plt
import seaborn as sns
from plot import *
from plot_utils import *

# Collect per-dataset log files, excluding source/config files, sorted for
# deterministic plot ordering.
infar = ['infar'+os.sep+f for f in os.listdir('infar') if os.path.isfile('infar'+os.sep+f) and not f.endswith(('.json', '.py', '.ipynb'))]
infar.sort()
ucfsports = ['ucfsports'+os.sep+f for f in os.listdir('ucfsports') if os.path.isfile('ucfsports'+os.sep+f) and not f.endswith(('.json', '.py', '.ipynb'))]
ucfsports.sort()
ucf11 = ['ucf11'+os.sep+f for f in os.listdir('ucf11') if os.path.isfile('ucf11'+os.sep+f) and not f.endswith(('.json', '.py', '.ipynb'))]
ucf11.sort()
jhmdb = ['jhmdb'+os.sep+f for f in os.listdir('jhmdb') if os.path.isfile('jhmdb'+os.sep+f) and not f.endswith(('.json', '.py', '.ipynb'))]
jhmdb.sort()
kth = ['kth'+os.sep+f for f in os.listdir('kth') if os.path.isfile('kth'+os.sep+f) and not f.endswith(('.json', '.py', '.ipynb'))]
kth.sort()

# Figure 1: standard deviation over epochs, one subplot per dataset/run;
# marker_epoch presumably highlights the selected epoch — TODO confirm in plot.py.
f = plt.figure(figsize=(20,10))
ipypthon_plot_sd(ucfsports, prefix='ucfsports/ucfsports_2', suffix='q-5_uncertainty.log', subplot=231, figure=f, marker_epoch=163, title="UCF Sports 1")
ipypthon_plot_sd(ucfsports, prefix='ucfsports/ucfsports_B', suffix='oth_uncertainty.log', subplot=232, figure=f, marker_epoch=181, title="UCF Sports 2")
ipypthon_plot_sd(ucfsports, prefix='ucfsports/ucfsports_B', suffix='q-5_uncertainty.log', subplot=233, figure=f, marker_epoch=95, title="UCF Sports 3")
ipypthon_plot_sd(infar, prefix='infar/infar_', suffix='q-5_uncertainty.log', subplot=234, figure=f, marker_epoch=76, title="InfAR")
ipypthon_plot_sd(ucf11, prefix='ucf11/ucf11_3', suffix='q-5_uncertainty.log', subplot=235, figure=f, marker_epoch=132, title="UCF11")
ipypthon_plot_sd(kth, prefix='kth/kth_', suffix='q-5_uncertainty.log', subplot=236, figure=f, marker_epoch=70, title="KTH")
#f.text(0.05, 0.8, 'CIFAR-100', fontsize=15, fontweight='semibold', rotation=45)
#f.text(0.05, 0.52, 'CIFAR-10', fontsize=15, fontweight='semibold', rotation=45)
#f.text(0.05, 0.25, 'MNIST', fontsize=15, fontweight='semibold', rotation=45)
#f.text(0.21, 0.05, 'ALL DATA', fontsize=15, fontweight='semibold')
#f.text(0.505, 0.05, '1/4', fontsize=15, fontweight='semibold')
#f.text(0.775, 0.05, '1/32', fontsize=15, fontweight='semibold')
plt.show()

# Figure 2: standard deviation plotted against accuracy, same layout.
f = plt.figure(figsize=(28,13))
ipypthon_plot_sd_agains_acc(ucfsports, prefix='ucfsports/ucfsports_2', suffix='q-5_uncertainty.log', subplot=231, figure=f, marker_epoch=163, title="UCF Sports 1")
ipypthon_plot_sd_agains_acc(ucfsports, prefix='ucfsports/ucfsports_B', suffix='oth_uncertainty.log', subplot=232, figure=f, marker_epoch=181, title="UCF Sports 2")
ipypthon_plot_sd_agains_acc(ucfsports, prefix='ucfsports/ucfsports_B', suffix='q-5_uncertainty.log', subplot=233, figure=f, marker_epoch=95, title="UCF Sports 3")
ipypthon_plot_sd_agains_acc(infar, prefix='infar/infar_', suffix='q-5_uncertainty.log', subplot=234, figure=f, marker_epoch=76, title="InfAR")
ipypthon_plot_sd_agains_acc(ucf11, prefix='ucf11/ucf11_3', suffix='q-5_uncertainty.log', subplot=235, figure=f, marker_epoch=132, title="UCF11")
ipypthon_plot_sd_agains_acc(kth, prefix='kth/kth_', suffix='q-5_uncertainty.log', subplot=236, figure=f, marker_epoch=70, title="KTH")
#f.text(0.05, 0.8, 'CIFAR-100', fontsize=15, fontweight='semibold', rotation=45)
#f.text(0.05, 0.52, 'CIFAR-10', fontsize=15, fontweight='semibold', rotation=45)
#f.text(0.05, 0.25, 'MNIST', fontsize=15, fontweight='semibold', rotation=45)
#f.text(0.21, 0.05, 'ALL DATA', fontsize=15, fontweight='semibold')
#f.text(0.505, 0.05, '1/4', fontsize=15, fontweight='semibold')
#f.text(0.775, 0.05, '1/32', fontsize=15, fontweight='semibold')
plt.show()

# Figure 3: "total" standard deviation variant (note: uses jhmdb instead of
# kth in the last subplot, and different marker epochs for ucf11).
f = plt.figure(figsize=(20,10))
ipypthon_plot_sd(ucfsports, prefix='ucfsports/ucfsports_2', suffix='q-5_uncertainty.log', what_sd="total", subplot=231, figure=f, marker_epoch=163)
ipypthon_plot_sd(ucfsports, prefix='ucfsports/ucfsports_B', suffix='oth_uncertainty.log', what_sd="total", subplot=232, figure=f, marker_epoch=181)
ipypthon_plot_sd(ucfsports, prefix='ucfsports/ucfsports_B', suffix='q-5_uncertainty.log', what_sd="total", subplot=233, figure=f, marker_epoch=95)
ipypthon_plot_sd(infar, prefix='infar/infar_', suffix='q-5_uncertainty.log', what_sd="total", subplot=234, figure=f, marker_epoch=76)
ipypthon_plot_sd(ucf11, prefix='ucf11/ucf11_3', suffix='q-5_uncertainty.log', what_sd="total", subplot=235, figure=f, marker_epoch=77)
ipypthon_plot_sd(jhmdb, prefix='jhmdb/jhmdb_', suffix='q-5_uncertainty.log', what_sd="total", subplot=236, figure=f, marker_epoch=30)
#f.text(0.05, 0.8, 'CIFAR-100', fontsize=15, fontweight='semibold', rotation=45)
#f.text(0.05, 0.52, 'CIFAR-10', fontsize=15, fontweight='semibold', rotation=45)
#f.text(0.05, 0.25, 'MNIST', fontsize=15, fontweight='semibold', rotation=45)
#f.text(0.21, 0.05, 'ALL DATA', fontsize=15, fontweight='semibold')
#f.text(0.505, 0.05, '1/4', fontsize=15, fontweight='semibold')
#f.text(0.775, 0.05, '1/32', fontsize=15, fontweight='semibold')
plt.show()
results/standard_deviations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="kBG-Icr8Oda_"
# #data

# + id="ZCG9T_8DOggR"
import pandas as pd
import numpy as np

df = pd.read_csv('https://query.data.world/s/k77dz3twrne5hs62f2fgn7rj6lrpwg')

# NOTE(review): list(DataFrame) yields the column *labels* (here ['user'] and
# ['product']), not the values — presumably users['user'].values /
# pro['product'].values was intended; verify downstream usage.
users = pd.DataFrame({'user':df.manufacturer.unique()})
users = np.array(list(users))
pro = pd.DataFrame({'product':df.product_name.unique()})
pro = np.array(list(pro))
# users

# + id="Sc_a_dXn-s7d"
# NOTE(review): iloc[:, :663] slices *columns*, but these frames have a
# single column — iloc[:663] (rows) looks intended; confirm.
users = pd.DataFrame({'user':df.manufacturer.unique()})
users_train = users.iloc[:, :663]
users_test = users.iloc[:, 663:]
users_train = np.array(list(users_train))
users_test = np.array(list(users_test))

# + id="ajrkmxV62560"
pro = pd.DataFrame({'product':df.product_name.unique()})
pro_train = pro.iloc[:, :2491]
pro_test = pro.iloc[:, 2491:]
pro_train = np.array(list(pro_train))
pro_test = np.array(list(pro_test))

# + id="ppqBiKvDyJTl"
# NOTE(review): `tf` is only imported in a later cell, so this cell fails when
# the notebook is run top-to-bottom; also the candidate dataset is built from
# *manufacturers*, not product names — verify intent.
pro_met = tf.data.Dataset.from_tensor_slices((np.array(list(df['manufacturer']))))

# + [markdown] id="rQmGV4zbTmCK"
# #recommendation

# + colab={"base_uri": "https://localhost:8080/"} id="0R4wx7LtxQcz" outputId="3561f007-5127-4c96-8e1b-3c52ba0fd387"
# !pip install -q tensorflow-recommenders

# + id="hjp4eAZHO9LH"
import os
import pprint
import tempfile

from typing import Dict, Text

import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds

import tensorflow_recommenders as tfrs

# + id="DBU9dPEhwrdC"
# size of the user/product embedding vectors
embedding_dimension = 256

# + id="9v4s-8GXxmJs"
# Query tower: string lookup over the user vocabulary + embedding.
user_model = tf.keras.Sequential([
  tf.keras.layers.StringLookup(
      vocabulary=users, mask_token=None),
  # We add an additional embedding to account for unknown tokens.
  tf.keras.layers.Embedding(len(users) + 1, embedding_dimension)
])

# Candidate tower: string lookup over the product vocabulary + embedding.
pro_model = tf.keras.Sequential([
  tf.keras.layers.StringLookup(
      vocabulary=pro, mask_token=None),
  tf.keras.layers.Embedding(len(pro) + 1, embedding_dimension)
])

# + id="eIThpubkx_sT"
# NOTE(review): candidates here come from pro_met (manufacturers) pushed
# through pro_model (product vocabulary) — these vocabularies don't match;
# confirm this is intentional.
metrics = tfrs.metrics.FactorizedTopK(
  candidates=pro_met.batch(128).map(pro_model)
)

# + id="dmz9_dUNzs_j"
task = tfrs.tasks.Retrieval(
  metrics=metrics
)

# + id="6gUsQvN23RCG"
class firstModel(tfrs.Model):
  """Two-tower retrieval model pairing user and product embeddings."""

  def __init__(self, user_model, pro_model):
    super().__init__()
    self.pro_model: tf.keras.Model = pro_model
    self.user_model: tf.keras.Model = user_model
    self.task: tf.keras.layers.Layer = task

  def compute_loss(self, features: Dict[Text, tf.Tensor], training = False):
    user_embeddings = self.user_model(features[0])
    # NOTE(review): both towers are fed features[0] — the candidate tower
    # should presumably receive features[1] (the product side); verify, as
    # written the retrieval loss compares the same input against itself.
    pro_embeddings = self.pro_model(features[0])

    return self.task(user_embeddings, pro_embeddings)
  # Dict[Text, tf.Tensor]
  # pro_model
  # user_model

# + id="toerJ5lG6Ate"
model = firstModel(user_model, pro_model)
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1))

# + colab={"base_uri": "https://localhost:8080/"} id="DOg1S8a_7RUU" outputId="570183c2-4db6-4589-8ecb-d08973ba7f96"
model.fit(users_train, pro_train, epochs=3)

# + id="yd0de_ew91So"
# model.evaluate(users_test, pro_test, return_dict=True)

# + colab={"base_uri": "https://localhost:8080/"} id="Mbhc9FzuIAuY" outputId="a87bacd9-43ce-45f1-dd36-ba35f91fde7e"
# Brute-force retrieval index over the candidate embeddings, queried with the
# user tower.
index = tfrs.layers.factorized_top_k.BruteForce(model.user_model)
index.index_from_dataset(
  tf.data.Dataset.zip((pro_met.batch(100), pro_met.batch(100).map(model.pro_model)))
)

_, titles = index(tf.constant(["42"]))
print(f"Recommendations for user 42: {titles[0, :3]}")

# + colab={"base_uri": "https://localhost:8080/"} id="MoLnq2qfMl1L" outputId="9027b10f-6412-4777-a3e6-dd5e2e3cdd8c"
# Export the query model.
with tempfile.TemporaryDirectory() as tmp:
  path = os.path.join(tmp, "model")

  # Save the index.
  tf.saved_model.save(index, path)

  # Load it back; can also be done in TensorFlow Serving.
  loaded = tf.saved_model.load(path)

  # Pass a user id in, get top predicted movie titles back.
  scores, titles = loaded(["42"])

  print(f"Recommendations: {titles[0][:3]}")

# + id="m97WbxRrM_Gp"
.ipynb_checkpoints/TFRS_task1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Домашнее задание №2 (курс "Практикум по программированию на языке Python") # ### Тема: Объектно-ориентированное программирование на языке Python. # # #### Преподаватель: <NAME> (<EMAIL>) # # **Выдана**: 20 марта 2021 # # **Дедлайн**: 21:00 04 апреля 2021 # # **Среда выполнения**: Jupyter Notebook (Python 3.7) # # #### Правила: # # Результат выполнения задания - Jupyter Notebook с кодом и подробными ответами в случае теоретических вопросов. __Максимальное число баллов за задание - 20__. # # Все ячейки должны быть "выполненными", при этом результат должен воспроизводиться при проверке (на Python 3.7). Если какой-то код не был запущен или отрабатывает с ошибками, то пункт не засчитывается. Задание, сданное после дедлайна, _не принимается_. Можно отправить недоделанное задание, выполненные пункты будут оценены. # # Готовое задание отправляется на почту преподавателя. # # Задание выполняется самостоятельно. Если какие-то студенты будут уличены в списывании, все они автоматически получат за эту работу 0 баллов. Если вы нашли в Интернете какой-то специфичный код, который собираетесь заимствовать, обязательно укажите это в задании - наверняка вы не единственный, кто найдёт и использует эту информацию. # # Удалять фрагменты формулировок заданий запрещается. # #### Постановка задачи: # # - В данной работе нужно # - ответить на ряд теоретических вопросов; # - решить набор задач, проверяющих владение ООП-инструментами языка; # - решить задачу на проектирование кода. # - Ответы на теоретические вопросы должны быть полными и обоснованными. # - Каждая задача представляет собой написание функции или класса, а также набора тестов, проверяющих работу решения в общих и крайних случаях. 
# - Отсутствие тестов автоматически уменьшает количество баллов за задание как минимум в два раза, некачественные тесты также будут штрафоваться. # - Даже если это не указано явно в требованиях, код должен быть по возможности неизбыточным, работать с разумной сложностью и объёмом потребялемой памяти, проверяющие могут снизить балл за задание, выполненное без учёта этого требования. # - Результирующий код должен быть читаемым, с единой системой отступов и адеквантными названиями переменных, проверяющие могут снизить балл за задание, выполненное без учёта этого требования. # __Задание 1 (2 балла):__ Дайте подробные ответы на следующие вопросы: # # 1. *В чём смысл инкапсуляции? Приведите пример конкретной ситуации в коде, в которой нарушение инкапсуляции приводит к проблемам.* # # Инкапсулция -- хранение данных и методов работы с ними внутри одного класса с доступом к данным только через методы, позволяет скрыть детали реализации от пользователя. Всё то, что не входит в интерфейс, инкапсулируется в классе. # # Пусть мы создали класс, один из атрибутов - массив. Нарушение инкапсуляции - работа с массивом напрямую, а не посредством методов. Пусть много чего уже напрограммировано. Вдруг, теперь нам нужно работать не с массивами, а с базой данных. Для исправления придется просматривать всю программу и вносить изменения везде, где мы обращались к массиву напрямую. Задача трудная и можно сделать кучу ошибок. В случае соблюдения инкапсуляяции пришлось бы исправлять только меоды. # # 2. *Какой метод называется статическим? Что такое параметр `self`?* # # Статический метод определяется в классе, но для вызова не требует объекта класса. Вызывается от класса как от типа. # # Наличие объекта self в качестве первого аргумента - отличие метода класса от функции. Статичесий метод класса не получает на входе self. Статические методы часто используются для специализированного создания объектов класса. # # 3. 
*В чём отличия методов `__new__` и `__init__`?* # # Метод `__init__` не является конструктором, ничего не создает и не возвращает. `__new__` занимается созданием бъектов, статический метод в отличие от `__init__`. `__init__` получает на вход объект и инициализирует его атрибуты. # # 4. Какие виды отношений классов вы знаете? Для каждого приведите примеры. Укажите взаимные различия. # # Наследование - класс наследует класс. Вертикальная иерархия. Дочерний класс сохраняет все свойства своего родительского класса и как-то расширяет его функциональность.Пример: базовый класс - Animal, дочерний класс - Horse. Дочернй класс сохраняет атрибуты родителького класса, но и появляются новые атрибуты, характерные для дочерненго класса. # # Реализация - частный случай наследования. Наследование от интерфейса, реализуем его методы, задаем его поля, состояния. # # Ассоциация - горизонтальная связь между объектами двух классов или между одним классом и несколькими. # # Вариант ассоциации - композиция: вложенность объекта одного класса в другой (главный управляет жизненным циклом зависимого). Пример: самолет и авиадвигатель. При удалении управляющего объекта удаляется и управляемый объект. # # Другой вариант ассоциации - агрегация: вложенность объекта одного класса в другой (объекты остаются независимыми). Пример: студенты и факультеты, студенты - составляющая часть факультетов, но при упразднении факаультетов студенты не исчезают. # # 5. Зачем нужны фабрики? Опишите смысл использования фабричного метода, фабрики и абстрактной фабрики, а также их взаимные отличия. # # Фабрика - идея создания объекта с использованием какого-то другого объекта. # # Простая фабрика - это класс, в котором есть один метод с большим условным оператором, выбирающим создаваемый продукт. Этот метод вызывают с неким аргументом/парамметром, по которому определяется какой из продуктов нужно создать. Создаваемые объекты могут быть никак не связаны друг с другом. 
# # Фабриный метод - это устройство классов, при котором подклассы могут переопределять тип создаваемого в суперклассе продукта. Имеется иерархия продуктов и абстрактный создающий метод, который переопределяется в подклассах. # # Абстрактная фабрика - устройство классов, облегчающее создание семейств продуктов, предназначен для создания систем взаимосвязанных объектов без указания их конкретных классов. Пример: классы "Транспорт + двигатель + управление". # __Задание 2 (1 балл):__ Опишите класс комплексных чисел. У пользователя должна быть возможность создать его объект на основе числа и в алгебраической форме, и в полярной. Класс должен поддерживать основные математические операции (+, -, \*, /) за счет перегрузки соответствующих магических методов. Также он должен поддерживать возможность получить число в алгебраической и полярной форме. Допускается использование модуля `math`. # + import math class ComplexNumber: def __init__(self, x = 0, y = 0, r = 0, fi = 0): self.x = x self.y = y self.r = r self.fi = fi def __add__(self, other): return ComplexNumber(self.x + other.x, self.y + other.y, self.r**2 + other.r**2 + 2*self.r*other.r*math.cos(other.fi - self.fi), self.fi/2 + other.fi/2) def __sub__(self, other): return ComplexNumber(self.x - other.x, self.y - other.y, self.r**2 + other.r**2 - 2*self.r*other.r*math.cos(self.fi - other.fi), self.fi - other.fi) def __mul__(self, other): return ComplexNumber(self.x*other.x - self.y*other.y, self.r*other.r, self.fi + other.fi) def __truediv__(self, other): return ComplexNumber((self.x*other.x + self.y*other.y)/(other.x**2 + other.y**2), (self.y*other.x - self.x*other.y)/(other.x**2 + other.y**2), self.r/other.r, self.fi - other.fi) def __str__(self): # Строковое представление объекта числа sign = '+' if self.y >= 0 else '' if self.y == 0: return '{}'.format(self.x) else: return '{}{}{}i'.format(self.x, sign, self.y) d = ComplexNumber(6, 5) print('Re(a) = ', a.x, '\n') print('Im(a) = ', a.y, '\n') b = ComplexNumber(0, 0, 
# (The tail of the ComplexNumber demo cell belonged here; it is defined in the
#  previous chunk and is omitted so this block stands on its own.)

# __Task 3 (2 points):__ a class for vectors in N-dimensional space backed by a
# list of coordinates: addition, subtraction (producing a new vector), dot
# product, cosine of the angle, Euclidean norm; operations must validate
# argument consistency.

# +
class Vector:
    """N-dimensional vector backed by a plain list of coordinates."""

    def __init__(self, vector_values_list=0):
        self.values = vector_values_list

    def __getitem__(self, key):
        return self.values[key]

    def __add__(self, other):
        # NOTE(review): the assignment asks for a raised ValueError; the
        # original reported the problem and returned None instead, and the
        # demo code below relies on that, so that behaviour is kept.
        if len(self.values) != len(other.values):
            print('Ошибка. Нельзя сложить векторы, так как они имеют разные размеры.')
            return None
        return Vector([self[i] + other[i] for i in range(len(self.values))])

    def __sub__(self, other):
        if len(self.values) != len(other.values):
            print('Ошибка. Нельзя вычесть векторы, так как они имеют разные размеры.')
            return None
        return Vector([self[i] - other[i] for i in range(len(self.values))])

    def __and__(self, other):
        # Dot product, spelled as the & operator.
        if len(self.values) != len(other.values):
            print('Ошибка. Нельзя вычислить скалярное произведение векторов, так как они имеют разные размеры.')
            return None
        return sum(self[i] * other[i] for i in range(len(self.values)))

    def norm(self):
        """Euclidean norm of the vector."""
        return math.sqrt(sum(self[i] ** 2 for i in range(len(self.values))))

    def cos(self, other):
        """Cosine of the angle between self and other.

        NOTE(review): a zero vector yields 1 by the original's convention.
        """
        if self.norm() == 0 or other.norm() == 0:
            return 1
        return (self & other) / (self.norm() * other.norm())


v = Vector([1, 3, 4, 5])
u = Vector([3, 4, 6, 4])
print(v.values)
print(v[0])
print((v + u).values)
print((v - u).values)
print(v & u)
print(v.norm())
print(u.cos(v))
f = Vector([1, 2, 3])
k = Vector([0, 0, 0])
print(f.cos(k))
f + u
f & u
# -

# __Task 4 (2 points):__ a decorator that, on every call of the wrapped
# function, prints "This function was called N times" with the running count.

# +
def calls_counter(func):
    """Wrap *func* so every call reports the total number of calls so far."""
    def __calls_counter(*args, **kw):
        __calls_counter.count += 1
        res = func(*args, **kw)
        print("This function was called {} times".format(__calls_counter.count))
        return res
    __calls_counter.count = 0  # counter lives on the wrapper itself
    return __calls_counter


@calls_counter
def Sum(a, b):
    return a + b


print(Sum(2, 4))
print(Sum(3, 6))
# -

# __Task 5 (3 points):__ a class decorator that copies onto the decorated class
# every attribute of the source class whose name does NOT start with "_"; on a
# name clash the imported attribute gets a "_new" suffix.

# +
def copy_class_attrs(cls1):
    def _decorate(cls2):
        for e in dir(cls1):
            if not e.startswith('_'):
                # BUG FIX: the original assigned the constant 0 instead of the
                # actual attribute value, so nothing was really copied.
                value = getattr(cls1, e)
                if hasattr(cls2, e):
                    setattr(cls2, e + '_new', value)
                else:
                    setattr(cls2, e, value)
        return cls2
    return _decorate


class A():
    pass


A.some_cls_attr = '1'
A.attr_A = 'A'


@copy_class_attrs(A)
class B():
    some_cls_attr = 0


print(dir(B))
print(dir(A))
# -

# __Task 6 (5 points):__ a class for two-dimensional numeric matrices based on
# lists (statement continues in the next cell).
# (Task 6 statement, continued): the matrix must support indexing, iteration
# over rows and columns, element-wise maths via magic methods, matrix
# multiplication, transposition, trace, and a determinant that prints an error
# and returns None when undefined.  Construction: from a flat list (one row),
# from a list of lists, or from explicit (rows, cols, fill) with fill
# defaulting to zero.  Save/load and deep-copy were requested as mixins
# (not implemented here).

# +
from copy import deepcopy


class Matrix:
    """2-D numeric matrix backed by a list of row lists."""

    def __init__(self, row=None, col=None, number=0, values=None):
        """Build from a list / list of lists (row=...) or from explicit
        dimensions row x col filled with *number*."""
        if col is None:
            self.values = deepcopy(row)
            # BUG FIX: the original tested type(row[0]) == int, which
            # misclassified float rows; any non-list first element means a
            # single flat row.
            if not isinstance(row[0], list):
                self.coln = len(row)
                self.rown = 1
            else:
                self.coln = len(row[0])
                self.rown = len(row)
        else:
            self.values = [[number for _ in range(col)] for _ in range(row)]
            self.coln = col
            self.rown = row

    def __getitem__(self, key):
        return self.values[key]

    def __add__(self, other):
        # NOTE(review): errors are reported and None returned (not raised),
        # matching the demo code below.
        if self.rown != other.rown or self.coln != other.coln:
            print('Ошибка. Нельзя сложить матрицы, так как они имеют разные размеры.')
            return None
        return Matrix([[self[i][j] + other[i][j] for j in range(self.coln)]
                       for i in range(self.rown)])

    def __sub__(self, other):
        if self.rown != other.rown or self.coln != other.coln:
            print('Ошибка. Нельзя вычесть матрицы, так как они имеют разные размеры.')
            return None
        return Matrix([[self[i][j] - other[i][j] for j in range(self.coln)]
                       for i in range(self.rown)])

    def __mul__(self, other):
        """Scalar product or matrix product depending on *other*."""
        if isinstance(other, (int, float)):
            return Matrix([[other * self[i][j] for j in range(self.coln)]
                           for i in range(self.rown)])
        if self.coln != other.rown:
            print('Ошибка. Нельзя перемножить матрицы таких размеров.')
            return None
        C = Matrix(self.rown, other.coln)
        for i in range(self.rown):
            for j in range(other.coln):
                for k in range(self.coln):
                    C[i][j] += self[i][k] * other[k][j]
        return C

    def __rmul__(self, other):
        # Supports 5 * A as well as A * 5.
        return self.__mul__(other)

    def trans(self):
        """Transposed copy of the matrix."""
        return Matrix([[self[j][i] for j in range(self.rown)]
                       for i in range(self.coln)])

    def tr(self):
        """Trace; None with a message for non-square matrices."""
        if self.rown != self.coln:
            # (typo "кавадратная" in the original message fixed)
            print('Ошибка. Матрица не квадратная, след не определен.')
            return None
        return sum(self[i][i] for i in range(self.rown))

    def minor(self, i, j):
        """Submatrix with row i and column j removed."""
        rows = [self[k] for k in range(i)] + [self[k] for k in range(i + 1, self.rown)]
        return Matrix([row[:j] + row[j + 1:] for row in rows])

    def det(self):
        """Determinant via Laplace expansion along the first row;
        None with a message for non-square matrices."""
        if self.rown != self.coln:
            print('Ошибка. Матрица не квадратная, детерминант не определен.')
            return None
        if self.rown == 1:
            # Added base case: the original recursed into an empty minor.
            return self[0][0]
        if self.rown == 2:
            return self[0][0] * self[1][1] - self[0][1] * self[1][0]
        d = 0
        for c in range(self.coln):
            # BUG FIX: the original called a free function det(...) that does
            # not exist; recurse through the minor's own method instead.
            d += ((-1) ** c) * self[0][c] * self.minor(0, c).det()
        return d


A = Matrix([[1, 2], [5, 6]])
print(A.values)
B = Matrix(2, 3, 5)
print(B.values)
print(B[1][2])
print(B.rown, B.coln)
C = Matrix(5, 3)
print(C.values)
D = Matrix(2, 2, 2)
print(A + B)
print((A + D).values)
print((D - A).values)
print((5 * A).values)
print((A * 5).values)
print((A * B).values)
A * C
B_T = B.trans()
print(B_T.values)
print(A.tr())
B.tr()
print(B.minor(1, 1).values)
print(A.det())
print(B.det())
# -

# __Task 7 (5 points):__ price a cup of coffee with the "decorator" pattern:
# coffee classes (latte, cappuccino, americano) and topping classes (sugar,
# cream, cardamom, foam, syrup), each with a calculate_cost() method, e.g.
#   cream(sugar(latte())).calculate_cost()
# The innermost element must be the coffee (otherwise raise), exactly one
# coffee per cup (otherwise raise); toppings may repeat in any order, and
# adding new coffees/toppings must not require changing existing code.
tasks/Kurdyukova_Python_HW_2.ipynb
# --- jupytext header (light format) ---

# Categorical naive-Bayes classification of student grades with scikit-learn.

import sklearn
import pandas as pd
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.naive_bayes import CategoricalNB
import math

sklearn.__version__

# +
data = pd.read_csv('data/student.csv', sep=',')

# Random ~80/20 train/test mask over the rows.
# NOTE(review): np.random.rand is unseeded, so the split differs on every run.
msk = np.random.rand(len(data)) < 0.8

Y = data['Grade']
X = data.drop(columns=['Grade'])
# Integer-encode every categorical feature column.
X = X.apply(lambda feature: pd.factorize(feature)[0])

X_train = X[msk]
X_test = X[~msk]
Y_train = Y[msk]
Y_test = Y[~msk]
# -

model = CategoricalNB()
model.fit(X_train, Y_train)
predict = model.predict(X_test)

# accuracy
print(classification_report(Y_test, predict, digits=6))
print(confusion_matrix(Y_test, predict))
print(accuracy_score(Y_test, predict))

# End
Naive Bayes with Scikit-learn.ipynb
# --- jupytext header (light format) ---

# # Neural Nets v3  (`nn_v3`)
#
# A network is a cascade of components ("layers") that:
#  1. map an input vector in R^n to an output vector in R^m (forward pass);
#  2. map the loss gradient at the output back to the input (backprop);
#  3. compute dE/dw for their internal parameters from the output gradient;
#  4. take a learning rate eta to scale the adjustment (here applied once, by
#     the Network, via scaling of the error gradient);
#  5. update parameters: w <- w - eta * dE/dw.

# Notebook-import helper; only meaningful when run inside Jupyter.
# BUG FIX: the unconditional import crashed plain-script use when the helper
# module is absent.
try:
    import Importing_Notebooks
except ImportError:
    pass

import numpy as np


class Layer:
    """Abstract network component: forward map, backprop, learnable state."""

    def __init__(self):
        pass

    def __call__(self, x):
        """Compute response to input."""
        raise NotImplementedError

    def backprop(self, output_delE):
        """Use output error gradient to adjust internal parameters,
        return gradient of error at input."""
        raise NotImplementedError

    def state_vector(self):
        """Provide the layer's learnable state as a vector."""
        raise NotImplementedError

    def set_state_from_vector(self, sv):
        """Set the layer's learnable state from a vector."""
        raise NotImplementedError


class Network:
    """A cascade of layers trained by plain gradient descent."""

    def __init__(self):
        self.layers = []
        self.eta = 0.1  # FIXME: learning rate should be configurable

    def extend(self, net):
        """Append a layer (or sub-network) to the cascade."""
        self.layers.append(net)

    def __call__(self, input):
        """Forward pass: feed input through every layer in order."""
        v = input
        for net in self.layers:
            v = net(v)
        return v

    def learn(self, facts):
        """Run one pass over (input, expected) pairs, updating every layer.

        Returns the squared-error loss of the LAST fact, or None when
        *facts* is empty.
        """
        loss = None  # BUG FIX: `loss` was unbound when facts was empty
        for (x, expected) in facts:
            y = self(x)
            e = y - expected
            loss = np.einsum('ij,ij', e, e)  # squared error (expects 2-D e)
            egrad = e * self.eta             # eta applied once, here
            for net in reversed(self.layers):
                egrad = net.backprop(egrad)
        return loss

    def state_vector(self):
        """Provide the network's learnable state as one flat vector."""
        return np.concatenate([layer.state_vector() for layer in self.layers])

    def set_state_from_vector(self, sv):
        """Distribute a flat state vector back to the layers, in order."""
        i = 0
        for layer in self.layers:
            lsvlen = len(layer.state_vector())
            layer.set_state_from_vector(sv[i:i+lsvlen])
            i += lsvlen


# ___
# ## Useful Layers

# ### Identity

class IdentityLayer(Layer):
    """Pass-through layer with no learnable state."""

    def __call__(self, x):
        return x

    def backprop(self, output_delE):
        return output_delE

    def state_vector(self):
        return np.array([])

    def set_state_from_vector(self, sv):
        pass


# ### Affine
# y = x @ M + b, the classic fully-connected layer with output offsets.
# Backprop:  dE/dx = M^T dE/dy,  dE/dM = x^T dE/dy,  dE/db = dE/dy.
# (numpy convention: row vectors, so `M` below holds the transpose of the
# textbook column-vector matrix.)

class AffineLayer(Layer):
    """Fully-connected affine layer: n inputs, m outputs.

    Accepts a 1-D input of shape (n,) -> output of shape (m,), or a 2-D
    batch of shape (k, n) -> (k, m), suitable as input to a subsequent
    layer of input width m.
    """

    def __init__(self, n, m):
        self.M = np.empty((n, m))
        self.b = np.empty(m)
        self.randomize()

    def randomize(self):
        """Re-initialize weights and offsets from a standard normal."""
        self.M[:] = np.random.randn(*self.M.shape)
        self.b[:] = np.random.randn(*self.b.shape)

    def __call__(self, x):
        self.input = x  # cached for the weight update in backprop
        self.output = x @ self.M + self.b
        return self.output

    def backprop(self, output_delE):
        """Apply dE/dM and dE/db (already eta-scaled by the caller),
        return the input-side error gradient."""
        input_delE = output_delE @ self.M.T
        o_delE = np.atleast_2d(output_delE)
        # dE/dM[j,i] = sum_k x[k,j] * dE/dy[k,i], summed over the batch.
        self.M -= np.einsum('ki,kj->ji', o_delE, np.atleast_2d(self.input))
        self.b -= np.sum(o_delE, 0)
        return input_delE

    def state_vector(self):
        return np.concatenate((self.M.ravel(), self.b.ravel()))

    def set_state_from_vector(self, sv):
        """Set the layer's learnable state from a vector."""
        l_M = len(self.M.ravel())
        l_b = len(self.b.ravel())
        self.M[:] = sv[:l_M].reshape(self.M.shape)
        self.b[:] = sv[l_M : l_M + l_b].reshape(self.b.shape)


# ### Map
# Maps a scalar function on the inputs, for e.g. activation layers.

class MapLayer(Layer):
    """Map a scalar function on the input taken element-wise."""

    def __init__(self, fun, dfundx):
        self.vfun = np.vectorize(fun)
        self.vdfundx = np.vectorize(dfundx)

    def __call__(self, x):
        self.input = x  # cached for the chain rule in backprop
        return self.vfun(x)

    def backprop(self, output_delE):
        input_delE = self.vdfundx(self.input) * output_delE
        return input_delE

    def state_vector(self):
        return np.array([])

    def set_state_from_vector(self, sv):
        pass


# ---
# # Tests
# *Dangerously incomplete* — mostly unittest the .py version with a separate
# test script, see `test-nn_v3.py`.

if __name__ == '__main__':
    two_wide = np.arange(2*4).reshape(-1, 2)
    print(f"two_wide is:\n{two_wide}")
    three_wide = np.arange(3*4).reshape(-1, 3)
    print(f"three_wide is:\n{three_wide}")

    # A few very basic tests:
    iL = IdentityLayer()
    assert all(np.equal(iL(np.arange(5)), np.arange(5)))
    assert all(np.equal(iL.backprop(np.arange(7)), np.arange(7)))
    assert np.equal(iL(two_wide), two_wide).all()

    mL = MapLayer(lambda x: x**2, lambda d: 2*d)
    assert np.equal(mL(np.array([7, 3, -11])), np.array([49, 9, 121])).all()
    assert np.equal(mL.backprop(np.array([2, 3, 4])),
                    2 * np.array([2, 3, 4]) * np.array([7, 3, -11])).all()
    assert np.equal(mL(two_wide),
                    np.array([0, 1, 4, 9, 16, 25, 36, 49]).reshape(-1, 2)).all()
    assert np.equal(mL.backprop(np.array([2, 3])), 2 * np.array([2, 3]) * two_wide).all()

    a = AffineLayer(2, 3)
    assert a(np.arange(2)).shape == (3,)
    out = a(np.array([1, -2]))
    assert out.shape == (3,)

    # AffineLayer has parameters that learn
    out_grad = np.array([4, 2, 7]) * 0.001
    in_grad = a.backprop(out_grad)
    print(f"in_grad is:\n{in_grad}")
    print(f"a(two_wide) is:\n{a(two_wide)}")
    bp = a.backprop(three_wide * 0.001)
    print(f"bp is:\n{bp}")

# ---
# To produce an importable `nn_v3.py`:
# 1. Save this notebook
# 2. Uncomment the `jupyter nbconvert` line below
# 3. Execute it, then comment it out again and re-save.
# + 
# ###!jupyter nbconvert --to script nn_v3.ipynb
# -
nbs/OLD/nn_v3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ART Adversarial Patch attack # + from __future__ import absolute_import, division, print_function, unicode_literals # Install Imagenet stubs to have some image examples import sys # !{sys.executable} -m pip install git+https://github.com/nottombrown/imagenet_stubs import random import numpy as np import tensorflow as tf sess = tf.InteractiveSession() from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions from tensorflow.keras.preprocessing import image from matplotlib import pyplot as plt plt.rcParams['figure.figsize'] = [10, 10] import imagenet_stubs from imagenet_stubs.imagenet_2012_labels import name_to_label import warnings warnings.filterwarnings('ignore') from art.classifiers import TensorFlowClassifier from art.attacks import AdversarialPatch # - # # Settings target_image_name = 'toaster.jpg' image_shape = (224, 224, 3) batch_size = 3 scale_min = 0.3 scale_max = 1.0 rotation_max = 22.5 learning_rate = 200000.0 max_iter = 1000 # # Model definition # + _image_input = tf.keras.Input(shape=image_shape) _target_ys = tf.placeholder(tf.float32, shape=(None, 1000)) model = tf.keras.applications.resnet50.ResNet50(input_tensor=_image_input, weights='imagenet') _logits = model.outputs[0].op.inputs[0] target_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=_target_ys, logits=_logits)) tfc = TensorFlowClassifier(clip_values=(0, 1), input_ph=_image_input, labels_ph=_target_ys, output=_logits, sess=sess, loss=target_loss) # - # # Imagenet training images # + images_list = list() target_image = None for image_path in imagenet_stubs.get_image_paths(): im = image.load_img(image_path, target_size=(224, 224)) im = image.img_to_array(im) im = np.expand_dims(im, axis=0) im = preprocess_input(im) if 
image_path.endswith(target_image_name): target_image = im else: images_list.append(im) images = random.sample(images_list, batch_size) images = np.concatenate(images, axis=0) # - # # Adversarial patch generation ap = AdversarialPatch(classifier=tfc, target=name_to_label('toaster'), rotation_max=rotation_max, scale_min=scale_min, scale_max=scale_max, learning_rate=learning_rate, max_iter=max_iter, batch_size=batch_size, clip_patch=[(-103.939, 255.0 - 103.939), (-116.779, 255.0 - 116.779), (-123.680, 255.0 - 123.680)]) patch, patch_mask = ap.generate(x=images) def from_keras(x): x = np.copy(x) x[:, :, 2] += 123.68 x[:, :, 1] += 116.779 x[:, :, 0] += 103.939 return x[:, :, [2, 1, 0]].astype(np.uint8) plt.imshow((from_keras(patch) * patch_mask).astype(np.uint)) # # Evaluation patched_images = ap.apply_patch(images, scale=0.5) def predict_model(model, image): plt.imshow((from_keras(image)).astype(np.uint)) plt.show() image = np.copy(image) image = np.expand_dims(image, axis=0) image = preprocess_input(image) prediction = model.predict(image) top = 3 prediction_decode = decode_predictions(prediction, top=top)[0] print('Predictions:') lengths = list() for i in range(top): lengths.append(len(prediction_decode[i][1])) max_length = max(lengths) for i in range(top): name = prediction_decode[i][1] name = name.ljust(max_length, " ") probability = prediction_decode[i][2] output_str = "{} {:.2f}".format(name, probability) print(output_str) predict_model(model, patched_images[0, :, :, :]) predict_model(model, patched_images[1, :, :, :]) predict_model(model, patched_images[2, :, :, :])
notebooks/attack_adversarial_patch.ipynb
# --- jupytext header (light format; kernel: conda-env-dvc-py) ---

# Train and serialize a random-forest penguin-species classifier.

from sklearn.ensemble import RandomForestClassifier
from sklearn.compose import ColumnTransformer
from sklearn.compose import make_column_selector
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
import pandas as pd
from sklearn import set_config

set_config(display='diagram')

# Read the raw data; the text columns come in as pandas categoricals.
penguins = pd.read_csv(
    "penguins.csv",
    dtype={"species": 'category', "island": 'category', "gender": 'category'},
)

penguins.iloc[200]

penguins.iloc[-1]

# Features vs. target: species codes are the labels to predict.
X = penguins.drop("species", axis=1)
y = penguins['species'].cat.codes

# Ordinal-encode the categorical columns; pass the measurements through as-is.
ct = ColumnTransformer([
    ("category", OrdinalEncoder(), ['island', 'gender']),
    ("numerical", 'passthrough',
     ['culmen_length_mm', 'culmen_depth_mm', 'flipper_length_mm', 'body_mass_g']),
])

clf = Pipeline([
    ("preprocess", ct),
    ("random_forest", RandomForestClassifier()),
])

from sklearn.model_selection import cross_val_score
from sklearn.metrics import SCORERS

# Cross-validated weighted precision as a quick quality check.
scores = cross_val_score(clf, X, y, scoring='precision_weighted')
scores.mean(), scores.std()

# Fit on the full data and persist the whole pipeline.
clf.fit(X, y)

import joblib
joblib.dump(clf, "penguin_clf.joblib")
training.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/luisfergr/pylance-release/blob/main/1_Listas_Enlazadas.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="8f-DdMraxCIx"
# **Linked Lists**
#

# + [markdown] id="HUnOBMFyzWUT"
# **Definition of the Student class**

# + id="3OxBSjTjxIX8"
class estudiante:
    """A student record stored in each node of the linked list."""

    def __init__(self, carne, nombre, edad, direccion, telefono, email,
                 carrera, puesto):
        self.carne = carne          # student id number (used as the list key)
        self.nombre = nombre        # full name
        self.edad = edad            # age
        self.direccion = direccion  # address
        self.telefono = telefono    # phone number
        self.email = email          # e-mail address
        self.carrera = carrera      # degree programme
        self.puesto = puesto        # job title


# + [markdown] id="068XYb8zzayD"
# **Definition of the Node class**

# + id="6IT1YAtLzd0X"
class nodo:
    """A singly linked node: one estudiante plus a pointer to the next node."""

    def __init__(self, estudiante=None, siguiente=None):
        self.estudiante = estudiante
        self.siguiente = siguiente


# + [markdown] id="T8TjIwh43Mxy"
# **Definition of the Linked List**

# + id="g3pvyrwQ3J0d"
class lista_enlazada:
    """Singly linked list of estudiante records, keyed by `carne`."""

    def __init__(self):
        # The list starts out empty.
        self.primero = None

    def insertar(self, estudiante):
        """Append a student at the tail of the list (O(n) walk)."""
        if self.primero is None:  # empty list: new node becomes the head
            self.primero = nodo(estudiante=estudiante)
            return
        actual = self.primero
        while actual.siguiente:  # walk to the last node
            actual = actual.siguiente
        actual.siguiente = nodo(estudiante=estudiante)

    def recorrer(self):
        """Print carne / nombre / email for every node, head to tail."""
        actual = self.primero
        while actual != None:
            print("carne: ", actual.estudiante.carne,
                  "nombre: ", actual.estudiante.nombre,
                  "email: ", actual.estudiante.email,
                  "->")
            actual = actual.siguiente

    def eliminar(self, carne):
        """Unlink the first node whose student has the given carne.

        Does nothing when the list is empty or no node matches.
        """
        actual = self.primero
        anterior = None
        while actual and actual.estudiante.carne != carne:
            anterior = actual
            actual = actual.siguiente
        if actual is None:
            # BUG FIX: the original dereferenced `actual.siguiente` even when
            # no node matched while `anterior` was still None (e.g. deleting
            # from an empty list), raising AttributeError. Nothing to unlink.
            return
        if anterior is None:
            # The match is the head node.
            self.primero = actual.siguiente
        else:
            anterior.siguiente = actual.siguiente
        actual.siguiente = None  # fully detach the removed node

    def buscar(self, carne):
        """Print the first student whose carne matches; silent when absent."""
        actual = self.primero
        while actual != None:
            if actual.estudiante.carne == carne:
                print("carne: ", actual.estudiante.carne,
                      "nombre: ", actual.estudiante.nombre,
                      "email: ", actual.estudiante.email,
                      "->")
                break
            else:
                actual = actual.siguiente

##tareas-ipc2
##tarea-1 cuaderno

# + [markdown] id="Y_KjD9fs4gWz"
# Gerson > Karen > Luis

# + [markdown] id="jxLOeJ-Gs0TS"
# # **Creating Student objects**
#

# + id="PAktQv3ps6v0"
e1 = estudiante(201915059, "<NAME>", 20, "9 calle 10-02 zona 1", 24400101, "<EMAIL>", "Ingenieria en Sistemas", "Progaramador Jr")
e2 = estudiante(201915060, "<NAME>", 21, "7 calle 10-02 zona 1", 24400102, "<EMAIL>", "Ingenieria en Sistemas", "Progaramador Jr")
e3 = estudiante(201915061, "<NAME>", 22, "8 calle 10-02 zona 1", 24400103, "<EMAIL>", "Ingenieria en Sistemas", "Progaramador Jr")

# + [markdown] id="VmJymkI1ugr9"
# **Insertion**

# + id="ab9mq-wKuUFO"
lista_e = lista_enlazada()
lista_e.insertar(e1)
lista_e.insertar(e2)
lista_e.insertar(e3)

# + [markdown] id="CaCFVqmwut8B"
# **Traverse the list**

# + colab={"base_uri": "https://localhost:8080/"} id="Q0jb6jdZuwch" outputId="e4603061-d18b-4769-cea9-c85e737f5f78"
lista_e.recorrer()

# + [markdown] id="xlARQ7X6wjTR"
# **Delete a node from the list**

# + colab={"base_uri": "https://localhost:8080/"} id="71hWcSUjwozJ" outputId="1ce32769-7ac6-4685-ea3a-f99363d57b9e"
lista_e.eliminar(201915060)
lista_e.recorrer()

# + [markdown] id="KDIo5GIW92-B"
# **Search for a node in the list**

# + colab={"base_uri": "https://localhost:8080/"} id="w-dNsHfh95at" outputId="402e1eeb-8642-4f6e-ce58-e7dd0d3cdf5d"
lista_e.buscar(201915061)

# + id="8meMXalI9_bF"
1_Listas_Enlazadas.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Build a combined news dataset: take rows [start:end) from four category
# spreadsheets, merge each article's title and summary into a single 'News'
# column, concatenate the categories, and export the result to Excel.

# + id="2e6BW_p2q-0d"
import pandas as pd

# Row window selected from every category sheet.
start = 76
end = 100

Sports = pd.read_excel('Sports.xlsx')
Sports = Sports[start:end]

Business = pd.read_excel('Business.xlsx')
Business = Business[start:end]

Science = pd.read_excel('Science.xlsx')
Science = Science[start:end]

Health = pd.read_excel('Health.xlsx')
Health = Health[start:end]


def _combine_text(df):
    """Merge title + summary into one 'News' column and drop the source columns.

    NOTE: the summary column is spelled 'summery' in the source spreadsheets.
    """
    df['News'] = df['title'] + df['summery']
    return df.drop(['summery', 'title', 'date'], axis=1)


# + id="eskrp2E6rQqQ"
Sports = _combine_text(Sports)

# + id="AyZ3F8RlrDgF"
Business = _combine_text(Business)

# + id="LWZmfF3LrUHZ"
Health = _combine_text(Health)

# + id="YIRC4O10rX1M"
Science = _combine_text(Science)

# + id="U5HRAhF1raM2"
frames = [Sports, Health, Science, Business]
# NOTE(review): `keys=` has no effect when ignore_index=True; kept for reference.
combine = pd.concat(frames, ignore_index=True, keys=['Index', 'News', 'Category', 'Link'])
# BUG FIX: DataFrame.drop returns a new frame; the original discarded the
# result (twice), so the stray 'Unnamed: 5' column was still written to the
# Excel outputs. Assign the result back once; errors='ignore' keeps this safe
# if the column happens to be absent.
combine = combine.drop(columns=['Unnamed: 5'], errors='ignore')
# Shift to a 1-based index for the exported spreadsheet.
combine.index = combine.index + 1

# + colab={"base_uri": "https://localhost:8080/"} id="TkXulKDlreI1" outputId="2dd1c959-7a7c-4a4f-9b1f-7219958c137"
combine.info()
combine.to_excel('ResponseSP19-BCS-089.xlsx')

# + id="ZX7jEF3brnZJ"
combine.to_excel('Response.xlsx')
Categories.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
# Simple interactive Python debugger ("my-spyder").
# NOTE(review): this is Python 2 code (print statements, raw_input) and must
# run under a Python 2 kernel.

# Our buggy program
def remove_html_markup(s):
    # Strips HTML tags from `s`, keeping text inside quotes intact.
    # This is the deliberately buggy example the debugger below inspects:
    # `c == '"' or c == "'" and tag` binds as `c == '"' or (c == "'" and tag)`
    # due to operator precedence, so double quotes toggle `quote` even
    # outside tags.
    tag = False
    quote = False
    out = ""

    for c in s:
        if c == '<' and not quote:
            tag = True
        elif c == '>' and not quote:
            tag = False
        elif c == '"' or c == "'" and tag:
            quote = not quote
        elif not tag:
            out = out + c

    return out

# main program that runs the buggy program
def main():
    print remove_html_markup('"<b>foo</b>"')

# ------------ my-spyder --------------------------
"""
*** INSTRUCTIONS ***

1. Improve and expand the 'debug' function to accept a print command 'p <arg>'.
If the print command has no argument, print out the dictionary that holds all
variables. Print the value of the supplied variable in a form
'var = repr(value)', if the 'p' is followed by an argument, or print
'No such variable:', arg if no such variable exists.

2. Improve and expand the 'debug' function to accept a breakpoint command
'b <line>'. Add the line number to the breakpoints dictionary, or print
'You must supply a line number' if 'b' is not followed by a line number.

3. Improve and expand the 'debug' function to accept a watchpoint command
'w <var name>'. Add the variable name to the watchpoints dictionary, or print
'You must supply a variable name' if 'w' is not followed by a string.

4. Improve and expand the debug function to accept a new command: a delete
command 'd <type> <argument>', where <type> is either b for breakpoint, or w
for watchpoint. The following argument should be either a number for the
breakpoint or a string for the watchpoint. If there is mismatch between type
and argument, you should print out "Incorrect command".
In the case of "d b <argument>" you should delete that breakpoint from the
breakpoint dictionary, or print "No such breakpoint defined", repr(argument)
In case of watchpoint, you should delete the watchpoint if such variable
exists, or print: variable, "is not defined as watchpoint"

5. Improve the traceit function to watch for variables in the watchpoint
dictionary and print out (literally like that):
event, frame.f_lineno, frame.f_code.co_name
and then the values of the variables, each in new line, in a format:
somevar ":", "Initialized"), "=>", repr(somevalue)
if the value was not set, and got set in the line, or
somevar ":", repr(old-value), "=>", repr(new-value)
when the value of the variable has changed. If the value is unchanged, do not
print anything.
"""

import sys
import copy


def debug(command, my_locals):
    # Interpret one debugger command against the current frame's locals.
    # Returns True to resume execution of the debuggee, False to prompt
    # for another command.
    global stepping
    global breakpoints
    global watchpoints

    # Everything after the first space is the command's argument (if any).
    if command.find(' ') > 0:
        arg = command.split(' ')[1]
    else:
        arg = None

    if command.startswith('s'):     # step: stop at the next traced line
        stepping = True
        return True
    elif command.startswith('c'):   # continue: run until a breakpoint/watch fires
        stepping = False
        return True
    elif command.startswith('p'):   # print: whole locals dict, or one variable
        if arg is None:
            print my_locals
        else:
            if arg in my_locals:
                print "{} = {}".format(arg, repr(my_locals[arg]))
            else:
                print "No such variable: {}".format(arg)
                return False
        return True
    elif command.startswith('b'):   # breakpoint: register a line number
        if arg is None:
            print "You must supply a line number"
        else:
            breakpoints[int(arg)] = True
        return True
    elif command.startswith('w'):   # watchpoint: register a variable name
        if arg is None:
            print "You must supply a variable name"
        else:
            # We need to use copy.deepcopy to take into account mutable objects
            # that might change during the execution.
            if arg in my_locals:
                watchpoints[arg] = copy.deepcopy(my_locals[arg])
            else:
                # Sentinel: variable is watched but not yet assigned.
                watchpoints[arg] = "variable_not_set"
        # NOTE(review): no return here, so this branch falls through to the
        # final `return False` and the debugger prompts again.
    elif command.startswith('d'):   # delete a watchpoint or breakpoint
        if arg is None:
            print """Arguments missing. Correct usage: d <type> <argument>,
<type> can be either 'b' for breakpoint or 'w' for watchpoint,
<argument> should be a number for breakpoints or a string for watchpoints."""
        # Re-split: the delete command takes TWO arguments (type + target).
        arg = command.split(' ')[1:]
        if len(arg) != 2:
            print "Delete command takes 2 arguments, {} given.".format(len(arg))
        elif arg[0] not in ['b', 'w']:
            print "Incorrect command."
        elif arg[0] == 'b':
            # delete breakpoint: the target must parse as an int line number
            try:
                line = int(arg[1])
            except:
                print "Incorrect command."
                return False
            if line not in breakpoints:
                print "No such breakpoint defined", repr(line)
            else:
                del breakpoints[line]
                return True
        else:
            # delete watchpoint: the target must be a watched variable name
            if arg[1] not in watchpoints:
                print "{}, is not defined as watchpoint".format(arg[1])
            else:
                del watchpoints[arg[1]]
                return True
    elif command.startswith('q'):   # quit the debugger (and the program)
        print "Exiting my-spyder..."
        sys.exit(0)
    else:
        print "No such command", repr(command)

    # Unrecognized/incomplete command: stay in the prompt loop.
    return False


def input_command():
    # In interactive mode this would read from the user; for this demo the
    # commands are scripted in the module-level `commands` list.
    # command = raw_input("(my-spyder): ")
    global commands
    command = commands.pop(0)
    return command


def traceit(frame, event, trace_arg):
    """The local trace function should return a reference to itself (or to
    another function for further tracing in that scope), or None to turn off
    tracing in that scope."""
    global stepping
    global breakpoints
    global watchpoints
    global previous_line

    if event == 'line':
        # previous_line is reported because by the time a 'line' event fires,
        # the *previous* line has just finished executing its assignments.
        message = "{}, {}, {}\n".format(event, previous_line, frame.f_code.co_name)
        changes = ""
        # Compare every watched variable against its last recorded value.
        for variable, value in watchpoints.items():
            if variable in frame.f_locals:
                current_value = copy.deepcopy(frame.f_locals[variable])
                if value == "variable_not_set":
                    # First assignment of a watched variable.
                    changes += "{} : Initialized => {} \n".format(variable, repr(current_value))
                    watchpoints[variable] = current_value
                elif current_value != value:
                    # Watched variable changed since the last event.
                    changes += "{} : {} => {} \n".format(variable, repr(value), repr(current_value))
                    watchpoints[variable] = current_value
            else:
                # Variable left scope (or was never set in this frame).
                watchpoints[variable] = "variable_not_set"
        if changes:
            # A watchpoint fired: report and drop into the command prompt.
            print message, changes
            resume = False
            while not resume:
                command = input_command()
                resume = debug(command, frame.f_locals)
        if stepping or frame.f_lineno in breakpoints:
            # Single-stepping, or this line is a registered breakpoint.
            print event, frame.f_lineno, frame.f_code.co_name, frame.f_locals
            resume = False
            while not resume:
                command = input_command()
                resume = debug(command, frame.f_locals)
        previous_line = frame.f_lineno
    return traceit


# globals: debugger state shared between debug() and traceit()
breakpoints = {}                    # line number -> True
watchpoints = {"quote": True}       # variable name -> last seen value (or sentinel)
watch_values = {}                   # NOTE(review): appears unused
stepping = True
previous_line = None
# Scripted command sequence consumed by input_command().
commands = ["w c", "c", "c", "w out", "c", "c", "c", "q"]

# Using the tracer
sys.settrace(traceit)
main()
sys.settrace(None)
# -
src/my-spyder v5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Test out the laplan.pcts functions
#
# Step-by-step walkthrough that re-implements laplan.pcts.subset_pcts inline
# on a handful of hand-picked cases, printing intermediate state at each step.

# +
import boto3
import intake
import pandas

import laplan

# +
catalog = intake.open_catalog("../catalogs/*.yml")
s3 = boto3.client('s3')
bucket_name = 'city-planning-entitlements'
# -

pcts = catalog.pcts.read()

"""
Let's replicate what this does, but just on select cases to see what's going on
FULL_PREFIX = list(laplan.pcts.VALID_PCTS_PREFIX)
remove_prefix = ["ENV", "PAR", "ADM"]
prefix = [x for x in FULL_PREFIX if x not in remove_prefix]
suffix = ["TOC", "CUB"]

pcts = laplan.pcts.subset_pcts(pcts, start_date="2017-10-01", prefix_list=prefix, get_dummies=True)
pcts = laplan.pcts.drop_child_cases(pcts, keep_child_entitlements=True)
"""

# +
cases_to_keep = [
    "PAR-2020-384-TOC",  # this should get dropped because of excluded prefix
    "DIR-2020-360-TOC-SPR",  # this should get kept
    "ZA-2010-861-CUB-CU-ZV-ZAA-SPR-PA1",  # this has lots of suffixes for us to learn about
    "PAR-2020-387-CUB",  # this should get dropped because of excluded prefix
]

pcts = pcts[pcts.CASE_NUMBER.isin(cases_to_keep)].drop_duplicates(subset = ["CASE_NUMBER"])

print(f"# obs: {len(pcts)}")
print(f"# unique case numbers: {len(pcts.CASE_NUMBER.unique())}")

# +
# Build the prefix/suffix filters: keep every valid prefix except the excluded ones.
remove_prefix = ["ENV", "PAR", "ADM"]
FULL_PREFIX = list(laplan.pcts.VALID_PCTS_PREFIX)
prefix_list = [x for x in FULL_PREFIX if x not in remove_prefix]
suffix_list = ["TOC", "CUB"]

start_date = "2017-10-01"
end_date = pandas.Timestamp.now()

# +
# Subset PCTS by start / end date (defaults: 2010-01-01 .. now)
start_date = (
    pandas.to_datetime(start_date)
    if start_date
    else pandas.to_datetime("2010-01-01")
)
end_date = pandas.to_datetime(end_date) if end_date else pandas.Timestamp.now()

pcts = (
    pcts[
        (pcts.FILE_DATE >= start_date)
        & (pcts.FILE_DATE <= end_date)
    ]
    .drop_duplicates()
    .reset_index(drop=True)
)

# +
import re

# Case numbers look like PREFIX-YYYY-NUM[-SUFFIX]*; some are missing the year.
GENERAL_PCTS_RE = re.compile("([A-Z]+)-([0-9X]{4})-([0-9]+)((?:-[A-Z0-9]+)*)$")
MISSING_YEAR_RE = re.compile("([A-Z]+)-([0-9]+)((?:-[A-Z0-9]+)*)$")

# Parse CASE_NBR: group 0 = prefix, group 3 = "-SUFFIX-SUFFIX..." (strip the leading dash).
cols = pcts.CASE_NUMBER.str.extract(GENERAL_PCTS_RE)
all_prefixes = cols[0]
all_suffixes = cols[3].str[1:]

print("show case number parsed with GENERAL_PCTS_RE")
display(cols.head())
display(all_prefixes.head())
display(all_suffixes.head())

# +
# Parse additional prefixes and suffixes that did not pass the first regex
# to fill NaN values based on indices. Suffixes at position 2 instead of 3.
failed_general_parse = all_prefixes.isna()
additional_cols = pcts[failed_general_parse].CASE_NUMBER.str.extract(MISSING_YEAR_RE)

print("failed to parse go through MISSING_YEAR_RE")
display(additional_cols.head())

# +
# Now fill in those failed to parse the first time around
# Find the index where that happened, and assign those values for prefixes and suffixes
additional_prefixes = additional_cols[0]
additional_suffixes = additional_cols[2].str[1:]

all_prefixes.at[additional_prefixes.index] = additional_prefixes.values
all_suffixes.at[additional_suffixes.index] = additional_suffixes.values

# One column per suffix position ("TOC-SPR" -> columns 0="TOC", 1="SPR").
all_suffixes = all_suffixes.str.split("-", expand=True)

# +
# Start by excluding all rows that failed to parse.
successfully_parsed = all_prefixes.notna()

# Create 2 series, holds all True values for each case
allow_prefix = pandas.Series(True, index=pcts.index)
allow_suffix = pandas.Series(True, index=pcts.index)

# +
# Subset by prefix
if prefix_list is not None:
    allow_prefix = all_prefixes.isin(prefix_list)

# Takes the previous series, which was all true,
# now only those who are part of "allow_prefix" have True, rest are False
allow_prefix
# -

# Subset by suffix. Since the suffix may be in any of the all_suffixes
# column, we logical-or them together, checking if each column has one
# of the requested ones.
if suffix_list is not None:
    print("before: all Trues")
    display(allow_suffix)
    allow_suffix = ~allow_suffix
    print("after: all Falses")
    display(allow_suffix)
    # Loop through each column in all_suffixes, labeled 0, 1, ..., n
    # Turn on to be True if it's already True (will work after 2nd iteration),
    # or if the suffix is found in our allowed suffixes
    # This way, even if we loop through ones that aren't in our allowed suffixes,
    # if it's already True, it won't turn to False
    for c in all_suffixes.columns:
        print(f"Column: {c}")
        test = all_suffixes[c].isin(suffix_list)
        print("suffix in this column is in suffix list")
        print(test)
        allow_suffix = allow_suffix | all_suffixes[c].isin(suffix_list)
        print("allow_suffix, at the end of this loop")
        display(allow_suffix)
        print("***********************")

# +
# If this condition is met, select the row (which is indexed by case_number)
subset = successfully_parsed & allow_prefix & allow_suffix
pcts = pcts[subset]

# Also, only select the rows that meet the above condition for our prefixes
# and suffixes dataframes
all_prefixes = all_prefixes[subset]
all_suffixes = all_suffixes[subset]
# -

prefix_dummies = pandas.get_dummies(all_prefixes, dtype="bool")

# NOTE(review): bare `VALID_PCTS_PREFIX` (no `laplan.pcts.` prefix) would be a
# NameError here, but `or` short-circuits because prefix_list is non-empty, so
# it is never evaluated. Same for VALID_PCTS_SUFFIX below. Worth fixing.
print(f"set of prefix_list or all the valid ones: {set(prefix_list or VALID_PCTS_PREFIX)}")
print(f"set of prefixes in our dummies: {set(prefix_dummies.columns)}")
missing_prefixes = set(prefix_list or VALID_PCTS_PREFIX) - set(
    prefix_dummies.columns
)
print(f"missing prefixes: {missing_prefixes}")

# +
# Stack suffix columns into one series, dummy-encode, then collapse back to one
# row per case with .max(level=0).
# NOTE(review): Series.max(level=0) is deprecated in newer pandas — confirm the
# pinned pandas version, or migrate to .groupby(level=0).max().
suffix_dummies = pandas.get_dummies(all_suffixes.stack(), dtype="bool").max(
    level=0
)
display(suffix_dummies.head())

# Identify if any of the requested suffixes are missing. If so,
# populate them with a column of falses
print(f"set of suffix_list or all the valid ones: {set(suffix_list or VALID_PCTS_SUFFIX)}")
print(f"set of suffixes in our dummies: {set(suffix_dummies.columns)}")
missing_suffixes = set(suffix_list or VALID_PCTS_SUFFIX) - set(
    suffix_dummies.columns
)
print(f"missing suffixes: {missing_suffixes}")
# -

# Make sure they are all nullable boolean type
suffix_dummies = suffix_dummies.astype("boolean")
prefix_dummies = prefix_dummies.astype("boolean")

# +
# Combine the dfs.
pcts = pandas.concat((pcts, prefix_dummies, suffix_dummies), axis=1)
pcts
# -
notebooks/step-by-step-subset-pcts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Intro to Classifying Structured Data with TensorFlow # This notebook demonstrates classifying structured. The code presented here can become a starting point for a problem you care about. Our goal is to introduce a variety of techniques (especially, feature engineering) rather than to aim for high-accuracy on the demo dataset we'll explore. # # ### Notes # * If you run this notebook multiple times, you'll want to restore it to a clean state. When you run the notebook, the Estimators will write logs and checkpoint files to disk. These will be in a *./graphs* directory in the same folder as this notebook. Delete this to restore to a clean state. # # # * We'll demonstrate two types of input functions. First, the pre-built Pandas input function, and second, one written using the new [Datasets API](https://www.tensorflow.org/programmers_guide/datasets). At the time of writing (v1.3) the Datasets API is in contrib. When it moves to core (most likely in v1.4) we'll update this notebook. # + from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np import pandas as pd from IPython.display import Image import tensorflow as tf print('This code requires TensorFlow v1.3+') print('You have:', tf.__version__) # - # ### About the dataset # # Here, we'll work with the [Adult dataset](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/) from the 1990 US Census. Our task is to predict whether an individual has an income over $50,000 / year, based attributes such as their age and occupation. This is a generic problem with a variety of numeric and categorical attributes - which makes it useful for demonstration purposes. 
# # A great way to get to know the dataset is by using [Facets](https://github.com/pair-code/facets) - an open source tool for visualizing and exploring data. At the time of writing, the [online demo](https://pair-code.github.io/facets/) has the Census data preloaded. Try it! In the screenshot below, each dot represents a person, or, a row from the CSV. They're colored by the label we want to predict ('blue' for less than 50k / year, 'red' for more). In the online demo, clicking on a person will show the attributes, or columns from the CSV file, that describe them - such as their age and occuptation. Image(filename='./images/facets1.jpg', width=500) census_train_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data' census_test_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test' census_train_path = tf.contrib.keras.utils.get_file('census.train', census_train_url) census_test_path = tf.contrib.keras.utils.get_file('census.test', census_test_url) # The dataset is missing a header, so we'll add one here. You can find descriptions of these columns in the [names file](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.names). column_names = [ 'age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income' ] # ### Load the using Pandas # # In the first half of this notebook, we'll assume the dataset fits into memory. Should you need to work with larger files, you can use the Datasets API to read them. # + # Notes # 1) We provide the header from above. 
# 2) The test file has a line we want to disgard at the top, so we include the parameter 'skiprows=1' census_train = pd.read_csv(census_train_path, index_col=False, names=column_names) census_test = pd.read_csv(census_test_path, skiprows=1, index_col=False, names=column_names) # Drop any rows that have missing elements # Of course there are other ways to handle missing data, but we'll # take the simplest approach here. census_train = census_train.dropna(how="any", axis=0) census_test = census_test.dropna(how="any", axis=0) # - # ### Correct formatting problems with the Census data # As it happens, there's a small formatting problem with the testing CSV file that we'll fix here. The labels in the testing file are written differently than they are in the training file. Notice the extra "." after "<=50K" and ">50K" in the screenshot below. # # You can open the CSVs in your favorite text editor to see the error, or you can see it with Facets in "overview mode" - which makes it easy to catch this kind of mistake early. Image(filename='./images/facets2.jpg', width=500) # Separate the label we want to predict into its own object # At the same time, we'll convert it into true/false to fix the formatting error census_train_label = census_train.pop('income').apply(lambda x: ">50K" in x) census_test_label = census_test.pop('income').apply(lambda x: ">50K" in x) # I find it useful to print out the shape of the data as I go, as a sanity check. print ("Training examples: %d" % census_train.shape[0]) print ("Training labels: %d" % census_train_label.shape[0]) print() print ("Test examples: %d" % census_test.shape[0]) print ("Test labels: %d" % census_test_label.shape[0]) # Likewise, I like to see the head of each file, to help spot errors early on. First for the training examples... census_train.head() # ... and now for the labels. Notice the label column is now true/false. census_train_label.head(10) # + # Likewise, you could do a spot check of the testing examples and labels. 
# census_test.head() # census_test_label.head() # - # # Estimators and Input Functions # # [TensorFlow Estimators](https://www.tensorflow.org/get_started/estimator) provide a high-level API you can use to train your models. Here, we'll use Canned Estimators ("models-in-a-box"). These handle many implementation details for you, so you can focus on solving your problem (e.g., by coming up with informative features using the feature engineering techniques we introduce below). # # To learn more about Estimators, you can watch this talk from Google I/O by <NAME>: [Effective TensorFlow for Non-Experts](https://www.youtube.com/watch?v=5DknTFbcGVM). Here's a diagram of the methods we'll use here. Image(filename='./images/estimators1.jpeg', width=400) # You can probably guess the purpose of methods like train / evaluate / and predict. What may be new to you, though, are [Input Functions](https://www.tensorflow.org/get_started/estimator#describe_the_training_input_pipeline). These are responsible for reading your data, preprocessing it, and sending it to the model. When you use an input function, your code will read *estimator.train(your_input_function)* rather than *estimator.train(your_training_data)*. # # First, we'll use a [pre-built](https://www.tensorflow.org/get_started/input_fn) input function. This is useful for working with a Pandas dataset that you happen to already have in memory, as we do here. Next, we'll use the [Datasets API](https://www.tensorflow.org/programmers_guide/datasets) to write our own. The Datasets API will become the standard way of writing input functions moving forward. It's in contrib in TensorFlow v1.3, but will most likely move to core in v1.4. # ### Input functions for training and testing data # Why do we need two input functions? There are a couple differences in how we handle our training and testing data. We want the training input function to loop over the data indefinitely (returning batches of examples and labels when called). 
# We want the testing input function run for just one epoch, so we can make
# one prediction for each testing example. We'll also want to shuffle the
# training data, but not the testing data (so we can compare it to the labels
# later).

def create_train_input_fn():
    # Training input: reads the in-memory pandas frames, shuffles, and
    # repeats forever; the caller bounds training with `steps=`.
    return tf.estimator.inputs.pandas_input_fn(
        x=census_train,
        y=census_train_label,
        batch_size=32,
        num_epochs=None,  # Repeat forever
        shuffle=True)

def create_test_input_fn():
    # Evaluation/prediction input: exactly one pass, in file order.
    return tf.estimator.inputs.pandas_input_fn(
        x=census_test,
        y=census_test_label,
        num_epochs=1,   # Just one epoch
        shuffle=False)  # Don't shuffle so we can compare to census_test_labels later

# See the bottom of the notebook for an example of doing this with the new
# Datasets API.

# # Feature Engineering
#
# Now we'll specify the features we'll use and how we'd like them represented.
# To do so, we'll use tf.feature_columns. Basically, these enable you to
# represent a column from the CSV file in a variety of interesting ways. Our
# goal here is to demonstrate how to work with different types of features,
# rather than to aim for an accurate model. Here are five different types
# we'll use in our Linear model:
#
# * A numeric_column. This is just a real-valued attribute.
# * A bucketized_column. TensorFlow automatically buckets a numeric column for us.
# * A categorical_column_with_vocabulary_list. This is just a categorical
#   column, where you know the possible values in advance. This is useful when
#   you have a small number of possibilities.
# * A categorical_column_with_hash_bucket. This is a useful way to represent
#   categorical features when you have a large number of values. Beware of
#   hash collisions.
# * A crossed_column. Linear models cannot consider interactions between
#   features, so we'll ask TensorFlow to cross features for us.
#
# In the Deep model, we'll also use:
#
# * An embedding column(!). This automatically creates an embedding for
#   categorical data.
# # You can learn more about feature columns in the [Large Scale Linear Models Tutorial](https://www.tensorflow.org/tutorials/linear#feature_columns_and_transformations) in the [Wide & Deep tutorial](https://www.tensorflow.org/tutorials/wide_and_deep#define_base_feature_columns), as well as in the [API doc](https://www.tensorflow.org/api_docs/python/tf/feature_column). # # Following is a demo of a couple of the things you can do. # A list of the feature columns we'll use to train the Linear model feature_columns = [] # To start, we'll use the raw, numeric value of age. age = tf.feature_column.numeric_column('age') feature_columns.append(age) # Next, we'll add a bucketized column. Bucketing divides the data based on ranges, so the classifier can consider each independently. This is especially helpful to linear models. Here's what the buckets below look like for age, as seen using Facets. Image(filename='./images/buckets.jpeg', width=400) # + age_buckets = tf.feature_column.bucketized_column( tf.feature_column.numeric_column('age'), boundaries=[31, 46, 60, 75, 90] # specify the ranges ) feature_columns.append(age_buckets) # - # You can also evenly divide the data, if you prefer not to specify the ranges yourself. # + # age_buckets = tf.feature_column.bucketized_column( # tf.feature_column.numeric_column('age'), # list(range(10)) #) # + # Here's a categorical column # We're specifying the possible values education = tf.feature_column.categorical_column_with_vocabulary_list( "education", [ "Bachelors", "HS-grad", "11th", "Masters", "9th", "Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th", "Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th", "Preschool", "12th" ]) feature_columns.append(education) # - # If you prefer not to specify the vocab in code, you can also read it from a file, or alternatively - use a categorical_column_with_hash_bucket. Beware of hash collisions. 
# A categorical feature with a possibly large number of values # and the vocabulary not specified in advance. native_country = tf.feature_column.categorical_column_with_hash_bucket('native-country', 1000) feature_columns.append(native_country) # Now let's create a crossed column for age and education. Here's what this looks like. Image(filename='./images/crossed.jpeg', width=400) age_cross_education = tf.feature_column.crossed_column( [age_buckets, education], hash_bucket_size=int(1e4) # Using a hash is handy here ) feature_columns.append(age_cross_education) # ## Train a Canned Linear Estimator # # Note: logs and a checkpoint file will be written to *model_dir*. Delete this from disk before rerunning the notebook for a clean start. train_input_fn = create_train_input_fn() estimator = tf.estimator.LinearClassifier(feature_columns, model_dir='graphs/linear', n_classes=2) estimator.train(train_input_fn, steps=1000) # ## Evaluate test_input_fn = create_test_input_fn() estimator.evaluate(test_input_fn) # ### Predict # # The Estimator returns a generator object. This bit of code demonstrates how to retrieve predictions for individual examples. # + # reinitialize the input function test_input_fn = create_test_input_fn() predictions = estimator.predict(test_input_fn) i = 0 for prediction in predictions: true_label = census_test_label[i] predicted_label = prediction['class_ids'][0] # Uncomment the following line to see probabilities for individual classes # print(prediction) print("Example %d. Actual: %d, Predicted: %d" % (i, true_label, predicted_label)) i += 1 if i == 5: break # - # ## What features can you use to achieve higher accuracy? # This dataset is imbalanced, so an an accuracy of around 75% is *low* in this context (this could be achieved merely by predicting *everyone* makes less than 50k / year). In fact, if you look through the predictions closely, you'll find that many are zero. We'll get a little smarter as we go. 
# ## Train a Deep Model # ### Add an embedding feature(!) and update the feature columns # Instead of using a hash to represent categorical features, here we'll use a learned embedding. (Cool, right?) We'll also update how the features are represented for our deep model. Here, we'll use a different combination of features that before, just for fun. # + # We'll provide vocabulary lists for features with just a few terms workclass = tf.feature_column.categorical_column_with_vocabulary_list( 'workclass', [' Self-emp-not-inc', ' Private', ' State-gov', ' Federal-gov', ' Local-gov', ' ?', ' Self-emp-inc', ' Without-pay', ' Never-worked']) education = tf.feature_column.categorical_column_with_vocabulary_list( 'education', [' Bachelors', ' HS-grad', ' 11th', ' Masters', ' 9th', ' Some-college', ' Assoc-acdm', ' Assoc-voc', ' 7th-8th', ' Doctorate', ' Prof-school', ' 5th-6th', ' 10th', ' 1st-4th', ' Preschool', ' 12th']) marital_status = tf.feature_column.categorical_column_with_vocabulary_list( 'marital-status', [' Married-civ-spouse', ' Divorced', ' Married-spouse-absent', ' Never-married', ' Separated', ' Married-AF-spouse', ' Widowed']) relationship = tf.feature_column.categorical_column_with_vocabulary_list( 'relationship', [' Husband', ' Not-in-family', ' Wife', ' Own-child', ' Unmarried', ' Other-relative']) # - feature_columns = [ # Use indicator columns for low dimensional vocabularies tf.feature_column.indicator_column(workclass), tf.feature_column.indicator_column(education), tf.feature_column.indicator_column(marital_status), tf.feature_column.indicator_column(relationship), # Use embedding columns for high dimensional vocabularies tf.feature_column.embedding_column( # now using embedding! 
# params are hash buckets, embedding size tf.feature_column.categorical_column_with_hash_bucket('occupation', 100), 10), # numeric features tf.feature_column.numeric_column('age'), tf.feature_column.numeric_column('education-num'), tf.feature_column.numeric_column('capital-gain'), tf.feature_column.numeric_column('capital-loss'), tf.feature_column.numeric_column('hours-per-week'), ] estimator = tf.estimator.DNNClassifier(hidden_units=[256, 128, 64], feature_columns=feature_columns, n_classes=2, model_dir='graphs/dnn') train_input_fn = create_train_input_fn() estimator.train(train_input_fn, steps=2000) test_input_fn = create_test_input_fn() estimator.evaluate(test_input_fn) # That's a little better. # ### TensorBoard # If you like, you can start TensorBoard by running this from a terminal command (in the same directory as this notebook): # # ```$ tensorboard --logdir=graphs``` # # then pointing your web-browser to ```http://localhost:6006``` (check the TensorBoard output in the terminal in case it's running on a different port). # # When that launches, you'll be able to see a variety of graphs that compares the linear and deep models. Image(filename='./images/tensorboard.jpeg', width=500) # ## Datasets API # Here, I'll demonstrate how to use the new [Datasets API](https://www.tensorflow.org/programmers_guide/datasets), which you can use to write complex input pipeline from simple, reusable pieces. # # At the time of writing (v1.3) this API is in contrib. It's most likely moving into core in v1.4, which is good news. Using TensorFlow 1.4, the below can be written using *regular* Python code to parse the CSV file, via the *Datasets.from_generator()* method. 
This improves producivity a lot - it means you can use Python to read, parse, and apply whatever logic you wish to your input data - then you can take advantage of the reusable pieces of the Datasets API (e.g., batch, shuffle, repeat, etc) - as well as the optional performance tuning (e.g., prefetch, parallel process, etc). # # In combination with Estimators, this means you can train and tune deep models at scale on data of almost any size, entirely using a high-level API. I'll update this notebook after v1.4 is released with an example. It's neat. # + # I'm going to reset the notebook to show you how to do this from a clean slate # %reset -f import collections import tensorflow as tf census_train_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data' census_test_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test' census_train_path = tf.contrib.keras.utils.get_file('census.train', census_train_url) census_test_path = tf.contrib.keras.utils.get_file('census.test', census_test_url) # - # Provide default values for each of the CSV columns # and a header at the same time. csv_defaults = collections.OrderedDict([ ('age',[0]), ('workclass',['']), ('fnlwgt',[0]), ('education',['']), ('education-num',[0]), ('marital-status',['']), ('occupation',['']), ('relationship',['']), ('race',['']), ('sex',['']), ('capital-gain',[0]), ('capital-loss',[0]), ('hours-per-week',[0]), ('native-country',['']), ('income',['']), ]) # + # Decode a line from the CSV. def csv_decoder(line): """Convert a CSV row to a dictonary of features.""" parsed = tf.decode_csv(line, list(csv_defaults.values())) return dict(zip(csv_defaults.keys(), parsed)) # The train file has an extra empty line at the end. # We'll use this method to filter that out. 
def filter_empty_lines(line):
    # True when the line contains at least one comma-separated field,
    # i.e. it is not the blank line at the end of the train file.
    return tf.not_equal(tf.size(tf.string_split([line], ',').values), 0)

def create_train_input_fn(path):
    """Return an input_fn that streams shuffled, repeated, batched training rows from the CSV at `path`."""
    def input_fn():
        dataset = (
            tf.contrib.data.TextLineDataset(path)  # create a dataset from a file
                .filter(filter_empty_lines)  # ignore empty lines
                .map(csv_decoder)  # parse each row
                .shuffle(buffer_size=1000)  # shuffle the dataset
                .repeat()  # repeat indefinitely
                .batch(32))  # batch the data

        # create iterator
        columns = dataset.make_one_shot_iterator().get_next()

        # separate the label and convert it to true/false
        income = tf.equal(columns.pop('income')," >50K")
        return columns, income
    return input_fn

def create_test_input_fn(path):
    """Return an input_fn that makes a single, unshuffled pass over the test CSV at `path`."""
    def input_fn():
        dataset = (
            tf.contrib.data.TextLineDataset(path)
                .skip(1)  # The test file has a strange first line, we want to ignore this.
                .filter(filter_empty_lines)
                .map(csv_decoder)
                .batch(32))

        # create iterator
        columns = dataset.make_one_shot_iterator().get_next()

        # separate the label and convert it to true/false
        income = tf.equal(columns.pop('income')," >50K")
        return columns, income
    return input_fn
# -

# ## Here's code you can use to test the Dataset input functions

# +
train_input_fn = create_train_input_fn(census_train_path)
next_batch = train_input_fn()

with tf.Session() as sess:
    features, label = sess.run(next_batch)
    print(features['education'])
    print(label)
    print()
    features, label = sess.run(next_batch)
    print(features['education'])
    print(label)
# -

# From here, you can use the input functions to train and evaluate your Estimators. I'll add some minimal code to do this, just to show the mechanics.
# +
train_input_fn = create_train_input_fn(census_train_path)
# Bug fix: this previously called create_train_input_fn on the test file,
# which shuffles and repeats the test data indefinitely and does not skip
# the malformed first line of adult.test. create_test_input_fn handles both.
test_input_fn = create_test_input_fn(census_test_path)

feature_columns = [
    tf.feature_column.numeric_column('age'),
]

estimator = tf.estimator.DNNClassifier(hidden_units=[256, 128, 64],
                                       feature_columns=feature_columns,
                                       n_classes=2,
                                       # creating a new folder in case you haven't cleared
                                       # the old one yet
                                       model_dir='graphs_datasets/dnn')
estimator.train(train_input_fn, steps=100)

# Evaluate on the held-out test set. (This previously evaluated on
# train_input_fn, which reports an optimistic, in-sample metric and left
# test_input_fn unused.)
estimator.evaluate(test_input_fn, steps=100)
# -

# This would be a good time to clean up the logs and checkpoints on disk, by deleting ```./graphs``` and ```./graphs_datasets```.

# ## Next steps
#
# ## To learn more about feature engineering
#
# Check out the [Wide and Deep tutorial](https://www.tensorflow.org/tutorials/wide_and_deep). Also, see that tutorial for another kind of Estimator you can try that combines the Linear and Deep models.
#
# ## To learn more about Datasets
#
# Check out the [programmers guide](https://www.tensorflow.org/programmers_guide/datasets), and check back after v1.4 is released for the Dataset.from_generator method, which I think will improve productivity a lot.
Week_02_Unit_02_03_04.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + hideCode=false hidePrompt=false import pandas as pd import numpy as np from datetime import timedelta, date from datetime import datetime from pymongo import MongoClient from pymongo.collation import Collation, CollationStrength import geopandas as geopd import shapely.geometry import json import pytz import geopandas as gpd import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (20, 10) # - def geotweet_connect(username, pwd): client = MongoClient('mongodb://%s:%s@<EMAIL>:27016' % (username, pwd)) db = client['tweets'] geotweets = db['geotweets'] return geotweets # + ## connect locally (oncampus or over vpn) #geotweets = geotweet_connect('guest','roboctopus') #print(geotweets.estimated_document_count()) # + ## connect remotely ## create tunnel in unix with #ssh -N -f -L localhost:27016:localhost:27016 hydra_j c = MongoClient(host='127.0.0.1', port=27016) #c.test.caommand('buildinfo') db = c['tweets'] #db.list_collection_names() geotweets = db['geotweets'] geotweets.find_one() # - start = datetime(2016,3,1) end = datetime(2016,3,2) geotweets.find_one({'tweet_created_at':{'$gte':start, '$lt':end}})
notebooks/test/test_notebooks/test_hydraconnect.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.5.1 # language: julia # name: julia-1.5 # --- # # Tutorial básico de Julia # # ## Julia I # # # > <NAME> $\mathbb{R}i \vec c \hbar$ # > # > Agradecimiento especial a <NAME> por iniciar este tutorial que se ha ido actualizando a lo largo del tiempo. # # En el enlace de [Mi primer Notebook](http://nbviewer.jupyter.org/url/sistemas.fciencias.unam.mx/~rich/Herramientas/notebooks/Mi_primer_notebook.ipynb), se puede aprender acerca de las bondades y beneficios de la herramienta que ocupamos actualmente, a saber **Python 2** dentro de la plataforma de **Jupyter Notebook**. En este Notebook y en los subsecuentes a este tutorial comenzaremos a aprender a programar en el lenguaje **Julia**. # # Comencemos por notar una sutil diferencia entre la notación entre *Python 2* y *Julia*. Veamos la diferencia entre hacer las cosas en Julia y en Python 2: # # > Julia: # # ``` julia # print("Hola") # ``` # # > Python: # # ```python # print "Hola" # ``` # # En el caso de Julia podemos observar que es un lenguaje mucho más estructurado con una notación más estricta o formal con la finalidad de no dejar ambigüedades. De hecho, en este ejemplo vemos que es más parecido a **Python 3**. A lo largo del camino veremos que hay muchas similitudes con **Python 3**, pero de una manera mucho más estructurada, con la ventaja de que **Julia** es más rápido si se sabe manejar de manera adecuada. # ## Herramienta de ayuda y variables # # Empezaremos con una de las herramientas más útiles en Julia, la función de ayuda. 
Veamos cómo se usa, y para ello se utiliza símbolo de **?** antes de cada comando como se muestra a continuación: # # ```julia # ?cos # ``` # # Al ejecutar esta celda, Julia nos dirá información sobre la función `cos` (coseno), en particular, la salida de `?cos` es: # # ```julia # cos(x) # Compute cosine of x, where x is in radians. # ``` # # En general, la función **?** nos dirá cosas tales como los tipos de argumentos que puede tomar la función, la forma de las salidas y, algunas veces, ejemplos de uso de la función acerca de la cuál estamos preguntando. # # Ahora imaginémonos que queremos calcular el determinante de una matriz. Es razonable pensar que Julia tiene incorporada una función que haga esto, pero no sabemos qué función es. Tenemos dos formas de proceder: # # 1. Revisar la documentación de julia [aquí](https://docs.julialang.org) o # # 2. Intentar adivinar. # # Aunque la opción dos parece absurda, en Julia suele ser muy fácil, esto debido a que podemos empezar por escribir en la computadora # # ```julia # de # ``` # # y luego presionamos la tecla `tab`. Esto generará una lista al lado de donde escribimos `de` con todas las funciones, variables que ya se hayan definido o disponibles en el momento. En este ejemplo en particular se obtiene # # ``` julia # dec # deconv # deepcopy # default_worker_pool # deg2rad # delete! # deleteat! # den # deserialize # det # # ``` # # Claramente la función `det` es la principal candidata a ser la que buscamos, ahora simplemente tecleamos # # ```julia # ?det # ``` # # y así nos convencemos de que en efecto con `det` calculamos determinantes. # # Una de las primeras cosas que vamos a hacer con Julia es definir variables. Esto es muy simple y basta con escribir # # ```julia # # x=5 # # ``` # y ya tenemos a `x` con el valor $5$. 
# # Si ahora quisiéramos imprimir el valor de `x` se usa la siguiente sintaxis: # # ```julia # println("$x") # ``` # # El operador ```$``` le está diciendo a Julia que evalúe a `x`, lo convierta en una cadena de caracteres y lo ponga en ese lugar. x=5 println("x = $x") # Otra forma de hacerlo es simplemente decirle a Julia que nos de el valor de `x` sin la necesidad de que lo convierta en una cadena. println(x) # Note que la primera forma de imprimir el valor de `x` es equivalente a la siguiente: println("x = ",x) # ## Tipos de datos # # ### Números # # Ahora hablaremos un poco de la forma en la que Julia interpreta algunas de las variables que le decimos. Julia clasifica sus objetos con una etiqueta que se llama tipo. Por ejemplo, el número $1$ es del tipo `Int64`. ¿Qué quiere decir esto?. Significa que Julia le está pidiendo a la computadora que reserve 64 bits de memoria en alguna parte de la memoria para hacer existir al número introducido, en este caso el número *uno*. La variable `x` que definimos de ejemplo es de este tipo. ¿Cómo lo podemos saber?. Existe la función `typeof(x)` que nos dice qué tipo de objeto es `x`. Julia es muy inteligente con los tipos. Por ejemplo, en algunos lenguajes de programación si ponemos: # # ``` # 1/5 # ``` # # obtenemos de salida 0. Esto debido a que $1$ y $5$ son del tipo enteros, entonces el resultado debe ser un entero y el entero más cercano a $\frac{1}{5}$ es $0$. En cambio, si en Julia escribimos # # ```julia # 1/5 # ``` # # el output que obtenemos es $0.2$. Es decir, Julia 'se da cuenta' que (obviamente) no queremos que el resultado de escribir `1/5` sea un entero y entonces automáticamente le asigna el tipo `Float64`, que se puede decir que es el tipo que corresponde a los reales. Puedes experimentar un poco con esto preguntando con `typeof` los tipos de algunos objetos. 
Existe también el tipo de objetos `complex` y el número $i$ de los números complejos se denota por `im`, entonces para escribir $x+iy$ escribimos
#
# ```julia
# x + im*y
# ```
#
# En general, no es necesario decirle a Julia qué tipos de objetos vamos a usar porque el programa siempre intenta cambiar los tipos de manera adecuada para que las operaciones que uno le está pidiendo a la computadora se puedan realizar.
#
# Hasta antes de la versión `0.5` de Julia, se podía usar la función `int(x)` para convertir a `x` en un entero, pero ésta ha sido eliminada. Sin embargo, sí se puede usar la función `float(x)` para convertir a x en un flotante. Si deseas obtener un resultado entero de una operación, se puede usar la función `round(x)`.
#
# Para calcular potencias de números escribimos `x^y`. También se puede usar notación científica y escribir `1e2`, lo cual nos regresa un `100.0` (del tipo flotante, no entero).
#
# ### Arreglos
#
# Ahora veamos uno de los elementos más importantes de Julia, los arreglos. Éstos son un tipo de objeto que guarda muchos valores en una sola variable. Podemos pensar en algunos arreglos como vectores o como matrices, pero en realidad pueden ser mucho más que eso. Empecemos por definir un par de arreglos sencillos.

x=[1,2,3]

y=[1 2 3]

# En principio estos dos arreglos parecen similares, pero preguntémosle a Julia si tienen diferencias. Para esto escribimos `typeof(x)` y `typeof(y)`.
#
# ```julia
# In: typeof(x)
# Out: Array{Int64,1}
#
# In: typeof(y)
# Out: Array{Int64,2}
# ```
# La diferencia está en lo que se encuentra al lado de `Int64`, `x` es un arreglo que se indexa con un sólo índice y `y` se puede indexar con dos. ¿Qué significa esto?. Cuando definimos `x` Julia nos dice que es un arreglo de 3 elementos pero cuando definimos `y` nos dice que es un arreglo de `1x3` elementos. Esto quiere decir, como en las matrices, que tenemos un renglón y 3 columnas.
Si queremos accesar a un elemento específico de x, escribimos `x[i]`, donde `i` será el i-ésimo elemento del arreglo. En Julia, el primer elemento comienza con 1. # # ```julia # x[i] # ``` # # Como dijimos, `y` está indexado por dos índices, lo cual quiere decir que podemos escribir `y[1,j]` y esto nos da el elemento que se encuentra en el primer renglón en la columna j-ésima. # # ```julia # y[1,j] # ``` # # Ahora definamos una verdadera matriz para hablar más sobre los índices. Podemos definir la siguiente matriz: M = [1 2 3;-1 -2 -3;0 0 0] # El tipo de `M` es `Array{Int64,2}` y tiene 9 elementos. Supongamos que queremos extraer el valor -3. Esto lo podemos hacer de dos formas. La primera es llamar al elemento por renglón y columna, entonces escribiríamos `M[2,3]` y obtenemos el `-3`, pero Julia nos permite hacer otra cosa. Si empezamos a contar desde el `1` hacia abajo, de modo que cuando lleguemos a la columna `2` y nuestra cuenta vaya en 8, se puede decir que el `-3` es el elemento 8 del arreglo. Entonces si escribimos `M[8]` Julia nos regresa el `-3` tal como se podría esperar. Esto no parecería ser muy importante pero a veces puede ser muy útil, en especial cuando hacemos ciclos (los cuales veremos en el siguiente Notebook). # # El operador `*` en Julia tiene muchas formas de usarse, a cada una de estas formas se le llama un método. En particular, si tenemos dos matrices que se puedan multiplicar de acuerdo a las reglas el álgebra lineal, podemos multiplicarlas directamente como `A*B`. Por ejemplo, podemos multiplicar de la siguiente forma: y*M*x # En lenguaje matemático es análogo a que escribiéramos $\mathbf{y}^T \mathbb{M} \mathbf{x}$. Si deseas aprender un poco más acerca de las operaciones que se pueden realizar con los arreglos puedes consultar [Julia arrays](https://docs.julialang.org/en/v1/base/arrays/). # # Los arreglos que hemos definido hasta ahora tienen como tipo de dato el tipo de los elementos que lo conforman. 
Podemos hacer un arreglo de `strings`.

S = ["Uno" "Dos" "Tres";"Hola" "Adiós" "Void"]

# Pero podemos definir un arreglo sin decirle a Julia qué tipo de dato vamos a guardar en cada una de sus entradas. Lo que sí debemos especificar es el tamaño del arreglo. Por ejemplo

U = Array{Any}(undef,2,2,3)

# `U` es un arreglo complicado, así que intentemos entenderlo. Primero, el arreglo se indexa con 3 índices, cada uno de los cuales puede tener distintos valores. Es decir, si llamamos al elemento `U[i,j,k]` es claro que `i` puede valer 1 ó 2, `j` puede valer 1 ó 2 y `k` puede valer 1, 2 ó 3. Segundo, el tipo de dato que tiene es `Any`, esto significa que podemos llenarlo con cualquier cosa. ¿Qué significa esto?. Si vemos los ejemplos anteriores de `S` y `M`, si intentamos sustituir uno de los elementos de `S` por un número o uno de los elementos de `M` por una cadena de caracteres Julia nos regresa un error como se muestra a continuación:

S[1,1] = 5

M[1,1] = "Hola"

# Estos errores se deben a que la conversión de tipo no es clara para Julia y entonces no procede. Ahora, el arreglo `U` está vacío, pero intentemos llenarlo.

U[1,1,1] = "Hola" ; U[1,2,1] = 5

U[1,2,2] = im; U

# Y podemos observar que el arreglo `U` ahora consta de elementos de tipos distintos sin ningún problema. Esto puede ser extraordinariamente útil, los elementos de `U` incluso podrían ser arreglos.
#
# Con lo que sabemos hasta ahora sobre los arreglos, haremos mención de una herramienta útil de Julia. Supongamos que tenemos la función de 3 variables
#
# ```julia
# h(x,y,z)
# ```
#
# Podemos definir un par de arreglos $X=[x_1,x_2,x_3]$ y $Y=[y_1 \; y_2 \; y_3]$. Si quisiéramos evaluar la función `h` en los valores de estos arreglos, no basta con escribir `h(X)` o `h(Y)`, pero se puede hacer lo siguiente:
#
# ```julia
# h(X...)
# h(Y...)
# ```
#
# Lo que hace el operador `...` es "desempacar" los elementos de `X` y `Y` para pasar cada uno de sus elementos como argumento individual a la función `h`.
Es decir, lo que escribimos es equivalente a escribir: # # ```julia # h(X[1],X[2],X[3]) # ``` # # Como motivación del uso de arreglos, ahora mostramos una manera interesante de definir una matriz que dará pie a definir los ciclos. R = reshape([rand(1:9) for i in 1:9],3,3) # Lo que aparece es una matriz de `3x3` cuyos elementos son números aleatorios entre 1 y 9. Usamos dos funciones: `reshape` y `rand`, un ciclo `for` y un rango `1:9`. Para aprender un poco más de cada una de estás cosas usa `typeof()` y `?`. En el siguiente Notebook aprenderemos a usar ciclos. # # ### Tuplas # # Además de los arreglos en Julia tiene otro tipo de estructura de datos llamada tupla que se escriben # # ```julia # (n1,n2,...,nL) # ``` # ó # # ```julia # n1,n2,...,nL # ``` # # Y tenemos una tupla de `L` elementos. Las dos formas de escribirlo son equivalentes. Las $n_i$ no tienen que ser todas del mismo tipo. Cada una de ellas puede ser distinta que las demás. typeof((1,2,3)) 1,[1,2] typeof((1,[1,2])) # La utilidad de las tuplas está en que a veces queremos que una función nos regrese más de un valor, pero no queremos guardar esto en un arreglo. Podemos pedirle a Julia que nos devuelva una tupla y guardar los valores por separado. Por ejemplo: H(x,y) = 2x , 3y valor_1,valor_2 = H(1,1) valor_1 valor_2 # El ejercicio anterior nos muestra la forma básica de como definir una función. Hay varias maneras de hacerlo y a continuación veamos como definir una función en Julia. # # ## Funciones # # Podemos fácilmente definir funciones: # # ```julia # f(y) = y^2 - 1 # ``` # # Con esto hemos definido la función `f` que podemos evaluar en la `y` que queramos. Otra forma de definir funciones que resulta más útil cuando la función no es para hacer una simple operación aritmética es: # # ```julia # function signo(x) # if x < 0 # -1 # else # 1 # end # end # ``` # # Esta función (que ya está definida en Julia y se llama, por supuesto, `sign`) nos dice el signo del número `x` que pongamos como argumento. 
Analicemos la sintaxis de la función # # 1. Primero tenemos que decirle a julia que vamos a definir una función, esto lo hacemos escribiendo `function`. # 2. Después ponemos el nombre de la función (en este caso `signo`) y ponemos a continuación entre paréntesis los argumentos que va a tomar la función. Los tipos de argumentos puedes ser muy variados, pueden ser enteros, flotantes, arreglos, incluso otras funciones. # >Nota: Existen funciones que no llevan argumentos, por ejemplo, la función `rand()` nos da un número pseudo-aleatorio entre 0 y 1, inténtalo. Intenta también preguntarle a Julia qué más sabe hacer la función `rand`. # # 3. Ahora presionamos `enter` y Julia automáticamente pone una sangría para hacer más claro el cuerpo de la función. # >Nota: En el ejemplo, después del if hay una sangría, esto se hace para dejar muy claro qué partes del código pertenecen a los ciclos. # # 4. Finalmente debemos avisar a Julia que hemos terminado de definir la función. Esto se hace con `end`. Nota que `function` y `end` empiezan 'a la misma altura'. # # En Julia se pueden definir funciones con variables que se conocen como opcionales. Esto se hace de la siguiente forma: # # ```julia # G(x,y=n) # ``` # # Esto define a una función `G` que toma como argumento a `x` y opcionalmente podemos poner un argumento `y`, en caso de que no lo pongamos `G` se evalúa con el valor predeterminado `n`. Por ejemplo, definimos: # # ```julia # f_2(y,x=0) = y^2 - 1 + x # ``` # # Esta función va a dar el mismo valor que la `f` que habíamos definido de ejemplo anterior siempre y cuando no le pasemos el segundo argumento. En cambio, cuando le demos un segundo argumento distinto de $0$ es claro que la función tendrá otro comportamiento. Veamos: f_2(y,x=0) = y^2 - 1 + x f_2(1) f_2(1,1) # Experimenta definiendo funciones que regresen tuplas y ve la practicidad de utilizar este tipo de arreglos. Como se puede observar, Julia ofrece una gran versatilidad en el manejo de las variables. 
Además, este lenguaje está orientado al manejo eficiente de arreglos, haciendo que este lenguaje sea idóneo para cálculo numérico. # Regresar a las [Herramientas](http://sistemas.fciencias.unam.mx/~rich/Herramientas/) # # Curso relacionado con este notebook: [Física Computacional](http://sistemas.fciencias.unam.mx/~rich/FisComp/) # # Se agradece el apoyo de los proyectos: # * PE 112919 durante el año 2020. *Actualización a la última versión de Julia. Se han agregado explicaciones y ejemplos* # * PE 105017 durante el año 2017. *Idea original*
Notebooks_Herramientas/Tutorial_Julia/Julia_I.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Example: Using MIRAGE to Generate Moving Target Exposures # ### (i.e. Solar System target observations) # The `mirage` simulator is broken up into three basic stages: # # 1. **Creation of a "seed image".**<br> # This is generally a noiseless countrate image that contains signal # only from the astronomical sources to be simulated. Currently, the # mirage package contains code to produce a seed image starting # from object catalogs.<br> # Note that the much larger amount of data in a # seed image containing moving targets means that this step will be significantly # slower than when generating a simple seed image for a sidereal observation.<br><br> # # 2. **Dark current preparation.**<br> # The simulated data will be created by adding the simulated sources # in the seed image to a real dark current exposure. This step # converts the dark current exposure to the requested readout pattern # and subarray size requested by the user.<br><br> # # 3. **Observation generation.**<br> # This step converts the seed image into an exposure of the requested # readout pattern and subarray size. It also adds cosmic rays and # Poisson noise, as well as other detector effects (IPC, crosstalk, etc). 
# This exposure is then added to the dark current exposure from step 2.<br><br> # *Table of Contents:* # * Single image simulation # * [Running simulator steps independently](#run_steps_independently) # * [Running simulator steps together](#run_steps_together) # * [Running multiple simulations](#mult_sims) # * [Generating `yaml` files](#make_yaml) # * [Example `yaml` file](#yaml_example) # --- # ## Getting Started # # <div class="alert alert-block alert-warning"> # **Important:** # Before proceeding, ensure you have set the MIRAGE_DATA environment variable to point to the directory that contains the reference files associated with MIRAGE. # <br/><br/> # If you want JWST pipeline calibration reference files to be downloaded in a specific directory, you should also set the CRDS_DATA environment variable to point to that directory. This directory will also be used by the JWST calibration pipeline during data reduction. # <br/><br/> # You may also want to set the CRDS_SERVER_URL environment variable set to https://jwst-crds.stsci.edu. This is not strictly necessary, and Mirage will do it for you if you do not set it, but if you import the crds package, or any package that imports the crds package, you should set this environment variable first, in order to avoid an error. # </div> # *Table of Contents:* # * [Imports](#imports) # * [Generating `yaml` files](#make_yaml) # * [Create Simulated Data](#run_steps_together) # * [Simulating Multiple Exposures](#mult_sims) # * [Running Simulation Steps Independently](#run_steps_independently) # * [Example `yaml` file](#yaml_example) # --- # <a id='imports'></a> # # Imports # Set the MIRAGE_DATA environment variable if it is not # set already. This is for users at STScI. 
import os # + #os.environ["MIRAGE_DATA"] = "/my/mirage_data/" #os.environ["CRDS_DATA"] = "/user/myself/crds_cache" #os.environ["CRDS_SERVER_URL"] = "https://jwst-crds.stsci.edu" # - # For examining outputs from glob import glob from scipy.stats import sigmaclip import numpy as np from astropy.io import fits import matplotlib.pyplot as plt # %matplotlib inline # mirage imports from mirage import imaging_simulator from mirage.seed_image import catalog_seed_image from mirage.dark import dark_prep from mirage.ramp_generator import obs_generator from mirage.yaml import yaml_generator # --- # <a id='make_yaml'></a> # # Generating input yaml files # Note that Mirage does not yet support the automated creation of yaml files from an APT file for solar system targets. For the time being, the easiest work-around for this is to start with an existing yaml file (such at that on the [example yaml page of the documentation](https://mirage-data-simulator.readthedocs.io/en/latest/example_yaml.html), and manually edit the input fields. Remember to set the `Telescope: tracking` entry to `non-sidereal`, so that targets in the `movingTargetToTrack` catalog will remain at a fixed location in the output data, while background targets in the `pointsource`, `galaxyListFile`, and `extended` catalogs will trail across the field of view over the course of each exposure. # --- # <a id='run_steps_together'></a> # # Create Simulated Data # ### The imaging simulator class # The imaging_simulator.ImgSim class is a wrapper around the three main steps of the simulator (detailed in the [Running simulator steps independently](#run_steps_independently) section below). This convenience function is useful when creating simulated imaging mode data. WFSS data will need to be run in a slightly different way. See the WFSS example notebook for details. 
# Specify the yaml input file to use yamlfile = 'movingtarget_example_data/moving_target_test.yaml' # Run all steps of the imaging simulator for yaml file #1 m = imaging_simulator.ImgSim() m.paramfile = yamlfile m.create() # ### Examine the Output def show(array, title, min=0, max=1000): plt.figure(figsize=(12, 12)) plt.imshow(array, clim=(min, max)) plt.title(title) plt.colorbar().set_label('DN$^{-}$/s') def show_mult(array1, array2, array3, title, min=0, max=1000): fig = plt.figure(figsize=(18, 18)) a = fig.add_subplot(131) aplt = plt.imshow(array1, clim=(min, max)) b = fig.add_subplot(132) bplt = plt.imshow(array2, clim=(min, max)) plt.title(title) c = fig.add_subplot(133) cplt = plt.imshow(array3, clim=(min, max)) # #### Noiseless Seed Image # This image is an intermediate product. It contains only the signal from the astronomical sources and background. There are no detector effects, nor cosmic rays added to this count rate image. # + # First, look at the noiseless seed image # In this case, the seed image is 4D rather than the # 2D that it is for sidereal targets. # So let's look at just the final frame of the seed image # The non-sidereal target is in the center of the frame and appears # as a normal PSF (although hard to see in this view). All of the # background stars and galaxies are # smeared, since the telescope was not tracking at the sidereal rate. show(m.seedimage[0,-1,:,:],'Seed Image',max=250) # - # #### Final Output Product # Next examine the final output product. The `datatype` parameter in the yaml file specifies that Mirage should save both the raw and linearized versions of the output. Let's look first at the linearized version. lin_file = 'movingtarget_example_data/jw12345024002_01101_00001_ncrb5_linear.fits' with fits.open(lin_file) as hdulist: linear_data = hdulist['SCI'].data print(linear_data.shape) show(linear_data[0, -1, :, :], "Final Group", max=250) # Examine the raw output. 
First a single group, which is dominated by noise and detector artifacts. raw_file = 'movingtarget_example_data/jw12345024002_01101_00001_ncrb5_uncal.fits' with fits.open(raw_file) as hdulist: raw_data = hdulist['SCI'].data print(raw_data.shape) show(raw_data[0, -1, :, :], "Final Group", max=15000) # Many of the instrumental artifacts can be removed by looking at the difference between two groups. Raw data values are integers, so first make the data floats before doing the subtraction. show(1. * raw_data[0, -1, :, :] - 1. * raw_data[0, 0, :, :], "Last Minus First Group", max=200) # This raw data file is now ready to be run through the [JWST calibration pipeline](https://jwst-pipeline.readthedocs.io/en/stable/) from the beginning. If dark current subtraction is not important for you, you can use Mirage's linear output, skip some of the initial steps of the pipeline, and begin by running the [Jump detection](https://jwst-pipeline.readthedocs.io/en/stable/jwst/jump/index.html?highlight=jump) and [ramp fitting](https://jwst-pipeline.readthedocs.io/en/stable/jwst/ramp_fitting/index.html) steps. # --- # <a id='run_steps_independently'></a> # # Running simulation steps independently # ## First generate the "seed image" # This is generally a 2D noiseless countrate image that contains only simulated astronomical sources. However, when creating data using non-sidereal tracking or for sidereal tracking where a moving target (e.g. asteroid, KBO) are in the field of view, the seed image will in fact be a 3D seed ramp. # # A seed image is generated based on a `.yaml` file that contains all the necessary parameters for simulating data. An example `.yaml` file is show at the [bottom of this notebook](#yaml_example). 
# yaml file that contains the parameters of the # data to be simulated # Example yaml file shown at the bottom of this # notebook yamlfile = 'movingtarget_example_data/moving_target_test.yaml' cat = catalog_seed_image.Catalog_seed() cat.paramfile = yamlfile cat.make_seed() # ### Look at the seed image # + def show(array,title,min=0,max=1000): plt.figure(figsize=(12,12)) plt.imshow(array,clim=(min,max)) plt.title(title) plt.colorbar().set_label('DN/s') def show_mult(array1,array2,array3,title,min=0,max=1000): fig = plt.figure(figsize=(18,18)) a = fig.add_subplot(131) aplt = plt.imshow(array1,clim=(min,max)) b = fig.add_subplot(132) bplt = plt.imshow(array2,clim=(min,max)) plt.title(title) c = fig.add_subplot(133) cplt = plt.imshow(array3,clim=(min,max)) #plt.colorbar().set_label('DN/s') # + # In this case, the seed image is 4D rather than the # 2D that it is for sidereal targets. # So let's look at just the final frame of the seed image # The non-sidereal target is in the center of the frame and appears # as a normal PSF (although hard to see in this view). All of the # background stars and galaxies are # smeared, since the telescope was not tracking at the sidereal rate. show(cat.seedimage[0,-1,:,:],'Seed Image',max=250) # - # Look at the first, middle, and last frames of the seed image # so we can see the background sources moving relative to the target show_mult(cat.seedimage[0,0,:,:],cat.seedimage[0,3,:,:],cat.seedimage[0,-1,:,:],'Seed Images',max=250) # ## Prepare the dark current exposure # This will serve as the base of the simulated data. # This step will linearize the dark current (if it # is not already), and reorganize it into the # requested readout pattern and number of groups. 
d = dark_prep.DarkPrep() d.paramfile = yamlfile d.prepare() # ### Look at the dark current # For this, we will look at an image of the final group # minus the first group exptime = d.linDark.header['NGROUPS'] * cat.frametime diff = (d.linDark.data[0,-1,:,:] - d.linDark.data[0,0,:,:]) / exptime show(diff,'Dark Current Countrate',max=0.1) # ## Create the final exposure # Turn the seed image into a exposure of the # proper readout pattern, and combine it with the # dark current exposure. Cosmic rays and other detector # effects are added. # # The output can be either this linearized exposure, or # a 'raw' exposure where the linearized exposure is # "unlinearized" and the superbias and # reference pixel signals are added, or the user can # request both outputs. This is controlled from # within the yaml parameter file. obs = obs_generator.Observation() obs.linDark = d.prepDark obs.seed = cat.seedimage obs.segmap = cat.seed_segmap obs.seedheader = cat.seedinfo obs.paramfile = yamlfile obs.create() # ### Examine the final output image # Look at the last group minus the first group with fits.open(obs.linear_output) as h: lindata = h[1].data header = h[0].header # The central target is difficult to see in this full field view exptime = header['EFFINTTM'] diffdata = (lindata[0,-1,:,:] - lindata[0,0,:,:]) / exptime show(diffdata,'Simulated Data',min=0,max=20) # Zoom in on the center of the field of view, where the target of # interest lies. show(diffdata[800:1200,800:1200],'Center of FOV',min=0,max=20) # Show on a log scale, to bring out the presence of the dark current # Noise in the CDS image makes for a lot of pixels with values < 0, # which makes this kind of an ugly image. Add an offset so that # everything is positive and the noise is visible offset = 2. 
# Log-scale display; the offset added above keeps log10's argument positive.
plt.figure(figsize=(12,12))
plt.imshow(np.log10(diffdata[800:1200,800:1200]+offset),clim=(0.001,np.log10(80)))
plt.title('Simulated Data')
plt.colorbar().set_label('DN/s')

# ---
# <a id='run_steps_together'></a>
# # Running simulation steps together

# ## For convenience, combine the three steps into a single function.

# By having modular steps, the steps can be combined in various ways. For imaging data, including data with non-sidereal or moving targets, we will most likely want to run the three steps above in order for each target. For convenience, the imaging_simulator.py function wraps the three steps together.

from mirage import imaging_simulator

# First, run all steps of the imaging simulator for yaml file #1
m = imaging_simulator.ImgSim()
m.paramfile = 'movingtarget_example_data/moving_target_test.yaml'
m.create()

# If you have multiple exposures that will use the same dark current image (with the same readout pattern, subarray size, and number of groups), you can feed the output from the initial run of `dark_prep` into future runs of the `obs_generator`, to save time. This can be accomplished with the `imaging_simulator.py` code, as shown below.

# (Note that time savings are minimal in this case, where the readout pattern is RAPID and there are only a handful of groups. This means that no averaging/skipping of frames has to be done within `dark_prep.py`)

# Now that the linearized dark product has been created, if you want to use it
# when running the simulator with a different yaml file (or repeating the run
# with the same yaml file) you can provide the filename of the dark product, and the
# dark_prep step will be skipped.
# NOTE: if you use the same dark product for multiple exposures, those exposures
# will contain exactly the same dark signal. This may or may not be advisable, depending
# on your goals for the simulated data.
# Re-run the simulator, reusing the previously created linearized dark
# product (override_dark) so the dark_prep step is skipped.
m = imaging_simulator.ImgSim()
m.paramfile = 'movingtarget_example_data/moving_target_test.yaml'
m.override_dark = 'movingtarget_example_data/V12345024002P000000000112o_B5_F250M_movingtarget_uncal_linear_dark_prep_object.fits'
m.create()

# ---
# <a id='mult_sims'></a>
# ## Running Multiple Simulations

# ### Each yaml file, will simulate an exposure for a single pointing using a single detector.

# To simulate an exposure using multiple detectors, you must have multiple yaml files. Consider this cumbersome example:
# ```python
# yaml_a1 = 'sim_param_A1.yaml'
# yaml_a2 = 'sim_param_A2.yaml'
# yaml_a3 = 'sim_param_A3.yaml'
# yaml_a4 = 'sim_param_A4.yaml'
# yaml_a5 = 'sim_param_A5.yaml'
#
# make_sim(yaml_a1)
# make_sim(yaml_a2)
# make_sim(yaml_a3)
# make_sim(yaml_a4)
# make_sim(yaml_a5)
# ```
#
# This can be performed more efficiently, either in series or in parallel:
#
# ### In Series
# ```python
# paramlist = [yaml_a1,yaml_a2,yaml_a3,yaml_a4,yaml_a5]
#
# def many_sim(paramlist):
#     '''Function to run many simulations in series
#     '''
#     for file in paramlist:
#         m = imaging_simulator.ImgSim()
#         m.paramfile = file
#         m.create()
# ```
#
# ### In Parallel
#
# Since each `yaml` simulations does not depend on the others, we can parallelize the process to speed things up:
# ```python
# # Need to test this. May need a wrapper since the
# # imaging simulator is a class
#
# from multiprocessing import Pool
#
# n_procs = 5 # number of cores available
#
# with Pool(n_procs) as pool:
#     pool.map(make_sim, paramlist)
# ```

# ---
# <a id='make_yaml'></a>
# ## Generating input yaml files

# For convenience, observing programs with multiple pointings
# and detectors can be simulated starting with the program's
# APT file. The xml and pointings files must be exported from
# APT, and are then used as input into a tool that will
# generate a series of yaml input files.

from mirage.apt import apt_inputs
from mirage.yaml import yaml_generator

# +
# ## Only works for normal imaging, right?
Not yet modified for moving targets

# # Create a series of data simulator input yaml files
# # from APT files
# yam = yaml_generator.SimInput()
# yam.input_xml = 'example_imaging_program.xml'
# yam.pointing_file = 'example_imaging_program.pointing'
# yam.siaf = '$MIRAGE_DATA/nircam/reference_files/SIAF/NIRCam_SIAF_2018-01-08.csv'
# yam.output_dir = './'
# yam.simdata_output_dir = './'
# yam.observation_table = 'observation_list.yaml'
# yam.use_JWST_pipeline = True
# yam.use_linearized_darks = False
# yam.datatype = 'linear'
# yam.reffile_setup(instrument='nircam')
# yam.create_inputs()

# +
# yfiles = glob(os.path.join(yam.output_dir,'V*yaml'))

# +
# m = imaging_simulator.ImgSim()
# m.paramfile = yfiles[0]
# m.create()
# -

# ---
# <a id='yaml_example'></a>
# ## Example yaml input file
#
# Entries listed as 'config' have default files that are present in the
# config directory of the repository. The scripts are set up to
# automatically find and use these files. The user can replace 'config'
# with a filename if they wish to override the default.
#
# In general, if 'None' is placed in a field, then the step that uses
# that particular file will be skipped.
#
# Note that the linearized_darkfile entry overrides the dark entry, unless
# linearized_darkfile is set to None, in which case the dark entry will be
# used.
#
# Use of a valid readout pattern in the readpatt entry will cause the
# simulator to look up the values of nframe and nskip and ignore the
# values given in the yaml file.
# ```yaml
# Inst:
#   instrument: NIRCam #Instrument name
#   mode: imaging #Observation mode (e.g.
imaging, WFSS, moving_target) # use_JWST_pipeline: False #Use pipeline in data transformations # # Readout: # readpatt: RAPID #Readout pattern (RAPID, BRIGHT2, etc) overrides nframe,nskip unless it is not recognized # nframe: 1 #Number of frames per group # nint: 1 #Number of integrations per exposure # resets_bet_ints: 1 #Number of detector resets between integrations # array_name: NRCB5_FULL #Name of array (FULL, SUB160, SUB64P, etc) # filter: F250M #Filter of simulated data (F090W, F322W2, etc) # pupil: CLEAR #Pupil element for simulated data (CLEAR, GRISMC, etc) # # Reffiles: #Set to None or leave blank if you wish to skip that step # dark: None #Dark current integration used as the base # linearized_darkfile: $MIRAGE_DATA/nircam/darks/linearized/B5/Linearized_Dark_and_SBRefpix_NRCNRCBLONG-DARK-60090141241_1_490_SE_2016-01-09T02h46m50_uncal.fits # Linearized dark ramp to use as input. Supercedes dark above # badpixmask: $MIRAGE_DATA/nircam/reference_files/badpix/NRCB5_17161_BPM_ISIMCV3_2016-01-21_ssbspmask_DMSorient.fits # If linearized dark is used, populate output DQ extensions using this file # superbias: $MIRAGE_DATA/nircam/reference_files/superbias/NRCB5_superbias_from_list_of_biasfiles.list.fits #Superbias file. 
Set to None or leave blank if not using # linearity: $MIRAGE_DATA/nircam/reference_files/linearity/NRCBLONG_17161_LinearityCoeff_ADU0_2016-05-22_ssblinearity_v2_DMSorient.fits #linearity correction coefficients # saturation: $MIRAGE_DATA/nircam/reference_files/saturation/NRCB5_17161_WellDepthADU_2016-03-10_ssbsaturation_wfact_DMSorient.fits #well depth reference files # gain: $MIRAGE_DATA/nircam/reference_files/gain/NRCB5_17161_Gain_ISIMCV3_2016-02-25_ssbgain_DMSorient.fits #Gain map # pixelflat: None # illumflat: None #Illumination flat field file # astrometric: $MIRAGE_DATA/nircam/reference_files/distortion/NRCB5_FULL_distortion.asdf #Astrometric distortion file (asdf) # distortion_coeffs: $MIRAGE_DATA/nircam/reference_files/SIAF/NIRCam_SIAF_2017-03-28.csv #CSV file containing distortion coefficients # ipc: $MIRAGE_DATA/nircam/reference_files/ipc/NRCB5_17161_IPCDeconvolutionKernel_2016-03-18_ssbipc_DMSorient.fits #File containing IPC kernel to apply # invertIPC: True #Invert the IPC kernel before the convolution. True or False. Use True if the kernel is designed for the removal of IPC effects, like the JWST reference files are. # occult: None #Occulting spots correction image # pixelAreaMap: $MIRAGE_DATA/nircam/reference_files/pam/NIRCam_B5_PAM_imaging.fits #Pixel area map for the detector. Used to introduce distortion into the output ramp. # subarray_defs: config #File that contains a list of all possible subarray names and coordinates # readpattdefs: config #File that contains a list of all possible readout pattern names and associated NFRAME/NSKIP values # crosstalk: config #File containing crosstalk coefficients # filtpupilcombo: config #File that lists the filter wheel element / pupil wheel element combinations. Used only in writing output file # flux_cal: config #File that lists flux conversion factor and pivot wavelength for each filter. Only used when making direct image outputs to be fed into the grism disperser code. 
#
# nonlin:
#   limit: 60000.0 #Upper signal limit to which nonlinearity is applied (ADU)
#   accuracy: 0.000001 #Non-linearity accuracy threshold
#   maxiter: 10 #Maximum number of iterations to use when applying non-linearity
#   robberto: False #Use Massimo Robberto type non-linearity coefficients
#
# cosmicRay:
#   path: $MIRAGE_DATA/nircam/cosmic_ray_library/ #Path to CR library
#   library: SUNMIN #Type of cosmic ray environment (SUNMAX, SUNMIN, FLARE)
#   scale: 1.5 #Cosmic ray scaling factor
#   suffix: IPC_NIRCam_B5 #Suffix of library file names
#   seed: 2956411739 #Seed for random number generator
#
# simSignals:
#   pointsource: my_ptsrc_catalog.list #File containing a list of point sources to add (x,y locations and magnitudes)
#   psfpath: $MIRAGE_DATA/nircam/psf_data/ #Path to PSF library
#   psfbasename: nircam #Basename of the files in the psf library
#   psfpixfrac: 0.25 #Fraction of a pixel between entries in PSF library (e.g. 0.1 = files for PSF centered at 0.25 pixel intervals within pixel)
#   psfwfe: predicted #PSF WFE value (predicted, requirements)
#   psfwfegroup: 0 #WFE realization group (0 to 9)
#   galaxyListFile: my_galaxies_catalog.list
#   extended: None #Extended emission count rate image file name
#   extendedscale: 1.0 #Scaling factor for extended emission image
#   extendedCenter: 1024,1024 #x,y pixel location at which to place the extended image if it is smaller than the output array size
#   PSFConvolveExtended: True #Convolve the extended image with the PSF before adding to the output image (True or False)
#   movingTargetList: None #Name of file containing a list of point source moving targets (e.g. KBOs, asteroids) to add.
#   movingTargetSersic: None #ascii file containing a list of 2D sersic profiles to have moving through the field
#   movingTargetExtended: None #ascii file containing a list of stamp images to add as moving targets (planets, moons, etc)
#   movingTargetConvolveExtended: True #convolve the extended moving targets with PSF before adding.
# movingTargetToTrack: my_nonsidereal_target.cat #File containing a single moving target which JWST will track during observation (e.g. a planet, moon, KBO, asteroid) This file will only be used if mode is set to "moving_target" # zodiacal: None #Zodiacal light count rate image file # zodiscale: 1.0 #Zodi scaling factor # scattered: None #Scattered light count rate image file # scatteredscale: 1.0 #Scattered light scaling factor # bkgdrate: 0.0 #Constant background count rate (electrons/sec/pixel) # poissonseed: 2012872553 #Random number generator seed for Poisson simulation) # photonyield: True #Apply photon yield in simulation # pymethod: True #Use double Poisson simulation for photon yield # # Telescope: # ra: 53.1 #RA of simulated pointing # dec: -27.8 #Dec of simulated pointing # rotation: 0.0 #y axis rotation (degrees E of N) # tracking: non-sidereal #sidereal or non-sidereal # # newRamp: # dq_configfile: config #config file used by JWST pipeline # sat_configfile: config #config file used by JWST pipeline # superbias_configfile: config #config file used by JWST pipeline # refpix_configfile: config #config file used by JWST pipeline # linear_configfile: config #config file used by JWST pipeline # # Output: # file: V42424024002P000000000112o_B5_F250M_uncal.fits #Output filename # directory: ./ # Directory in which to place output files # datatype: linear,raw # Type of data to save. 'linear' for linearized ramp. 'raw' for raw ramp. 'linear,raw' for both # format: DMS #Output file format Options: DMS, SSR(not yet implemented) # save_intermediates: False #Save intermediate products separately (point source image, etc) # grism_source_image: False # Create an image to be dispersed? # unsigned: True #Output unsigned integers? (0-65535 if true. -32768 to 32768 if false) # dmsOrient: True #Output in DMS orientation (vs. fitswriter orientation). 
# program_number: 42424 #Program Number # title: Supernovae and Black Holes Near Hyperspatial Bypasses #Program title # PI_Name: <NAME> #Proposal PI Name # Proposal_category: GO #Proposal category # Science_category: Cosmology #Science category # observation_number: '002' #Observation Number # observation_label: Obs2 #User-generated observation Label # visit_number: '024' #Visit Number # visit_group: '01' #Visit Group # visit_id: '42424024002' #Visit ID # sequence_id: '2' #Sequence ID # activity_id: '2o' #Activity ID. Increment with each exposure. # exposure_number: '00001' #Exposure Number # obs_id: 'V42424024002P000000000112o' #Observation ID number # date_obs: '2019-10-15' #Date of observation # time_obs: '06:29:11.852' #Time of observation # obs_template: 'NIRCam Imaging' #Observation template # primary_dither_type: NONE #Primary dither pattern name # total_primary_dither_positions: 1 #Total number of primary dither positions # primary_dither_position: 1 #Primary dither position number # subpix_dither_type: 2-POINT-MEDIUM-WITH-NIRISS #Subpixel dither pattern name # total_subpix_dither_positions: 2 #Total number of subpixel dither positions # subpix_dither_position: 2 #Subpixel dither position number # xoffset: 344.284 #Dither pointing offset in x (arcsec) # yoffset: 466.768 #Dither pointing offset in y (arcsec) # ```
examples/MovingTarget_simulator_use_examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from sympy.physics.mechanics import * import sympy as sp mechanics_printing(pretty_print=True) m1, m2, m3, m4, l1, l2, l3, l4 = sp.symbols(r'm_1 m_2 m_3 m_4 l_1 l_2 l_3 l_4') t, g, h = sp.symbols('t g h') v1, v2, v3, v4 = dynamicsymbols(r'\theta_1 \theta_2 \theta_3 \theta_4') dv1, dv2, dv3, dv4 = dynamicsymbols(r'\theta_1 \theta_2 \theta_3 \theta_4', 1) ddv1, ddv2, ddv3, ddv4 = dynamicsymbols(r'\theta_1 \theta_2 \theta_3 \theta_4', 2) # + x1 = l1 * sp.sin(v1) y1 = -l1 * sp.cos(v1) x2 = x1 + l2 * sp.sin(v2) y2 = y1 + -l2 * sp.cos(v2) x3 = x2 + l3 * sp.sin(v3) y3 = y2 + -l3 * sp.cos(v3) x4 = x3 + l4 * sp.sin(v4) y4 = y3 + -l4 * sp.cos(v4) dx1 = x1.diff(t) dy1 = y1.diff(t) dx2 = x2.diff(t) dy2 = y2.diff(t) dx3 = x3.diff(t) dy3 = y3.diff(t) dx4 = x4.diff(t) dy4 = y4.diff(t) # - V = (m1 * g * y1) + (m2 * g * y2) + (m3 * g * y3) + (m4 * g * y4) T = (sp.Rational(1, 2) * m1 * (dx1**2 + dy1**2)) + (sp.Rational(1, 2) * m2 * (dx2**2+dy2**2)) + (sp.Rational(1, 2) * m3 * (dx3**2 + dy3**2)) + (sp.Rational(1, 2) * m4 * (dx4**2 + dy4**2)) L = T - V LM = LagrangesMethod(L, [v1, v2, v3, v4]) soln = LM.form_lagranges_equations() sp.latex(soln) soln1 = sp.simplify(soln[0]) soln2 = sp.simplify(soln[1]) soln3 = sp.simplify(soln[2]) soln4 = sp.simplify(soln[3]) solution = sp.solve([soln1, soln2, soln3, soln4], (ddv1, ddv2, ddv3, ddv4)) sp.latex(solution)
Pendula/Simple/4Pendulum/QuadruplePendulum.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- #import libraries import pandas as pd import numpy as np import seaborn as sns import scipy # # Loading and exploring data #retrieving the data dataset = pd.read_csv("lalonde.csv") dataset # pd.set_option('display.max_columns', None) dataset.groupby('treat').mean() # # T-testing continuous variables # get a list of all continuous variables' names continuous_confounders = ["age", "educ", "re74", "re75"] # create an empty dictionary t_test_results = {} # loop over column_list and execute code explained above for variable in continuous_confounders: group1 = dataset.where(dataset.treat==0).dropna()[variable] group2 = dataset.where(dataset.treat==1).dropna()[variable] t_test_results[variable] = scipy.stats.ttest_ind(group1,group2) results = pd.DataFrame.from_dict(t_test_results,orient='Index') results.columns = ['statistic','pvalue'] results # # Logistic Regression #isolating treatment and confounders ("slicing" dataframe) treat = dataset.iloc[:, 0] confounders= dataset.iloc[:, 1:-1] confounders # + tags=[] #logistic regression import statsmodels.api as sm confounders = sm.add_constant(confounders) # This library needs you to add the intercept # - confounders # + tags=[] propensity_model = sm.Logit(treat, confounders).fit() propensity_model.summary() # - # # Predicting PS #predicting the propensity of being treated propensity_score = propensity_model.predict(confounders) propensity_score #create dataframe with treated and propensities propensity_dataframe = np.vstack([treat, propensity_score]) propensity_dataframe = np.transpose(propensity_dataframe) propensity_dataframe # # # # Splitting data into treated vs. 
non-treated # + tags=[] #finish preparations for common support region # Gets an array with true for untreated else false non_treated = propensity_dataframe[:,0] == 0 # Subsets for untreated: non_treated = propensity_dataframe[non_treated] # Keeps only propensity column (gets rid of treated column) non_treated = non_treated[:, 1] # Same as above, but for treated. treated = propensity_dataframe[:,0] == 1 treated = propensity_dataframe[treated] treated = treated[:, 1] # - # # Visualizing groups #Common support region plot_non_treated = sns.kdeplot(non_treated, shade = True, color = "r") plot_treated = sns.kdeplot(treated, shade = True, color = "b") # Our interest here is matching the overlapping regions. # # PSM # + #isolating Y, treat and confounders # ".values" will return the array of values instead of a dataframe treat = dataset.iloc[:, 0].values confounders= dataset.iloc[:, 1:-1].values Y = dataset.iloc[:, -1].values # + #import causal inference library #pip install CausalInference from causalinference import CausalModel propensity_model = CausalModel(Y, treat, confounders) propensity_model.est_propensity_s() propensity_model.est_via_matching(bias_adj = True) # - print(propensity_model.estimates) print(propensity_model.propensity)
4. Propensity Score Matching/Py-PSM_lalonde.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# __CCD Data__
#
# In this example, we'll use the 'ccd' module to 'simulate' ccd imaging of a strong lens made using a tracer. By simulate, we mean that it will appear as if we had observed it using a real telescope, with this example making an image representative of Hubble Space Telescope imaging.

# +
# %matplotlib inline
from autolens.data import ccd
from autolens.data.array import grids
from autolens.lens import ray_tracing
from autolens.model.galaxy import galaxy as g
from autolens.model.profiles import light_profiles as lp
from autolens.model.profiles import mass_profiles as mp
from autolens.lens.plotters import ray_tracing_plotters
from autolens.data.plotters import ccd_plotters
# -

# To simulate an image, we need to model the telescope's optics. We'll do this by convolving the image with a Point-Spread Function, which we can simulate as a Gaussian using the imaging module.

psf = ccd.PSF.simulate_as_gaussian(shape=(11, 11), sigma=0.1, pixel_scale=0.1)

# To simulate ccd data, we use a special type of grid. This grid pads its 2D dimensions relative to the PSF-shape, to ensure that the edge's of our simulated image are not degraded.

image_plane_grid_stack = grids.GridStack.grid_stack_for_simulation(shape=(100, 100), pixel_scale=0.1,
                                                                   psf_shape=psf.shape)
print(image_plane_grid_stack.regular.image_shape)
print(image_plane_grid_stack.regular.padded_shape)

# Now, lets setup our lens galaxy, source galaxy and tracer.

lens_galaxy = g.Galaxy(mass=mp.EllipticalIsothermal(centre=(0.0, 0.0), einstein_radius=1.6, axis_ratio=0.7,
                                                    phi=45.0))
source_galaxy = g.Galaxy(light=lp.EllipticalSersic(centre=(0.1, 0.1), axis_ratio=0.8, phi=45.0, intensity=1.0,
                                                   effective_radius=1.0, sersic_index=2.5))
tracer = ray_tracing.TracerImageSourcePlanes(lens_galaxies=[lens_galaxy], source_galaxies=[source_galaxy],
                                             image_plane_grid_stack=image_plane_grid_stack)

# Lets look at the tracer's image-plane image - this is the image we'll be simulating.

ray_tracing_plotters.plot_image_plane_image(tracer=tracer)

# To simulate the image, we don't use the image-plane image plotted above. Instead, we use an image-plane image which has been generated specifically for simulating an image, using the padded grid above. This ensures edge-effects do not degrade our simulation's PSF convolution.

print(tracer.image_plane_image.shape)
print(tracer.image_plane_image_for_simulation.shape)

# Now, to simulate the ccd imaging data, we pass the tracer's image-plane image to the ccd module's simulate function. This adds the following effects to the image:
#
# 1) Telescope optics: Using the Point Spread Function above.
#
# 2) The Background Sky: Although the image that is returned is automatically background sky subtracted.
#
# 3) Poisson noise: Due to the background sky, lens galaxy and source galaxy Poisson photon counts.

simulated_ccd = ccd.CCDData.simulate(array=tracer.image_plane_image_for_simulation, pixel_scale=0.1,
                                     exposure_time=300.0, psf=psf, background_sky_level=0.1, add_noise=True)

# Lets plot the image - we can see the image has been blurred due to the telescope optics and noise has been added.

ccd_plotters.plot_image(ccd_data=simulated_ccd)

# Finally, lets output these files to.fits files, we'll begin to analyze them in the next tutorial!

# +
path = '/path/to/AutoLens/workspace/howtolens/chapter_1_introduction'

# If you are using Docker, the path you should use to output these images is (e.g. comment out this line)
# path = '/home/user/workspace/howtolens/chapter_1_introduction'

# If you arn't using docker, you need to change the path below to the chapter 2 directory and uncomment it
# path = '/path/to/user/workspace/howtolens/chapter_1_introduction'

ccd.output_ccd_data_to_fits(ccd_data=simulated_ccd, image_path=path+'/data/image.fits',
                            noise_map_path=path+'/data/noise_map.fits',
                            psf_path=path+'/data/psf.fits', overwrite=True)
workspace/howtolens/chapter_1_introduction/tutorial_7_imaging.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Inference

# In a given city, the city hall claimed that resident families have, on
# average, 2 school-age children. We collected data from 50 households,
# spread evenly across the city.

install.packages('moments',repos='http://cran.us.r-project.org')
library('moments')

# Number of children observed in each of the 50 households.
filhos <- c(3, 2, 2, 2, 1, 2, 2, 2, 2, 3, 1, 2, 1, 2, 2, 0, 1, 2, 2, 1, 2, 1, 1, 2, 2, 3, 2, 1, 2, 3, 4, 1, 1, 2, 1, 1, 1, 2, 3, 0, 2, 2, 1, 2, 3, 3, 2, 2, 3, 2)

# Sample mean, standard deviation, skewness and kurtosis.
media = mean(filhos)
desvio = sd(filhos)
print(paste('Média:',media))
print(paste('Desvio:',desvio))
print(paste('Assimetria:',skewness(filhos)))
print(paste('Curtose:',kurtosis(filhos)))

print(summary(filhos))

# ## Computing z-scores

# Two-sided 95% interval on the standard normal.
z1 <-qnorm(0.025)
z2 <- qnorm(0.975)
print(paste('Zscore esquerdo:',z1))
print(paste('Zscore direito:',z2))

# +
# Plot the standard normal distribution and the confidence interval:
x <- seq(-3,3,length=500)
y <- dnorm(x,mean=0, sd=1)
plot(x,y, type="l", lwd=2, main = 'Distribuição normal padrão')

# Mark the 95% confidence-interval bounds:
lines(c(0,0),c(0,dnorm(0)))
lines(c(z1,z1),c(0,dnorm(z1)))
lines(c(z2,z2),c(0,dnorm(z2)))
# -

# Compute the margin of error:
E <- z2 * (desvio / sqrt(length(filhos)))
print(paste('Margem de erro:',E))

minf <- media - E
msup <- media + E
print(sprintf('A média de filhos está entre: %f e %f com 95%% de confiança',minf,msup))

# +
# Now with Student's t distribution:
graus = length(filhos) - 1 # Degrees of freedom
print(paste('Graus de liberdade:',graus))

# Plot the Student's t distribution and the confidence interval:
x <- seq(-3,3,length=500)
y <- dt(x,df=graus)
plot(x,y, type="l", lwd=2, main = 'Distribuição T de Student')

t1 <-qt(0.025,df=graus)
t2 <- qt(0.975,df=graus)
print(t1)
lines(c(0,0),c(0,dt(0,df=graus)))
lines(c(t1,t1),c(0,dt(t1,df=graus)))
lines(c(t2,t2),c(0,dt(t2,df=graus)))

E2 <- t2 * (desvio / sqrt(length(filhos)))
print(paste('Margem de erro:',E2))

minf2 <- media - E2
msup2 <- media + E2
print(sprintf('A média de filhos está entre: %f e %f com 95%% de confiança',minf2,msup2))
# -

# ## Hypothesis tests

# +
# Hypothesis test
lote <- c(58.5, 60.1, 60.02, 57.4, 60.3, 55.4, 58.2, 59.8, 54.3, 60.4, 60.7, 60.1, 55.6, 57.1, 60.0, 60.7, 60.3, 56.7, 57.9, 59.01)
print(summary(lote))
# -

print(paste('Desvio padrão:',sd(lote)))
print(paste('Tamanho da amostra:',length(lote)))

graus = length(lote) - 1 # Degrees of freedom
print(paste('Graus de liberdade:',graus))

tq1 <-qt(0.05,df=graus) # left one-tailed test, hence 0.05 for 95% confidence
print(paste('t-crítico',tq1))

x <- seq(-3,3,length=500)
y <- dt(x,df=graus)
plot(x,y, type="l", lwd=2, main = 'Distribuição T de Student - queijos')
lines(c(0,0),c(0,dt(0,df=graus)))
lines(c(tq1,tq1),c(0,dt(tq1,df=graus)))

# Compute the t-score of our sample:
ta <- (mean(lote) - 60) / (sd(lote) / sqrt(length(lote)))
print(paste('T crítico:',ta))

if (ta < tq1) {
    print('Rejeitamos a hipótese nula')
} else {
    print('Não rejeitamos a hipótese nula')
}

pvalue <- pt(ta, 19)
print(paste('p-value:',pvalue))

# Cross-check with R's built-in one-sided t-test.
t.test(lote,alternative = "less", mu=60, conf.level = 0.95)

# ## Two-tailed test

amostra <- c(95.88,101.2,102.04,100.1,98.7,96.18,97.53,100.79, 98.52,100.08,100.45,99.19,99.91,101.01,98.78,101.02,98.78, 100.18,100.94,97.12)
head(amostra)

mediah0 <- 100 # null-hypothesis mean
media <- mean(amostra)
desvio <- sd(amostra)
n <- length(amostra)
gl = n - 1
print(paste('média',media,'desvio',desvio,'n',n,'gl',gl))

# Two-sided critical values at alpha = 0.05.
tc <-qt(0.025,df=gl)
print(paste('T-scores críticos:',tc,-tc))

t_observado <- (media - mediah0) / (desvio / sqrt(n))
print(paste('t_observado',t_observado))

# Now we can generate the plot
tc1 <- tc
tc2 <- -tc
x <- seq(-3,3,length=500)
y <- dt(x,df=gl)
plot(x,y, type="l", lwd=2, main = 'Distribuição T de Student - bilateral')
lines(c(0,0),c(0,dt(0,df=gl)))
lines(c(tc1,tc1),c(0,dt(tc1,df=gl)), lty='dashed', lwd=2)
lines(c(tc2,tc2),c(0,dt(tc2,df=gl)), lty='dashed', lwd=2)
lines(c(t_observado,t_observado),c(0,dt(t_observado,df=gl)), lwd=2)

print(paste('alfa:',dt(tc1,df=gl)))
print(paste('valor-p:',dt(t_observado,df=gl)))

# Cross-check with R's built-in two-sided t-test.
t.test(amostra, mu=100, conf.level = 0.95)
book-R/inferenciaR.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + _kg_hide-output=true
import warnings
warnings.filterwarnings('ignore')
import os, gc
import cudf
import pandas as pd
import numpy as np
import cupy as cp
import janestreet
import xgboost as xgb
from hyperopt import hp, fmin, tpe, Trials
from hyperopt.pyll.base import scope
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import GroupKFold
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
from joblib import dump, load
import tensorflow as tf
tf.random.set_seed(2212)
import tensorflow.keras.backend as K
import tensorflow.keras.layers as layers
from tensorflow.keras.callbacks import Callback, ReduceLROnPlateau, ModelCheckpoint, EarlyStopping

TEST = True
# -

# weighted average as per Donate et al.'s formula
# https://doi.org/10.1016/j.neucom.2012.02.053
# [0.0625, 0.0625, 0.125, 0.25, 0.5] for 5 fold
def weighted_average(a):
    """Geometrically-decaying weighted mean of per-fold scores.

    Later folds get exponentially larger weights; the first two folds share
    the smallest weight because j is clamped to 2 when j == 1.
    """
    w = []
    n = len(a)
    for j in range(1, n + 1):
        j = 2 if j == 1 else j
        w.append(1 / (2**(n + 1 - j)))
    return np.average(a, weights = w)

# + _kg_hide-input=true
from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples
from sklearn.utils.validation import _deprecate_positional_args

# https://github.com/getgaurav2/scikit-learn/blob/d4a3af5cc9da3a76f0266932644b884c99724c57/sklearn/model_selection/_split.py#L2243
class GroupTimeSeriesSplit(_BaseKFold):
    """Time Series cross-validator variant with non-overlapping groups.
    Provides train/test indices to split time series data samples
    that are observed at fixed time intervals according to a
    third-party provided group.
    In each split, test indices must be higher than before, and thus shuffling
    in cross validator is inappropriate.
    This cross-validation object is a variation of :class:`KFold`.
    In the kth split, it returns first k folds as train set and the
    (k+1)th fold as test set.
    The same group will not appear in two different folds (the number of
    distinct groups has to be at least equal to the number of folds).
    Note that unlike standard cross-validation methods, successive
    training sets are supersets of those that come before them.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    n_splits : int, default=5
        Number of splits. Must be at least 2.
    max_train_size : int, default=None
        Maximum size for a single training set.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import GroupTimeSeriesSplit
    >>> groups = np.array(['a', 'a', 'a', 'a', 'a', 'a',\
                           'b', 'b', 'b', 'b', 'b',\
                           'c', 'c', 'c', 'c',\
                           'd', 'd', 'd'])
    >>> gtss = GroupTimeSeriesSplit(n_splits=3)
    >>> for train_idx, test_idx in gtss.split(groups, groups=groups):
    ...     print("TRAIN:", train_idx, "TEST:", test_idx)
    ...     print("TRAIN GROUP:", groups[train_idx],\
                  "TEST GROUP:", groups[test_idx])
    TRAIN: [0, 1, 2, 3, 4, 5] TEST: [6, 7, 8, 9, 10]
    TRAIN GROUP: ['a' 'a' 'a' 'a' 'a' 'a']\
    TEST GROUP: ['b' 'b' 'b' 'b' 'b']
    TRAIN: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\
    TEST: [11, 12, 13, 14]
    TRAIN GROUP: ['a' 'a' 'a' 'a' 'a' 'a' 'b' 'b' 'b' 'b' 'b']\
    TEST GROUP: ['c' 'c' 'c' 'c']
    TRAIN: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]\
    TEST: [15, 16, 17]
    TRAIN GROUP: ['a' 'a' 'a' 'a' 'a' 'a' 'b' 'b' 'b' 'b' 'b' 'c' 'c' 'c' 'c']\
    TEST GROUP: ['d' 'd' 'd']
    """
    @_deprecate_positional_args
    def __init__(self,
                 n_splits=5,
                 *,
                 max_train_size=None
                 ):
        super().__init__(n_splits, shuffle=False, random_state=None)
        self.max_train_size = max_train_size

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like of shape (n_samples,)
            Always ignored, exists for compatibility.
        groups : array-like of shape (n_samples,)
            Group labels for the samples used while splitting the dataset into
            train/test set.
        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        if groups is None:
            raise ValueError(
                "The 'groups' parameter should not be None")
        X, y, groups = indexable(X, y, groups)
        n_samples = _num_samples(X)
        n_splits = self.n_splits
        n_folds = n_splits + 1
        group_dict = {}
        # Distinct group labels in order of first appearance (time order).
        u, ind = np.unique(groups, return_index=True)
        unique_groups = u[np.argsort(ind)]
        # NOTE(review): n_samples was already computed above; this second
        # assignment is redundant but harmless.
        n_samples = _num_samples(X)
        n_groups = _num_samples(unique_groups)
        # Map each group label to the list of row indices belonging to it.
        for idx in np.arange(n_samples):
            if (groups[idx] in group_dict):
                group_dict[groups[idx]].append(idx)
            else:
                group_dict[groups[idx]] = [idx]
        if n_folds > n_groups:
            raise ValueError(
                ("Cannot have number of folds={0} greater than"
                 " the number of groups={1}").format(n_folds, n_groups))
        group_test_size = n_groups // n_folds
        group_test_starts = range(n_groups - n_splits * group_test_size,
                                  n_groups, group_test_size)
        for group_test_start in group_test_starts:
            train_array = []
            test_array = []
            # Training set: all samples from every group before the test block.
            for train_group_idx in unique_groups[:group_test_start]:
                train_array_tmp = group_dict[train_group_idx]
                train_array = np.sort(np.unique(
                    np.concatenate((train_array, train_array_tmp)),
                    axis=None), axis=None)
            train_end = train_array.size
            # Optionally cap the training window to the most recent samples.
            if self.max_train_size and self.max_train_size < train_end:
                train_array = train_array[train_end -
                                          self.max_train_size:train_end]
            # Test set: samples from the next group_test_size groups.
            for test_group_idx in unique_groups[group_test_start:
                                                group_test_start +
                                                group_test_size]:
                test_array_tmp = group_dict[test_group_idx]
                test_array = np.sort(np.unique(
                    np.concatenate((test_array, test_array_tmp)),
                    axis=None), axis=None)
            yield [int(i) for i in train_array], [int(i) for i in test_array]

import numpy as np
from sklearn.model_selection import KFold
from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples
from
sklearn.utils.validation import _deprecate_positional_args


# modified code for group gaps; source
# https://github.com/getgaurav2/scikit-learn/blob/d4a3af5cc9da3a76f0266932644b884c99724c57/sklearn/model_selection/_split.py#L2243
class PurgedGroupTimeSeriesSplit(_BaseKFold):
    """Time Series cross-validator variant with non-overlapping groups.
    Allows for a gap in groups to avoid potentially leaking info from
    train into test if the model has windowed or lag features.
    Provides train/test indices to split time series data samples
    that are observed at fixed time intervals according to a
    third-party provided group.
    In each split, test indices must be higher than before, and thus shuffling
    in cross validator is inappropriate.
    This cross-validation object is a variation of :class:`KFold`.
    In the kth split, it returns first k folds as train set and the
    (k+1)th fold as test set.
    The same group will not appear in two different folds (the number of
    distinct groups has to be at least equal to the number of folds).
    Note that unlike standard cross-validation methods, successive
    training sets are supersets of those that come before them.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    n_splits : int, default=5
        Number of splits. Must be at least 2.
    max_train_group_size : int, default=Inf
        Maximum group size for a single training set.
    group_gap : int, default=None
        Gap between train and test.
        NOTE(review): split() performs arithmetic and slicing with this value,
        so a caller must pass an int — the default of None raises a TypeError
        (the training cell below passes group_gap=31).
    max_test_group_size : int, default=Inf
        We discard this number of groups from the end of each train split
    """

    @_deprecate_positional_args
    def __init__(self,
                 n_splits=5,
                 *,
                 max_train_group_size=np.inf,
                 max_test_group_size=np.inf,
                 group_gap=None,
                 verbose=False
                 ):
        super().__init__(n_splits, shuffle=False, random_state=None)
        self.max_train_group_size = max_train_group_size
        self.group_gap = group_gap
        self.max_test_group_size = max_test_group_size
        self.verbose = verbose

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like of shape (n_samples,)
            Always ignored, exists for compatibility.
        groups : array-like of shape (n_samples,)
            Group labels for the samples used while splitting the dataset into
            train/test set.
        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        if groups is None:
            raise ValueError(
                "The 'groups' parameter should not be None")
        X, y, groups = indexable(X, y, groups)
        n_samples = _num_samples(X)
        n_splits = self.n_splits
        group_gap = self.group_gap
        max_test_group_size = self.max_test_group_size
        max_train_group_size = self.max_train_group_size
        n_folds = n_splits + 1
        group_dict = {}
        # unique_groups keeps the groups in order of first appearance (not
        # sorted order), which is what makes this a *time-series* split.
        u, ind = np.unique(groups, return_index=True)
        unique_groups = u[np.argsort(ind)]
        n_samples = _num_samples(X)
        n_groups = _num_samples(unique_groups)
        # Map each group label to the list of row indices belonging to it.
        for idx in np.arange(n_samples):
            if (groups[idx] in group_dict):
                group_dict[groups[idx]].append(idx)
            else:
                group_dict[groups[idx]] = [idx]
        if n_folds > n_groups:
            raise ValueError(
                ("Cannot have number of folds={0} greater than"
                 " the number of groups={1}").format(n_folds,
                                                     n_groups))
        group_test_size = min(n_groups // n_folds, max_test_group_size)
        group_test_starts = range(n_groups - n_splits * group_test_size,
                                  n_groups, group_test_size)
        for group_test_start in group_test_starts:
            train_array = []
            test_array = []
            # Train window: at most max_train_group_size groups, ending
            # group_gap groups before the test window starts.
            group_st = max(0, group_test_start - group_gap - max_train_group_size)
            for train_group_idx in unique_groups[group_st:(group_test_start - group_gap)]:
                train_array_tmp = group_dict[train_group_idx]
                train_array = np.sort(np.unique(
                    np.concatenate((train_array, train_array_tmp)),
                    axis=None), axis=None)
            # NOTE(review): train_end is computed but never used — presumably
            # left over from the max_train_size logic of the non-purged variant.
            train_end = train_array.size
            for test_group_idx in unique_groups[group_test_start:
                                                group_test_start +
                                                group_test_size]:
                test_array_tmp = group_dict[test_group_idx]
                test_array = np.sort(np.unique(
                    np.concatenate((test_array, test_array_tmp)),
                    axis=None), axis=None)
            # NOTE(review): this drops the first `group_gap` *rows* of the test
            # fold, not `group_gap` groups — confirm this purging is intended.
            test_array = test_array[group_gap:]
            if self.verbose > 0:
                pass
            yield [int(i) for i in train_array], [int(i) for i in test_array]
# -

# # Preprocessing

# +
# Load the competition data with cudf (GPU dataframe), then move to pandas.
# Rows from the first 85 days and zero-weight rows are discarded; missing
# features are forward-filled, then zero-filled.
print('Loading...')
train = cudf.read_csv('/kaggle/input/jane-street-market-prediction/train.csv')
features = [c for c in train.columns if 'feature' in c]
# features.remove('feature_43')
print('Filling...')
train1 = train.to_pandas()
train = train1.query('date > 85').reset_index(drop = True)
train = train.query('weight > 0').reset_index(drop = True)
train[features] = train[features].fillna(method = 'ffill').fillna(0)
# 'action' is 1 only when every resp horizon is positive.
train['action'] = ((train['resp_1'] > 0) & (train['resp_2'] > 0) & (train['resp_3'] > 0) & (train['resp_4'] > 0) & (train['resp'] > 0)).astype('int')
resp_cols = ['resp', 'resp_1', 'resp_2', 'resp_3', 'resp_4']
X = train[features].values
# One binary label per resp horizon (5 labels per sample).
y = np.stack([(train[c] > 0).astype('int') for c in resp_cols]).T
# Sample weight: mean absolute resp across horizons.
sw = np.mean(np.abs(train[resp_cols].values), axis = 1)
# -

n_splits = 5
group_gap = 31

# # Training

def create_ae_mlp(num_columns, num_labels, hidden_units, dropout_rates, ls = 1e-2, lr = 1e-3):
    """Build the autoencoder + MLP model.

    The model has three outputs: 'decoder' (reconstruction of the input,
    MSE loss), 'ae_action' (auxiliary classification head on the
    autoencoder path, BCE loss) and 'action' (the main classification
    head fed by the concatenation of the normalized input and the
    encoder features, BCE loss).

    Parameters
    ----------
    num_columns : int
        Number of input features.
    num_labels : int
        Number of binary labels per sample.
    hidden_units : list of int
        hidden_units[0] is the encoder width, hidden_units[1] the width
        after the decoder, hidden_units[2:] the main MLP widths.
    dropout_rates : list of float
        dropout_rates[0] is the GaussianNoise stddev; the rest are
        dropout probabilities for the successive layers.
    ls : float
        Label smoothing for both BCE losses.
    lr : float
        Adam learning rate.
    """
    inp = tf.keras.layers.Input(shape = (num_columns, ))
    x0 = tf.keras.layers.BatchNormalization()(inp)
    encoder = tf.keras.layers.GaussianNoise(dropout_rates[0])(x0)
    encoder = tf.keras.layers.Dense(hidden_units[0])(encoder)
    encoder = tf.keras.layers.BatchNormalization()(encoder)
    encoder = tf.keras.layers.Activation('swish')(encoder)
    decoder = tf.keras.layers.Dropout(dropout_rates[1])(encoder)
    decoder = tf.keras.layers.Dense(num_columns, name = 'decoder')(decoder)
    x_ae = tf.keras.layers.Dense(hidden_units[1])(decoder)
    x_ae = tf.keras.layers.BatchNormalization()(x_ae)
    x_ae = tf.keras.layers.Activation('swish')(x_ae)
    x_ae = tf.keras.layers.Dropout(dropout_rates[2])(x_ae)
    out_ae = tf.keras.layers.Dense(num_labels, activation = 'sigmoid', name = 'ae_action')(x_ae)
    # Main MLP head: normalized input concatenated with encoder features.
    x = tf.keras.layers.Concatenate()([x0, encoder])
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.Dropout(dropout_rates[3])(x)
    for i in range(2, len(hidden_units)):
        x = tf.keras.layers.Dense(hidden_units[i])(x)
        x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.Activation('swish')(x)
        x = tf.keras.layers.Dropout(dropout_rates[i + 2])(x)
    out = tf.keras.layers.Dense(num_labels, activation = 'sigmoid', name = 'action')(x)
    model = tf.keras.models.Model(inputs = inp, outputs = [decoder, out_ae, out])
    model.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = lr),
                  loss = {'decoder': tf.keras.losses.MeanSquaredError(),
                          'ae_action': tf.keras.losses.BinaryCrossentropy(label_smoothing = ls),
                          'action': tf.keras.losses.BinaryCrossentropy(label_smoothing = ls),
                          },
                  metrics = {'decoder': tf.keras.metrics.MeanAbsoluteError(name = 'MAE'),
                             'ae_action': tf.keras.metrics.AUC(name = 'AUC'),
                             'action': tf.keras.metrics.AUC(name = 'AUC'),
                             },
                  )
    return model

# Hyperparameters for create_ae_mlp (presumably from an earlier tuning run).
params = {'num_columns': len(features),
          'num_labels': 5,
          'hidden_units': [96, 96, 896, 448, 448, 256],
          'dropout_rates': [0.03527936123679956, 0.038424974585075086, 0.42409238408801436, 0.10431484318345882, 0.49230389137187497, 0.32024444956111164, 0.2716856145683449, 0.4379233941604448],
          'ls': 0,
          'lr':1e-3,
         }

# Cross-validated training with purged, gapped time-series folds grouped by
# 'date'. NOTE(review): the TEST flag, ModelCheckpoint/EarlyStopping imports,
# K, gc and weighted_average are expected to be defined earlier in the
# notebook (not shown in this chunk).
if not TEST:
    scores = []
    batch_size = 4096
    gkf = PurgedGroupTimeSeriesSplit(n_splits = n_splits, group_gap = group_gap)
    for fold, (tr, te) in enumerate(gkf.split(train['action'].values, train['action'].values, train['date'].values)):
        ckp_path = f'JSModel_{fold}.hdf5'
        model = create_ae_mlp(**params)
        # Keep only the best weights by validation AUC of the main head.
        ckp = ModelCheckpoint(ckp_path, monitor = 'val_action_AUC', verbose = 0,
                              save_best_only = True, save_weights_only = True, mode = 'max')
        es = EarlyStopping(monitor = 'val_action_AUC', min_delta = 1e-4, patience = 10, mode = 'max',
                           baseline = None, restore_best_weights = True, verbose = 0)
        history = model.fit(X[tr], [X[tr], y[tr], y[tr]], validation_data = (X[te], [X[te], y[te], y[te]]),
                            sample_weight = sw[tr],
                            epochs = 100, batch_size = batch_size,
                            callbacks = [ckp, es], verbose = 0)
        hist = pd.DataFrame(history.history)
        score = hist['val_action_AUC'].max()
        print(f'Fold {fold} ROC AUC:\t', score)
        scores.append(score)
        # Free GPU memory between folds.
        K.clear_session()
        del model
        rubbish = gc.collect()

if not TEST:
    print('Weighted Average CV Score:', weighted_average(scores))

# +
def reduce_mem_usage(props):
    """Downcast numeric columns of a DataFrame in place to shrink memory.

    For each non-object column: columns containing non-finite values are
    filled (in place) with min-1 and recorded in NAlist; columns whose
    values are all (near-)integral are cast to the smallest fitting
    (unsigned) integer dtype, everything else to float32.

    Returns
    -------
    (props, NAlist) : the (mutated) DataFrame and the list of columns
        whose missing values were filled.
    """
    start_mem_usg = props.memory_usage().sum() / 1024**2
    print("Memory usage of properties dataframe is :",start_mem_usg," MB")
    NAlist = [] # Keeps track of columns that have missing values filled in.
    for col in props.columns:
        if props[col].dtype != object:  # Exclude strings
            # Print current column type
            # print("******************************")
            # print("Column: ",col)
            # print("dtype before: ",props[col].dtype)
            # make variables for Int, max and min
            IsInt = False
            mx = props[col].max()
            mn = props[col].min()
            # Integer does not support NA, therefore, NA needs to be filled
            if not np.isfinite(props[col]).all():
                NAlist.append(col)
                props[col].fillna(mn-1,inplace=True)
            # test if column can be converted to an integer
            asint = props[col].fillna(0).astype(np.int64)
            result = (props[col] - asint)
            result = result.sum()
            if result > -0.01 and result < 0.01:
                IsInt = True
            # Make Integer/unsigned Integer datatypes
            if IsInt:
                if mn >= 0:
                    if mx < 255:
                        props[col] = props[col].astype(np.uint8)
                    elif mx < 65535:
                        props[col] = props[col].astype(np.uint16)
                    elif mx < 4294967295:
                        props[col] = props[col].astype(np.uint32)
                    else:
                        props[col] = props[col].astype(np.uint64)
                else:
                    if mn > np.iinfo(np.int8).min and mx < np.iinfo(np.int8).max:
                        props[col] = props[col].astype(np.int8)
                    elif mn > np.iinfo(np.int16).min and mx < np.iinfo(np.int16).max:
                        props[col] = props[col].astype(np.int16)
                    elif mn > np.iinfo(np.int32).min and mx < np.iinfo(np.int32).max:
                        props[col] = props[col].astype(np.int32)
                    elif mn > np.iinfo(np.int64).min and mx < np.iinfo(np.int64).max:
                        props[col] = props[col].astype(np.int64)
            # Make float datatypes 32 bit
            else:
                props[col] = props[col].astype(np.float32)
            # Print new column type
            # print("dtype after: ",props[col].dtype)
            # print("******************************")
    # Print final result
    print("___MEMORY USAGE AFTER COMPLETION:___")
    mem_usg = props.memory_usage().sum() / 1024**2
    print("Memory usage is: ",mem_usg," MB")
    print("This is ",100*mem_usg/start_mem_usg,"% of the initial size")
    return props, NAlist

# Inference-time preprocessing: re-derive features/labels from the raw data
# (note this uses different filtering than the training path above) and fit
# three GPU XGBoost classifiers differing only in random_state.
if TEST:
    train, _ = reduce_mem_usage(train1)
    exclude = set([2,5,19,26,29,36,37,43,63,77,87,173,262,264,268,270,276,294,347,499])
    train = train[~train.date.isin(exclude)]
    features = [c for c in train.columns if 'feature' in c]
    f_mean = train[features[1:]].mean()
    train[features[1:]] = train[features[1:]].fillna(f_mean)
    train = train[train.weight>0]
    train['action'] = ((train['resp'].values) > 0).astype('int')
    train['action1'] = ((train['resp_1'].values) > 0).astype('int')
    train['action2'] = ((train['resp_2'].values) > 0).astype('int')
    train['action3'] = ((train['resp_3'].values) > 0).astype('int')
    train['action4'] = ((train['resp_4'].values) > 0).astype('int')
    X = train.loc[:, train.columns.str.contains('feature')].values
    y = train.loc[:, 'action'].astype('int').values
    X_ = X
    # NOTE(review): the XGB models are trained on 'action3' (resp_3 > 0),
    # not 'action' — presumably intentional; confirm.
    y_ = train.loc[:, 'action3'].astype('int').values

    clf1 = xgb.XGBClassifier(
        n_estimators=100,
        max_depth=11,
        learning_rate=0.05,
        subsample=0.90,
        colsample_bytree=0.7,
        missing=-999,
        random_state=21,
        tree_method='gpu_hist',  # THE MAGICAL PARAMETER
        reg_alpha=10,
        reg_lambda=10,
    )
    clf1.fit(X_, y_)

    clf2 = xgb.XGBClassifier(
        n_estimators=100,
        max_depth=11,
        learning_rate=0.05,
        subsample=0.90,
        colsample_bytree=0.7,
        missing=-999,
        random_state=210,
        tree_method='gpu_hist',  # THE MAGICAL PARAMETER
        reg_alpha=10,
        reg_lambda=10,
    )
    clf2.fit(X_, y_)

    clf3 = xgb.XGBClassifier(
        n_estimators=100,
        max_depth=11,
        learning_rate=0.05,
        subsample=0.90,
        colsample_bytree=0.7,
        missing=-999,
        random_state=2010,
        tree_method='gpu_hist',  # THE MAGICAL PARAMETER
        reg_alpha=10,
        reg_lambda=10,
    )
    clf3.fit(X_, y_)
# -

# Build the NN ensemble: average the main 'action' output (last output) of
# four pre-trained AE-MLP checkpoints behind a single Input.
if TEST:
    inp = tf.keras.layers.Input(shape = len(features))
    model4 = create_ae_mlp(**params)
    model4.load_weights('../input/jsmodelesade/JSModel_3 (1).hdf5')
    out4 = model4(inp)
    model5 = create_ae_mlp(**params)
    model5.load_weights('../input/jsmodelesade/JSModel_4 (1).hdf5')
    out5 = model5(inp)
    model6 = create_ae_mlp(**params)
    model6.load_weights('../input/jsmodelesade/JSModel_4_1212.hdf5')
    out6 = model6(inp)
    model7 = create_ae_mlp(**params)
    model7.load_weights('../input/jsmodelesade/JSModel_4_1214.hdf5')
    out7 = model7(inp)
    out = (out4[-1] + out5[-1]+out6[-1] + out7[-1]) / 4
    model = tf.keras.models.Model(inputs = inp, outputs = out)
    model.call = tf.function(model.call, experimental_relax_shapes = True)
    model.summary()

# # Example Test Prediction

# +
# example_test = pd.read_csv('../input/jane-street-market-prediction/example_test.csv')
# example_test = example_test.query('weight > 0').reset_index(drop = True)
# example_test[features] = example_test[features].fillna(method = 'ffill').fillna(0)
# test_preds = np.mean(model.predict(example_test[features], batch_size = 4096), axis = 1)
# opt_th = test_preds.mean()
# print(opt_th)
# -

# # Submission

# Stream the hidden test set and emit actions: forward-fill NaNs with the
# previous row's values via a numba-jitted helper, then average the NN
# ensemble's mean label probability with the mean XGB probability and
# threshold at opt_th.
if TEST:
    from numba import njit
    @njit
    def fast_fillna(array, values):
        # Replace NaNs in `array` with the corresponding entries of `values`
        # (the previously seen feature row).
        if np.isnan(array.sum()):
            array = np.where(np.isnan(array), values, array)
        return array

    env = janestreet.make_env()
    env_iter = env.iter_test()
    opt_th = 0.51
    tmp = np.zeros(len(features))
    for (test_df, pred_df) in tqdm(env_iter):
        if test_df['weight'].item() > 0:
            x_tt = test_df.loc[:, features].values
            x_tt[0, :] = fast_fillna(x_tt[0, :], tmp)
            tmp = x_tt[0, :]
            pred = ((np.mean(model(x_tt, training = False).numpy(), axis = 1)+(clf1.predict_proba(x_tt)[0][1]+clf2.predict_proba(x_tt)[0][1]+clf3.predict_proba(x_tt)[0][1])/3))/2
            pred_df.action = np.where(pred >= opt_th, 1, 0).astype(int)
        else:
            # Zero-weight rows never count toward the score; always pass.
            pred_df.action = 0
        env.predict(pred_df)
current-1th-jane-street-ae-mlp-xgb.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # Explore sorters weaknesses with with ground-truth comparison # ============================================================= # # Here a syntetic dataset will demonstrate some weaknesses. # # Standard weaknesses : # # * not all units are detected # * a unit is detected, but not all of its spikes (false negatives) # * a unit is detected, but it detects too many spikes (false positives) # # Other weaknesses: # # * detect too many units (false positive units) # * detect units twice (or more) (reduntant units = oversplit units) # * several units are merged into one units (overmerged units) # # # To demonstarte this the script `generate_erroneous_sorting.py` generate a ground truth sorting with 10 units. # We duplicate the results and modify it a bit to inject some "errors": # # * unit 1 2 are perfect # * unit 3 4 have medium agreement # * unit 5 6 are over merge # * unit 7 is over split in 2 part # * unit 8 is redundant 3 times # * unit 9 is missing # * unit 10 have low agreement # * some units in tested do not exist at all in GT (15, 16, 17) # # # # Import # # # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import spikeinterface.extractors as se import spikeinterface.sorters as sorters import spikeinterface.comparison as sc import spikeinterface.widgets as sw from generate_erroneous_sorting import generate_erroneous_sorting # - # Here the agreement matrix # # sorting_true, sorting_err = generate_erroneous_sorting() comp = sc.compare_sorter_to_ground_truth(sorting_true, sorting_err, exhaustive_gt=True) sw.plot_agreement_matrix(comp, ordered=False) # Here the same matrix but **ordered** #  It is now quite trivial to check that fake injected errors are enlighted here. 
# # sw.plot_agreement_matrix(comp, ordered=True) # Here we can see that only Units 1 2 and 3 are well detected with 'accuracy'>0.75 # # print('well_detected', comp.get_well_detected_units(well_detected_score=0.75)) # Here we can explore **"false positive units"** units that do not exists in ground truth # # print('false_positive', comp.get_false_positive_units(redundant_score=0.2)) # Here we can explore **"redundant units"** units that do not exists in ground truth # # print('redundant', comp.get_redundant_units(redundant_score=0.2)) # Here we can explore **"overmerged units"** units that do not exists in ground truth # # print('overmerged', comp.get_overmerged_units(overmerged_score=0.2)) # Here we can explore **"bad units"** units that a mixed a several possible errors. # # print('bad', comp.get_bad_units()) # There is a convinient function to summary everything. # # # + comp.print_summary(well_detected_score=0.75, redundant_score=0.2, overmerged_score=0.2) plt.show()
Notebooks/spikeinterface_examples/plot_5_comparison_sorter_weaknesses.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Multiple linear regression
#
# Regression with two or more independent variables is called multiple
# linear regression.

# ### Import the required libraries

# +
# NumPy, pandas and matplotlib for data handling and plotting
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# and of course the actual regression (machine learning) module
from sklearn.linear_model import LinearRegression
# -

# ## Load the data

# +
# Read the .csv that sits in the data folder
data = pd.read_csv('../../data/1.02. Multiple linear regression.csv')

# Peek at the first five rows of the frame
data.head()
# -

# Descriptive statistics for every numeric column — not needed right now,
# but useful later on.
data.describe()

# ## Create the multiple linear regression

# ### Declare the dependent and independent variables
# Pick the target column and the predictor columns.

# +
# Two independent variables: 'SAT' and 'Rand 1,2,3'
predictors = data[['SAT','Rand 1,2,3']]

# and a single dependent variable: 'GPA'
target = data['GPA']
# -

# ### Build the regression model

# +
# Instantiate the linear regression object ...
reg = LinearRegression()

# ... and fit it — the whole learning process boils down to this call
reg.fit(predictors, target)
# -

# Slope coefficients of the fitted regression (one per predictor, as an array)
reg.coef_

# Intercept of the fitted regression (a single float)
reg.intercept_

# ### Model evaluation — calculating the R-squared
#
# The coefficient of determination tells us how much of the variance the
# model explains.

reg.score(predictors, target)

# ### Model evaluation — formula for Adjusted $R^2$
#
# Adding predictors always pushes plain $R^2$ up, even when the new variable
# explains nothing. Adjusted $R^2$ penalizes such uninformative additions:
#
# $R^2_{adj.} = 1 - (1-R^2)*\frac{n-1}{n-p-1}$

# Shape of the predictor matrix gives us n (rows) and p (columns)
predictors.shape

# +
# Plug the pieces into the formula: r2, number of observations n,
# number of features p.
r2 = reg.score(predictors, target)
n, p = predictors.shape

adjusted_r2 = 1 - (1 - r2) * (n - 1) / (n - p - 1)
adjusted_r2
# -

# ### Model evaluation — MSE
#
# The smaller the gap between predictions and actual values, the better
# the model.

# +
from sklearn.metrics import mean_squared_error

y_true = target
y_pred = reg.predict(predictors)

mse = mean_squared_error(y_true, y_pred)
# squared=False yields the RMSE (Root Mean Square Error) instead
rmse = mean_squared_error(y_true, y_pred, squared=False)
print("MSE = ", mse, "\tRMSE = ", rmse)
03Supervised/01Regression/02MultipleLinearRegression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## PyCity Schools Analysis

# ##### Top and Bottom Performing Schools
# - The most immediate observation is that Charter schools populate the top 5 performing schools and District schools populate the bottom 5 performing schools for standardized test scores.
# - Overall, District schools had lower standardized test scores than Charter schools.
#
# ##### Subject Grades
# - Math and Reading standardized test scores seem to stay the same across the board between grades in a school.
# - If the test grades deviated wildly from each other between different grades, further analysis would be required. That doesn't appear to be the case.
# - Therefore, any solutions that would improve one grade's performance would probably work for all the other grades as well, across most of these schools.
#
# ##### Budget & Population
# - Large Schools (population greater than 2000 students) appear to have a significantly lower overall passing rate than schools with less than 2000 students, by about 20%.
# - It appears the more that schools spent on their students in \$\$ amounts, the less well they performed on these standardized tests.
# - Charter schools have smaller budgets than District schools. They also have fewer students.
#
# ##### Conclusions
# - Given the observations and conditions above, one of the conclusions we might draw from this dataset is that a higher school budget doesn't necessarily mean better performance on standardized tests.
# - Perhaps there is a correlation between the *size* of a school and performance on standardized tests, however, and it would be a good set of data to delve further into.
# - Specifically, this data shows that the more students a school has, the worse its performance on standardized tests.
#
# ##### Opinions
# - This could be the result of numerous factors; in my opinion it would be due to the lack of attention to every individual student in the time crunch required of teachers to get their lessons across. However this data doesn't necessarily prove that theory in and of itself so we would require further data. Perhaps something like *Average Amount of Time Teacher Spends With Individual Students* or something similar.
#
# ##### Other
# - Why is there someone named 'Dr. <NAME>' in the student list under Huang High School? Are some teachers actually mixed in with these students? What if some of them don't have prefixes like 'Dr.' in their name? Does this data need further scrubbing? My numbers seem generally the same as the example results we were shown so I'm just going to trust the teachers on this one.

import pandas as pd

# import schools and then students file
schools_file = "Resources/schools_complete.csv"
students_file = "Resources/students_complete.csv"

# read one dataframe for schools
schools_df = pd.read_csv(schools_file)
# schools_df.head()

# rename 'name' column to 'school' to prepare for merge
schools_df = schools_df.rename(columns={"name":"school"})
# schools_df.head()

# read second dataframe for students
students_df = pd.read_csv(students_file)
# students_df.head()

# outer merge on 'school' column
merged_df = pd.merge(schools_df, students_df, on="school", how="outer")
merged_df.head()

# check for missing data
# merged_df.count()

# check columns
merged_df.columns

# ## District Summary

# Create Dataframe with headers: 'Total Schools', 'Total Students,' 'Total Budget', 'Avg Math Score', 'Avg Reading Score',
# '% Passing Math', '% Passing Reading,' % Overall Passing Rate'
totalschools = merged_df['school'].nunique()
totalschools

totalstudents = len(merged_df['name'])
totalstudents

totalbudget = sum(schools_df['budget'])
totalbudget

# score averages
mathscore = merged_df['math_score'].mean()
readingscore = merged_df['reading_score'].mean()

# passing score formulas
# return the amount of math scores above 65
# revision: actually this assignment considers 'above 70' to be passing
# anyway this formula passes the amount of students whose scores are over 70. all little harvard prodigies.
# NOTE(review): '> 70' excludes students who scored exactly 70 — confirm
# whether the rubric means 'at least 70' (>= 70).
passmath = len(merged_df.loc[merged_df['math_score'] > 70, ['math_score']])
passmath

# average is the amount of passing scores divided by all students. example file reads 72.392137
# if passing score is '65' i get 0.83 which doesn't match the example file
# if passing scores is '70' i get '0.72392137' which matches the example file. so i'll go with that then.
# would be nice if the readme file specified what counts as a passing score.
percmath = passmath / totalstudents * 100
percmath

passreading = len(merged_df.loc[merged_df['reading_score'] > 70, ['reading_score']])
percreading = passreading / totalstudents * 100
# according to the example this should read 82.971662
percreading

# get overall passing rate "(Average of the above two)" reading and writing and put it all in a new DF
# in the example the result is clearly wrong and gives 80.431606 as the average of 72.392137 and 82.971662
percpassing = (percmath + percreading) / 2
percpassing

# +
# putting it together
dist_summary = pd.DataFrame({"Total Schools": totalschools,
                             "Total Students": totalstudents,
                             "Total Budget": totalbudget,
                             "Average Math Score": mathscore,
                             "Average Reading Score": readingscore,
                             "% Passing Math": percmath,
                             "% Passing Reading": percreading,
                             "% Overall Passing Rate": percpassing}, index=[0])

# formatting
dist_summary["Total Budget"] = dist_summary["Total Budget"].map("${:,.2f}".format)
# NOTE(review): 'Average Reading Score' is computed above but omitted from
# this column selection — looks unintended; confirm against the rubric.
dist_summary = dist_summary[["Total Schools", "Total Students", "Total Budget",
                             "Average Math Score", "% Passing Math",
                             "% Passing Reading", "% Overall Passing Rate"]]
dist_summary
# -

# ## School Summary

# Group by school, show 'School Type', 'Total Students', 'Total School Budget', 'Per Student Budget', 'Average Math Score',
# '% Passing Math', '% Passing Reading', '% Overall Passing Rate'
school_group = merged_df.groupby(['school'])

# basic stuff
# i borrowed this schooltype thing from someone else's solution because it took me like 6 hours to figure this out to no avail.
# takeaway: strings are annoying
schooltype = schools_df.set_index('school')["type"]
totalstudents = school_group['name'].count()
totschoolbud = school_group['budget'].mean()
perstudentbud = totschoolbud / totalstudents

# +
# average math score
mathscore = school_group['math_score'].mean()

# average reading score
readingscore = school_group['reading_score'].mean()

# % passing math
# also took these % passing from another solution because i couldn't figure out why i can't use .loc with groupby
passmath = merged_df[merged_df['math_score'] > 70].groupby('school')['Student ID'].count() / totalstudents * 100

# % passing reading
passreading = merged_df[merged_df['reading_score'] > 70].groupby('school')['Student ID'].count() / totalstudents * 100

# % overall passing rate
percpassing = (passmath + passreading) / 2

# +
school_summary = pd.DataFrame({"School Type": schooltype,
                               "Total Students": totalstudents,
                               "Total School Budget": totschoolbud,
                               "Per Student Budget": perstudentbud,
                               "Average Math Score": mathscore,
                               "Average Reading Score": readingscore,
                               "% Passing Math": passmath,
                               "% Passing Reading": passreading,
                               "% Overall Passing": percpassing})

# formatting
school_summary["Total School Budget"] = school_summary["Total School Budget"].map("${:,.2f}".format)
school_summary = school_summary[["School Type", "Total Students", "Total School Budget",
                                 "Per Student Budget", "Average Math Score",
                                 "Average Reading Score", "% Passing Math",
                                 "% Passing Reading", "% Overall Passing"]]
school_summary
# -

# ## Top Performing Schools (By Passing Rate)

# sort by overall passing and put highest on top
top_schools = school_summary.sort_values(['% Overall Passing'], ascending=False)
top_schools.head()

# ## Bottom Performing Schools (By Passing Rate)

# "I put my thing down, flip it and reverse it" - <NAME>, 2002
bottom_schools = school_summary.sort_values(['% Overall Passing'], ascending=True)
bottom_schools.head()

# ## Math Scores by Grade

# +
# breaking down this formula:
# first it locates all entries matching '9th' under the 'grade' column in students_df
# then it groups them by 'school'
# then it returns the mean of the values in the math_score column
# the index is still the schools from the previous dataframes so we don't have to do anything
# yes, this would probably work a lot better in a loop of some sort but i'm against the clock here
ninth = students_df.loc[students_df['grade'] == '9th'].groupby('school')['math_score'].mean()
tenth = students_df.loc[students_df['grade'] == '10th'].groupby('school')['math_score'].mean()
eleventh = students_df.loc[students_df['grade'] == '11th'].groupby('school')['math_score'].mean()
twelfth = students_df.loc[students_df['grade'] == '12th'].groupby('school')['math_score'].mean()

math_grade = pd.DataFrame({"9th": ninth.map("{:,.2f}".format),
                           "10th": tenth.map("{:,.2f}".format),
                           "11th": eleventh.map("{:,.2f}".format),
                           "12th": twelfth.map("{:,.2f}".format)})
math_grade = math_grade [['9th', '10th', '11th', '12th']]
math_grade
# -

# ## Reading Scores by Grade

# +
# same approach as the math table above, but on 'reading_score'
ninth = students_df.loc[students_df['grade'] == '9th'].groupby('school')['reading_score'].mean()
tenth = students_df.loc[students_df['grade'] == '10th'].groupby('school')['reading_score'].mean()
eleventh = students_df.loc[students_df['grade'] == '11th'].groupby('school')['reading_score'].mean()
twelfth = students_df.loc[students_df['grade'] == '12th'].groupby('school')['reading_score'].mean()

reading_grade = pd.DataFrame({"9th": ninth.map("{:,.2f}".format),
                              "10th": tenth.map("{:,.2f}".format),
                              "11th": eleventh.map("{:,.2f}".format),
                              "12th": twelfth.map("{:,.2f}".format)})
reading_grade = reading_grade [['9th', '10th', '11th', '12th']]
reading_grade
# -

# ## Scores by School Spending

# creating bins based on example, then making a new variable to hold them
# (bins are per-student budget = budget / size)
spending_bins = [0, 585, 615, 645, 675]
group_names = ["<$585", "$585-615", "$615-645", "$645-675"]
merged_df["Spending Range"] = pd.cut(merged_df['budget']/merged_df['size'], spending_bins, labels=group_names)
# school_summary

# +
spending_group = merged_df.groupby("Spending Range")

# Average Math Score, Average Reading Score, % Passing Math, % Passing Reading, % Overall Passing Rate
avgmath = spending_group['math_score'].mean()
avgreading = spending_group['reading_score'].mean()

# passing math
passmath = merged_df[merged_df['math_score'] > 70].groupby("Spending Range")['Student ID'].count() / spending_group['Student ID'].count() * 100

# % passing reading
passreading = merged_df[merged_df['reading_score'] > 70].groupby("Spending Range")['Student ID'].count() / spending_group['Student ID'].count() * 100

# % overall passing rate
percpassing = (passmath + passreading) / 2

# +
spending_scores = pd.DataFrame({"Average Math Score": avgmath.map("{:,.2f}".format),
                                "Average Reading Score": avgreading.map("{:,.2f}".format),
                                "% Passing Math": passmath.map("{:,.2f}".format),
                                "% Passing Reading": passreading.map("{:,.2f}".format),
                                "% Overall Passing Rate": percpassing.map("{:,.2f}".format)})

spending_scores = spending_scores[['Average Math Score', 'Average Reading Score',
                                   '% Passing Math', '% Passing Reading',
                                   '% Overall Passing Rate']]
spending_scores.index.name = "Spending Ranges (Per Student)"
spending_scores
# -

# ## Scores by School Size

# just using the Example's bins
size_bins = [0, 1000, 2000, 5000]
group_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
merged_df['School Sizes'] = pd.cut(merged_df['size'], size_bins, labels = group_names)

# +
size_group = merged_df.groupby('School Sizes')

# Average Math Score, Average Reading Score, % Passing Math, % Passing Reading, % Overall Passing Rate
avgmath = size_group['math_score'].mean()
avgreading = size_group['reading_score'].mean()

# passing math
passmath = merged_df[merged_df['math_score'] > 70].groupby("School Sizes")['Student ID'].count() / size_group['Student ID'].count() * 100

# % passing reading
passreading = merged_df[merged_df['reading_score'] > 70].groupby("School Sizes")['Student ID'].count() / size_group['Student ID'].count() * 100

# % overall passing rate
percpassing = (passmath + passreading) / 2

# +
size_scores = pd.DataFrame({"Average Math Score": avgmath.map("{:,.2f}".format),
                            "Average Reading Score": avgreading.map("{:,.2f}".format),
                            "% Passing Math": passmath.map("{:,.2f}".format),
                            "% Passing Reading": passreading.map("{:,.2f}".format),
                            "% Overall Passing Rate": percpassing.map("{:,.2f}".format)})

size_scores = size_scores[['Average Math Score', 'Average Reading Score',
                           '% Passing Math', '% Passing Reading',
                           '% Overall Passing Rate']]
size_scores.index.name = "School Size"
size_scores
# -

# ## Scores by School Type

# +
# group by school type
type_group = merged_df.groupby('type')

# Average Math Score, Average Reading Score, % Passing Math, % Passing Reading, % Overall Passing Rate
avgmath = type_group['math_score'].mean()
avgreading = type_group['reading_score'].mean()

# passing math
passmath = merged_df[merged_df['math_score'] > 70].groupby("type")['Student ID'].count() / type_group['Student ID'].count() * 100

# % passing reading
passreading = merged_df[merged_df['reading_score'] > 70].groupby("type")['Student ID'].count() / type_group['Student ID'].count() * 100

# % overall passing rate
percpassing = (passmath + passreading) / 2

# +
type_scores = pd.DataFrame({"Average Math Score": avgmath.map("{:,.2f}".format),
                            "Average Reading Score": avgreading.map("{:,.2f}".format),
                            "% Passing Math": passmath.map("{:,.2f}".format),
                            "% Passing Reading": passreading.map("{:,.2f}".format),
                            "% Overall Passing Rate": percpassing.map("{:,.2f}".format)})

type_scores = type_scores[['Average Math Score', 'Average Reading Score',
                           '% Passing Math', '% Passing Reading',
                           '% Overall Passing Rate']]
type_scores.index.name = "School Type"
type_scores
PyCitySchools/PyCitySchools_solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Builds the county-day analysis panel for the Covid mobility / neighbor-orders
# study: merges 2016 presidential results, constructs post-/eased-order
# indicators for each county and each of its neighbors, and attaches USDA
# food-environment controls.

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import sys
from state_cleaner import *
import datetime
import csv
from scipy import stats as st
import itertools
import re
from datetime import datetime

# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# -

# NOTE(review): assumes the notebook is launched two directories below the
# repository root — fragile if run from anywhere else.
os.chdir('../..')
os.chdir('CovidMobile')
os.getcwd()

def prez_data():
    """Load the 2016 county presidential results, keep each county's winning
    candidate row, and attach the Republican vote share as `vote_share_rep`."""
    prez = pd.read_csv("data/countypres_2000-2016.csv")
    prez = prez[prez['year']==2016]
    prez['vote_share'] = prez['candidatevotes'] / prez['totalvotes']
    # Winner = candidate with the max vote count within each county-year.
    prez['max'] = prez.groupby(['FIPS','year'])['candidatevotes'].transform(max)
    prez_win = prez[prez['max']==prez['candidatevotes']].copy()
    # Side-by-side concat, not a keyed merge: presumably one winner row and one
    # Republican row per county in matching order — TODO confirm alignment.
    prez_win = pd.concat([prez_win.reset_index(),prez[prez['party']=='republican']['vote_share'].rename('vote_share_rep').reset_index()],axis=1)
    return prez_win

df = pd.read_csv('data/compiled_2020-05-28.csv')
df.shape
df['date'].max()
df.columns.values

# State abbreviation per FIPS, renamed for the neighbor join.
dct = df[['state_abb_x','FIPS']].drop_duplicates().dropna()
dct.columns = ['neighbors_state','neighbors']
df.shape

# One row per county carrying all of its neighbor-FIPS columns.
neighbors = df[['state_abb_x','FIPS']+list(df.filter(like='neighbors').columns.values)].drop_duplicates().dropna(subset=['state_abb_x','FIPS'])
neighbors_brs = list(neighbors.filter(like='neighbors').columns.values)

prez = prez_data()
df_all = df.merge(prez, on =['FIPS'],how='left')

# +
# County-level orders carry a single date column: post_* = 1 on/after it.
orders = ['soe', 'sah']
for x in orders:
    df_all['post_{}'.format(x)] = np.where(df_all[x]<=df_all['date'],1,0)

# State-level orders carry start ('<name>_0') / easing ('<name>_1') dates.
orders = ['state_sah', 'state_mandate', 'state_non_ess', 'state_lgb', 'state_sc', 'state_bar_res', 'state_election', 'state_emergency', 'state_compliance', 'state_prepare','nyt_sah']
for x in orders:
    # post_*: order started and not yet eased on this date.
    df_all['post_{}'.format(x)] = np.where(df_all['{}_{}'.format(x,'0')]<=df_all['date'],1,0)
    df_all['post_{}'.format(x)] = np.where(df_all['{}_{}'.format(x,'1')]<=df_all['date'],0,df_all['post_{}'.format(x)])
    # eased_*: on/after the easing date.
    df_all['eased_{}'.format(x)] = np.where(df_all['{}_{}'.format(x,'1')]<=df_all['date'],1,0)
    # all_p_*: order ever started, regardless of easing.
    df_all['all_p_{}'.format(x)] = np.where(df_all['{}_{}'.format(x,'0')]<=df_all['date'],1,0)
# -

# Per-(county, date) snapshot of every post_/eased_ indicator, used to look up
# each neighbor's status below.
county = pd.concat([df_all[['FIPS','date']],df_all.filter(like='post_'),df_all.filter(like='eased_')],axis=1)
county.drop_duplicates(inplace=True)
county.columns.values

# For the i-th neighbor column, rename `county`'s columns with an _n{i} suffix
# and merge the neighbor's own indicators onto the main frame.
for i,x in enumerate(neighbors_brs):
    i = i +1
    county.columns = [x, 'date', 'post_soe_n{}'.format(i), 'post_sah_n{}'.format(i), 'post_state_sah_n{}'.format(i), 'post_state_mandate_n{}'.format(i), 'post_state_non_ess_n{}'.format(i), 'post_state_lgb_n{}'.format(i), 'post_state_sc_n{}'.format(i), 'post_state_bar_res_n{}'.format(i), 'post_state_election_n{}'.format(i), 'post_state_emergency_n{}'.format(i), 'post_state_compliance_n{}'.format(i), 'post_state_prepare_n{}'.format(i), 'post_nyt_sah_n{}'.format(i),'eased_state_sah_n{}'.format(i), 'eased_state_mandate_n{}'.format(i),'eased_state_non_ess_n{}'.format(i), 'eased_state_lgb_n{}'.format(i), 'eased_state_sc_n{}'.format(i), 'eased_state_bar_res_n{}'.format(i), 'eased_state_election_n{}'.format(i), 'eased_state_emergency_n{}'.format(i), 'eased_state_compliance_n{}'.format(i), 'eased_state_prepare_n{}'.format(i),'eased_nyt_sah_n{}'.format(i)]
    df_all = df_all.merge(county, on=[x,'date'],how='left')

df_all.shape

df_all[['post_state_sah', 'eased_state_sah', 'post_state_mandate', 'eased_state_mandate', 'post_state_non_ess', 'eased_state_non_ess', 'post_state_lgb', 'eased_state_lgb', 'post_state_sc', 'eased_state_sc', 'post_state_bar_res', 'eased_state_bar_res', 'post_state_election', 'eased_state_election', 'post_state_emergency', 'eased_state_emergency', 'post_state_compliance', 'eased_state_compliance', 'post_state_prepare', 'eased_state_prepare','post_nyt_sah','eased_nyt_sah']].describe()

# Each x ends in '_n', so '{}eighborall'.format(x) spells '..._neighborall':
# 1 if ANY neighbor has the order eased.
for x in ['eased_state_sah_n','eased_state_non_ess_n','eased_state_lgb_n','eased_state_bar_res_n','eased_nyt_sah_n']:
    df_all['{}eighborall'.format(x)]=np.where(df_all.filter(like=x).sum(axis=1)>0,1,0)

# Same trick for post_* indicators, plus the mean share of neighbors under the
# order; drop any previous version of the column first (ignore if absent).
for x in ['post_state_sah_n','post_state_bar_res_n','post_state_non_ess_n','post_nyt_sah_n']:
    try:
        df_all.drop(['{}eighborall'.format(x)],axis=1,inplace=True)
    except:
        pass
    sah_cols = list(df_all.filter(like=x).columns.values)
    print(sah_cols)
    df_all['{}eighborall'.format(x)] = np.where(((df_all[sah_cols] == 1.0).any(axis=1)),1,0)
    df_all['avg_{}eighborall'.format(x)] =df_all[sah_cols].mean(axis=1)

# + jupyter={"outputs_hidden": true}
# Spot-check two Florida counties' neighbor indicators.
df_all[(df_all[ 'FIPS']==12021)|(df_all['FIPS']==12086)][['date','post_nyt_sah','eased_nyt_sah']+list(df_all.filter(like='post_nyt_sah_n').columns.values)+list(df_all.filter(like='eased_nyt_sah_n').columns.values)].to_csv('chk.csv')

# +
df_all[['post_state_sah', 'post_state_mandate','post_state_non_ess','post_state_lgb','post_state_sc','post_state_bar_res', 'post_state_emergency']].corr()
# -

# # Models - data transformation

import statsmodels.formula.api as smf
import statsmodels as sm
from statsmodels.iolib.summary2 import summary_col

# +
usda = pd.read_csv('./data/clean_usda2.csv')
#Select certain variables to add into model:
usda_vars = ['FIPS','PCT_LACCESS_POP15', 'PCT_LACCESS_LOWI15','GROC14','SUPERC14','CONVS14','SPECS14', 'PCT_SNAP16','FOODINSEC_13_15']
usda = usda[usda_vars]

#combine num grocery stores, superstores, convenience stores and specialty food stores into total stores
usda['total stores'] = usda['GROC14']+ usda['SUPERC14']+ usda['CONVS14']+ usda['SPECS14']

#rename columns (positional: i-th existing column -> i-th new name):
newcols = ['FIPS','perc_pop_low_access15', 'perc_low_access_low_income15', 'grocery14', 'superstores14', 'convenience14', 'specialized14', 'perc_SNAP16', 'perc_food_insecure1315','total_stores']
d = {}
for i in range(len(newcols)):
    d[list(usda.columns)[i]] = newcols[i]
usda.rename(columns = d, inplace = True)
usda.head()

# + jupyter={"outputs_hidden": true}
df_all[df_all['state_abb_x']=='TX'].groupby(['FIPS'])['date'].count().to_csv('chk.csv')
# -

df_all.columns.values
df_all[df_all['eased_state_sah']==1].drop_duplicates(subset=['state_x'])
df_all[['retail','food_drugs','work','homes','pct_social_distancing']].describe()

#descriptives of missing
vrs = ['retail','food_drugs','work','homes','pct_social_distancing']
for x in vrs:
    df_all['miss_{}'.format(x)] = np.where(df_all[x].isnull(),1,0)
    print('\nMissing Comparison for -{}\n{}'.format(x,df_all.drop_duplicates(['FIPS','miss_{}'.format(x)]).groupby(['miss_{}'.format(x)])['POP_ESTIMATE_2018'].describe()))

#descriptives of missing
df_all['obs'] = 1
# nomiss_3 = 1 when all three mobility outcomes are observed for the row.
df_all['nomiss_3'] = np.where(df_all[['retail','food_drugs','work']].isnull().any(axis=1),0,1)
df_all['consecutive_days_nomiss_3'] = df_all.groupby(['FIPS','nomiss_3'])['obs'].transform(sum)
df_all['max_days'] = np.where(df_all['consecutive_days_nomiss_3'] == df_all['consecutive_days_nomiss_3'].max(),1,0)
df_all['ln_pop'] = np.log(df_all['POP_ESTIMATE_2018'])
df_all.drop_duplicates(['FIPS','max_days']).hist(column='ln_pop',by='max_days',sharex=True)
df_all[df_all['max_days']==1][['eased_state_sah_neighborall','eased_state_non_ess_neighborall','eased_state_lgb_neighborall','eased_state_bar_res_neighborall']].describe()
df_all.columns.values
df_all.shape
df_all.groupby(['FIPS','state_x','county_x']).agg({'cases':'sum','population':'max'}).to_csv('chk.csv')
df_all[~df_all['county_x'].isin(['Kings County','Queens County','New York County','Bronx County','Richmond County'])].shape

# Build state and day fixed-effect dummies for the regressions.
df_all['date'] = pd.to_datetime(df_all['date'],errors='coerce')
state_dumms = pd.get_dummies(df_all.state_abb_x)
states = state_dumms.columns.values
day_dumms = pd.get_dummies(df_all['date'].dt.date)
print(len(day_dumms.columns))
day_dumms.columns = ['day' + str(i) for i in list(range(0,len(day_dumms.columns)))]
days = day_dumms.columns.values
df_fin = pd.concat([df_all,state_dumms,day_dumms],axis=1)
df_fin = df_fin[df_fin['no_nyt_order']==0].copy()
# Final sample construction and the first set of OLS runs.
# Drops the five NYC-borough counties and PR/HI/AK, fills case/death counts,
# logs the controls, merges USDA food-environment variables, and estimates
# OLS models with HC1-robust standard errors for each LHS/RHS combination.
df_fin = df_fin[~df_fin['county_x'].isin(['Kings County','Queens County','New York County','Bronx County','Richmond County'])].copy()
df_fin = df_fin[~df_fin['state_abb_x'].isin(['PR','HI','AK'])].copy()
df_fin['cases'] = df_fin['cases'].fillna(0)
df_fin['deaths'] = df_fin['deaths'].fillna(0)
df_fin['ln_cases'] = np.log(df_fin['cases']+1 )
df_fin['ln_deaths'] = np.log(df_fin['deaths']+1 )
# Express the SafeGraph shares as percentages.
df_fin['pct_social_distancing'] = df_fin['pct_social_distancing']*100
df_fin['pct_leaving_home'] = df_fin['pct_leaving_home']*100
# FIX: this filter was written as
#   df_fin[(df_fin['state_abb_x']!="AK")|df_fin['state_abb_x']!="HI"]
# which is broken twice over: `|` binds tighter than `!=`, so it does not
# parse as two conditions, and even fully parenthesized an OR of the two
# inequalities is always True (every state differs from at least one of
# AK/HI). The intended "drop AK and HI" filter is kept here in its correct
# form (it is redundant with the isin() filter above, but harmless).
df_fin = df_fin[~df_fin['state_abb_x'].isin(["AK","HI"])].copy()
# Shorten the unwieldy USDA education column names.
df_fin.rename(columns={'Percent of adults with less than a high school diploma, 2014-18':'pct_less_hs',
                       'Percent of adults with a high school diploma only, 2014-18':'pct_only_hs',
                       "Percent of adults completing some college or associate's degree, 2014-18":'pct_some_co',
                       "Percent of adults with a bachelor's degree or higher, 2014-18":'pct_has_co'}, inplace=True)
df_fin = df_fin.merge(usda,on=['FIPS'],how='left')
extra_controls = ['ln_total_stores','perc_pop_low_access15','pct_less_hs','pct_only_hs','pct_some_co','PCTPOVALL_2018']
# Log transforms of the level controls.
df_fin['ln_pop_18'] = df_fin['POP_ESTIMATE_2018'].apply(np.log)
df_fin['ln_pop_density'] = ((df_fin['population'] / df_fin['amount_land'])+1).apply(np.log)
df_fin['ln_income_18'] = df_fin['Median_Household_Income_2018'].apply(np.log)
df_fin['ln_income'] = df_fin['med_hh_income'].apply(np.log)
df_fin['ln_employed_18'] = df_fin['Employed_2018'].apply(np.log)
df_fin['ln_unemployed_18'] = df_fin['Unemployed_2018'].apply(np.log)
df_fin['ln_total_stores'] = df_fin['total_stores'].apply(np.log)
df_fin['obs'] = 1
df_fin = df_fin.dropna(subset= ['party'])
print(df_fin.shape)
# nomiss_3 = 1 when all three mobility outcomes are observed for the row.
df_fin['nomiss_3'] = np.where(df_fin[['pct_social_distancing','pct_leaving_home','work']].isnull().any(axis=1),0,1)
# Pass the aggregation by name rather than the builtin `sum` (same result,
# uses pandas' optimized path).
df_fin['consecutive_days_nomiss_3'] = df_fin.groupby(['FIPS','nomiss_3'])['obs'].transform('sum')
df_fin['max_days'] = np.where(df_fin['consecutive_days_nomiss_3'] == df_fin['consecutive_days_nomiss_3'].max(),1,0)
# googl = 1 when the Google mobility outcome is observed.
df_fin['googl'] = np.where(df_fin['work'].notnull(),1,0)
df_fin.sort_values(by=['FIPS','date'],inplace=True)

# + jupyter={"outputs_hidden": true}
df_fin.to_csv('data/analysis_data_{}.csv'.format(str(datetime.now().date())))
# -

df_fin.groupby(['obs']).agg({'date':'max','FIPS':'nunique'})
df_fin.columns.values

# set one
# Specification grid: every (rhs, lhs, sample) combination gets one model.
one = {'name':'one',
       'lhs':['pct_social_distancing','pct_leaving_home','work','retail'],
       'rhs':[['post_nyt_sah_neighborall','post_nyt_sah'],['avg_post_nyt_sah_neighborall','post_nyt_sah'],['all_p_nyt_sah','eased_nyt_sah_neighborall','eased_nyt_sah']],
       'controls':['vote_share_rep','ln_cases','ln_deaths','ln_income','ln_pop_density','perc_elderly','ln_population','perc_employed'] + list(days)[1:] + list(states)[1:],
       'sample':['obs','googl']}
runs_1 = [one]
df_fin['ln_cases'].describe()

# First Set
res = []
models = []
for r in runs_1:
    for x in r['rhs']:
        for y in r['lhs']:
            for s in r['sample']:
                # Strip patsy decoration (C(...), interactions) to recover the
                # raw column names needed to subset the sample.
                vrs = x + r['controls'] + [y]
                vrs = [re.split(r'\*| \+ ',i.replace('C(','').replace(')','')) for i in vrs]
                vrs = list(itertools.chain.from_iterable(vrs))
                sam = df_fin[(df_fin[s]==1)&(df_fin[y].notnull())][vrs].copy()
                X = x + r['controls']
                strng = '{} ~ '.format(y) + ' + '.join([str(i) for i in X])
                print(sam[y].describe())
                mod = smf.ols(strng,data=sam)
                print(r['name'],s,y,x)
                # FIX: dropped the `return_type='dataframe'` kwarg — it is not
                # a parameter of OLS.fit() and was silently swallowed by
                # **kwargs; HC1 heteroskedasticity-robust SEs are kept.
                fitted = mod.fit(cov_type='HC1')
                res.extend([fitted])
                title = 'Sample={}\nLHS={}\nRHS={}'.format(s,y,x)
                models.extend([title])
                sam = None

# Stack all fitted models into one comparison table and export it.
results = summary_col(res,stars=True,float_format='%0.2f',
                      model_names=models,
                      info_dict={'N':lambda x: "{0:d}".format(int(x.nobs)),
                                 'R2':lambda x: "{:.2f}".format(x.rsquared),
                                 'R2adj':lambda x: "{:.2f}".format(x.rsquared_adj),
                                 # FIX: the original wrapped f_pvalue in int(),
                                 # truncating every p-value in [0,1) to 0.00.
                                 'F-pval':lambda x: "{:.2f}".format(x.f_pvalue)})
pd.DataFrame(results.tables[0]).to_csv('models/neighbors_run_v2a.csv')
# + jupyter={"outputs_hidden": true}
notebooks/Analysis Neighbors v2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import seaborn as sns from pandas import DataFrame from matplotlib import pyplot as plt from sklearn.model_selection import train_test_split # - df=pd.read_csv('../data/glass1.dat') df.columns # %matplotlib inline sns.countplot(x='Outcome',data=df) df_min=df[df['Outcome']==' positive'] df_majority=df[df['Outcome']==' negative'] df_min.to_csv('glass1_minority.csv',index=False) df_majority.to_csv('glass1_majority.csv',index=False) # + data=np.array(df.values) pos=data.shape[1]-1 for i in range(data.shape[0]): if data[i][pos]==' negative': data[i][pos]=0 else: data[i][pos]=1 min_data=np.array(df_min) maj_data=np.array(df_majority) # - validation=0.30 seed=5 data.shape df_min.shape min_data.shape maj_data.shape X=data[:,:9].astype(float)# getting the feature values Y=data[:,9]# getting prediction X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=validation,random_state=seed) X_train.shape Y_train=Y_train.reshape((Y_train.shape[0],1)) Y_train.shape train_Data=np.concatenate((X_train,Y_train),axis=1) train_Data[:10] Y_test=Y_test.reshape((Y_test.shape[0],1)) test_Data=np.concatenate((X_test,Y_test),axis=1) train_Data=DataFrame(train_Data) test_Data=DataFrame(test_Data) # %matplotlib inline sns.countplot(x=9,data=train_Data) # %matplotlib inline sns.countplot(x=9,data=test_Data) train_Data train_Data.to_csv('train_Data.csv',index=False) test_Data.to_csv('test_Data.csv',index=False)
glass1-seed -5/data preprocessing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Nelson-Muteti/Regression-Project/blob/main/Nelson_Muteti_Week_7_Regression_IP.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="hAwOv926DqIg"
# ## Defining The Question
#
# As a Data Scientist, I work for Hass Consulting Company, which is a real estate leader with over 25 years of experience. I have been tasked to study the factors that affect housing prices using the given information on real estate properties that was collected over the past few months. Later onwards, I am supposed to create a model that would allow the company to accurately predict the sale prices upon being provided with the predictor variables.

# + id="K7OSve-ff5ER"
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Lasso
import seaborn as sns

# + [markdown] id="uquts5-IEErq"
# ## Defining Metrics for Success
# A successful project will build several regression models and evaluate their metrics for accuracy using the R squared and Root Mean Squared Error. The best predictive model would be the model with the highest R squared and lowest RMSE.

# + [markdown] id="JdmOnltdE135"
# ## Understanding The Context
# The context in this case is a real estate one. We have to understand what factors consumers look into when making a house buying / purchasing decision. For this, we have to engage with the data and explore it in depth.

# + [markdown] id="ZSxMPIIWF8Qx"
# ## Experimental Design
# I will first clean the data and remove any outliers and null values that may skew our model. We then conduct a thorough EDA and get any relationships in the data. Afterwards, we will build the different regression models and compare their RMSEs before selecting the model with the least RMSE.

# + [markdown] id="Wl4NAEADH6zf"
# ## Reading the Data

# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="8qC_HVAGgQ9L" outputId="af3bba7f-22ee-4133-9840-33c31f5c432c"
#lets read the data into a pandas dataframe
df = pd.read_csv('http://bit.ly/IndependentProjectWeek7Dataset')
df.head()

# + colab={"base_uri": "https://localhost:8080/"} id="uzOnzJvQgh6C" outputId="f4be73a1-8147-4c37-d2f3-00d9fe9dac81"
df.shape #check the make of data

# + colab={"base_uri": "https://localhost:8080/"} id="uaHSeJnGizYD" outputId="5df6cda3-2e61-4445-cbb7-9b786e34bb2e"
df.info() #check the additional information about the data

# + colab={"base_uri": "https://localhost:8080/", "height": 317} id="BGvxePf8icQw" outputId="201a7694-06b8-4605-b128-07272308bbb6"
df.describe().apply(lambda s: s.apply('{0:.5f}'.format)) #describe dataset without exponential terms

# + [markdown] id="xn_S2IzfIRKb"
# ## External Data Validation
# We check for data integrity to ensure that the data conforms with what is expected. For this, I used [real estate data from datarade.ai](https://datarade.ai/data-categories/real-estate-data)

# + [markdown] id="LiE2X3YwJUjq"
# ## Data Cleaning

# + colab={"base_uri": "https://localhost:8080/"} id="Of64yqz2iRAz" outputId="35d6c28c-2bca-4f18-9a84-1633f2a9aa3e"
df.isnull().sum() #check for null values in data

# + id="DmxVZLydiZ6d"
#some cleaning: drop identifier and coordinate columns not used for modelling
df.drop(['id','lat','long'],axis=1,inplace=True)

# + colab={"base_uri": "https://localhost:8080/", "height": 131} id="no52GEp-jS2I" outputId="4fc1362d-90a9-4a37-fd77-40548a07b080"
df.head(2)

# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="rN-H5w66juNo" outputId="52c879e0-bc79-480b-da6b-2121c2ef10ac"
sns.countplot(x = 'floors',palette='hls',data = df)
#some countplots to check the number of values within each column of datasets.

# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="q08Yadc7jwio" outputId="12688372-7881-4fc5-c31f-7f7bb900cc5b"
sns.countplot(x = 'bedrooms',palette='hls',data = df)

# + colab={"base_uri": "https://localhost:8080/"} id="DgrJuszFj6ek" outputId="8ca605f7-d06e-4b68-a106-a9994b5fc29e"
df.columns

# + colab={"base_uri": "https://localhost:8080/", "height": 293} id="DsfTaXnkkOVc" outputId="a0edf2f2-ffb0-4332-d1a2-7d8bf9b3fac3"
df.boxplot(column = 'price') #there seems to be outliers in this column

# + colab={"base_uri": "https://localhost:8080/"} id="ojbKG0GMkyX-" outputId="d9cd1578-30f4-497f-e802-d4cfea4ec76b"
#remove the outliers
#removing the outliers and anomalies in dataset
#Get the Interquartile range; drop any row with a value outside
#[Q1 - 1.5*IQR, Q3 + 1.5*IQR] in ANY column.
Q1 = df.quantile(0.25)
Q3 = df.quantile(0.75)
IQR = Q3 - Q1
newdf = df[~((df < (Q1 - 1.5 * IQR)) | (df > (Q3 + 1.5 * IQR))).any(axis=1)]
print(newdf.shape)
print(df.shape)

# + colab={"base_uri": "https://localhost:8080/", "height": 293} id="7T4JuRnQlGBu" outputId="e869b7c8-2ab9-43c9-bd23-1771fa82b936"
newdf.boxplot(column = 'price') #now the outliers are removed

# + id="fZgL2MtcldjK"
newdf.to_csv('Housing_data.csv') #store the clean data in a csv file for future reference

# + [markdown] id="WJLoQU1qJ7Ap"
# ## Exploratory Data Analysis

# + colab={"base_uri": "https://localhost:8080/", "height": 253} id="To8LSdgxlLA9" outputId="a21424b4-233b-40bb-c814-a4d3b4752032"
#lets do some EDA
#Univariate exploratory Data Analysis: share of houses per floor count
labels = newdf['floors'].astype('category').cat.categories.tolist()
counts = newdf['floors'].value_counts()
sizes = [counts[var_cat] for var_cat in labels]
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True)
ax1.axis('equal')
plt.show()
#about 52.2% of houses in the data have only 1 floor
#while about 0.4% have 2.5 floors

# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="0brHlMd4mHoV" outputId="c6ff70c2-34a1-44d7-b43a-e5156fdc27cd"
sns.countplot(x = 'bathrooms',palette='hls',data = newdf)
#most of the houses in the dataset have about 2.5 bathrooms
#very few have above 3 bathrooms.

# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="Ka0Dw-zzmOsv" outputId="5d72c90d-732d-4d97-a36e-2495c8aa7141"
# NOTE(review): this plot uses the pre-cleaning `df`, not `newdf` — confirm
# whether that is intentional.
sns.countplot(x = 'bedrooms',palette='hls',data = df)
#most of the houses in the dataset have about 3 bedrooms while
#very few have above 5 bedrooms

# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="aG-yojMGni_P" outputId="4fbb32ae-c600-42e5-c0ff-b99e0b48e4b5"
#check distribution of prices
newdf['price'].hist()

# + colab={"base_uri": "https://localhost:8080/"} id="V6T4YUGtn5b3" outputId="618d57e1-35bc-4ff7-fb9d-227cd95b93c9"
#checking the skewness of prices from the data
#The positive value means the distribution of house prices is positively skewed.
newdf['price'].skew()

# + colab={"base_uri": "https://localhost:8080/"} id="ekKa5df9oFGl" outputId="aeb9a745-1629-4e0c-a2f7-0be31fe2994d"
# Finding the min and max values of the house prices
max_price = newdf['price'].max()
min_price = newdf['price'].min()
print('Maximum is ',max_price)
print('Minimum is ',min_price)
# Calculating the range of house prices
print('Range of prices is ',max_price - min_price)
#maximum house price is 1.12 Million dollars while minimum price is 82,000 dollars

# + colab={"base_uri": "https://localhost:8080/"} id="JmYAS5GPmTzK" outputId="afcfd1af-d1b5-4428-80be-7e2adb63f0a4"
newdf['price'].describe().apply(lambda x: format(x, 'f')) #get summary statistics from target variable

# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="erLi8mOZo0gK" outputId="30f509be-eab5-40c5-8703-a3214828252c"
#now for some bivariate EDA
#plot relationship between house price and square feet of house
plt.scatter(newdf['sqft_living'],newdf['price'],color = 'green')
plt.title('Square Feet vs Price')
plt.xlabel('Square Feet')
plt.ylabel('price * 10 ^ 6')
plt.show()
coef = newdf['sqft_living'].corr(newdf['price'])
print(coef)
#relatively moderate correlation between house price and square footage

# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="dsw0wlGEqVCS" outputId="723d02e0-9b2f-419a-8d3c-7d5f90b30fbb"
#plot relationship between house price and square feet above of house
plt.scatter(newdf['sqft_above'],newdf['price'],color = 'blue')
plt.title('Square Feet above vs Price')
plt.xlabel('Square Feet above')
plt.ylabel('price * 10 ^ 6')
plt.show()
coef = newdf['sqft_above'].corr(newdf['price'])
print(coef)
#relatively weak to moderate correlation between house price and square footage above the house

# + colab={"base_uri": "https://localhost:8080/", "height": 290} id="auRkCYu0tBwf" outputId="ee0898b1-d530-4aa6-8718-b4b404fc296e"
#plot the box plot of prices by no. of bedrooms
# NOTE(review): positional (x, y) arguments to sns.boxplot require seaborn
# < 0.12 — keyword x=/y= would be needed on newer versions.
sns.boxplot(newdf['bedrooms'], newdf['price'])
plt.show()
#as the number of bedrooms increase, the median houseprice seems to increase

# + colab={"base_uri": "https://localhost:8080/", "height": 722} id="bsBhSTdWTL4C" outputId="c08a39d3-ee55-4202-974d-f8e8a319c034"
# Top-10 zip codes by mean house price.
new_df = newdf.groupby('zipcode')['price'].mean()
new_df = new_df.sort_values(ascending=False)
new_df = new_df.head(10)
plt.figure(figsize= (10,8))
sns.barplot(x = new_df.index, y = new_df.values)
plt.title('Zip Codes with highest average house prices', fontsize = 16)
plt.ylabel('House Prices', fontsize=14)
plt.xlabel('Zip Codes', fontsize=12)
print(new_df)
plt.show()
#checking the zipcodes with the highest average house prices.

# + colab={"base_uri": "https://localhost:8080/", "height": 290} id="iloYcGeXwPuP" outputId="51f5d348-a9e6-4236-d89c-a1e3eb77c54c"
#plot the box plot of price by no. of floors
sns.boxplot(newdf['floors'], newdf['price'])
plt.show()

# + [markdown] id="mD-iAfTeKKCg"
# ## Investigate Multicollinearity

# + colab={"base_uri": "https://localhost:8080/", "height": 565} id="eWvirAwRwWVY" outputId="5a95f813-8033-4c4f-91f5-b429460fbeb7"
#check for multicollinearity among the predictors
independent_only = newdf.drop(columns=['price'])
# Let's display the correlations between the variables
correlations = independent_only.corr()
correlations
#the columns square foot above and square foot living are strongly correlated
#we have to drop one of the columns

# + id="Yi_0NvAwxoFR"
newdf.drop(['sqft_above','sqft_lot15'],axis=1,inplace=True) #dropping highly multicollinear columns

# + colab={"base_uri": "https://localhost:8080/"} id="yNdQ7Nv5xwTK" outputId="954574d0-9207-446a-fac7-a09452036034"
newdf.columns

# + colab={"base_uri": "https://localhost:8080/", "height": 503} id="gUkty2bLx8Az" outputId="21f310c3-3c36-4de7-fcb0-c36d93907dec"
#re-check for multicollinearity after the drop
mycorre = newdf.drop(columns=['price'])
# Let's display the correlations between the variables
corrs = mycorre.corr()
corrs

# + colab={"base_uri": "https://localhost:8080/"} id="zogW1BEdYh_q" outputId="96cebd7b-5990-4887-db95-6a2a402ab7fc"
newdf.columns

# + id="4Yzylf9_tdOd"
# Keep a reference to the cleaned frame for the quantile-regression section.
mydf = newdf

# + [markdown] id="b1nB5nAzKRG8"
# # Implementing The Solution

# + [markdown] id="RDRyFUUmKZII"
# ## Bartlett's Test and Multivariate Linear Regression
# Check for homoskedasticity in the data by plotting a residual plot and using the chisquare test to prove an hypothesis

# + [markdown] id="_ceHCtDmKwCP"
# Also, **Fit a Multivariate Linear Regression** and check its accuracy metrics

# + id="swH_b9ety2Do"
#use bartletts test for Homoskedasticity
#fitting a linear Regression Model in order to Check for Homoskedasticity
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler

X = newdf[['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors','waterfront', 'view', 'condition', 'grade', 'sqft_basement', 'yr_built','yr_renovated', 'zipcode', 'sqft_living15']].values
y = newdf['price'].values

#first standardise the data
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# Predicted prices for the held-out test rows.
y_pred = regressor.predict(X_test)

# We now create the residual by subtracting the test value from the predicted
# value for each row in our dataset
residuals = np.subtract(y_pred, y_test)

# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="cH4hUuHEzdXU" outputId="605a8614-741b-491e-f9d4-c3206b4f4ac9"
#plot a residual plot (fitted values vs. residuals)
import matplotlib.pyplot as plt
plt.scatter(y_pred, residuals, color='black')
plt.ylabel('residual')
plt.xlabel('fitted values')
plt.axhline(y= residuals.mean(), color='red', linewidth=1)
plt.show()

# + colab={"base_uri": "https://localhost:8080/"} id="I5XLwZOLznWU" outputId="bb389cb7-4019-900d-fc2abf4ea3e2"
import scipy as sp
test_statistic, p_value = sp.stats.bartlett(y_pred, residuals)
print('Test statistic is :',test_statistic)

# To interpret the results we must also compute a critical value of the chi squared distribution
# NOTE(review): deriving the chi-square quantile from (1 - p_value) is
# circular — a conventional test would fix a significance level (e.g. 0.05)
# and compare against chi2.ppf(1 - alpha, dof). Left as-is; flagged for review.
degree_of_freedom = len(y_pred)-1
probability = 1 - p_value
critical_value = sp.stats.chi2.ppf(probability, degree_of_freedom)
print('Critical Value is :',critical_value)

# If the test_statistic is greater than the critical value, then we reject our null
# hypothesis. This would mean that there are patterns to the variance of the data
# Otherwise, we can identify no patterns, and we accept the null hypothesis that
# the variance is homogeneous across our data
if (test_statistic > critical_value):
    print('\nThe variances are unequal, and the model should be reassessed')
else:
    print('\nThe variances are homogeneous!')

# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="dhpKbRekz-2s" outputId="fb99073d-b71d-44d0-e3e7-c5fa395bea61"
#check a simple dataframe to see how the linear regression model performs
my_frame = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
my_frame.head(10)
#a quick look shows the model may be way off the mark

# + [markdown] id="gMTRG2yLLJ8Z"
# **Check the RMSE and R squared for Multivariate Linear Regression**

# + colab={"base_uri": "https://localhost:8080/"} id="FvyagePw0c-U" outputId="a5595eae-4d93-4aec-9920-8a4f29f47e09"
#get the root MSE and R squared value for Multivariate Linear Regression
# FIX: the printed value is np.sqrt(MSE), i.e. the RMSE — the label used to
# say 'MSE', mislabelling the metric the notebook compares models on.
print('RMSE is : ',np.sqrt(mean_squared_error(y_test,y_pred)))
print('R Squared is : ',regressor.score(X,y))

# + [markdown] id="-aOnTMevOvVb"
# The Multivariate Linear Regression has an RMSE less than the average price of the house. Therefore, the model is a moderately good estimator. Also, about 50% of the variances in house prices can be explained by the independent variables. We will investigate how the RMSE changes with other Regression models.
# + [markdown] id="9Mnp3M3JLVT1" # ## Ridge Regression # + id="N5UgMgNz3CCP" #now, build a Ridge Regression Model and measure the accuracy #first get the appropriate alpha value from sklearn.model_selection import GridSearchCV from sklearn.linear_model import Ridge ridge = Ridge(normalize=True) search = GridSearchCV(estimator=ridge,param_grid={'alpha':np.logspace(-5,2,8)},scoring='neg_mean_squared_error',n_jobs=1,refit=True,cv=10) # + colab={"base_uri": "https://localhost:8080/"} id="KIQ_XLK93xcj" outputId="53e4dcf7-d6ac-4982-fd66-4a6de7383578" #find the best alpha value search.fit(X,y) search.best_params_ # + [markdown] id="TxipcjXuLaac" # Now that we have the alpha value as 0.01, lets fit it into our ridge regression model. # + colab={"base_uri": "https://localhost:8080/"} id="-uNqBgTR3-PL" outputId="84aeab37-d69f-4aea-c3c8-6aca09b190c2" #the alpha value is 0.01 ridge_regressor = Ridge(alpha=0.01) ridge_regressor.fit(X_train, y_train) pred_test_rr= ridge_regressor.predict(X_test) print('MSE is : ',np.sqrt(mean_squared_error(y_test,pred_test_rr))) print('R Squared is : ',ridge_regressor.score(X,y)) #there is not much of a change compared to the Linear Regression Metrics. # + [markdown] id="bzWYU9OoPinf" # The ridge regression performed almost the same as the Linear Regression. This can be attributed to the fact that ridge regression does not eliminate some predictor variables and thus some noise may still be present in the model. This ultimately led to a case of high Variance (Overfitting) as the ridge regression model learnt the noise in the data. 
# + [markdown] id="AyYrXKHyLkWV"
# ## Lasso Regression

# + colab={"base_uri": "https://localhost:8080/"} id="VDjTnu9A4w3A" outputId="81c7705a-dce7-41f1-81ca-ea51da2747dc"
#now using Lasso regression to build a model
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.model_selection import GridSearchCV
import warnings
warnings.filterwarnings("ignore")

# NOTE(review): this re-splits X/y with a different seed and test size than
# the earlier 80/20 split, so train/test sets differ between model sections.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=101)
parameters = {'alpha': np.concatenate((np.arange(0.1,2,0.1), np.arange(2, 5, 0.5), np.arange(5, 25, 1)))}
lasso = linear_model.Lasso()
gridlasso = GridSearchCV(lasso, parameters, scoring ='r2')

# Fitting models and print the best parameters, R-squared scores, MSE, and coefficients
gridlasso.fit(X_train, y_train)
print("lasso best parameters:", gridlasso.best_params_)

# + [markdown] id="HSTkk2agLpHP"
# Lets fit the alpha value acquired as 19.0

# + colab={"base_uri": "https://localhost:8080/"} id="j6EOezAA5dCh" outputId="20dfcd3a-e762-405e-804e-feb2e1ae9e8b"
#now that we have an alpha value of 19, lets include it in the model
#and build a lasso Regression Model
gridlasso = Lasso(alpha=19)
gridlasso.fit(X_train, y_train)
pred_test_lasso= gridlasso.predict(X_test)
# FIX: the printed value is np.sqrt(MSE), i.e. the RMSE — the label used to
# say 'MSE', mislabelling the comparison metric.
print('RMSE is : ',np.sqrt(mean_squared_error(y_test,pred_test_lasso)))
print('R Squared is : ',gridlasso.score(X,y))
#the r squared for this model is relatively higher compared to the Ridge and Linear Regressions

# + [markdown] id="bDD5qkL2L1AW"
# Lets check the variables that were not important in predictive modelling. These include the **view, waterfront and year of renovation.**

# + colab={"base_uri": "https://localhost:8080/"} id="MStB29RZfNuU" outputId="ca7d9963-53d1-4f7a-fa45-fc3c712ee0ae"
#now check the lasso coefficients and determine which features were important in the data
gridlasso.coef_
#the view, waterfront and year renovated columns were not important for predictive modelling

# + [markdown] id="Pn_NpJlfQdFi"
# The Lasso regression has by far the best RMSE as compared to the Ridge and Linear Regressions. This could be attributed to the fact that some noise was removed when some variable coefficients were reduced to zero. This means that a bias was introduced and L1 Regularisation applied, leading to only the important predictor variables being used for modelling.
#
# A disadvantage for Lasso regression would have been underfitting, since a higher bias was introduced to the less significant predictor variables

# + [markdown] id="MDMLowjLMEnL"
# ## Elastic Net Regression

# + id="DI1UByNK6nu3"
#now to build an elastic net Regression Model on the data
from sklearn.linear_model import ElasticNet

elastic=ElasticNet(normalize=True)
search=GridSearchCV(estimator=elastic,param_grid={'alpha':np.logspace(-5,2,8),'l1_ratio':[.2,.4,.6,.8]},scoring='neg_mean_squared_error',n_jobs=1,refit=True,cv=10)

# + colab={"base_uri": "https://localhost:8080/"} id="EAOCGEXkZrBv" outputId="c9f92dd6-c15a-4d61-801f-318940ae517e"
#get the optimum alpha and l1_ratio values
search.fit(X,y)
search.best_params_

# + [markdown] id="TKZPRTtOMaVk"
# Now that we have the **alpha and l1_ratio values**, lets fit the elastic net regression model into the train data and predict the prices of houses.
# + colab={"base_uri": "https://localhost:8080/"} id="uCK2UmCbaABm" outputId="b7f781b0-e45c-4341-92bd-110a13db26e4" #fit these values in the net elastic regression model elastic=ElasticNet(normalize=True,alpha=1e-05,l1_ratio=0.8) elastic.fit(X_train,y_train) y_pred = elastic.predict(X_test) net_model = np.sqrt(mean_squared_error(y_test,y_pred)) print('R Squared is : ',elastic.score(X,y)) print('RMSE is : ',net_model) #the r squared is about 50 % #This is a moderate value and more can be done to increase it. # + colab={"base_uri": "https://localhost:8080/"} id="4X33o3PbbwQX" outputId="46782829-e7cb-495f-da1d-278fb7a910f8" #check the coeffcients of the model elastic.coef_ #the view, waterfront and Year of renovation columns did not cotribute much to house prices # + [markdown] id="soPcODzSOKxD" # The elastic net regression performed worse than the Lasso regression This is probably because the effect of some variables were removed from the predictive model. # # The removal of these variables would have led to underfitting just as in Lasso regression and thus reduced our predictive accuracy metrics. 
# + [markdown] id="pFio6arqMvtr"
# ## Quantile Regression
#
# Now lets investigate the effect of house prices on different quantiles across the predictor variables

# + id="UxHsdczBpIli"
# Predictor matrix and target for the quantile-regression section.
x = mydf[['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors','waterfront', 'view', 'condition', 'grade', 'sqft_basement', 'yr_built','yr_renovated', 'zipcode', 'sqft_living15']]
y = mydf['price']

# + colab={"base_uri": "https://localhost:8080/"} id="GhrLglftleUP" outputId="d174ce95-f42f-4084-8337-2abe15826e0f"
import statsmodels.formula.api as smf

# Median (q=0.5) regression of price on a subset of predictors; `mod` is
# reused by every fit_model call below (and by later cells).
mod = smf.quantreg('price ~ bedrooms + bathrooms + sqft_living + sqft_lot + floors + sqft_basement ', data=mydf)
res = mod.fit(q=0.5)
# Then print out the summary of our model
# print(res.summary())

# + colab={"base_uri": "https://localhost:8080/"} id="vuuLU5nmuBxI" outputId="98c30f7d-d4c1-4c43-df56-0a1ad8af45fd"
# Ten evenly spaced quantiles from 0.05 to 0.95.
quantiles = np.arange(.05, .96, .1)

def fit_model(q):
    """Fit `mod` at quantile q; return [q, intercept, sqft_living slope, lower CI, upper CI]."""
    res = mod.fit(q=q)
    return [q, res.params['Intercept'], res.params['sqft_living']] + \
        res.conf_int().loc['sqft_living'].tolist()

models = [fit_model(x) for x in quantiles]
models = pd.DataFrame(models, columns=['q', 'a', 'b', 'lb', 'ub'])

# OLS fit of the same relationship for comparison; `ols` is rebound from the
# fitted results object to a plain dict of its key numbers.
ols = smf.ols('price ~ sqft_living', mydf).fit()
ols_ci = ols.conf_int().loc['sqft_living'].tolist()
ols = dict(a = ols.params['Intercept'], b = ols.params['sqft_living'], lb = ols_ci[0], ub = ols_ci[1])

print(models)
print(ols)
# quantile coefficients for house prices against Square Feet of Living Space

# + colab={"base_uri": "https://localhost:8080/", "height": 426} id="eF8MKxrQvkXD" outputId="761f4c06-70db-4a38-ca6a-4c0ab853747a"
# Grid of sqft_living values for drawing the fitted lines.
# NOTE: `x` and `y` are intentionally reassigned here, shadowing the earlier
# predictor matrix / target — later cells follow the same pattern.
x = np.arange(mydf.sqft_living.min(), mydf.sqft_living.max(), 50)
get_y = lambda a, b: a + b * x

fig, ax = plt.subplots(figsize=(8, 6))
# One dotted grey line per quantile fit.
for i in range(models.shape[0]):
    y = get_y(models.a[i], models.b[i])
    ax.plot(x, y, linestyle='dotted', color='grey')

# The OLS fit in red for contrast, over the raw scatter.
y = get_y(ols['a'], ols['b'])
ax.plot(x, y, color='red', label='OLS')
ax.scatter(mydf.sqft_living, mydf.price, alpha=.2)
ax.set_xlim((400, 4400))
ax.set_ylim((50000,1200000))
legend = ax.legend()
ax.set_xlabel('Square Feet Of Living Space', fontsize=16)
ax.set_ylabel('House Prices ( * 10 ^ 6) ', fontsize=16);
ax.set_title('House prices vs square feet per quantile')
# The dispersion of House prices increases with increase in square feet of living space

# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="BHVCutpayYXV" outputId="8abeec26-5255-4d1f-cd25-548f13c48a27"
# Coefficient-vs-quantile plot: quantile slopes with their confidence band
# (black) against the constant OLS slope and its band (red).
n = models.shape[0]
p1 = plt.plot(models.q, models.b, color='black', label='Quantile Regression')
p2 = plt.plot(models.q, models.ub, linestyle='dotted', color='black')
p3 = plt.plot(models.q, models.lb, linestyle='dotted', color='black')
p4 = plt.plot(models.q, [ols['b']] * n, color='red', label='OLS')
p5 = plt.plot(models.q, [ols['lb']] * n, linestyle='dotted', color='red')
p6 = plt.plot(models.q, [ols['ub']] * n, linestyle='dotted', color='red')
plt.ylabel('Square Feet Of Living space')
plt.xlabel('Quantiles of the conditional house prices distribution')
plt.title(' Plotting effect of Square Feet of Living Space across the house prices distribution.')
plt.legend()
plt.show()
# Most of the quantiles of house price fall outside the OLS line meaning that the effect of square feet is different
# across different quantiles of prices.
# The dotted black lines form 95% point-wise confidence band around 10 quantile regression estimates (solid black line).
# The red lines represent OLS regression results along with their 95% confidence interval.
# + colab={"base_uri": "https://localhost:8080/"} id="h6rK1E7Q0bOD" outputId="f0fdac33-e6d6-4779-d15d-616957a24446"
# Now lets investigate the quantiles of house prices with respect to floors.
quantiles = np.arange(.05, .96, .1)

def fit_model(q):
    """Fit `mod` at quantile q; return [q, intercept, floors slope, lower CI, upper CI]."""
    res = mod.fit(q=q)
    return [q, res.params['Intercept'], res.params['floors']] + \
        res.conf_int().loc['floors'].tolist()

models = [fit_model(x) for x in quantiles]
models = pd.DataFrame(models, columns=['q', 'a', 'b', 'lb', 'ub'])

# Comparable OLS fit; `ols` is rebound to a dict of its key numbers.
ols = smf.ols('price ~ floors', mydf).fit()
ols_ci = ols.conf_int().loc['floors'].tolist()
ols = dict(a = ols.params['Intercept'], b = ols.params['floors'], lb = ols_ci[0], ub = ols_ci[1])

print(models)
print(ols)
# quantile coefficients for house prices against no. of floors

# + colab={"base_uri": "https://localhost:8080/"} id="SeOMO8fF28_m" outputId="71030c82-0d38-4ce6-f753-c8d1bc0b74c4"
# Same analysis for basement square footage; note this overwrites the
# floors results in `models`/`ols` (the floors coefficients were only printed).
quantiles = np.arange(.05, .96, .1)

def fit_model(q):
    """Fit `mod` at quantile q; return [q, intercept, sqft_basement slope, lower CI, upper CI]."""
    res = mod.fit(q=q)
    return [q, res.params['Intercept'], res.params['sqft_basement']] + \
        res.conf_int().loc['sqft_basement'].tolist()

models = [fit_model(x) for x in quantiles]
models = pd.DataFrame(models, columns=['q', 'a', 'b', 'lb', 'ub'])

ols = smf.ols('price ~ sqft_basement', mydf).fit()
ols_ci = ols.conf_int().loc['sqft_basement'].tolist()
ols = dict(a = ols.params['Intercept'], b = ols.params['sqft_basement'], lb = ols_ci[0], ub = ols_ci[1])

print(models)
print(ols)
# coefficients for square feet for basement vs house price per quantile

# + colab={"base_uri": "https://localhost:8080/", "height": 426} id="Y9VifcM6AQEo" outputId="43bff619-8777-4173-b283-bf7bf0a5aa51"
# Fitted quantile lines (grey) and OLS line (red) over the raw scatter,
# mirroring the sqft_living plot above.
x = np.arange(mydf.sqft_basement.min(), mydf.sqft_basement.max(), 50)
get_y = lambda a, b: a + b * x

fig, ax = plt.subplots(figsize=(8, 6))
for i in range(models.shape[0]):
    y = get_y(models.a[i], models.b[i])
    ax.plot(x, y, linestyle='dotted', color='grey')

y = get_y(ols['a'], ols['b'])
ax.plot(x, y, color='red', label='OLS')
ax.scatter(mydf.sqft_basement, mydf.price, alpha=.2)
ax.set_xlim((0, 1500))
ax.set_ylim((50000,1200000))
legend = ax.legend()
ax.set_xlabel('Square Feet Of Basement', fontsize=16)
ax.set_ylabel('House Prices ( * 10 ^ 6) ', fontsize=16);
ax.set_title('House prices vs square feet of Basement per quantile')

# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="C26xKcMyBQcs" outputId="d8c2066c-c1f8-419b-f668-0d639175a0e4"
# Coefficient-vs-quantile plot for sqft_basement (black) vs the OLS slope (red).
n = models.shape[0]
p1 = plt.plot(models.q, models.b, color='black', label='Quantile Regression')
p2 = plt.plot(models.q, models.ub, linestyle='dotted', color='black')
p3 = plt.plot(models.q, models.lb, linestyle='dotted', color='black')
p4 = plt.plot(models.q, [ols['b']] * n, color='red', label='OLS')
p5 = plt.plot(models.q, [ols['lb']] * n, linestyle='dotted', color='red')
p6 = plt.plot(models.q, [ols['ub']] * n, linestyle='dotted', color='red')
plt.ylabel('Square Feet Of Basement')
plt.xlabel('Quantiles of the conditional house prices distribution')
plt.title(' Plotting effect of Square Feet of Basement across the house prices distribution.')
plt.legend()
plt.show()
# All the quantiles of house prices fall outside the OLS regression line.
# From about the 0.5 quantile, the unit price of houses falls with an increment in basement square feet space.

# + [markdown] id="y0jHR8YMM_dj"
# The quantile regression model is important for deeper investigation into the prices at different quantiles since the measure parameter is the medians. However, the disadvantage is that parameters are harder to estimate as compared to a Generalised Linear Model.

# + [markdown] id="UM6F5dlzRWwA"
# ## Recommendations and Challenging the solution
#
#

# + [markdown] id="WCgHMKYDR1DL"
# ### Recommendations
# For this case, the best model to use would be the **Lasso Regression** model since it offered the best Root Mean Squared Error while providing the best R squared value. Also, feature extraction was done and the less important predictor variables were eliminated ensuring there was no overfitting.

# + [markdown] id="ukdtubxFR7Q8"
# ### Challenging the Solution
# A glossary on the data would have certainly helped in discovering what some values meant and in turn enable us to tune the models.
#
# K fold cross validation may have better trained the data as opposed to the train_test_split leading to better metrics.
#
Nelson_Muteti_Week_7_Regression_IP.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### **Project Summary:** # # Telco is concerned about its significant customer attrition rate and has asked Codeup's Data Science team to identify drivers of churn. To do this, we created a machine learning model which predicts future customer churn as accurately as possible. # # **Project Goals:** # - Find drivers for customer churn at Telco # - Construct a ML classification model that accurately predicts customer churn. # # **Using a Random Forest Model, we identified the following predictors of churn:** # - lack of tech support # - month-to-month contract # - non-enrollment in auto-pay # - fiber optic internet # - not subscribing to streaming movies and/or tv # # We have created a csv file, ```churn_probability.csv``` which details the prediction and probability of churn for each Telco customer in our test data. We have also developed several recommendations to reduce future customer churn. # # All files referenced in this presentation are available in the github repository for this project: https://github.com/barbmarques/classification-project-telco. # # # # # # # # ____________________________________________________________________________________________________ # ### ```Planning``` # # **Database Features** # - We began with a dataset containing 24 attributes of 7,043 customers # - A data dictionary is included in the README.md file on the github repository at: https://github.com/barbmarques/classification-project-telco/blob/main/README.md # # # # **Initial Questions:** # - Does the type of internet service (DSL/fiber optic) have an effect customer churn? # - What services (streaming/support) might increase customer retention? 
# # # **The Pipeline:** ```Plan -> Acquire -> Prepare -> Explore -> Model & Evaluate -> Deliver ``` # - Each step in the our process is recorded and staged on a Trello board at: https://trello.com/b/vOXbVcbl # ____________________________________________________________________________________________________ # ### ```Acquiring the Data Set``` # The data was acquired by running the ```get_telco_data()```, a function included in the ```acquire.py``` file found in our github repository. Step-by-step instructions for reproducting our findings are included in the README.md file. # + # Necessary imports import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np from prepare import train_validate_test_split #remove warning boxes import warnings warnings.filterwarnings("ignore") #imports for acquire/prepare/explore import acquire import env import prepare import explore from scipy import stats import graphviz from graphviz import Graph from acquire import get_telco_data # imports for modeling/evaluating from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.tree import export_graphviz from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix import logistic_regression_util from prepare import train_validate_test_split # - # Acquire the data from the telco_churn database on the Codeup data science database server. 
df = get_telco_data()

# Getting to know the data
df.customer_id.nunique()

df.info()

# #### Visualizing Individual Columns

cat_vars = ['churn', 'contract_type', 'internet_service_type',
            'streaming_movies', 'streaming_tv', 'tech_support', 'device_protection', 'online_backup',
            'online_security', 'payment_type', 'gender']
quant_vars = ['monthly_charges', 'tenure']
explore.explore_univariate(df, cat_vars, quant_vars)

# (An expanded cat_vars/quant_vars univariate pass over every column was
# explored here during development; removed as dead commented-out code.)

# ____________________________________________________________________________________________________
# ### Acquire Takeaways - Customer Characteristics:
# - 27% of all customers have churned
# - Gender is split 50/50, so it is not likely a driver of churn
# - 45% of all customers use some sort of automatic payments
# - 55% of all customers are on MTM contracts
# - 44% of all customers have dsl internet
# - 19% of all customers are senior citizens
#
#
# ____________________________________________________________________________________________________
# ### ```Preparing the Data Set```
#
# The function clean_telco(df):
#
# **Handled missing values:**
# - total_charges has 11 missing values
# - these missing values are customers with zero tenure, so it was reasonable to replace NaNs with zero
#
# **Changed data types:**
# - data types of all categorical variables were encoded or converted to bools and then to ints/floats for compatibility with modeling algorithms (except customer_id which is alpha-numeric)
# - total_charges, a continuous variable, was converted to a float64
#
# **Deleted gender column**
# - since visualizations show gender as an even split, it is unlikely to be a driver of churn.
#
# **Deleted duplicate columns:**
# - payment_type, contract_type, internet_service_type
#
# **Renamed columns**
# - renamed several columns to clarify the boolean value
#
# **Engineered features:**
# - streamer: combines steaming content: streaming_tv and/or streaming_movies
# - auto-pay: combines both forms of automatic payment: auto bank draft & automatic credit card charge
# - single-line, multi-line and no_phone_service were combined into phone__service
# - tenure years: represent tenure months in years (tenure/12)
# - four distinct features to reflect partner/dependent relationships
#
# The data was cleaned by running the ```clean_telco()``` function which is found in ```prepare.py``` file found in our github repository. Step-by-step instructions for this process are included in the README.md file.

telco_churn = prepare.clean_telco(df)

telco_churn.head()

telco_churn.shape

telco_churn.info()

# Split into train/validate/test before any modeling.
train, validate, test = train_validate_test_split(telco_churn)

# ____________________________________________________________________________________________________
# ### ```Exploring the Data Set```

# (Commented-out univariate/bivariate exploration passes over every TRAIN
# column were here during development; removed as dead commented-out code.)

# +
# Bivariate look at the candidate churn drivers only.
cat_vars = ['has_auto_pay', 'fiber', 'no_tech_support', 'not_streamer']
quant_vars = ['monthly_charges', 'tenure_months', 'tenure_years']
explore.explore_bivariate(train, 'churn', cat_vars, quant_vars)
# -

# ## Takeaways for Explore Visualizations:
# - Customers not on some type of auto pay churn more
# - Customers on fiber churn more
# - Customers without streaming services churn more
# - Customers without tech support churn more
# _______________________________________________________________________________________________________________________
# ## Hypothesis Testing:
#
# **1. Does whether a customer has tech support affect whether they will churn?**
#
# H*o*: There is no relationship between churn and tech support. -- REJECT
#
# H*a*: There is a dependent relationship between tech support and churn.
#
# #### Chi-Square Test
# - checking for relationship between two categorical variables.

# crosstab for tech support and churn
observed = pd.crosstab(telco_churn['has_churned'], telco_churn['no_tech_support'])
observed

# +
# Set our alpha
alpha = .01

# run chi2 test
chi2, p, degf, expected = stats.chi2_contingency(observed)

print(f'p-value is: {p}')
print()
print(f'chi2 = {chi2}')
print()

# Fix: the decision message now matches the alpha actually used (0.01, not .05).
if p < alpha:
    print("Since p < .01, we reject the null hypothesis.")
else:
    print("We fail to reject the null")
# -

# _______________________________________________________________________________________________________________________
# ## Hypothesis Test #2
#
# **Does having access to streaming content affect whether customers will churn?**
#
# H*o*: There is no relationship between streaming content and churn. -- REJECT
#
# H*a*: There is a dependent relationship between streaming content and churn.
#
# #### Chi-Square Test
# - checking for relationship between two categorical variables.

# crosstab for streaming and churn
observed_2 = pd.crosstab(telco_churn['has_churned'], telco_churn['not_streamer'])
observed_2

# +
# Set our alpha
alpha = .01

# run chi2 test
chi2, p, degf, expected = stats.chi2_contingency(observed_2)

print(f'p-value is: {p}')
print()
print(f'chi2 = {chi2}')
print()

if p < alpha:
    print("Since p < .01, we reject the null hypothesis.")
else:
    print("We fail to reject the null")
# -

# _______________________________________________________________________________________________________________________
# ## Hypothesis Test #3
#
# **Does having auto pay affect whether customers will churn?**
#
# H*o*: There is no relationship between auto-pay and rate of churn.
#
# H*a*: They are dependent.
#
# #### Chi-Square Test
# - checking for relationship between two categorical variables.
#

# crosstab for auto_pay and churn
observed_3 = pd.crosstab(telco_churn['has_churned'], telco_churn['has_auto_pay'])
observed_3

# +
# Set our alpha
alpha = .01

# run chi2 test
chi2, p, degf, expected = stats.chi2_contingency(observed_3)

print(f'p-value is: {p}')
print()
print(f'chi2 = {chi2}')
print()

if p < alpha:
    print("Since p < .01, we reject the null hypothesis.")
else:
    print("We fail to reject the null")
# -

# _______________________________________________________________________________________________________________________
# ### ```Modeling & Evaluating```

# ### Splitting Data for Modeling

# +
# Splitting out target variable from X_train
features = ['no_tech_support', 'has_auto_pay', 'month_to_month', 'streamer', 'fiber']
target = ['has_churned']

# dropped 'has_churned' because it is target variable
# dropped 'customer_id' because it is alpha-numeric
X_train = train.drop(columns=['has_churned', 'total_charges', 'customer_id'], axis=1)
X_validate = validate.drop(columns=['has_churned', 'total_charges', 'customer_id'], axis=1)
# NOTE(review): X_test keeps total_charges and customer_id, unlike the other
# splits; the extra columns are dropped later when building the csv — confirm
# this asymmetry is intentional.
X_test = test.drop(columns=['has_churned'], axis=1)

# Establishing target as y_train -- 1 column only (our target variable)
y_train = train.has_churned
y_validate = validate.has_churned
y_test = test.has_churned
# -

# _______________________________________________________________________________________________________________________
# ### Establishing a Baseline Accuracy

y_train.value_counts()

# +
baseline = pd.DataFrame(y_train)
baseline['baseline'] = 0
# baseline.columns = ['actual','baseline']
# baseline.head()

# +
# let's calculate the accuracy
# positive will be not churned
# (TP + TN) / (TP + TN + FP + FN)
#
# The baseline always predicts "not churned" (the majority class), so compute
# the counts from y_train instead of hard-coding them — this keeps the cell
# correct if the split ever changes.
counts = y_train.value_counts()

# predicting not churned and the customer has not churned
TP = int(counts.get(0, 0))
# predicting not churned and the customer has churned
FP = int(counts.get(1, 0))
# predicting the customer has churned and they have churned
TN = 0
# predicting the customer has churned and they have not churned
FN = 0

base_acc = (TP + TN) / (TP + TN + FP + FN)
print("The baseline accuracy is", round(base_acc * 100, 2), "percent.")
# -

# _______________________________________________________________________________________________________________________
# ## Logistic Regression Models

# #### LR Model 1
#
# - features = ['no_tech_support', 'has_auto_pay','month_to_month','streamer','fiber']
# - target = ['has_churned']
# - Accuracy of Logistic Regression classifier on training set: 77%
# - Baseline = 73%
#
#
#

# +
# Create the logistic regression
logit = LogisticRegression(random_state=123)

# specify the target and features we're using
features = ['no_tech_support', 'has_auto_pay', 'month_to_month', 'not_streamer', 'fiber']
target = ['has_churned']

# Fit a model using only these specified features
logit.fit(X_train[features], y_train)

# Since we .fit on a subset, we .predict on that same subset of features
y_pred = logit.predict(X_train[features])

print("Baseline is", round(base_acc, 2))
print('Accuracy of Logistic Regression classifier on training set: {:.2f}'
      .format(logit.score(X_train[features], y_train)))
# -

# make prediction
y_pred = logit.predict(X_train[features])

# classification report
print(classification_report(y_train, y_pred))

# #### Model 2
#
# - features = ['no_tech_support', 'has_auto_pay','month_to_month','fiber']
# - target = ['has_churned']
# - Accuracy of Logistic Regression classifier on training set: 78%
# - Baseline = 73%
#
#
#

# +
# Create the logistic regression
logit2 = LogisticRegression(random_state=123)

# specify the target and features we're using
features2 = ['no_tech_support', 'has_auto_pay', 'month_to_month', 'fiber']
target = ['has_churned']

# Fit a model using only these specified features
logit2.fit(X_train[features2], y_train)

# Since we .fit on a subset, we .predict on that same subset of features
y_pred2 = logit2.predict(X_train[features2])

print("Baseline is", round(base_acc, 2))
print('Accuracy of Logistic Regression classifier on training set: {:.2f}'
      .format(logit2.score(X_train[features2], y_train)))
# -

# create predictions
y_pred2 = logit2.predict(X_train[features2])

# classification report
print(classification_report(y_train, y_pred2))

# _______________________________________________________________________________________________________________________
# ## Decision Tree

# #### DT Model 1
#
# - features = ['no_tech_support', 'has_auto_pay','month_to_month','not_streamer','fiber']
# - target = ['has_churned']
# - Accuracy of Decision Tree classifier on training set: 77%
# - Baseline = 73%
#

# +
# Generate a blank, decision tree model
clf1 = DecisionTreeClassifier(max_depth=3)

# Specify the features
features = ['no_tech_support', 'has_auto_pay', 'month_to_month', 'not_streamer', 'fiber']

# Train(fit) the model
dt1 = clf1.fit(X_train[features], y_train)
dt1

# +
# Visualize the model so it can explain itself!
# Render the fitted tree to a graphviz diagram for inspection.
dot_data = export_graphviz(clf1, feature_names=features, rounded=True, filled=True, out_file=None)
graph = graphviz.Source(dot_data)
graph.render('churn_decision_tree', view=True)

# +
y_pred3 = clf1.predict(X_train[features])
y_pred3_proba = clf1.predict_proba(X_train[features])

# evaluate metrics
# Fix: report on the decision tree's own predictions (y_pred3) — previously
# this printed the matrix/report for the leftover logistic-regression y_pred.
print('Accuracy of model with all features:', clf1.score(X_train[features], y_train))
print('\nThe confusion matrix:\n', confusion_matrix(y_train, y_pred3))
print('\nClassification report:\n', classification_report(y_train, y_pred3))
# -

# Model score on accuracy:
accuracy = clf1.score(X_train[features], y_train)
accuracy

# _______________________________________________________________________________________________________________________
# ## K-Nearest Neighbor

# #### KNN Model 1
#
# - features = ['no_tech_support', 'has_auto_pay','month_to_month','not_streamer','fiber']
# - target = ['has_churned']
# - Accuracy of KNN on training set: 74%
# - Baseline = 73%
#
#

# making the model
knn = KNeighborsClassifier()

# +
# Specify the features
features = ['no_tech_support', 'has_auto_pay', 'month_to_month', 'not_streamer', 'fiber']

# train/fit the model
knn_fit = knn.fit(X_train[features], y_train)
# -

# evaluating the model
y_pred = knn.predict(X_train[features])

# check model accuracy
accuracy = knn.score(X_train[features], y_train)
print(f'Model accuracy is {accuracy:.3}')

# +
# Classification metrics report
# y_train is the actual labels for the target variable
# y_pred is the predictions that the model makes based off our X features
print(classification_report(y_train, y_pred))
# -

# _______________________________________________________________________________________________________________________
# #### KNN Model 2
# - k = 20
# - features = ['no_tech_support', 'has_auto_pay','month_to_month','not_streamer','fiber']
# - target = ['has_churned']
# - Accuracy of KNN on training set: 74%
# - Baseline = 73%
#
#

# making the model
k = 20
knn_20 = KNeighborsClassifier(n_neighbors=k)

# +
# Specify the features
features20 = ['no_tech_support', 'has_auto_pay', 'month_to_month', 'not_streamer', 'fiber']

# train/fit the model
knn_20_fit = knn_20.fit(X_train[features20], y_train)
# -

# evaluating the model
y_pred_20 = knn_20_fit.predict(X_train[features20])

# check model accuracy
# Fix: print this model's own accuracy (accuracy_20), not KNN model 1's.
accuracy_20 = knn_20_fit.score(X_train[features20], y_train)
print(f'Model accuracy is {accuracy_20:.3}')

# +
# Classification metrics report
# Fix: report on y_pred_20 (this model's predictions), not model 1's y_pred.
print(classification_report(y_train, y_pred_20))
# -

# _______________________________________________________________________________________________________________________
# ### Random Forest Model:
#
# ### RF Model 1
#
# - features = ['no_tech_support', 'has_auto_pay','month_to_month','streamer','fiber']
# - target = ['has_churned']
# - Accuracy of Random Forest Classifier on training set: 78%
# - Baseline = 73%
#
#
#

# +
# Features to be used in the model
features = ['no_tech_support', 'has_auto_pay', 'month_to_month', 'not_streamer', 'fiber']

rf = RandomForestClassifier(bootstrap=True,
                            class_weight=None,
                            criterion='gini',
                            min_samples_leaf=3,
                            n_estimators=100,
                            max_depth=3,
                            random_state=123)
# -

# Fit the model
rf.fit(X_train[features], y_train)

# +
# Feature Importance
print(rf.feature_importances_)

# Make Predictions
y_pred = rf.predict(X_train[features])

# Estimate probability
y_pred_proba = rf.predict_proba(X_train[features])
# -

print(classification_report(y_train, y_pred))

print('Accuracy of random forest classifier on training set: {:.2f}'
      .format(rf.score(X_train[features], y_train)))

print('Confusion Matrix:')
print(confusion_matrix(y_train, y_pred))

# _______________________________________________________________________________________________________________________
# ### Run best performing model on out-of-sample data (Validate)
#
# #### LR Model 1
#
# - features = ['no_tech_support', 'has_auto_pay','month_to_month','streamer','fiber']
# - target = ['has_churned']
# - Accuracy of Logistic Regression Classifier on training set: 77%
# - Accuracy of Logistic Regression Classifier on validate set: 78%
# - Baseline = 73%
#
#
#

# +
# specify the target and features we're using
features = ['no_tech_support', 'has_auto_pay', 'month_to_month', 'not_streamer', 'fiber']

# Make prediction for validate dataset
y_pred_validate = logit.predict(X_validate[features])

print("Model 1: solver = lbfgs, c = 1")
print('Accuracy: {:.2f}'.format(logit.score(X_validate[features], y_validate)))
print(confusion_matrix(y_validate, y_pred_validate))
print(classification_report(y_validate, y_pred_validate))
# -

# _______________________________________________________________________________________________________________________
# ### Random Forest Model:
#
# ### RF Model on Validate Set
#
# - features = ['no_tech_support', 'has_auto_pay','month_to_month','streamer','fiber']
# - target = ['has_churned']
# - Accuracy of Random Forest Classifier on training set: 78%
# - Accuracy of Random Forest Classifier on validate set: 77%
# - Baseline = 73%
#
#
#

# +
# Features to be used in the model
features = ['no_tech_support', 'has_auto_pay', 'month_to_month', 'not_streamer', 'fiber']

rf2 = RandomForestClassifier(bootstrap=True,
                             class_weight=None,
                             criterion='gini',
                             min_samples_leaf=3,
                             n_estimators=100,
                             max_depth=3,
                             random_state=123)
# -

# Fit the model
# NOTE(review): this fits a NEW forest on the validate split rather than
# evaluating the train-fit `rf` out-of-sample (rf.score(X_validate[...], ...));
# as written the "validate" metrics are in-sample for rf2 — confirm intent.
rf2.fit(X_validate[features], y_validate)

# +
# Feature Importance
print(rf2.feature_importances_)

# Make Predictions
y_pred2 = rf2.predict(X_validate[features])

# Estimate probability
y_pred2_proba = rf2.predict_proba(X_validate[features])
# -

print(classification_report(y_validate, y_pred2))

print('Accuracy of random forest classifier on validate set: {:.2f}'
      .format(rf2.score(X_validate[features], y_validate)))

print('Confusion Matrix:')
print(confusion_matrix(y_validate, y_pred2))
# _______________________________________________________________________________________________________________________
# ## TEST DATA SET
#
# ### Random Forest Model on Test Set
#
# - features = ['no_tech_support', 'has_auto_pay','month_to_month','streamer','fiber']
# - target = ['has_churned']
# - Accuracy of Random Forest Classifier on training set: 78%
# - Accuracy of Random Forest Classifier on validate set: 77%
# - **Accuracy of Random Forest Classifier on test set: 79%**
# - Baseline = 73%
#
#
#

# +
# Features to be used in the model
features = ['no_tech_support', 'has_auto_pay', 'month_to_month', 'not_streamer', 'fiber']

rf3 = RandomForestClassifier(bootstrap=True,
                             class_weight=None,
                             criterion='gini',
                             min_samples_leaf=3,
                             n_estimators=100,
                             max_depth=3,
                             random_state=123)
# -

# Fit the model
# NOTE(review): this fits a NEW forest on the test split, so the "test"
# metrics below are in-sample for rf3. Evaluating the train-fit `rf` on
# X_test would be the leakage-free measurement — confirm intent.
rf3.fit(X_test[features], y_test)

# +
# Feature Importance
print(rf3.feature_importances_)

# Make Predictions
y_pred3 = rf3.predict(X_test[features])

# Estimate probability
y_pred3_proba = rf3.predict_proba(X_test[features])
# -

# Fix: this is the TEST section, so report on the test labels/predictions —
# previously it re-printed the validate-set report (y_validate, y_pred2).
print(classification_report(y_test, y_pred3))

# Create new dataframe of test data customer (probability & prediction) to write to csv
test_churn = X_test.copy()
test_churn.drop(columns=['senior_citizen', 'tenure_months', 'paperless_billing', 'monthly_charges', 'tenure_years',
                         'phone_services', 'e_Check', 'sends_check', 'has_auto_pay', 'dsl', 'fiber', 'no_internet',
                         'no_tech_support', 'no_online_security', 'no_online_backup', 'no_device_protection',
                         'not_streamer', 'is_single_no_dep', 'family', 'month_to_month', 'one_year', 'two_year',
                         'total_charges', 'has_partner_no_dep', 'is_single_with_dep'
                         ], inplace=True)
test_churn

# Attach the model's class prediction for each customer.
test_churn['churn_prediction'] = y_pred3
# test_churn

# Class probabilities: column n = P(no churn), y = P(churn).
prob = pd.DataFrame(y_pred3_proba, columns=['n', 'y'])
prob

test_churn.head()

# +
# pd.concat([test_churn, prob], axis=1, ignore_index=True)
# -

test_churn.shape, prob.shape

# Use .values so the probabilities align positionally (prob has a fresh
# RangeIndex while test_churn keeps the original row index).
test_churn['no'] = prob.n.values
test_churn['yes'] = prob.y.values

test_churn

# +
# write test_churn dataframe to .csv file
# -

test_churn.to_csv('churn_probability.csv')

print('Accuracy of random forest classifier on test set: {:.2f}'
      .format(rf3.score(X_test[features], y_test)))

print('Confusion Matrix:')
print(confusion_matrix(y_test, y_pred3))

# _______________________________________________________________________________________________________________________
# ### ```Key Findings & Takeaways```
#
# **My analysis revealed that the following factors are predictors of customer churn:**
# - Customer does not receive technical support
# - Customer does not participate in auto-pay
# - Customer does not subscribe to streaming services
# - Customer is on a month-to-month contract
# - Customer has fiber optic internet
#
# **My recommendations to reduce churn include:**
# - Offering reduced prices on tech support for internet customers.
# - Offer packages for streaming movies and tv
# - Encourage customers to participate in some form of automatic payment (bank draft or credit card).
# - Incentivize 1- and 2-year contracts
#
# **With additional time to work on this project, I will:**
# - Analyze combinations of features that may be driving churn in fiber optic customers.
# - Run predictions involving packages of services or features
# - Run additional models, varying the hyperparameters to see if model performance can be improved.
# - Investigate our pricing structure across various services to identify if our pricing strategy may be suboptimal.
#
# **A .csv file containing a prediction of churn for customers is included for your review.**

# +
# pd.concat([test_churn, prob], axis=1)
# -
Final_Notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Scrape the per-lap results of every rider from a my.raceresult.com live
# results page and write one row per rider (lap times as a list) to CSV.

import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By

# +
# The results table is rendered client-side by JavaScript, so a real browser
# session is required; plain requests would only see an empty page.
driver = webdriver.Firefox()
driver.get('https://my3.raceresult.com/110552/#0_2C3B48')
# Selenium 4 removed the find_element_by_id shortcut; the By locator API
# below works on both Selenium 3 and 4.
table = driver.find_element(By.ID, "divRRPublish")
# -

txt = table.text
print(txt)

driver.close()

# The table text is a flat sequence of lines per rider:
#   <bib number>, <name>, <"N Laps">, then one line per lap, then the next bib.
lines = txt.splitlines()

# Record the index of every line that parses as a bare integer. Both bib
# numbers and stand-alone lap counters match; they are separated below.
numbers = []
for i, line in enumerate(lines):
    try:
        int(line)
        numbers.append((i, line.strip()))
    except ValueError:
        pass

# An integer line is a bib number only when the next integer line is NOT the
# line immediately after it (consecutive integer lines belong to lap data).
bibs = []
for current, later in zip(numbers, numbers[1:]):
    if current[0] + 1 < later[0]:
        bibs.append(current)
# The final integer line has no successor to compare against; it is the last
# rider's bib (a rider may have no laps recorded yet at the end of the page).
bibs.append(numbers[-1])

# Build one record per rider; the lap lines are everything between this
# rider's three header lines and the next rider's bib.
lst = []
for i, pair in enumerate(bibs):
    d = dict()
    d['number'] = pair[1]
    d['name'] = lines[pair[0] + 1]
    d['num_laps'] = lines[pair[0] + 2]
    if i + 1 < len(bibs):
        d['laps'] = lines[pair[0] + 3:bibs[i + 1][0]]
    else:
        # Last rider on the page: take everything to the end.
        d['laps'] = lines[pair[0] + 3:]
    lst.append(d)

df = pd.DataFrame.from_dict(lst)
df.num_laps = df.num_laps.str.replace("Laps", "").astype('int64')

# Explode the list-valued 'laps' column into one row per lap.
# https://stackoverflow.com/a/46983212/9968316
column_to_explode = "laps"
res = (df
       .set_index([x for x in df.columns if x != column_to_explode])[column_to_explode]
       .apply(pd.Series)
       .stack()
       .reset_index())
res = res.rename(columns={
    res.columns[-2]: 'exploded_{}_index'.format(column_to_explode),
    res.columns[-1]: '{}_exploded'.format(column_to_explode)})

# Each lap line is "<lap#> <measurement> <lap split> <rest time>".
tmp = res.laps_exploded.str.split(" ", expand=True).dropna(axis=1)
tmp.head()

res.head()

res['lap'] = tmp[0]
res['measurement'] = tmp[1]
res['lap_split'] = tmp[2]
res['rest_time'] = tmp[3]
res.drop(["exploded_laps_index", "laps_exploded"], inplace=True, axis=1)

res.head()

res.to_csv("../data/2018_laps.csv", index=False)
scripts/get_laps.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # How to make a spectrogram # Spectrograms are important tools in phonetics, and it can be helpful to understand exactly how they are made. This tutorial steps through the key concepts of spectrograms without diving too deeply into the underlying mathematics. # # We'll start by importing some standard scientific libraries, and setting up our notebook to plot our figures inline with the tutorial. # + import numpy as np import scipy.signal import scipy.io.wavfile import matplotlib.pyplot as plt # %matplotlib inline plt.rc('figure', figsize=(16, 4)) # - # ## Load audio data # We'll use `scipy.io.wavfile` to read the audio data. The `read` function returns a tuple containing the sampling frequency first, then an array of the data samples. Note that there are different "flavors" of wavfile that store audio data in different ways; for example, some wavfiles store the amplitude of each sample as an integer number of bits while others store amplitude as a decimal value between `-1.0` and `+1.0`. [The documentation for the `read` function](https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.wavfile.read.html) explains that `scipy` preserves those differences, so here we'll explicitly convert our wavfile sample data to floating-point values between `-1.0` and `1.0`. Side note: `scipy` won't read audio files that use 24 bits per sample to store the data (it *will* read 8-, 16-, or 32-bit audio files). If you have a 24-bit audio file, you can use [the `wavio` module](https://pypi.python.org/pypi/wavio), or you can convert the file's bit depth before loading it with `scipy`. 
# +
sampling_frequency, wav_data = scipy.io.wavfile.read('23-09_NWM02.wav')


def convert_wav_to_float(data):
    """Standardize integer WAV sample data to floats in [-1.0, 1.0).

    ``scipy.io.wavfile.read`` preserves the on-disk sample format: 8-bit PCM
    is *unsigned* (``uint8``, centred on 128), while 16- and 32-bit PCM are
    signed. Data that is already floating point is returned unchanged.
    """
    if data.dtype == np.uint8:
        # 8-bit WAV is unsigned: shift to signed, then scale by 2**7.
        # (The original code checked np.int8 here, a dtype scipy never
        # produces for WAV, so 8-bit files were silently left unscaled.)
        data = (data - 128) / 128.
    elif data.dtype == np.int16:
        data = data / 32768.       # 2**15
    elif data.dtype == np.int32:
        data = data / 2147483648.  # 2**31
    return data


wav_data = convert_wav_to_float(wav_data)
# -

# At this point we'll also create a vector of time points in seconds, so that when we plot the waveform or spectrogram, our x-axis will show time in seconds instead of sample numbers.

n_samples = len(wav_data)
total_duration = n_samples / sampling_frequency
sample_times = np.linspace(0, total_duration, n_samples)

# At this point we can plot the waveform:

plt.plot(sample_times, wav_data, color='k');

# ## What goes into a spectrogram?
# Other than the audio data, we need to specify several other parameters to make a spectrogram. Since a spectrogram is made by analysing the frequencies present in short segments of the audio (sometimes called "analysis frames"), we have to define how long we want those short segments to be. The first choice, then, is to specify the **window length**. If you're familiar with the notion of "narrowband" versus "wideband" spectrograms, window length is the parameter that underlies the difference between them: the longer the window length, the more precisely you can estimate the exact frequencies that are present within that window (and hence, the narrower the frequency bands will be; thus "narrowband" spectrograms have relatively longer window lengths). However, the whole window is analyzed as a chunk, and yields one column of values in the spectrogram, so longer windows means each column is "wider" in time, which means less precise information about *when* certain frequency components were loud or soft.
Below, when we write a spectrogram function, we'll set our default window duration at 5 ms, which is typical for a wideband spectrogram (pretty good temporal resolution, and frequency resolution that blurs individual harmonics, making vocal tract formants easier to see), but we can always override the default by passing in a different value. # # A second aspect of the window that we must specify is its **shape**. The shape determines how each sample within the analysis frame gets weighted (i.e., whether they are all weighted equally, or whether some get more weight than others when performing the fourier analysis). You might think that weighting all samples equally would be the best approach, but in fact that is not the case. Instead, it turns out that tapering off at the beginning and end of the window, and weighting the samples in the middle more strongly, tends to yield a more useful picture of the signal. For speech analysis, the best choice is a [Gaussian window](https://en.wikipedia.org/wiki/Gaussian_function), because (unlike other window shapes) it does not result in substantial [ringing](https://en.wikipedia.org/wiki/Ringing_artifacts) ("ringing" is alternating bands of spurious energy in frequencies above and below the frequencies actually present in the signal). Here's an example of what ringing looks like: a spectrogram of a 250 Hz sine wave made with a Bartlett (triangular) window has intense horizontal stripes indicating ringing (left plot); the same signal analyzed with a Gaussian window does not. 
duration = 4
sine_frequency = 250

# 4 seconds of a 250 Hz sine wave, at (roughly) 1000 samples per second, to
# match the Fs=1000 passed to specgram below.
time = np.linspace(0, duration, 1000 * duration)
sine_wave = np.sin(2 * np.pi * sine_frequency * time)

fig, axs = plt.subplots(1, 2, sharex=True, sharey=True)
kwargs = dict(x=sine_wave, Fs=1000, NFFT=256, noverlap=128, pad_to=512)
# Bartlett is a triangular window; it produces visible "ringing" stripes
# above and below the true 250 Hz component.
axs[0].specgram(window=np.bartlett(256), **kwargs)
# Window functions live in scipy.signal.windows; the old scipy.signal.gaussian
# alias was removed in SciPy 1.13.
axs[1].specgram(window=scipy.signal.windows.gaussian(256, int(256 / 6)), **kwargs)
axs[0].set_ylim(175, 325)
axs[0].set_ylabel('frequency (Hz)')
axs[0].set_xlabel('time')
axs[1].set_xlabel('time')
axs[0].set_title('Triangular window')
axs[1].set_title('Gaussian window');

# If the signal is just a single sine wave like here, the ringing is maybe not such a big deal (we can still tell what the dominant frequency is). However, when lots of frequencies are present at different intensities (like the harmonics in a speech sound), ringing can really obscure what is going on. Since we'll pretty much always want a Gaussian window, we'll hard-code that into our custom spectrogram function; if you ever need a different window shape you can edit the function, or use the function `scipy.signal.spectrogram`.
#
# Another quantity that goes into making a spectrogram is how far apart we want our analysis frames to be, a parameter called the **step size**. Usually, the step size is smaller than the window length, so that there is *overlap* between adjacent windows. Exactly how much overlap to include is a trade-off between computation time (more windows = slower computation) and how much detail we want to see. For a Gaussian window, the step size never needs to be smaller than $\frac{\textrm{window length}}{8 \sqrt{\pi}}$. Making the windows overlap any more than that only adds redundant information, unnecessarily slowing down the computation.
# In our function, we'll default to using that formula to make our spectrograms show as much detail as possible without doing any unnecessary computations, but we'll allow the user to pass in a different step size if they want to.
#
# Finally, we have to decide how to map energy in a particular frequency band and time window into a particular color value. This is usually done by specifying the **dynamic range**, or the ratio between the quietest part that is treated as non-zero, and loudest part. For speech, there is a difference of 100 dB or more between the loudest parts of the recording and the quietest of the higher formants, so we'll set the default at 120 dB. For field recordings with a lot of background noise, you may want to set this value lower, to increase the contrast between the loud parts of the speech and the background noise.

# ## Putting it all together
#
# There are two more parameters to our function we haven't yet discussed. The first is `cmap` which lets you specify a color mapping different than the default grayscale. The second is `ax` which lets you plot the spectrogram on a pre-created `matplotlib.axes.Axes` object (this can be handy if you want to plot the waveform and spectrogram in adjacent subplots, for example). Here's the function, with comments every few lines explaining what we're doing.

def gaussian_spectrogram(x, fs, window_dur=0.005, step_dur=None, dyn_range=120,
                         cmap=None, ax=None):
    """Plot a Gaussian-windowed spectrogram of `x` and return the Axes.

    Parameters
    ----------
    x : array-like
        Audio samples (floats in [-1.0, 1.0]).
    fs : int
        Sampling frequency in Hz.
    window_dur : float
        Analysis window duration in seconds (default 5 ms, "wideband").
    step_dur : float or None
        Time between analysis frames; defaults to the optimal value for
        Gaussian windows, window_dur / (8 * sqrt(pi)).
    dyn_range : float
        Dynamic range in dB between the loudest value shown and the
        quietest value mapped to a non-background color.
    cmap : str, Colormap, or None
        Colormap (defaults to grayscale).
    ax : matplotlib Axes or None
        Existing axes to draw on; a new figure is created otherwise.
    """
    from scipy.signal import spectrogram
    # scipy.signal.gaussian was removed in SciPy 1.13; the window function
    # lives in scipy.signal.windows.
    from scipy.signal.windows import gaussian
    from matplotlib.colors import LogNorm

    # set default for step_dur, if unspecified. This value is optimal for
    # Gaussian windows.
    if step_dur is None:
        step_dur = window_dur / np.sqrt(np.pi) / 8.

    # convert window & step durations from seconds to numbers of samples
    # (which is what scipy.signal.spectrogram takes as input).
    window_nsamp = int(window_dur * fs * 2)
    step_nsamp = int(step_dur * fs)

    # make the window. A Gaussian filter needs a minimum of 6σ - 1 samples,
    # so working backward from window_nsamp we can calculate σ.
    window_sigma = (window_nsamp + 1) / 6
    window = gaussian(window_nsamp, window_sigma)

    # convert step size into number of overlapping samples in adjacent
    # analysis frames
    noverlap = window_nsamp - step_nsamp

    # compute the power spectral density
    freqs, times, power = spectrogram(x, detrend=False, mode='psd', fs=fs,
                                      scaling='density', noverlap=noverlap,
                                      window=window, nperseg=window_nsamp)

    p_ref = 2e-5  # 20 μPa, the standard reference pressure for sound in air

    # set lower bound of colormap (vmin) from dynamic range. The upper bound
    # defaults to the largest value in the spectrogram, so we don't need to
    # set it explicitly.
    dB_max = 10 * np.log10(power.max() / (p_ref ** 2))
    vmin = p_ref * 10 ** ((dB_max - dyn_range) / 10)

    # default to grayscale. imshow accepts either a colormap name or a
    # Colormap object directly, so no get_cmap call is needed
    # (matplotlib.cm.get_cmap was removed in matplotlib 3.9).
    if cmap is None:
        cmap = 'Greys'

    # create the figure if needed
    if ax is None:
        fig, ax = plt.subplots()

    # other arguments to the figure
    extent = (times.min(), times.max(), freqs.min(), freqs.max())

    # plot. When a norm object is supplied, the color limits must be set on
    # the norm itself — newer matplotlib raises an error if norm= and
    # vmin=/vmax= are passed simultaneously.
    ax.imshow(power, origin='lower', aspect='auto', cmap=cmap,
              norm=LogNorm(vmin=vmin), extent=extent)
    return ax

ax = gaussian_spectrogram(wav_data, sampling_frequency)
ax.set_ylim(0, 12000)
ax.set_xlabel('time (s)')
ax.set_ylabel('frequency (Hz)');

# ## Notes
#
# For those used to spectrograms in [Praat](http://www.fon.hum.uva.nl/praat/), note that the calculation here is slightly different. Praat converts power spectral density from units of Pa²/Hz into dB/Hz before displaying a spectrogram, like this:
#
# ```python
# log_psd = 10 * np.log10(power / (p_ref ** 2))
# ```
#
# where `p_ref` is 20 μPa (the standard reference pressure for sound in air).
# Instead, here the original power spectral density is used, along with a logarithmic color mapping (that's what `LogNorm()` does in the call to `ax.imshow()` near the end of the `gaussian_spectrogram` function). This approach seems to make higher-frequency energy more visible compared to Praat's approach (which Praat mitigates by using "pre-emphasis" on the spectrogram to attenuate the lower frequencies). Here's what the difference looks like:

# +
def praat_spectrogram(x, fs, window_dur=0.005, dyn_range=120, ax=None):
    """Praat-style spectrogram: log-transformed PSD with a linear colormap.

    This function is less flexible than `gaussian_spectrogram` (to make it
    shorter, since it's just for demo purposes).
    """
    from scipy.signal import spectrogram
    # scipy.signal.gaussian was removed in SciPy 1.13; use signal.windows.
    from scipy.signal.windows import gaussian

    step_dur = window_dur / np.sqrt(np.pi) / 8.
    window_nsamp = int(window_dur * fs * 2)
    step_nsamp = int(step_dur * fs)
    window_sigma = (window_nsamp + 1) / 6
    window = gaussian(window_nsamp, window_sigma)
    noverlap = window_nsamp - step_nsamp
    freqs, times, power = spectrogram(x, detrend=False, mode='psd', fs=fs,
                                      scaling='density', noverlap=noverlap,
                                      window=window, nperseg=window_nsamp)
    p_ref = 2e-5
    log_psd = 10 * np.log10(power / (p_ref ** 2))  # key difference
    dB_max = log_psd.max()
    # The image shown below is log_psd, whose values are already in dB, so
    # the lower color limit must also be in dB: dyn_range below the maximum.
    # (The original computed vmin in pressure-squared units here — a unit
    # mismatch that effectively clipped all negative-dB values.)
    vmin = dB_max - dyn_range
    if ax is None:
        fig, ax = plt.subplots()
    extent = (times.min(), times.max(), freqs.min(), freqs.max())
    # next line: pass `log_psd` instead of `power`; no `LogNorm`. The
    # colormap name is passed directly (matplotlib.cm.get_cmap was removed
    # in matplotlib 3.9).
    ax.imshow(log_psd, origin='lower', aspect='auto', cmap='Greys',
              extent=extent, vmin=vmin, vmax=None)
    return ax

fig, axs = plt.subplots(1, 2)
ax = gaussian_spectrogram(wav_data, sampling_frequency, ax=axs[0])
ax.set_ylim(0, 12000)
ax.set_xlabel('time (s)')
ax.set_ylabel('frequency (Hz)')
ax.set_title('Power spectral density & logarithmic colormap')
ax = praat_spectrogram(wav_data, sampling_frequency, ax=axs[1])
ax.set_ylim(0, 12000)
ax.set_xlabel('time (s)')
ax.set_ylabel('frequency (Hz)')
ax.set_title('Log power spectral density & linear colormap');
# -

# You can probably see that for most phonetic analysis, either one will probably work fine (the first 4-5 formants are visible in both, and the temporal and spectral resolution is the same). In fact, the spectrogram *objects* in Praat contain the power spectral density numbers; it's only when displaying spectrograms on screen that Praat uses the log-transformed version.
spectrogram.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Welcome to Carnets # **Carnets** is a stand-alone implementation of Jupyter notebooks for iOS. If you already know Jupyter notebooks, you can probably skip this. If you don't know Jupyter notebooks, they are a way to write and execute small bits of code in Python. # For example, here, we define a variable: a = 4 # and make operations on it: a = 48 * a print("The result is: ", a) # You can place bits of text, like this one, written in `Markdown` language between cells of code. # In **Carnets**, you have access to many Python packages, including `numpy`, `pandas` and `matplotlib`, to plot curves and surfaces. # + # %matplotlib inline import numpy as np import matplotlib.pyplot as plt t = np.arange(0, 5, 0.2) plt.plot(t, t, 'r--', t, t**2, 'bs', t, t**3, 'g^') plt.show() # - # On an iPad, there are two toolbars. The toolbar at the top acts on cells. It lets you cut, copy, paste cells. The arrows let you move the selected cell up or down. `Run` executes the selected cell. # ![title](top.png) # The toolbar at the bottom appears when the keyboard is active (when you are in edit mode). It acts on the current cell. You can undo, redo, cut, copy and paste text. `Esc` leaves the cell and goes back to command mode. # ![title](bottom.png) # The arrows let you change the active cell (move to the cell above or below), and the run symbol executes the current cell (on an iPhone, there is only the top toolbar). # Notebooks can contain links to other notebooks, or to web pages. You can navigate back and forward using the blue arrows at the top of the page. If they are not visible, left-edge-swipe takes you back one page, and right-edge-swipe goes forward. 
# # For example, here is the full documentation on [Jupyter notebooks](https://jupyter-notebook.readthedocs.io).
welcome/Welcome to Carnets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Sentiment Analysis with an RNN

# <img src="http://www.polyvista.com/blog/wp-content/uploads/2015/06/sentiment-customer-exp-large.png">
# <p style="font-size:10px;">http://www.polyvista.com/blog/wp-content/uploads/2015/06/sentiment-customer-exp-large.png</p>

# ## What is Sentiment Analysis?
#
# Sentiment Analysis, also known as opinion mining, refers to the identification, extraction and study of sentiment states by using natural language processing, text analysis, computational linguistics and biometrics.

# ## Sentiment Analysis with a Recurrent Neural Network
#
# We will use an RNN for sentiment analysis because we care about the sequence in the data.

# ### Imports

# +
import re

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

from keras.models import Sequential, load_model
from keras.layers import Dense, LSTM, Embedding, Dropout
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# -

# ### Loading in Dataset

data = pd.read_csv('Tweets.csv')
# Shuffle the rows so train/test order doesn't follow the file order.
data = data.sample(frac=1).reset_index(drop=True)
print(data.shape)
data.head()

# Removing all columns except the airline_sentiment and text column.

data = data[['airline_sentiment', 'text']]
data.head()

# ### Data exploration

data['airline_sentiment'].value_counts().sort_index().plot.bar()

data['text'].str.len().plot.hist()

# ### Preprocessing

data['text'] = data['text'].str.replace('@VirginAmerica', '')
data.head()

# Transform text to lowercase. NOTE: the result must be assigned back —
# the original notebook called .apply() without assignment, so the
# lowercasing silently never happened.
data['text'] = data['text'].apply(lambda x: x.lower())

# Strip everything that isn't a letter, digit, or whitespace.
# The original pattern used the range `A-z`, which also matches the
# punctuation characters between 'Z' and 'a' in ASCII ([ \ ] ^ _ `).
data['text'] = data['text'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))
data['text'].head()

# +
tokenizer = Tokenizer(num_words=5000, split=" ")
tokenizer.fit_on_texts(data['text'].values)

X = tokenizer.texts_to_sequences(data['text'].values)
X = pad_sequences(X)  # padding our text vectors so they all have the same length
X[:5]
# -

# ### Creating model

model = Sequential()
model.add(Embedding(5000, 256, input_length=X.shape[1]))
model.add(Dropout(0.3))
model.add(LSTM(256, return_sequences=True, dropout=0.3, recurrent_dropout=0.2))
model.add(LSTM(256, dropout=0.3, recurrent_dropout=0.2))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()

# One-hot encode the labels. pd.get_dummies orders the columns
# alphabetically: negative, neutral, positive.
y = pd.get_dummies(data['airline_sentiment']).values
for i in range(5):
    print(data['airline_sentiment'][i], y[i])

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=0)

# ### Training model

# +
batch_size = 32
epochs = 8

model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=2)
# -

model.save('sentiment_analysis.h5')

# ### Testing model

# +
predictions = model.predict(X_test)
for i in range(5):
    print(data['text'][i], predictions[i], y_test[i])

# +
# Compare predicted class counts against the true class counts.
# Column index 2 = positive, 1 = neutral, 0 = negative (alphabetical order
# from pd.get_dummies above).
pos_count, neu_count, neg_count = 0, 0, 0
real_pos, real_neu, real_neg = 0, 0, 0
for i, prediction in enumerate(predictions):
    if np.argmax(prediction) == 2:
        pos_count += 1
    elif np.argmax(prediction) == 1:
        neu_count += 1
    else:
        neg_count += 1
    if np.argmax(y_test[i]) == 2:
        real_pos += 1
    elif np.argmax(y_test[i]) == 1:
        real_neu += 1
    else:
        real_neg += 1

print('Positive predictions:', pos_count)
print('Neutral predictions:', neu_count)
print('Negative predictions:', neg_count)
print('Real positive:', real_pos)
print('Real neutral:', real_neu)
print('Real negative:', real_neg)
# -

# ## Improvements we could implement
# <ul>
#     <li>Weight classes (because data is skew)</li>
#     <li>Train more epochs</li>
#     <li>Use bigger network</li>
#     <li>Try other word number</li>
# </ul>

# ## Resources
#
# <ul>
#     <li><a href="https://programmingwithgilbert.firebaseapp.com/videos/machine-learning-explained/recurrent-neural-networks-lstm-explained">Recurrent Neural Networks Explained (my own post and video)</a></li>
#     <li><a href="https://en.wikipedia.org/wiki/Sentiment_analysis">Sentiment Analysis (Wikipedia)</a></li>
#     <li><a href="https://www.quora.com/What-is-the-best-way-to-do-sentiment-analysis-with-Python-I%E2%80%99m-looking-for-a-sentiment-analysis-API-that-I-can-add-an-emoticon-dictionary-to-I-have-no-idea-how-to-use-NLTK-Can-anyone-help-me-with-that">What is the best way to do sentiment analysis with Python? (Quora)</a></li>
#     <li><a href="https://www.youtube.com/watch?v=si8zZHkufRY">How to Do Sentiment Analysis (<NAME>)</a></li>
# </ul>
Keras-Tutorials/6. Sentiment Analysis/Sentiment Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # #### New to Plotly? # Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). # <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online). # <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! # #### Version Check # Plotly's python package is updated frequently. Run `pip install plotly --upgrade` to use the latest version. import plotly plotly.__version__ # #### Default Privacy # By default, `plotly.iplot()` and `plotly.plot()` create public graphs (which are free to create). With a [plotly subscription](https://plot.ly/plans) you can easily make charts private or secret via the sharing argument. # #### Public Graphs # + import plotly.plotly as py import plotly.graph_objs as go data = [ go.Scatter( x=[1, 2, 3], y=[1, 3, 1] ) ] py.iplot(data, filename='privacy-public', sharing='public') # - # Below is the URL of this public plot. Anyone can view public plots even if they are not logged into Plotly. Go ahead and try it out: py.plot(data, filename='privacy-public', sharing='public') # ### Private Graphs py.iplot(data, filename='privacy-private', sharing='private') # Below is the URL of the private plot above. Only the owner can view the private plot. 
You won't be able to view this plot, try it out: py.plot(data, filename='privacy-private', sharing='private') # ### Secret Graphs py.iplot(data, filename='privacy-secret', sharing='secret') # Below is the URL of this secret plot. Anyone with the secret link can view this chart. However, it will not appear in the Plotly feed, your profile, or search engines. Go ahead and try it out: py.plot(data, filename='privacy-secret', sharing='secret') # ### Make All Future Plots Private # To make all future plots private, you can update your configuration file to create private plots by default: import plotly plotly.tools.set_config_file(world_readable=False, sharing='private') # ### Make All Existing Plots Private # This example uses [Plotly's REST API](https://api.plot.ly/v2/) import json import requests from requests.auth import HTTPBasicAuth # Define variables, including YOUR [USERNAME and API KEY](https://plot.ly/settings/api) # + username = 'private_plotly' # Replace with YOUR USERNAME api_key = '<KEY>' # Replace with YOUR API KEY auth = HTTPBasicAuth(username, api_key) headers = {'Plotly-Client-Platform': 'python'} page_size = 500 # - # Collect filenames of <b>ALL</b> of your plots and <br>update `world_readable` of each plot with a PATCH request # + def get_pages(username, page_size): url = 'https://api.plot.ly/v2/folders/all?user='+username+'&filetype=plot&page_size='+str(page_size) response = requests.get(url, auth=auth, headers=headers) if response.status_code != 200: return page = json.loads(response.content.decode('utf-8')) yield page while True: resource = page['children']['next'] if not resource: break response = requests.get(resource, auth=auth, headers=headers) if response.status_code != 200: break page = json.loads(response.content.decode('utf-8')) yield page def make_all_plots_private(username, page_size=500): for page in get_pages(username, page_size): for x in range(0, len(page['children']['results'])): fid = page['children']['results'][x]['fid'] 
requests.patch('https://api.plot.ly/v2/files/'+fid, {"world_readable": False}, auth=auth, headers=headers) print('ALL of your plots are now private - visit: https://plot.ly/organize/home to view your private plots!') make_all_plots_private(username) # - # ### Reference help(py.plot) # + from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) # ! pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'privacy.ipynb', 'python/privacy/', 'Privacy', 'How to set the privacy settings of plotly graphs in python. Three examples of different privacy options: public, private and secret.', title = 'Privacy | plotly', name = 'Privacy', language='python', has_thumbnail= True, thumbnail= 'thumbnail/privacy.jpg', display_as='file_settings', order=2, ipynb= '~notebook_demo/97') # -
_posts/python/fundamentals/privacy/privacy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:pytorch] # language: python # name: conda-env-pytorch-py # --- # + from __future__ import print_function, absolute_import import argparse import os.path as osp import numpy as np import sys import torch from torch import nn from torch.backends import cudnn from torch.utils.data import DataLoader import reid from reid import datasets from reid import models from reid.dist_metric import DistanceMetric from reid.trainers import Trainer from reid.evaluators import Evaluator from reid.utils.data import transforms as T from reid.utils.data.preprocessor import Preprocessor from reid.utils.logging import Logger from reid.utils.serialization import load_checkpoint, save_checkpoint # - import os os.getcwd() # + def get_data(name, split_id, data_dir, height, width, batch_size, workers, combine_trainval): root = osp.join(data_dir, name) dataset = datasets.create(name, root, split_id=split_id) normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) train_set = dataset.trainval if combine_trainval else dataset.train num_classes = (dataset.num_trainval_ids if combine_trainval else dataset.num_train_ids) train_transformer = T.Compose([ T.RandomSizedRectCrop(height, width), T.RandomHorizontalFlip(), T.ToTensor(), normalizer, ]) test_transformer = T.Compose([ T.RectScale(height, width), T.ToTensor(), normalizer, ]) train_loader = DataLoader( #images_dir = osp.join(self.root, 'images') Preprocessor(train_set, root=dataset.images_dir, transform=train_transformer), batch_size=batch_size, num_workers=workers, shuffle=True, pin_memory=True, drop_last=True) train_loader_head = DataLoader( Preprocessor(train_set, root="data/viper/images_head", transform=train_transformer), batch_size=batch_size, num_workers=workers, shuffle=True, pin_memory=True, drop_last=True) train_loader_upper 
= DataLoader( Preprocessor(train_set, root="data/viper/images_upper", transform=train_transformer), batch_size=batch_size, num_workers=workers, shuffle=True, pin_memory=True, drop_last=True) train_loader_lower = DataLoader( Preprocessor(train_set, root="data/viper/images_lower", transform=train_transformer), batch_size=batch_size, num_workers=workers, shuffle=True, pin_memory=True, drop_last=True) val_loader = DataLoader( Preprocessor(dataset.val, root=dataset.images_dir, transform=test_transformer), batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True) val_loader_head = DataLoader( Preprocessor(dataset.val, root="data/viper/images_head", transform=test_transformer), batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True) val_loader_upper = DataLoader( Preprocessor(dataset.val, root="data/viper/images_upper", transform=test_transformer), batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True) val_loader_lower = DataLoader( Preprocessor(dataset.val, root="data/viper/images_lower", transform=test_transformer), batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True) test_loader = DataLoader( Preprocessor(list(set(dataset.query) | set(dataset.gallery)), root=dataset.images_dir, transform=test_transformer), batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True) test_loader_head = DataLoader( Preprocessor(list(set(dataset.query) | set(dataset.gallery)), root="data/viper/images_head", transform=test_transformer), batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True) test_loader_upper = DataLoader( Preprocessor(list(set(dataset.query) | set(dataset.gallery)), root="data/viper/images_upper", transform=test_transformer), batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True) test_loader_lower = DataLoader( Preprocessor(list(set(dataset.query) | set(dataset.gallery)), root="data/viper/images_lower", transform=test_transformer), 
batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True) return dataset, num_classes, train_loader, train_loader_head, train_loader_upper, train_loader_lower,\ val_loader, val_loader_head, val_loader_upper, val_loader_lower, test_loader, test_loader_head, \ test_loader_upper, test_loader_lower def main(args): np.random.seed(args.seed) torch.manual_seed(args.seed) cudnn.benchmark = True # Redirect print to both console and log file if not args.evaluate: sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt')) # Create data loaders if args.height is None or args.width is None: args.height, args.width = (144, 56) if args.arch == 'inception' else \ (256, 128) dataset, num_classes, train_loader, train_loader_head, train_loader_upper, train_loader_lower,\ val_loader, val_loader_head, val_loader_upper, val_loader_lower,\ test_loader, test_loader_head, test_loader_upper, test_loader_lower= \ get_data(args.dataset, args.split, args.data_dir, args.height, args.width, args.batch_size, args.workers, args.combine_trainval) # create model1 model2 model3 然后修改optimizer? 
# Create model model = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=num_classes) model_head = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=num_classes) model_upper = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=num_classes) model_lower = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=num_classes) # Load from checkpoint start_epoch = best_top1 = 0 # if args.resume: # checkpoint = load_checkpoint(args.resume) # model.load_state_dict(checkpoint['state_dict']) # start_epoch = checkpoint['epoch'] # best_top1 = checkpoint['best_top1'] # print("=> Start epoch {} best top1 {:.1%}" # .format(start_epoch, best_top1)) model = nn.DataParallel(model).cuda() model_head = nn.DataParallel(model_head).cuda() model_upper = nn.DataParallel(model_upper).cuda() model_lower = nn.DataParallel(model_lower).cuda() # Distance metric metric = DistanceMetric(algorithm=args.dist_metric) # Evaluator evaluator = Evaluator(model, model_head, model_upper, model_lower) # if args.evaluate: # metric.train(model, train_loader) # print("Validation:") # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric) # print("Test:") # evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric) # return # Criterion criterion = nn.CrossEntropyLoss().cuda() # Optimizer if hasattr(model.module, 'base'): base_param_ids = set(map(id, model.module.base.parameters())) new_params = [p for p in model.parameters() if id(p) not in base_param_ids] param_groups = [ {'params': model.module.base.parameters(), 'lr_mult': 0.1}, {'params': new_params, 'lr_mult': 1.0}] else: param_groups = model.parameters() optimizer = torch.optim.SGD(param_groups, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True) optimizer_head = optimizer optimizer_upper = optimizer optimizer_lower = optimizer # Trainer trainer = Trainer(model, 
criterion) trainer_head = Trainer(model_head, criterion) trainer_upper = Trainer(model_upper, criterion) trainer_lower = Trainer(model_lower, criterion) # Schedule learning rate def adjust_lr(epoch): step_size = 60 if args.arch == 'inception' else 40 lr = args.lr * (0.1 ** (epoch // step_size)) for g in optimizer.param_groups: g['lr'] = lr * g.get('lr_mult', 1) # Start training for epoch in range(start_epoch, args.epochs): adjust_lr(epoch) trainer.train(epoch, train_loader, optimizer) trainer_head.train(epoch, train_loader_head, optimizer_head) trainer_upper.train(epoch, train_loader_upper, optimizer_upper) trainer_lower.train(epoch, train_loader_lower, optimizer_lower) if epoch < args.start_save: continue top1 = evaluator.evaluate(val_loader,val_loader_head, val_loader_upper, val_loader_lower, dataset.val, dataset.val) is_best = top1 > best_top1 best_top1 = max(top1, best_top1) save_checkpoint({ 'state_dict': model.module.state_dict(), 'epoch': epoch + 1, 'best_top1': best_top1, }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'), opath=osp.join(args.logs_dir, 'model_best.pth.tar')) save_checkpoint_head({ 'state_dict': model_head.module.state_dict(), 'epoch': epoch + 1, 'best_top1': best_top1, }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint_head.pth.tar'), opath=osp.join(args.logs_dir, 'model_head_best.pth.tar')) save_checkpoint({ 'state_dict': model_upper.module.state_dict(), 'epoch': epoch + 1, 'best_top1': best_top1, }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint_upper.pth.tar'), opath=osp.join(args.logs_dir, 'model_upper_best.pth.tar')) save_checkpoint({ 'state_dict': model_lower.module.state_dict(), 'epoch': epoch + 1, 'best_top1': best_top1, }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint_lower.pth.tar'), opath=osp.join(args.logs_dir, 'model_lower_best.pth.tar')) print('\n * Finished epoch {:3d} top1: {:5.1%} best: {:5.1%}{}\n'. 
format(epoch, top1, best_top1, ' *' if is_best else '')) # Final test print('Test with best model:') checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar')) checkpoint_head = load_checkpoint(osp.join(args.logs_dir, 'model_head_best.pth.tar')) checkpoint_upper = load_checkpoint(osp.join(args.logs_dir, 'model_upper_best.pth.tar')) checkpoint_lower = load_checkpoint(osp.join(args.logs_dir, 'model__lower_best.pth.tar')) model.module.load_state_dict(checkpoint['state_dict']) model_head.module.load_state_dict(checkpoint_head['state_dict']) model_upper.module.load_state_dict(checkpoint_upper['state_dict']) model_lower.module.load_state_dict(checkpoint_lower['state_dict']) metric.train(model, train_loader) metric.train(model_head, train_loader_head) metric.train(model_upper, train_loader_upper) metric.train(model_lower, train_loader_lower) evaluator.evaluate(test_loader, test_loader_head, test_loader_upper, test_loader_lower, dataset.query, dataset.gallery, metric) if __name__ == '__main__': parser = argparse.ArgumentParser(description="Softmax loss classification") # data parser.add_argument('-d', '--dataset', type=str, default='cuhk03', choices=datasets.names()) parser.add_argument('-b', '--batch-size', type=int, default=256) parser.add_argument('-j', '--workers', type=int, default=4) parser.add_argument('--split', type=int, default=0) parser.add_argument('--height', type=int, help="input height, default: 256 for resnet*, " "144 for inception") parser.add_argument('--width', type=int, help="input width, default: 128 for resnet*, " "56 for inception") parser.add_argument('--combine-trainval', action='store_true', help="train and val sets together for training, " "val set alone for validation") # model parser.add_argument('-a', '--arch', type=str, default='resnet50', choices=models.names()) parser.add_argument('--features', type=int, default=128) parser.add_argument('--dropout', type=float, default=0.5) # optimizer parser.add_argument('--lr', 
type=float, default=0.1, help="learning rate of new parameters, for pretrained " "parameters it is 10 times smaller than this") parser.add_argument('--momentum', type=float, default=0.9) parser.add_argument('--weight-decay', type=float, default=5e-4) # training configs parser.add_argument('--resume', type=str, default='', metavar='PATH') parser.add_argument('--evaluate', action='store_true', help="evaluation only") parser.add_argument('--epochs', type=int, default=50) parser.add_argument('--start_save', type=int, default=0, help="start saving checkpoints after specific epoch") parser.add_argument('--seed', type=int, default=1) parser.add_argument('--print-freq', type=int, default=1) # metric learning parser.add_argument('--dist-metric', type=str, default='euclidean', choices=['euclidean', 'kissme']) # misc working_dir = osp.dirname(osp.abspath(__file__)) parser.add_argument('--data-dir', type=str, metavar='PATH', default=osp.join(working_dir, 'data')) parser.add_argument('--logs-dir', type=str, metavar='PATH', default=osp.join(working_dir, 'logs')) main(parser.parse_args())
examples/.ipynb_checkpoints/Untitled-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Lorenz system
# https://en.wikipedia.org/wiki/Lorenz_system

from IPython.display import display
from ipywidgets import interact
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pyodesys.symbolic import SymbolicSys

# %matplotlib notebook


def f(t, s, p):
    """Right-hand side of the Lorenz system.

    ``s`` and ``p`` are dicts keyed by dependent-variable / parameter name
    (``dep_by_name``/``par_by_name`` below); returns the time derivatives
    keyed the same way.
    """
    x, y, z = s['x'], s['y'], s['z']
    sigma, rho, beta = p['sigma'], p['rho'], p['beta']
    return {
        'x': sigma*(y - x),
        'y': x*(rho - z) - y,
        'z': x*y - beta*z
    }


odesystem = SymbolicSys.from_callback(
    f, names='xyz', param_names='sigma rho beta'.split(),
    dep_by_name=True, par_by_name=True, indep_name='t')

y0 = {'x': -10, 'y': -15, 'z': 15}
params = {'rho': 28, 'sigma': 10, 'beta': 8/3.}
integrate_kwargs = dict(integrator='cvode', nsteps=5000)
res = odesystem.integrate(10, y0, params, **integrate_kwargs)

res.plot(names='z')
_ = plt.legend()

fig = plt.figure()
# BUG FIX: `fig.gca(projection='3d')` inside the callback stopped working in
# Matplotlib 3.6 (`gca` no longer accepts keyword arguments). Create the 3-D
# axes once up front and reuse it for every interactive redraw.
ax3d = fig.add_subplot(projection='3d')
stack = []


def integrate_and_plot_3d(rho=28, sigma=10, beta=2.5):
    """Integrate with the given parameters and plot the 3-D trajectory.

    Older trajectories are progressively faded and at most four are kept,
    so repeated slider moves don't clutter the figure.
    """
    xout, yout, info = odesystem.integrate(
        10, y0, dict(sigma=sigma, rho=rho, beta=beta),
        integrator='cvode', nsteps=5000)
    for lines in stack:
        lines[0].set_alpha(0.6*lines[0].get_alpha())
    stack.append(ax3d.plot(*yout.T, alpha=1.0, color='g'))
    if len(stack) > 4:
        lines = stack.pop(0)
        lines[0].remove()
    display(fig)


interact(integrate_and_plot_3d)
examples/lorenz.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ___
#
# <a href='https://www.udemy.com/user/joseportilla/'><img src='../Pierian_Data_Logo.png'/></a>
# ___
# <center><em>Content Copyright by <NAME></em></center>

# # Timing your code
# Sometimes it's important to know how long your code is taking to run, or at
# least to know whether a particular line of code is slowing down your entire
# project. Python has a built-in timing module for exactly this.

# ## Example Function or Script
#
# Below are two functions that produce the same result in different ways.
# Which one is more efficient? Let's time them and find out!


def func_one(n):
    """Return the string forms of 0..n-1 as a list, via a comprehension."""
    return [str(num) for num in range(n)]


func_one(10)


def func_two(n):
    """Return the string forms of 0..n-1 as a list, via map()."""
    return list(map(str, range(n)))


func_two(10)

# ### Timing Start and Stop
#
# A first approach: use the time module and subtract timestamps. Because of
# the module's precision, the code being measured should take **at least**
# 0.1 seconds to complete.

import time

# STEP 1: record the start time
t_start = time.time()
# STEP 2: run the code you want to time
result = func_one(1000000)
# STEP 3: elapsed seconds = now minus start
end_time = time.time() - t_start

end_time

# STEP 1: record the start time
t_start = time.time()
# STEP 2: run the code you want to time
result = func_two(1000000)
# STEP 3: elapsed seconds = now minus start
end_time = time.time() - t_start

end_time

# ### Timeit Module
#
# When two snippets are both fast, the difference from the time.time() method
# may not be enough to tell which is faster. The timeit module handles this
# case: it takes two strings — a statement (stmt) and a setup — runs the setup
# once, executes the stmt some n number of times, and reports the total time.

import timeit

# The setup: anything that must exist beforehand, e.g. function definitions.
setup = '''
def func_one(n):
    return [str(num) for num in range(n)]
'''

stmt = 'func_one(100)'

timeit.timeit(stmt, setup, number=100000)

# Now run func_two the same 100,000 times and compare the elapsed time.

setup2 = '''
def func_two(n):
    return list(map(str,range(n)))
'''

stmt2 = 'func_two(100)'

timeit.timeit(stmt2, setup2, number=100000)

# func_two looks more efficient. For fast functions you can raise the number
# of runs to make the difference clearer.

timeit.timeit(stmt, setup, number=1000000)

timeit.timeit(stmt2, setup2, number=1000000)

# ## Timing your code with Jupyter "magic" method
#
# **NOTE: This method is ONLY available in Jupyter, and the magic command must
# be at the very top of the cell with nothing above it (not even commented
# code).**

# %%timeit
func_one(100)

# %%timeit
func_two(100)

# Great! See the documentation for more information:
# https://docs.python.org/3/library/timeit.html
4-assets/BOOKS/Jupyter-Notebooks/06-Timing_your_code_-_timeit.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.5 64-bit (''CarND-LaneLines-P1'': pipenv)'
#     language: python
#     name: python37564bitcarndlanelinesp1pipenv63f1cd7bb1ec4cae97bb8bb43652e5df
# ---

# +
# Thin display/experimentation wrapper around the `p2` package.
# The actual implementation lives in the `p2` directory; it can also be run
# directly with `python -m p2.main`.
# %reload_ext autoreload
# %autoreload 2
from IPython.core.display import display, HTML

import p2.main as p2
from p2.main import View

# Inputs: every test image, followed by the project video.
inputs = list(p2.test_images_paths())
inputs.append(p2.PROJECT_VIDEO_PATH)
# inputs += [p2.CHALLENGE_VIDEO_PATH]
# inputs += [p2.CHALLENGE2_VIDEO_PATH]

params = p2.default_params()
# A fresh timestamped output filename per run prevents browser caching.
params.out_path_timestamp = True
# params.subclip = (22, 24)

views = [View.FULL]
# views = [View.HISTOGRAM_WINDOWS]

print(inputs, views)

# Render each pipeline result inline in the notebook.
for item in p2.main(inputs=inputs, params=params, views=views):
    item.ipython()
P2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # MolSSI Workshop
#
#
# # Introduction

# +
# Assignment syntax: variable_name = variable_value
deltaH = -541.5  # kJ/mole
deltaS = 10.4  # kJ/(mole K)
temperature = 298  # Kelvin

# Gibbs free energy: dG = dH - T*dS
deltaG = deltaH - temperature*deltaS
# -

print(deltaG)

# Unit conversion: kJ -> J.
deltaG_joules = deltaG*1000
print(deltaG_joules)

# Casting a number to its string form.
deltaG_string = str(deltaG)
type(deltaG_string)

energy_kcal = [1, 2, -3, -4, 5]

# +
# Slicing gives a new list that is a subset of an old list:
# new_list = list_name[start:end]
short_list = energy_kcal[:2]
print(short_list)

slice1 = energy_kcal[1:]
slice2 = energy_kcal[:3]
print('Slice 1 is:', slice1)
print('Slice 2 is:', slice2)

# +
# Collect only the negative energies.
negative_energy = [number for number in energy_kcal if number < 0]

print(negative_energy)
# -
data/introduction.ipynb