code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Capstone # language: python # name: capstone # --- # # Exploring the Discharge Notes Data # + # import core libraries import os from dotenv import load_dotenv, find_dotenv import numpy as np import pandas as pd # import NLP pre-processing libraries from nltk import word_tokenize from nltk.corpus import stopwords import string # import visualization libraries import matplotlib.pyplot as plt import seaborn as sns from sklearn.feature_extraction.text import CountVectorizer from yellowbrick.text import FreqDistVisualizer from sklearn.manifold import TSNE # set style sns.set_style('dark') plt.style.use('ggplot') # %matplotlib inline # %load_ext watermark PROJ_ROOT = os.path.join(os.pardir) # - # %watermark -a "<NAME>" -d -t -v -p numpy,pandas,matplotlib,seaborn,nltk,sklearn # load the data train_df = pd.read_csv(os.path.join(PROJ_ROOT, 'data', 'processed','clinic_train_data.csv'), index_col=0) train_df.head() train_df.info() # ### How imbalanced is the dataset? 
print(train_df.readmission.value_counts()) # + _ = train_df.readmission.value_counts().plot(kind='bar', title='Readmission Class Imbalance') _ = plt.xticks([0,1],['Not Readmitted','Readmitted'], rotation=0) pct0 = round(len(train_df[train_df.readmission == 0]) / len(train_df.readmission) * 100, 2) pct1 = round(len(train_df[train_df.readmission == 1]) / len(train_df.readmission) * 100, 2) _ = plt.text(-0.12,len(train_df[train_df.readmission == 0])*0.9, str(pct0)+'%', color='w', fontsize=14, fontweight='bold') _ = plt.text(0.89,len(train_df[train_df.readmission == 1])*1.1, str(pct1)+'%', color='tomato', fontsize=14, fontweight='bold') plt.savefig('../reports/figures/class_imb_pct.png') # - # ## Explore Vectorized Text def clean_and_tokenize(text): ''' tokenize the text by replacing punctuation and numbers with spaces and lowercase all words ''' punc_list = string.punctuation + string.digits t = str.maketrans(dict.fromkeys(punc_list, " ")) text = text.lower().translate(t) tokens = word_tokenize(text) return tokens # + # instantiate Count Vectorizer vect = CountVectorizer(max_features=3000, tokenizer=clean_and_tokenize, stop_words='english') # fit training data to vectorizer vect_fit = vect.fit_transform(train_df.text.values) # + # get the feature names features = vect.get_feature_names() # plot word distributions visualizer = FreqDistVisualizer(features=features, size=(800,800)) visualizer.fit(vect_fit) visualizer.poof(outpath=os.path.join(PROJ_ROOT, 'reports', 'figures','word_freq_countvec.png')) # + # visualize word frequency of patients not readmitted vect = CountVectorizer(max_features=3000, tokenizer=clean_and_tokenize, stop_words='english') no_readmin_vect = vect.fit_transform(text for text in train_df[train_df.readmission == 0].text.values) no_readmin_features = vect.get_feature_names() no_readmin_viz = FreqDistVisualizer(features=no_readmin_features, size=(800,800)) no_readmin_viz.fit(no_readmin_vect) no_readmin_viz.poof(outpath=os.path.join(PROJ_ROOT, 
'reports', 'figures','word_freq_no_readmin.png')) # + # visualize word frequency of patients readmitted vect = CountVectorizer(max_features=3000, tokenizer=clean_and_tokenize, stop_words='english') no_readmin_vect = vect.fit_transform(text for text in train_df[train_df.readmission == 1].text.values) no_readmin_features = vect.get_feature_names() no_readmin_viz = FreqDistVisualizer(features=no_readmin_features, size=(800,800)) no_readmin_viz.fit(no_readmin_vect) no_readmin_viz.poof(outpath=os.path.join(PROJ_ROOT, 'reports', 'figures','word_freq_readmin.png')) # + from yellowbrick.text import TSNEVisualizer # tSNE plot tsne = TSNEVisualizer(colors=['seagreen','coral'], size=(800,800)) tsne.fit(vect_fit, train_df.readmission, labels=['Not Readmitted','Readmitted']) tsne.poof(outpath=os.path.join(PROJ_ROOT, 'reports', 'figures','tsne_countvec.png')) # + # plot distribution of days between admission days_between = train_df[train_df.days_between_admit.notnull()]['days_between_admit'].values first_100_days = train_df[(train_df.days_between_admit >=0) & (train_df.days_between_admit <= 100)]['days_between_admit'].values fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16,5)) sns.distplot(days_between, ax=ax[0]) #train_df.days_between_admit.hist(bins=20, ax=ax[0]) ax[0].set_title('Distribution of Days Between Hospital Admissions') ax[0].set_xlabel('Days Between Admission') ax[0].set_ylabel('Count') ax[0].set_xlim([0,4700]) sns.distplot(first_100_days, ax=ax[1]) #train_df.days_between_admit.hist(bins=10, ax=ax[1]) ax[1].set_title('Distribution of Days Between Hospital Admissions (0-100 days)') ax[1].set_xlabel('Days Between Admission') ax[1].set_ylabel('Count') ax[1].set_xlim([0,100]) plt.savefig(os.path.join(PROJ_ROOT, 'reports', 'figures','days_between_distro.png')) # -
notebooks/1.1-TheeChris-DataExploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:bua] * # language: python # name: conda-env-bua-py # --- # + import os import io import tqdm import detectron2 # import some common detectron2 utilities from detectron2.engine import DefaultPredictor from detectron2.config import get_cfg from detectron2.utils.visualizer import Visualizer from detectron2.data import MetadataCatalog # import some common libraries import numpy as np import cv2 import torch # Show the image in ipynb from IPython.display import clear_output, Image, display import PIL.Image def showarray(a, fmt='jpeg'): a = np.uint8(np.clip(a, 0, 255)) f = io.BytesIO() PIL.Image.fromarray(a).save(f, fmt) display(Image(data=f.getvalue())) # + # Load VG Classes data_path = '/scratch/gobi1/johnchen/new_git_stuff/py-bottom-up-attention/data' vg_classes = [] with open(os.path.join(data_path, 'objects_vocab.txt')) as f: for object in f.readlines(): vg_classes.append(object.split(',')[0].lower().strip()) vg_attrs = [] with open(os.path.join(data_path, 'attributes_vocab.txt')) as f: for object in f.readlines(): vg_attrs.append(object.split(',')[0].lower().strip()) MetadataCatalog.get("vg").thing_classes = vg_classes MetadataCatalog.get("vg").attr_classes = vg_attrs # - cfg = get_cfg() cfg.merge_from_file("../configs/VG-Detection/faster_rcnn_R_101_C4_attr_caffemaxpool.yaml") cfg.MODEL.RPN.POST_NMS_TOPK_TEST = 300 cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.6 cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.2 # VG Weight cfg.MODEL.WEIGHTS = "http://nlp.cs.unc.edu/models/faster_rcnn_from_caffe_attr.pkl" predictor = DefaultPredictor(cfg) import pandas as pd from collections import defaultdict # + NUM_OBJECTS = 36 from detectron2.modeling.postprocessing import detector_postprocess from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs, 
fast_rcnn_inference_single_image def doit(raw_image): with torch.no_grad(): raw_height, raw_width = raw_image.shape[:2] print("Original image size: ", (raw_height, raw_width)) # Preprocessing image = predictor.transform_gen.get_transform(raw_image).apply_image(raw_image) print("Transformed image size: ", image.shape[:2]) image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) inputs = [{"image": image, "height": raw_height, "width": raw_width}] images = predictor.model.preprocess_image(inputs) # Run Backbone Res1-Res4 features = predictor.model.backbone(images.tensor) # Generate proposals with RPN proposals, _ = predictor.model.proposal_generator(images, features, None) proposal = proposals[0] print('Proposal Boxes size:', proposal.proposal_boxes.tensor.shape) # Run RoI head for each proposal (RoI Pooling + Res5) proposal_boxes = [x.proposal_boxes for x in proposals] features = [features[f] for f in predictor.model.roi_heads.in_features] box_features = predictor.model.roi_heads._shared_roi_transform( features, proposal_boxes ) feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1 print('Pooled features size:', feature_pooled.shape) # Predict classes and boxes for each proposal. pred_class_logits, pred_attr_logits, pred_proposal_deltas = predictor.model.roi_heads.box_predictor(feature_pooled) outputs = FastRCNNOutputs( predictor.model.roi_heads.box2box_transform, pred_class_logits, pred_proposal_deltas, proposals, predictor.model.roi_heads.smooth_l1_beta, ) probs = outputs.predict_probs()[0] boxes = outputs.predict_boxes()[0] attr_prob = pred_attr_logits[..., :-1].softmax(-1) max_attr_prob, max_attr_label = attr_prob.max(-1) # Note: BUTD uses raw RoI predictions, # we use the predicted boxes instead. 
# boxes = proposal_boxes[0].tensor # NMS for nms_thresh in np.arange(0.5, 1.0, 0.1): instances, ids = fast_rcnn_inference_single_image( boxes, probs, image.shape[1:], score_thresh=0.2, nms_thresh=nms_thresh, topk_per_image=NUM_OBJECTS ) if len(ids) == NUM_OBJECTS: break instances = detector_postprocess(instances, raw_height, raw_width) roi_features = feature_pooled[ids].detach() max_attr_prob = max_attr_prob[ids].detach() max_attr_label = max_attr_label[ids].detach() instances.attr_scores = max_attr_prob instances.attr_classes = max_attr_label print(instances) return instances, roi_features # all_features.append((instances,features)) # instances, features = doit(all_imgs[-1]) # print(instances.pred_boxes) # print(instances.scores) # print(instances.pred_classes) # print(instances.attr_classes) # print(instances.attr_scores) # + import os from tqdm.auto import tqdm images_path = "/scratch/gobi1/johnchen/new_git_stuff/lxmert/data/medvqa/VQA-Med-2020-Task1-VQAnswering-TrainVal-Sets/VQAMed2020-VQAnswering-TrainingSet/VQAnswering_2020_Train_images" LIMIT_EXAMPLES = 10000 df = defaultdict(list) def process_images(): for root,dirs,files in os.walk(images_path): for i,file in enumerate(tqdm(files)): if i >LIMIT_EXAMPLES: break im = cv2.imread(os.path.join(root,file)) im_rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) instances, features = doit(im_rgb) # print("juck fupyter") # print(instances) # print(vars(instances)) # print(instances.image_size[0]) # print(instances.attr_scores.shape) df["img_id"].append(file) df["img_h"].append(instances.image_size[0]) df["img_w"].append(instances.image_size[1]) df["num_boxes"].append(len(instances.pred_classes)) df["objects_id"].append(instances.pred_classes.cpu().numpy()) df["objects_conf"].append( instances.scores.cpu().numpy()) df["attrs_id"].append( instances.attr_classes.cpu().numpy()) df["attrs_scores"].append( instances.attr_scores.cpu().numpy()) df["boxes"].append( instances.pred_boxes.tensor.cpu().numpy()) df["features"].append( 
features.cpu().numpy()) all_features = pd.DataFrame(df) all_features.to_csv("test_{}.csv".format(LIMIT_EXAMPLES), ) # all_imgs.append(im_rgb) # showarray(im_rgb) process_images() # im = cv2.imread("/scratch/gobi1/johnchen/new_git_stuff/py-bottom-up-attention/data/synpic593.jpg") # im_rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) # showarray(all_imgs[-1]) # - pd.read_csv("test_small.csv") instances.attr_classes (instances.pred_boxes) instances.pred_boxes.tensor instances.pred_boxes.tensor.cpu().numpy().shape instances.attr_scores.cpu().numpy().shape import pandas as pd from collections import defaultdict pd.DataFrame(df) # Show the boxes, labels, and features pred = instances.to('cpu') v = Visualizer(im[:, :, :], MetadataCatalog.get("vg"), scale=1.2) v = v.draw_instance_predictions(pred) showarray(v.get_image()[:, :, ::-1]) print('instances:\n', instances) print() print('boxes:\n', instances.pred_boxes) print() print('Shape of features:\n', features.shape) # Verify the correspondence of RoI features pred_class_logits, pred_attr_logits, pred_proposal_deltas = predictor.model.roi_heads.box_predictor(features) pred_class_probs = torch.nn.functional.softmax(pred_class_logits, -1)[:, :-1] max_probs, max_classes = pred_class_probs.max(-1) print("%d objects are different, it is because the classes-aware NMS process" % (NUM_OBJECTS - torch.eq(instances.pred_classes, max_classes).sum().item())) print("The total difference of score is %0.4f" % (instances.scores - max_probs).abs().sum().item())
demo/demo_feature_extraction_attr.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ### 1. Write an SQL query to print the first three characters of FIRST_NAME from Worker table. # Answer: # # `SELECT SUBSTRING(FIRST_NAME,1,3) FROM worker;` # ### 2. Write an SQL query to find the position of the alphabet (‘a’) in the first name column ‘Amitabh’ from Worker table. # Answer: # # `SELECT POSITION("a" IN first_name) FROM worker WHERE first_name = "Amitabh";` # ### 3. Write an SQL query to print the name of employees having the highest salary in each department. # Answer: # # ```SELECT w.department, first_name from worker as w, (SELECT DEPARTMENT, MAX(Salary) as salary FROM worker GROUP BY DEPARTMENT) as s # where w.department = s.department and w.salary = s.salary;``` # ##### select * from worker order by salary desc: # # 4 Amitabh Singh 500000 2014-02-20 09:00:00 Admin<br/> # 5 Vivek Bhati 500000 2014-06-11 09:00:00 Admin<br/> # 3 Vishal Singhal 300000 2014-02-20 09:00:00 HR<br/> # 6 Vipul Diwan 200000 2014-06-11 09:00:00 Account<br/> # 1 Monika Arora 100000 2014-02-20 09:00:00 HR<br/> # 8 Geetika Chauhan 90000 2014-04-11 09:00:00 Admin<br/> # 2 Niharika Verma 80000 2014-06-11 09:00:00 Admin<br/> # 7 Satish Kumar 75000 2014-01-20 09:00:00 Account<br/> # # ##### Output of answer given above: # # HR Vishal<br/> # Admin Amitabh<br/> # Admin Vivek<br/> # Account Vipul<br/> # # *NOTE: both Amitabh and Vivek have 500000 salary which is highest in Admin department
assignments/SQL/SQL_Assignment_2.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.3.8 # language: julia # name: julia-0.3 # --- # # Abaqus umat interface # Author(s): <NAME> # **Abstract:** making the initial version to call Abaqus umat run(`wget http://www.eng.ox.ac.uk/NP/ICP/plasticity_imp/code_imp.f`) run(`head -30 code_imp.f`) # ## some implicit type castings f = open("ABA_PARAM.INC","w") write(f," implicit real*8(a-h,o-z)\n") write(f," parameter (nprecd=2)\n") close(f) # ## Let's compile the shared library run(`gfortran -shared -fPIC -o libumat.so code_imp.f`) # ## Some Abaqus umat interface definitions # # |variable | explanation | # |--------------------|------------------------------------------| # |DDSDDE(NTENS,NTENS) | Jacobian matrix of the constitutive model| # |STRESS(NTENS) | the stress tensor (in vector format) | # |STATEV(NSTATV) | An array containing the solution-dependent state variables. | # |SSE | Specific elastic strain energy | # |SPD | plastic dissipation | # |SCD | “creep” dissipation | # |RPL | Volumetric heat generation per unit time | # |DDSDDT(NTENS) | Variation of the stress increments with respect to the temperature. | # |DRPLDE(NTENS) | Variation of RPL with respect to the strain increments.| # |DRPLDT | Variation of RPL with respect to the temperature. | # |RPL | RPL is used to indicate whether or not a cohesive element is open to the tangential flow of pore fluid.| # |PNEWDT | Ratio of suggested new time increment to the time increment being used | # |STRAN(NTENS) | An array containing the total strains at the beginning of the increment. | # |DSTRAN(NTENS) | Array of strain increments. | # |TIME(1) | Value of step time at the beginning of the current increment. | # |TIME(2) | Value of total time at the beginning of the current increment. 
| # |DTIME | Time increment.| # |TEMP | Temperature at the start of the increment. | # |DTEMP | Increment of temperature. | # |PREDEF | Array of interpolated values of predefined field variables at this point at the start of the increment, based on the values read in at the nodes.| # |DPRED | Array of increments of predefined field variables. | # |CMNAME | User-defined material name, left justified. | # |NDI | Number of direct stress components at this point. | # |NSHR | Number of engineering shear stress components at this point. | # |NTENS | Size of the stress or strain component array (NDI + NSHR). | # |NSTATV | Number of solution-dependent state variables that are associated with this material type | # |PROPS(NPROPS) | User-specified array of material constants associated with this user material. | # |NPROPS | User-defined number of material constants associated with this user material. | # |COORDS | An array containing the coordinates of this point. | # |DROT(3,3) | Rotation increment matrix. | # |CELENT | Characteristic element length | # |DFGRD0(3,3) | Array containing the deformation gradient at the beginning of the increment. | # |DFGRD1(3,3) | Array containing the deformation gradient at the end of the increment. | # |NOEL | Element number. | # |NPT | Integration point number. | # |LAYER | Layer number (for composite shells and layered solids). | # |KSPT | Section point number within the current layer. | # |KSTEP | Step number. | # |KINC | Increment number. | STRESS = [0. 0. 0. 0.] p = 0. # EFFECTIVE PLASTIC STRAIN r = 0. # ISOTROPIC HARDENING VARIABLE STATEV = [p r] NTENS = 4 DDSDDE = zeros(NTENS,NTENS) SSE = {} # Not used in this example SPD = {} # Not used in this example SCD = {} # Not used in this example RPL = {} # Not used in this example DDSDDT = {} # Not used in this example DRPLDE = {} # Not used in this example DRPLDT = {} # Not used in this example STRAN = [0. 0. 0. 0.] DSTRAN = [0. 0. 0. 0.] TIME = [0. 
0.1] # CHECK TIME(2) DTIME = {} # Not used in this example TEMP = {} # Not used in this example DTEMP = {} # Not used in this example PREDEF = {} # Not used in this example DPRED = {} # Not used in this example CMNAME = {} # Not used in this example CHARACTER*80 CMNAME NDI = {} # Not used in this example NSHR = {} # Not used in this example #NTENS correct place NSTATV = length(STATEV) PROPS = {} # Not used in this example NPROPS = {} # Not used in this example COORDS = {} # Not used in this example DROT = {} # Not used in this example PNEWDT = {} # Not used in this example EXPLANATION MISSING CELENT = {} # Not used in this example DFGRD0 = {} # Not used in this example DFGRD1 = {} # Not used in this example NOEL = {} # Not used in this example NPT = {} # Not used in this example LAYER = {} # Not used in this example KSPT = {} # Not used in this example KSTEP = {} # Not used in this example KINC = {} # Not used in this example # ## Finally the ccall of the umat ccall((:umat_, "./libumat"), Int64, (Ptr{Float64},Ptr{Float64},Ptr{Float64},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void}, Ptr{Float64},Ptr{Float64},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void}, Ptr{Int64},Ptr{Int64},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void}, Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void}), STRESS,STATEV,DDSDDE,SSE,SPD,SCD,RPL,DDSDDT,DRPLDE,DRPLDT, STRAN,DSTRAN,TIME,DTIME,TEMP,DTEMP,PREDEF,DPRED,CMNAME,NDI,NSHR, &NTENS,&NSTATV,PROPS,NPROPS,COORDS,DROT,PNEWDT,CELENT,DFGRD0,DFGRD1, NOEL,NPT,LAYER,KSPT,KSTEP,KINC) # ## Something happened DDSDDE STATEV # ## Let's wrap this to simplified Julia function for testing function isotropichardening!(stress,p,r,jacobian,strain,DSTRAN) local STRESS = stress local STATEV = [p r] local DDSDDE = jacobian local STRAN = strain o = ccall((:umat_, "./libumat"), Int64, 
(Ptr{Float64},Ptr{Float64},Ptr{Float64},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void}, Ptr{Float64},Ptr{Float64},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void}, Ptr{Void},Ptr{Int64},Ptr{Int64},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void}, Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void}), STRESS,STATEV,DDSDDE,SSE,SPD,SCD,RPL,DDSDDT,DRPLDE,DRPLDT, STRAN,DSTRAN,TIME,DTIME,TEMP,DTEMP,PREDEF,DPRED,CMNAME,NDI, NSHR,&NTENS,&NSTATV,PROPS,NPROPS,COORDS,DROT,PNEWDT,CELENT,DFGRD0,DFGRD1, NOEL,NPT,LAYER,KSPT,KSTEP,KINC) stress = STRESS p = STATEV[1] r = STATEV[2] jacobian = DDSDDE strain = STRAN + DSTRAN if o != 0 throw("UMAT failed. Return code is $o") end return o end # ## Now somebody should know how to use this function (help needed) p = 0.0 r = 0.0 S = [0. 0. 0. 0.] strain = [0. 0. 0. 0.] for i in range(0,0.0001,11) DSTRAN = [i 0. 0. 0.] out = isotropichardening!(S,p,r,DDSDDE,strain,DSTRAN) println(out,S,p,r, DSTRAN) #println(i) end # ## Let's find out how many different funtions & subroutines are called fil = open("code_imp.f") sub_list = Set{ASCIIString}() for line in readlines(fil) # Fortran comment = something non whitespce at the firts character if ismatch(r"^[^\s]",line) #println(line) continue end if ismatch(r"call",lowercase(line)) call = split(lowercase(line))[2] #divede by white space sub = split(call,"(")[1] #divide by "(" #println(sub) push!(sub_list,sub) end end close(fil) sub_list # ## Next let's find out how many functions and subroutines are defined fil = open("code_imp.f") fun_list = Set{ASCIIString}() for line in readlines(fil) if ismatch(r"^[^\s]",line) #println(line) continue end comp = lowercase(line) if ismatch(r"subroutine",comp) || ismatch(r"funtion",comp) || ismatch(r"external",comp) #println(line) call = split(lowercase(line),"(")[1] #divede by white space sub = split(call)[end] #divide by "(" #if length(sub) > 1 # sub = sub[2] #end 
#println(sub) push!(fun_list,sub) end end close(fil) fun_list # ## And Finally are all called subroutines defined setdiff(sub_list,fun_list)
docs/tutorials/2015-07-24-Abaqus-umat-interface.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} tags=[] # # Neural Estimation via DV bound # - # $\def\abs#1{\left\lvert #1 \right\rvert} # \def\Set#1{\left\{ #1 \right\}} # \def\mc#1{\mathcal{#1}} # \def\M#1{\boldsymbol{#1}} # \def\R#1{\mathsf{#1}} # \def\RM#1{\boldsymbol{\mathsf{#1}}} # \def\op#1{\operatorname{#1}} # \def\E{\op{E}} # \def\d{\mathrm{\mathstrut d}}$ # Estimating MI well neither requires nor implies the divergence/density to be estimated well. However, # - MI estimation is often not the end goal, but an objective to train a neural network to return the divergence/density. # - The features/representations learned by the neural network may be applicable to different downstream inference tasks. # + [markdown] tags=[] # ## Neural estimation of KL divergence # - # To explain the idea of neural estimation, consider the following characterization of divergence: # --- # # **Proposition** # :label: DV1 # # $$ # \begin{align} # D(P_{\R{Z}}\|P_{\R{Z}'}) & = \sup_{Q\in \mc{P}(\mc{Z})} E \left[ \log \frac{dQ(\R{Z})}{dP_{\R{Z}'}(\R{Z})} \right] # \end{align} # $$ (D1) # # where the unique optimal solution is $Q=P_{\R{Z}}$. # # --- # {eq}`D1` is {eq}`D` but with $P_{\R{Z}}$ replaced by a parameter $Q$. # # - The proposition essentially gives a tight lower bound on KL divergence. # - The unknown distribution is recovered as the optimal solution. 
# --- # # **Proof** # # To prove {eq}`D1`, # # $$ # \begin{align*} # D(P_{\R{Z}}\|P_{\R{Z}'}) &= D(P_{\R{Z}}\|P_{\R{Z}'}) - \inf_{Q\in \mc{P}(\mc{Z})} \underbrace{D(P_{\R{Z}}\|Q)}_{\geq 0 \text{ with equality iff } Q=P_{\R{Z}}\kern-3em} \\ # &= \sup_{Q\in \mc{P}(\mc{Z})} \underbrace{D(P_{\R{Z}}\|P_{\R{Z}'})}_{=E \left[\log \frac{dP_{\R{Z}}(\R{Z})}{dP_{\R{Z}'}(\R{Z})}\right]} - \underbrace{D(P_{\R{Z}}\|Q)}_{=E \left[\log \frac{dP_{\R{Z}}(\R{Z})}{dQ(\R{Z})}\right]}\\ # &= \sup_{Q\in \mc{P}(\mc{Z})} E \left[\log \frac{dQ(\R{Z})}{dP_{\R{Z}'}(\R{Z})}\right] # \end{align*} # $$ # # --- # The idea of neural estimation is to # # - estimate the expectation in {eq}`D1` by the sample average # # $$ # \frac1n \sum_{i\in [n]} \log \underbrace{\frac{dQ(\R{Z}_i)}{dP_{\R{Z}'}(\R{Z}_i)}}_{\text{(*)}}, # $$ # - use a neural network to compute the density ratio (*), and train the network to maximizes the expectation, e.g., by gradient ascent on the above sample average. # Since $Q$ is arbitrary, the sample average above is a valid estimate. # **But how to compute the density ratio?** # We will first consider estimating the KL divergence $D(P_{\R{Z}}\|P_{\R{Z}'})$ when both $P_{\R{Z}}$ and $P_{\R{Z}'}$ are unknown. # ## Donsker-Varadhan formula # If $P_{\R{Z}'}$ is unknown, we can apply a change of variable # $$ # r(z) = \frac{dQ(z)}{dP_{\R{Z}'}(z)}, # $$ (Q->r) # which absorbs the unknown reference into the parameter. # --- # # **Proposition** # :label: DV2 # # $$ # \begin{align} # D(P_{\R{Z}}\|P_{\R{Z}'}) & = \sup_{\substack{r:\mc{Z}\to \mathbb{R}_+\\ E[r(\R{Z}')]=1}} E \left[ \log r(\R{Z}) \right] # \end{align} # $$ (D2) # # where the optimal $r$ satisfies # $ # r(\R{Z}) = \frac{dP_{\R{Z}}(\R{Z})}{dP_{\R{Z}'}(\R{Z})}. # $ # # --- # **Exercise** # # Show using {eq}`Q->r` that the optimal solution satisfies the constraint stated in the supremum {eq}`D2`. 
# + [markdown] nbgrader={"grade": true, "grade_id": "optional-r", "locked": false, "points": 1, "schema_version": 3, "solution": true, "task": false} # **Solution** # # The constraint on $r$ is obtained from the constraint on $Q\in \mc{P}(\mc{Z})$, i.e., with $dQ(z)=r(z)dP_{\R{Z}'}(z)$, # # $$ # \begin{align*} # dQ(z) \geq 0 &\iff r(z)\geq 0\\ # \int_{\mc{Z}}dQ(z)=1 &\iff E[r(\R{Z}')]=1. # \end{align*} # $$ # # - # The next step is to train a neural network that computes $r$. What about? # $$ # \begin{align} # D(P_{\R{Z}}\|P_{\R{Z}'}) \approx \sup_{\substack{r:\mc{Z}\to \mathbb{R}_+\\ \frac1{n'}\sum_{i\in [n']} r(\R{Z}'_i)=1}} \frac1n \sum_{i\in [n]} \log r(\R{Z}_i) # \end{align} # $$ (avg-D1) # **How to impose the constraint on $r$ when training a neural network?** # We can apply a change of variable: # # $$ # \begin{align} # r(z)&=\frac{e^{t(z)}}{E[e^{t(\R{Z}')}]}. # \end{align} # $$ (r->t) # **Exercise** # # Show that $r$ defined in {eq}`r->t` satisfies the constraint in {eq}`D1` for all real-valued function $t:\mc{Z}\to \mathbb{R}$. # + [markdown] nbgrader={"grade": true, "grade_id": "r-t", "locked": false, "points": 1, "schema_version": 3, "solution": true, "task": false} # **Solution** # # $$ # \begin{align} # E\left[ \frac{e^{t(\R{Z}')}}{E[e^{t(\R{Z}')}]} \right] = \frac{E\left[ e^{t(\R{Z}')} \right]}{E[e^{t(\R{Z}')}]} = 1. # \end{align} # $$ # - # Substituting {eq}`r->t` into {eq}`D1` gives the well-known *Donsker-Varadhan (DV)* formula {cite}`donsker1983asymptotic`: # --- # # **Corollary** Donsker-Varadhan # :label: DV3 # # $$ # \begin{align} # D(P_{\R{Z}}\|P_{\R{Z}'}) = \sup_{t: \mc{Z} \to \mathbb{R}} E[t(\R{Z})] - \log E[e^{t(\R{Z}')}] # \end{align} # $$ (DV) # # where the optimal $t$ satisfies # # $$ # \begin{align} # t(\R{Z}) = \log \frac{dP_{\R{Z}}(\R{Z})}{dP_{\R{Z}'}(\R{Z})} + c # \end{align} # $$ (DV:sol) # # almost surely for some constant $c$. 
# # --- # The divergence can be estimated as follows instead of {eq}`avg-D1`: # $$ # \begin{align} # D(P_{\R{Z}}\|P_{\R{Z}'}) \approx \sup_{t: \mc{Z} \to \mathbb{R}} \frac1n \sum_{i\in [n]} t(\R{Z}_i) - \frac1{n'}\sum_{i\in [n']} e^{t(\R{Z}'_i)} # \end{align} # $$ (avg-DV) # In summary, the neural estimation of KL divergence is a sample average of {eq}`D` but # # $$ # D(P_{\R{Z}}\| P_{\R{Z}'}) = \underset{\stackrel{\uparrow}\sup_t}{} \overbrace{E}^{\op{avg}} \bigg[ \log \underbrace{\frac{P_{\R{Z}}(\R{Z})}{P_{\R{Z}'}(\R{Z})}}_{\frac{e^{t(\R{Z})}}{\underbrace{E}_{\op{avg}}[e^{t(\R{Z}')}]}} \bigg]. # $$ # # but with the unknown density ratio replaced by {eq}`r->t` trained as a neural network. # #
source/part1/DonskerVaradhan.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## RKR Computation # All Code and Markdown written by <NAME>, Chapman University Student and member of the LaRue Cat Lab # # All equations and information within this notebook originated from <i>The Computation of RKR Potential Energy Curves of Diatomic Molecules using Matematica</i>, written by <NAME>. # # The RKR method is a procedure used to determine the potential energy curves of diatomic molecules by calculating the classical turning points, $r_-$ and $r_+$ from the molecule's diatomic constants. # # # The turning points, $r_{\pm}$, can be computed using the following equation: # $$r_{\pm} = \frac{f(v)}{2} \cdot \left[\sqrt{1 + \frac{1}{f(v)g(v)}} \pm 1\right]$$ # <br> # It should be noted that in the above equation the fraction in the square root has a one in the numerator while in the paper by <NAME> the numerator has a four. The four is not present in the Matematica code in Senn's paper and in <i>Lecture #21: Construction of Potential Curves by the Rydberg-Klein-Rees Method (RKR)</i> from MIT OpenCourseWare, the four is not included as well, leading to the omission of the four in the equation presented here to provide an exact equation to that used within the below code. # <br><br> # $f(v)$ and $g(v)$ are defined as follows: # $$f(v) = \int^v_{\frac{-1}{2}}{ \left[ \frac{dv^{'}}{\sqrt{E(v)-E(v^{'})}{}} \right] }$$ # <br> # $$g(v) = \int_{\frac{-1}{2}}^v{\left[ \frac{\frac{\partial E}{\partial J}}{\sqrt{E(v) - E(v^{'})}}\right]dv^{'}}$$ # # <br> # # In the original paper by Senn, there are constants in front of the integrals that are omitted here as they are used to scale the values to a desired unit system. 
Since this RKR code will use wavenumbers for energy and angstroms for bond distance, the constant of $\frac{8.211609}{\sqrt{\mu}}$ will be multiplied by the final answers to perform this conversion. # <br> # # For vibronic states with no rotation and $J=0$, $E(v)$ and $\frac{\partial E}{\partial J}$ can be represented as: # $$E(v) = \omega_e(v+ \frac{1}{2}) - \omega_ex_e(v+\frac{1}{2})^2 + \omega_ey_e(v+\frac{1}{2})^3 + \omega_ez_e(v+\frac{1}{2})^4 + \dots$$ # <br> # $$\frac{\partial E}{\partial J} = B(v) = B_e -\alpha_e(v+\frac{1}{2}) + y_e(v+\frac{1}{2})^2 + \dots$$ # # <br> # # An important computational issue to note with the RKR method is that the integrand term $\frac{dv^{'}}{\sqrt{E(v) - E(v^{'})}}$ will become one over zero since the integration is from $\frac{-1}{2}$ to $v$, thus the integrand will become $\frac{1}{\sqrt{E(v) - E(v)}}$ when $v^{'} = v$ at the end of the integration.<br> # In order to deal with this issue, the integral is changed from $\int^{v}_{\frac{-1}{2}}$ to $\int^{v-\delta}_{\frac{-1}{2}}$, where $\delta$ is some extremely small value. A correction term is then added to both $f(v)$ and $g(v)$ to account for the missing endpoint of the integration resulting in new and more easily computable versions of $f(v)$ and $g(v)$: # # $$f(v) = \int^{v-\delta}_{\frac{-1}{2}}{\frac{dv^{'}}{\sqrt{E(v) - E(v^{'})}} + \sqrt{\frac{\delta}{Q_v}}}$$ # <br> # $$g(v) = \int^{v-\delta}_{\frac{-1}{2}}{\frac{B(v^{'})}{\sqrt{E(v)-E(v^{'})}}dv^{'} } + 2B(v)\sqrt{\frac{\delta}{Q_v}} $$ # <br> # Where $Q_v$ is the following series: # $$Q_v = w_e-2\omega_ex_e(v+\frac{1}{2}) + 3\omega_ey_e(v+\frac{1}{2})^2 + 4\omega_ez_e(v+\frac{1}{2})^3 + \dots$$ # # <br> # # The following code only uses the terms listed above in the computation for all series listed. # ## RKR With Excited States # <br> # In order to allow for RKR computations with excited states, $E(v)$ and $\frac{\partial E}{\partial J}$ must include the $J$ term. 
The full $E(v)$ equation is included below: # \begin{align} # E(v) &= \omega_e(v+\frac{1}{2}) - \omega_ex_e(v+\frac{1}{2})^2 + \omega_ey_e(v+\frac{1}{2})^3 + \omega_ez_e(v+\frac{1}{2}) + \left[B_e-\alpha_e(v+\frac{1}{2}) + y_e(v+\frac{1}{2})^2\right] \cdot J(J+1)-D_eJ^2(J+1)^2 \\ # E(v) &= c(v) + \left[B_e-\alpha_e(v+\frac{1}{2}) + y_e(v+\frac{1}{2})^2\right] \cdot J(J+1)-D_eJ^2(J+1)^2 \\ # E(v) &= c(v) + f(v, J) - g(J) # \end{align} # <br><br> # \begin{align} # \frac{\partial E}{\partial J} &= \frac{\partial}{\partial J}\left[ c(v) + f(v, J) - g(J)\right] \\ # \frac{\partial E}{\partial J} &= 0 + \frac{\partial}{\partial J}f(v, J) - \frac{\partial}{\partial J}g(J) \\ # \frac{\partial E}{\partial J} &= \frac{\partial}{\partial J}f(v, J) - \frac{\partial}{\partial J}g(J) \\ # \end{align} # <br><br> # \begin{align} # \frac{\partial }{\partial J}f(v, J) &= \frac{\partial }{\partial J}\left[d(v)\cdot h(J)\right] \\ # &= d(v) \frac{\partial}{\partial J}h(v) \\ # &= d(v) \frac{\partial}{\partial J}\left[J(J+1)\right] \\ # &= d(v) \cdot \left(2J + 1\right) \\ # &= \left[B_e-\alpha_e(v+\frac{1}{2}) + y_e(v+\frac{1}{2})^2\right] \cdot \left(2J + 1\right) \\ # &= B_e \cdot \left(2J + 1\right) - \alpha_e(v+\frac{1}{2})\left(2J + 1\right) + y_e(v+\frac{1}{2})^2 \left(2J + 1\right) \\ # &= 2JB_e + B_e -2J\alpha_e(v+\frac{1}{2}) -2\alpha_e(v+\frac{1}{2} ) + 2Jy_e(v+\frac{1}{2})^2 + y_e(v+\frac{1}{2})^2 \\ # &= 2J\left[B_e -\alpha_e(v+\frac{1}{2}) + y_e(v+\frac{1}{2})^2\right] + B_e - \alpha_e(v+\frac{1}{2}) + y_e(v+\frac{1}{2})^2 \\ # &= 2Jd(v) + d(v) \\ # \frac{\partial}{\partial J}f(v, J) &= d(v)\left(2J + 1\right) # \end{align} # <br><br> # \begin{align} # \frac{\partial }{\partial J}g(J) &= \frac{\partial}{\partial J} \left[D_e J^2(J+1)^2 \right] \\ # &= D_e \frac{\partial}{\partial J}\left[ J^2(J+1)^2\right] \\ # &= D_e \left[ \frac{\partial}{\partial J}[J^2] \cdot (J+1)^2 + J^2 \cdot \frac{\partial}{\partial J}[(J+1)^2]\right] \\ # &= D_e \left[ 2J \cdot (J+1)^2 + J^2 
# \cdot 2(J+1)\right] \\
# &= 2J(J+1)D_e \left[ (J+1) + J\right] \\
# &= 2J(J+1)D_e\left[ 2J + 1 \right] \\
# \frac{\partial}{\partial J}g(J) &= 2D_eJ(J+1)(2J+1)
# \end{align}
# <br><br>
# \begin{align}
# \frac{\partial E}{\partial J} &= \frac{\partial}{\partial J}f(v, J) - \frac{\partial}{\partial J}g(J) \\
# \frac{\partial E}{\partial J} &= d(v)(2J + 1) - 2D_eJ(J+1)(2J+1) \\
# \frac{\partial E}{\partial J} &= \left[B_e -\alpha_e(v+\frac{1}{2}) + y_e(v+\frac{1}{2})^2\right](2J+1) - 2D_eJ(J+1)(2J+1)
# \end{align}

# +
#Allow Notebook to Import from Comp_Chem_Package
import sys
sys.path.append("..\\Comp_Chem_Package")

from compChemGlobal import *
from nistScraper import getDiatomicConstants

#Global variables to be modified by the user

#Distance from v that the integration should stop at
#Should be a very very small number strictly greater than 0
#But, the smaller the value, the slower the integrals will take to compute
delta = pow(10, -3)

#Number of RKR sample points taken per unit of v while building the curve.
resolution = 10

#provide the name of the diatomic constants that are desired
#(dc is a mapping of spectroscopic constants keyed "w", "wx", "wy", "wz",
# "B", "a", "y", "D", "u", "re" -- see getDiatomicConstants)
dc = getDiatomicConstants("CO")

# +
#Define All Functions Here

def E(v, J=0):
    #Vibrational term energy E(v) in wavenumbers (the c(v) series above).
    #NOTE(review): for J != 0 this multiplies the whole vibrational series
    #by (2J+1), which matches the dE/dJ expression derived above rather
    #than E(v, J) itself; every call below uses the default J=0, where the
    #two coincide -- confirm before using this with J > 0.
    term = v + 0.5
    sumValue = (dc["w"] * term) - (dc["wx"]*pow(term, 2)) + (dc["wy"]*pow(term, 3)) + (dc["wz"]*pow(term,4))
    return sumValue * (2*J + 1) - 2*dc["D"]*J*(J+1)*(2*J+1)

def EPrime(v, J=0):
    #dE/dv: used below both to detect dissociation (dE/dv <= 0 ends the
    #loop) and to drive the progress bar. Same series as Q(v); the J
    #parameter is currently unused.
    term = v + 0.5
    return dc["w"] - 2*dc["wx"]*term + 3*dc["wy"]*pow(term, 2) + 4*dc["wz"]*pow(term, 3)

def B(v):
    #Rotational constant series B(v) from the theory section above.
    term = v + 0.5
    return dc["B"] - (dc["a"] * term) + (dc["y"]*pow(term, 2))

#Used in the correctionFactor calculation
def Q(v):
    #The Q_v series defined above; numerically identical to EPrime(v).
    term = v + 0.5
    return dc["w"] - (2*dc["wx"]*term) + (3*dc["wy"]*pow(term, 2)) + (4*dc["wz"]*pow(term,3))

#Used to correct integrals that stop delta away from v
def correctionFactor(v):
    #Closed-form value of the omitted [v-delta, v] slice: integrating
    #1/sqrt(Q_v * (v - v')) over that slice gives 2*sqrt(delta/Q_v).
    return 2 * sqrt(delta / Q(v))

def integralRadical(v, vPrime):
    #sqrt(E(v) - E(v')) -- shared denominator of both RKR integrands.
    return sqrt(E(v) - E(vPrime))

def f(v):
    #Klein f(v) integral (see the formulas above) plus endpoint correction.
    integrand = lambda vPrime: 1 / integralRadical(v, vPrime)
    return integrate(integrand, -0.5, v-delta) + correctionFactor(v)

def g(v):
    #Klein g(v) integral weighted by B(v'), plus its endpoint correction
    #2*B(v)*sqrt(delta/Q_v).
    integrand = lambda vPrime : B(vPrime) / integralRadical(v, vPrime)
    return integrate(integrand, -0.5, v-delta) + (B(v)*correctionFactor(v))

#v refers to a float energy level for the potential well
#returns the tuple (r+, r-)
def RKR(v):
    #Classical turning points in angstroms; 8.211609/sqrt(u) performs the
    #wavenumber/angstrom unit conversion discussed at the top of the notebook.
    fValue = f(v)
    c0 = (8.211609 * fValue ) / (2 * sqrt(dc["u"]))
    radicand = 1 / (fValue * g(v))
    c1 = sqrt(1 + radicand)
    return c0 * (c1 + 1), c0 * (c1 - 1)

# +
#Graphing and Computation Code
import numpy as np

#Lists to hold data
r = []
EList = []
#First/second numerical-derivative accumulators along the inner (left)
#turning-point branch; used to detect where that branch stops being physical.
ddr = []
ddE = []
ddr2 = []
ddE2 = []
leftAsympCutOff = False
v = -0.499  #start just above v = -1/2 (the potential minimum)
dv = 1 / resolution

loadingBar = widgets.FloatProgress(
    value = v,
    min = -EPrime(v),
    max = 0,
)

display(widgets.HBox([
    widgets.Label(value="Building RKR Potential"),
    loadingBar
]))

#Loop to generate all data to graph
#for v in tqdm(np.arange(vStart, 17, 1/resolution)):
while(EPrime(v) > 0):  #dE/dv <= 0 is treated as the dissociation limit
    energy = E(v)
    rPoints = RKR(v)   #(r+, r-) turning points at this v

    if(not leftAsympCutOff and len(r) != 0):
        #Compute First Derivative
        ddr.append( (rPoints[1] + r[-2:][-1]) / 2 )
        ddE.append( (energy-EList[-1]) / ( rPoints[1] - r[-2:][-1] ))

        if(len(ddr) > 1):
            #Compute 2nd Derivative
            ddr2.append( (ddr[-2] + ddr[-1]) / 2 )
            ddE2.append( (ddE[-1] - ddE[-2]) / (ddr[-1] - ddr[-2]) )

            #Determine if Cutoff should be used
            #(once the inner wall's curvature goes non-positive, the left
            #branch is no longer trustworthy and is dropped from the plot)
            if(ddE2[-1] <= 0):
                leftAsympCutOff = True

    if(not leftAsympCutOff):
        r.extend( rPoints )
        EList.extend( [energy] * 2 )
    else:
        #Past the cutoff keep only the outer (right) turning point.
        r.append(max(rPoints))
        EList.append(energy)

    v += dv
    loadingBar.value = -EPrime(v)

#Manually add in re point
r.append(dc["re"])
EList.append(0)

#Prepare data for graphing
figure = {
    "data":[
        {
            "type":"scatter",
            "x":r,
            "y":EList,
            "connectgaps":True,
            "mode":"markers",
            "name":"RKR Potential",
        }
    ],
    "layout":{
        "xaxis":{"title":"r in Angstroms"},
        "yaxis":{"title":"Energy in Wavenumbers"},
        "title":{"text":"RKR Generated Potential Energy Surface"}
    }
}

display(plot.go.FigureWidget(data=figure))

# +
#Test for the RKR Class
#(runs the packaged rkr implementation against the same constants used by
#the inline implementation above)

#Allow Notebook to Import from Comp_Chem_Package
import sys
sys.path.append("..\\Comp_Chem_Package")

from rkr import rkr as rkrClass
from compChemGlobal import *
from nistScraper import getDiatomicConstants

dc = getDiatomicConstants("CO")
rkr = rkrClass(dc)
rkr.compute(resolution=50, delta = pow(10, -3))
print("Graphing Potential")
rkr.graph()
# -
Learning/RKR/RKR-Copy1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Task: given several weather observations, decide whether it is a good day
# to play tennis. Using the file tenis.csv:
#
# - Load the data
# - Train the model
# - Make predictions and evaluate the model
#
# What results does the model give, and why?
#
# What result is obtained for the following conditions?
#
# - Outlook: Rain
# - Temperature: Cool
# - Humidity: Normal
# - Wind: Strong

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder

df = pd.read_csv("tenis.csv")
df

# # Data preprocessing

# Encode every categorical column as integer labels.
label_encoder = LabelEncoder()
df = df.apply(label_encoder.fit_transform)
df

# # Features

feature_matrix = df.values[:, 0:4]
feature_matrix

# # Labels

target = df.values[:, 4]
target

# # Training and test sets

X_train, X_test, y_train, y_test = train_test_split(
    feature_matrix, target, test_size=0.3, random_state=100
)

# # Training

model = GaussianNB()
model.fit(X_train, y_train)

# # Specific prediction
# - Outlook: Rain
# - Temperature: Cool
# - Humidity: Normal
# - Wind: Strong

sample = [[1, 0, 1, 0]]
y_pred = model.predict(sample)
y_pred

# # Test-set predictions

y_pred = model.predict(X_test)
y_pred

# # Evaluation

metrics.accuracy_score(y_pred, y_test)

metrics.confusion_matrix(y_test, y_pred)

disp = metrics.plot_confusion_matrix(model, X_test, y_test, cmap=plt.cm.Blues)
disp.ax_.set_title('Confusion Matrix')
plt.show()

# # Conclusions
# The algorithm is not a good fit for this dataset; moreover, the results
# are not consistent.
tp3/Ejercicio3 -Tenis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # **Bioinformatics with Jupyter Notebooks for WormBase:** # ## **Utilities 2 - WormCat** # Welcome to the fourteenth jupyter notebook in the WormBase tutorial series. Over this series of tutorials, we will write code in Python that allows us to retrieve and perform simple analyses with data available on the WormBase sites. # # This tutorial will deal with using WormCat, which is a tool for annotating and visualizing gene set enrichment data from _C. elegans_ microarray, RNA seq or RNAi screen data. Let's get started! # The required packages for this tutorial can be installed using the next 3 cells. A more detaied explanation is there in the setup (Tutorial-00) notebook.? # !pip install rpy2 # !pip install wormcat_batch # %load_ext rpy2.ipython # + language="R" # install.packages("devtools") # # library("devtools") # # install_github("trinker/plotflow") # # install_github("dphiggs01/wormcat") # # install.packages("argparse") # # library(wormcat) # - # Let's start by importing the required libraries for the tutorial. import pandas as pd from IPython.core.display import SVG # We will start by assigning values to some variables. # # First, we will have to assign the value to the Annotation_File variable. These are annotation databases that have been defined by, and created by WormCat. Depending on your use-case, you can choose a suitable annotation database. # # The different annotation databases are- # # [1] kn_jan-18-2021.csv # # [2] orf_jan-18-2021.csv # # [3] whole_genome_jul-03-2019.csv.bk # # [4] ahringer_jan-02-2019.csv # # [5] whole_genome_jul-03-2019.csv # # [6] orfeome_jan-31-2019.csv # # [7] two_jan-18-2021.csv # # Based on this, assign a number to the variable. 
# Next, we assign a name to the Output Directory and also assign the name of
# the input excel sheet ('.xlsx' file) to the Input_File variable.
#
# The input '.xlsx' file needs to follow certain rules:
# - Each sheet in the .xlsx file is a different gene set. Each Sheet requires a column header which MUST be 'Sequence ID' or 'Wormbase ID' (The column header is case sensitive.) which is followed by the gene list.
# - The Spreadsheet Name should ONLY be composed of Letters, Numbers, and Underscores (_) and has an extension .xlsx, .xlt, .xls.
# - The individual Sheet Names (i.e., Tab name) within the spreadsheet should ONLY be composed of Letters, Numbers, and Underscores (_).

# +
#'2' selects annotation database [2] from the numbered list above
#(orf_jan-18-2021.csv).
Annotation_File = '2'
Output_Directory = 'WormCat_Output'
Input_File = 'data/Murphy_TS.xlsx'

#wormcat_cli is interactive; build its stdin answers as one newline-separated
#string (the extra 'y' answers the confirmation prompt between questions).
command_input = Annotation_File + '\n' + Output_Directory + '\ny\n' + Input_File
command_input
# -

# Now we have the command ready for running the wormcat program. Let's run it
# and extract the results!

# !printf "$command_input" | wormcat_cli

# Let's read in the output file that provides us enrichment data from the
# nested annotation list with broad categories in Category 1 (Cat1) and more
# specific categories in Cat2 and Cat3.
#
# For details about the three categories, download this file from the WormCat
# website - http://wormcat.com/static/download/Category_Definitions.csv

#One sheet per category level in the generated workbook.
output = 'WormCat_Output/Out_Murphy_TS.xlsx'
Output_Cat1 = pd.read_excel(output, 'Cat1')
Output_Cat2 = pd.read_excel(output, 'Cat2')
Output_Cat3 = pd.read_excel(output, 'Cat3')
Output_Cat1

# We then assign the gene set (i.e., sheet name) that we want to take a closer
# look at and also the category of output. Once specified, we can explore the
# other outputs.
#
# WormCat output provides scaled bubble charts with enrichment scores that
# meet a Bonferroni false discovery rate cut off of 0.01.
#
# It also includes CSV files on the data used for the graph.

#Pick one gene set (input sheet name) and one category level to inspect;
#WormCat writes an SVG bubble chart plus the CSV behind it for each gene set.
gene_set = 'hypodermis'
category = '1'

SVG(filename = 'WormCat_Output/' + gene_set + '/rgs_fisher_cat' + category + '_apv.svg')

graph_csv = pd.read_csv('WormCat_Output/' + gene_set + '/rgs_fisher_cat' + category + '_apv.csv')
graph_csv

# This is the end of the tutorial on using WormCat to deal with WormBase data!
#
# In the next tutorial, we will generate Chromosome Maps with WormBase data.
#
# # Acknowledgements:
# - WormCat (http://wormcat.com/)
# - WormCat publication - 'WormCat: an online tool for annotation and visualization of Caenorhabditis elegans genome-scale data.' <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. GENETICS February 1, 2020 vol. 214 no. 2 279-294;
Tutorial-14-utilities-wormcat.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + deletable=true editable=true from os import path from scipy.ndimage import imread from IPython.display import display from visualization import Context from visualization.visualizers.input import MahendranEtAlVisualizer from visualization.visualizers import as_image # + deletable=true editable=true # # !cd ../models && wget https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip && unzip inception5h.zip && cd ../notebooks # + deletable=true editable=true context = Context(path.join('..', 'models', 'tensorflow_inception_graph.pb')) # + deletable=true editable=true input_image_path = path.join('..', 'resources', 'eagle.jpg') input_image = imread(input_image_path) display(as_image(input_image)) # + deletable=true editable=true tensors = [ ('maxpool0:0', 20), ('mixed3a:0', 188), ('mixed4e:0', 3) ] mahendranetal_visualizer = MahendranEtAlVisualizer(context, l2_decay_weight=2e-1, total_variation_weight=5e-1) for t, i in tensors: reconstruction = mahendranetal_visualizer.reconstruct_input( tensor_name=t, channel_index=i, input_image=input_image, iterations=100, step=100.0, log_per=25 ) display(as_image(reconstruction, size=[224, 224])) # -
notebooks/Mahendran et al.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Week 11 # ## In-Class Activity Workbook and Homework # # ## Learning Objectives # ### In this notebook you will learn about and practice: # 1. Section 1: <a id='Section 1'></a>[Section 1: Find Replace](#Section-1) # 2. Section 2: <a id='Section 2'></a>[Section 2: Missing Data imputation](#Section-2) # 3. Section 3: <a id='Section 3'></a> [Section 3: Rename](#Section-3) # # ### Additional Sources # >- Check out the `pandas` cheat sheets provided by Data Camp and posted on Canvas # >>- https://www.datacamp.com/community/blog/python-pandas-cheat-sheet # >- Pretty good article walking through `map()`, `apply()`, and `applymap()` # >>- https://towardsdatascience.com/introduction-to-pandas-apply-applymap-and-map-5d3e044e93ff # + [markdown] slideshow={"slide_type": "slide"} # # Section 0 # ## Read and Review Data # - # ## Reading and Writing Files with the python `pandas` module # # ### Read csv or Excel files # >- csv files: `pd.read_csv('fileName.csv')` # >- Excel files: `pd.read_excel('fileName.xlsx')` # >- Multiple sheets from the same Excel file: # >>- `xlsx = pd.ExcelFile('file.xls')` # reads in the entire workbook # >>- `df1 = pd.read_excel(xlsx, 'Sheet1')` # reads in sheet you specify # >>- `df2 = pd.read_excel(xlsx, 'Sheet2')` # # ### Write csv or Excel files # >- csv files: `pd.to_csv('YourDataFrame.csv')` # >- Excel files: `pd.to_excel('YourDataFrame.xlsx')` # ### Step 1: Check your working directory and make sure you have the `winemag-data-130k-v2.csv` files there # ### Step 2: import the `pandas` module and alias it `pd` # ### Step 3: Read the ``winemag-data-130k-v2.csv` file into a pandas dataframe named, `wine` # >- Set the index to column 0 # >- Look at the first five records to make sure `wine` is imported correctly # # #### Loading a 
CSV file # function: `pd.read_csv()` # # [Docu read_csv](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html?highlight=read_excel#pandas.read_excel) # #### Show the last five records of `wine` # + [markdown] slideshow={"slide_type": "subslide"} # #### Show a tuple of the number of columns and rows in `wine` # - # #### Show the columns in `wine` # #### Show the datatypes that are in `wine` # ### Show the summary stats for all columns in `wine` # >- Check the documentation on `describe()` to learn how to show descriptive analytics for all columns regardless of data type # >>- https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.describe.html # ### Show a list of unique wine tasters # ### Show the unique counts of values for all columns in the `wine` dataframe # ### Show how many times each tasters name occurs # # Section 1 # ## Finding and Replacing Values # ### Q1: What is the data type of the `points` column in the winemag csv? # ### Q2: Create a new Series from the records in the `points` column: # >- Convert the entries to strings # >- Name your Series `points_string` and add it to your `wine` dataframe # >>- Hint: look up the `astype` function # >- Show the first five records of `wine` after you add the new column # ### Q3: How many reviews in the dataset are missing a price? # ### Q4: How many records are missing a review (points) score? # ### Q5: How many null values are there in every column? # ### Q6: What are the most common wine-producing regions? # #### Subquestion: how many wines are missing `region_1` in our dataset? 
#
# >- First, replace all null values in the `region_1` series with 'Unknown'
# >- Then define a Series named, `region_reviews`, that will store the counts
# >>- Use `region_1` as the region field you are trying to count
# >- Sort the results in descending order
# >- Hint: You might want to use `fillna()`, `value_counts()`, and/or `sort_values()` to help answer this question

# #### Show the top 5 wine producing regions based on the `region_1` field
# >- Exclude 'Unknown' from your results

# # Section 2
# ## Dealing with missing data
# >- Missing data is one of the most pervasive problems in data analysis
# >- No matter what field you work in you will likely come across datasets that contain incomplete data for some records
# >>- Missing data can occur because experimental units may die (e.g., rats in a clinical study), equipment malfunctions, respondents to surveys do not answer all questions, or simply someone that is in charge of recording data goofs.
# >- The seriousness of the missing data depends on the amount of missing data, the pattern of missing data, and why it is missing
# >>- The why and the pattern of missing data is more important than the amount of missing data. However, missing data will have a larger impact on small datasets than larger datasets
#
# >- This section focuses on some common strategies for handling missing data

# ## Common strategies for dealing with missing data
#
# Tabachnick & Fidell (2019) give us several commonly used methods for handling missing data values.
#
# 1. Remove any records that contain missing data
# >- If only a few records/cases have missing data and they seem to be a random subsample of the whole sample, deletion can be a good method of dealing with missing data
# 2. Estimating missing data
# >- A second option is to estimate (impute) missing values and then use the estimates during analysis. Here are some common estimation methods
# >>- Use prior knowledge to estimate the value.
Here, the analyst/researcher replaces missing values with an educated case based on expertise in the area. # >>- Mean replacement. Calculate the overall mean of the feature and impute that for all missing values. In absence of all other information, the mean is the best guess about the value of a feature/variable # >>- Median replacement. Calculate the median of the feature and impute that for all missing values # >>- Regression replacement. A more sophisticated approach would be to use a regression model and impute missing values based on the values of other features that we do have data on # # #### Regardless of what method is used for missing data, it is recommended to: # 1. Create a new feature that stores information on whether or not missing data was imputed # >- This is a binary column (usually 0's and 1's) indicating if missing data was imputed for a record or not # 2. Repeat the analysis with and without missing data and imputation methods and determine if conclusions are the same under each circumstance # # #### Reference: Tabachnick & Fidell (2019). *Using Multivariate Statistics*. Pearson. # ## Practice imputing mean values for missing data # ### Q7: Calculate the mean price and store the mean in a variable, `meanPrice` # >- Round to two decimal places # ### Q8: Replace all missing prices in the `wine` DataFrame with `meanPrice` # >- Store the prices with imputed mean prices in a new column named, `priceImpute` in the wine DataFrame # ##### Now check the number of missing values in all columns # >- Double check your new `priceImpute` column doesn't contain missing values # + [markdown] slideshow={"slide_type": "subslide"} # ### Q9: What are the descriptive statistics for the original `price` column and the `priceImpute` column? 
# >- Compare the descriptive stats between the two fields0 and determine if the difference is significant # - # + [markdown] slideshow={"slide_type": "subslide"} # ### Q10: Create a column, `imputeFlag`, that stores a 1 if the record used an imputed mean and a 0 if it does not # - # ### One way to do this is to use the `map()` function # >- `map()` is used to substitute each value in a Series with another value # >- General syntax: `Series.map(arg,na_action=None)` # >>- Where *arg* can be a function, a dictionary, or a Series # # Source: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.map.html # # >- For example, `wine['price'].map(lambda row: 0 if row >0 else 1)` uses a lambda function as the *arg* passed to *map()* to transform all the values in the `price` column to a Series of 0's or 1's based on the logic used in the lambda function # >- You could also define your own function and then pass that function into *map()* # #### Show the first five records of `wine` with all of your changes # #### Show all the column names in `wine` # #### Show the total null values in each column in `wine` # # Section 3 # 1. Renaming Columns # 2. More Descriptive Analytics Practice # ### Q11: Create a new DataFrame, wineUS, from `wine` for only US wines # #### In the next few cells go through the usual initial data examination steps that we have covered anytime we define a new data frame # >- Show the first five records of `wineUS`, show the columns, show value counts, etc # ### Q12: How many records/rows have null prices? # ### Q12: Rename the following columns # >- 'province' to 'state' # >- 'taster_name' to 'taster' # >- 'taster_twitter_handle' to 'twitter_handle' # >- 'points' to 'rating' # # ##### Note: Make all of these name changes in-place # #### Show the first 5 records after your change # ### Q13: How many records in `wineUS` have imputed values for `price`? # ### Q14: What are the descriptive stats for `price` and the imputed price field? 
# ### Q15: Create a new column that transforms all the state values into state abbreviations based on using the first two letters of the state # >- Name the new column `st` # >- Make sure to have the abbreviations stored as upper case for both letters # >- Insert this column to the right of the state column # ### Q16: What are the average ratings and prices for each state? # >- Look the results using both `price` and `priceImpute` # >- Show the sample size (counts) as well as the means # >- Round everything to 2 decimal places # ### Q17: What is the correlation between ratings and price? # >- Look at the correlations between rating and both price fields # >- Are the results what you would expect? Why or why not? # # Reference: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.corr.html # ### Q18: Show the number of imputed prices by state? # >- Is there anything in the results that would be a red flag for non-random missing data? # >- Do any states have a disproportionate number of missing values relative to their total wines/records? # >- After you look at this by state, look at the results by state then region_1 # #### Now look at the imputed price values by state and region # >- Sort the results in descending order # >- What state and regions have the highest number of missing prices?
Week 11/pandas5%266_FindReplaceRename_Activity_student.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import sys, os
import argparse

#Make the FedML repository roots importable from this notebook directory.
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "./../../../../")))
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "./../../../")))
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "")))

# +
from fedml_api.data_preprocessing.cifar10.data_loader import load_partition_data_cifar10
from fedml_api.standalone.fedavg.my_model_trainer_classification import MyModelTrainer as MyModelTrainerCLS
from fedml_api.model.contrastive_cv.resnet_with_embedding import Resnet56
from CovaMNet import CovaMResnet56

import torch
from torch import nn
from collections import OrderedDict
import torch.nn.functional as F
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import numpy as np
import random
import pickle

# +
#Experiment configuration.
dataset = 'cifar10'
data_dir = "./../../../data/cifar10"
# partition_method = 'hetero'
partition_method = 'homo'
partition_alpha = 0.5
client_num_in_total = 3
batch_size = 100
total_epochs = 500
#Checkpoint path template: {0}=client count, {1}=partition, {2}=client id, {3}=epoch.
save_model_path = 'model/cs_{0}_{1}_client_{2}_better_orcal_no_bnneck_resnet_{3}.pt'
device = 'cuda:3'

#Rebind `dataset` from the name string to the pre-partitioned dataset tuple
#(layout documented where it is unpacked below).
with open(f'dataset_{partition_method}_{client_num_in_total}.pickle', 'rb') as f:
    dataset = pickle.load(f)

# +
def train_model(client, epochs):
    """Train one client's model locally with SGD + cosine LR schedule.

    Saves a checkpoint every 50 epochs and once more after the final epoch.
    NOTE(review): the printed loss is the running mean over all epochs so
    far, not the current epoch's mean loss -- confirm that is intended.
    """
    lr = 0.01
    wd = 0.001  #only referenced by the commented-out Adam variants below
    client.model.to(client.device)
    client.model.train()

    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.SGD(client.model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
    # optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, client.model.parameters()), lr=lr,
    #                              weight_decay=wd, amsgrad=True)
    # optimizer = torch.optim.Adam(client.model.parameters(), lr=0.001, betas=(0.5, 0.9))

    epoch_loss = []
    for epoch in range(epochs):
        batch_loss = []
        for batch_idx, (x, labels) in enumerate(client.train_data):
            x, labels = x.to(device), labels.to(device)
            client.model.zero_grad()
            log_probs, _ = client.model(x)  #model returns (logits, embedding)
            loss = criterion(log_probs, labels)
            loss.backward()

            # to avoid nan loss
            torch.nn.utils.clip_grad_norm_(client.model.parameters(), 1.0)

            optimizer.step()
            # logging.info('Update Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
            #     epoch, (batch_idx + 1) * args.batch_size, len(train_data) * args.batch_size,
            #     100. * (batch_idx + 1) / len(train_data), loss.item()))
            batch_loss.append(loss.item())
        scheduler.step()

        #Periodic checkpoint (also fires at epoch 0).
        if epoch % 50 == 0:
            torch.save(client.model.state_dict(), str.format(save_model_path, client_num_in_total, partition_method, client.id, epoch))

        epoch_loss.append(sum(batch_loss) / len(batch_loss))
        print('Client Index = {}\tEpoch: {}\tLoss: {:.6f}'.format(
            client.id, epoch, sum(epoch_loss) / len(epoch_loss)))

    #Final checkpoint, tagged with the total epoch count.
    torch.save(client.model.state_dict(), str.format(save_model_path, client_num_in_total, partition_method, client.id, epochs))


class Client(object):
    """Bundle of one federated client's id, local data splits, device, and model."""

    def __init__(self, client_index, train_data_local_dict, train_data_local_num_dict, test_data_local_dict, device, model):
        self.id = client_index
        self.train_data = train_data_local_dict[self.id]
        self.local_sample_number = train_data_local_num_dict[self.id]
        self.test_local = test_data_local_dict[self.id]
        self.device = device
        self.model = model

# clients = []
# for i in range(3):
#     client = Client(i, train_data_local_dict, train_data_local_num_dict, test_data_local_dict, device, resnet56(class_num=class_num))
#     train_model(client, epochs)
# -

model = Resnet56(class_num=dataset[-1], neck='no')

#dataset tuple layout (from load_partition_data_cifar10):
# [train_data_num, test_data_num, train_data_global, test_data_global, \
#  train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
#  class_num]
client = Client(0, dataset[5], dataset[4], dataset[6], device, model)
# model.load_state_dict(torch.load('model/cs_3_homo_client_0_oral_epochs_200.pt'))
train_model(client, 200)
# +
def test(model, test_data, device):
    """Evaluate `model` on `test_data`; return correct/loss/total counters."""
    model.to(device)
    model.eval()

    metrics = {
        'test_correct': 0,
        'test_loss': 0,
        'test_total': 0
    }

    criterion = nn.CrossEntropyLoss().to(device)

    with torch.no_grad():
        for batch_idx, (x, target) in enumerate(test_data):
            x = x.to(device)
            target = target.to(device)
            pred, feat = model(x)  #model returns (logits, feature map)
            print(feat.sum())      #debug output; consider removing
            loss = criterion(pred, target)

            _, predicted = torch.max(pred, -1)
            correct = predicted.eq(target).sum()

            metrics['test_correct'] += correct.item()
            metrics['test_loss'] += loss.item() * target.size(0)
            metrics['test_total'] += target.size(0)
    return metrics

metrics = test(client.model, dataset[3], device)
test_correct = metrics['test_correct']/metrics['test_total']
test_loss = metrics['test_loss']/metrics['test_total']
print(metrics['test_total'])
print(f'test_correct: {test_correct}; test_loss: {test_loss}')

# +
def cal_covariance(input):
    """Per-sample channel covariance.

    For each feature map in `input` (treated as C x h x w), returns the
    C x C covariance of channels over spatial positions and the per-channel
    mean (C x 1), as two parallel lists.
    """
    CovaMatrix_list = []
    mean_list = []
    for i in range(len(input)):
        support_set_sam = input[i]
        # print(support_set_sam.shape)
        support_set_sam = torch.unsqueeze(support_set_sam, 0)
        B, C, h, w = support_set_sam.size()
        #Flatten to C x (B*h*w) so each row holds one channel's samples.
        support_set_sam = support_set_sam.permute(1, 0, 2, 3)
        support_set_sam = support_set_sam.contiguous().view(C, -1)
        # print(support_set_sam.shape)
        mean_support = torch.mean(support_set_sam, 1, True)
        mean_list.append(mean_support)
        support_set_sam = support_set_sam-mean_support
        covariance_matrix = support_set_sam@torch.transpose(support_set_sam, 0, 1)
        covariance_matrix = torch.div(covariance_matrix, h*w*B-1)  #unbiased (n-1)
        CovaMatrix_list.append(covariance_matrix)
    return CovaMatrix_list, mean_list

#Per-class exponential running averages of channel covariance (cl) and of the
#flattened feature map (ml), filled in by extract_features below. Sizes
#assume 256 channels over an 8x8 (=64) map and 10 classes -- TODO confirm
#against the Resnet56 embedding shape.
cl = [torch.zeros((256,256)) for i in range(10)]
ml = [torch.zeros((256,64)) for i in range(10)]
lbd = 0.999  #EMA decay factor
labels = []

def extract_features(model, data_loader, device):
    """Accumulate per-class EMAs of covariances (cl) and features (ml)."""
    model.to(device)
    model.eval()
    features = []  #currently unused
    with torch.no_grad():
        for batch_idx, (x, l) in enumerate(data_loader):
            x, l = x.to(device), l.to(device)
            score, feats = model(x)
            covaM_list, mean_list = cal_covariance(feats)
            for covaM, f, label in zip(covaM_list, feats, l):
                labels.append(label.cpu())
                for i in range(len(cl)):
                    if label.data.cpu() == i:
                        # print(i)
                        cl[i] = lbd * cl[i] + (1-lbd) * covaM.cpu()
                        f = torch.unsqueeze(f, 0)
                        B, C, h, w = f.size()
                        f = f.permute(1, 0, 2, 3)
                        f = f.contiguous().view(C, -1)
                        # print(f.shape)
                        ml[i] = lbd * ml[i] + (1-lbd) * f.cpu()

extract_features(client.model, dataset[2], device)
covaMs_means = [cl, ml]
# print(cl[0].shape)
# with open(f'better_orca_no_bnneck_l_covaMs_means.pickle', 'wb') as f:
#     pickle.dump(covaMs_means, f)
# -

model1 = CovaMResnet56(class_num=dataset[-1], neck='no', with_cova=True)

# +
#Overwrite the CE classifier with all-ones weights and zero bias.
a = torch.ones((10, 256))
for key in model1.state_dict().keys():
    print(key)
    if key == 'ce_classifier.weight':
        t = model1.state_dict()[key]
        print(t)
        t.copy_(a)
        t.require_grad = False
        #NOTE(review): 'require_grad' is a typo for 'requires_grad'; this
        #line only sets an unused attribute and does not freeze the tensor.
    if key == 'ce_classifier.bias':
        t = model1.state_dict()[key]
        t.copy_(torch.zeros(t.shape))

model1.ce_classifier.requires_grad = False
#NOTE(review): setting requires_grad on an nn.Module does not freeze its
#parameters; requires_grad_(False) on the module's parameters would.

#Sanity check: print the classifier weights after the copy.
for key in model1.state_dict().keys():
    if key == 'ce_classifier.weight':
        t = model1.state_dict()[key]
        print(t)
# -

metrics = test(client.model, dataset[3], device)
test_correct = metrics['test_correct']/metrics['test_total']
test_loss = metrics['test_loss']/metrics['test_total']
print(metrics['test_total'])
print(f'test_correct: {test_correct}; test_loss: {test_loss}')
fedml_experiments/distributed/contrastive_fed/oral_resnet_train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: flatiron # language: python # name: flatiron # --- # ## Inspecting the Water Feature Group import pickle import matplotlib.pyplot as plt import seaborn as sns sns.set_context('notebook') with open('../data/train_test_split.pkl', mode='rb') as f: train_test_split = pickle.load(f) X_train = train_test_split['X_train'] # There are no missing values in this group. waterTable = X_train[['basin', 'water_quality', 'quality_group', 'quantity', 'source', 'source_type', 'source_class']] waterTable.isnull().any() # Below I have plotted the distribution of waterpoints with respect to each of the variables in this group. Notice that `water_quality` and `quality_group` have nearly identical distributions and values. Similarly, `source` and `source_type` have nearly identical distributions and values. def barplot_waterpoints(df): for col in df.columns: fig, ax = plt.subplots() fig.set_figheight(8) fig.set_figwidth(10) ax.set_title(f'Distribution of Waterpoints by {col}') sns.countplot(y=col, data=df, order=df[col].value_counts().index, ax=ax); fig.savefig(f'../images/waterpoints_by_{col}.png', bbox_inches='tight') barplot_waterpoints(waterTable) # ## Conclusion # Data in this group is complete. It seems likely that there is functional dependence between `water_quality` and `quality_group`. Similarly, there is likely a functional dependence between `source` and `source_type`.
code/.ipynb_checkpoints/InspectGroup-Water-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.6 64-bit # language: python # name: python3 # --- # **Bokeh charting example for CPI, Core CPI, and Stick CPI** # + import pandas as pd from requests import get from io import StringIO keys = ["<KEY>CORESTICKM159SFRBATL"] ColNames = ["Date", "CPI", "Core", "Sticky"] start, end = pd.Timestamp(2005, 1, 1), pd.Timestamp.today() dfs = [] headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"} for i in range(len(keys)): url = "https://fred.stlouisfed.org/graph/fredgraph.csv?id=" + keys[i] df = pd.read_csv(StringIO(get(url, headers=headers).text)) df["DATE"] = pd.to_datetime(df["DATE"]) df = df.loc[(df["DATE"] >= start) & (df["DATE"] <= end)] df = df.reset_index(drop=True) if df[keys[i]][0] >= 150: df[keys[i]] = df[keys[i]].pct_change(12, fill_method="ffill") * 100 dfs.append(df) df = pd.concat(dfs, axis=1).dropna().reset_index(drop=True) df = df.loc[:, ~df.columns.duplicated()] df.columns = ColNames df # + from bokeh.io import output_notebook from FRED_Bokeh import * output_notebook() show(chartFRED(df)) # -
Bokeh_Example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import mahotas as mh
import numpy as np
from sklearn.datasets import load_digits
import matplotlib.pyplot as plt
# NOTE(review): newer TPOT releases expose TPOTClassifier instead of TPOT --
# confirm the installed version before upgrading this import.
from tpot import TPOT
# FIX: sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
import dautil as dl

context = dl.nb.Context('extracting_texture')
lr = dl.nb.LatexRenderer(chapter=11, start=5, context=context)
# FIX: the ampersands were HTML-escaped ("&amp;") by whatever extracted this
# file; LaTeX alignment needs plain "&".
lr.render(r' C_{\Delta x, \Delta y}(i,j)=\sum_{p=1}^n\sum_{q=1}^m\begin{cases} 1, & \text{if }I(p,q)=i\text{ and }I(p+\Delta x,q+\Delta y)=j \\ 0, & \text{otherwise}\end{cases}')
lr.render(r'''\begin{align} Angular \text{ } 2nd \text{ } Moment &= \sum_{i} \sum_{j} p[i,j]^{2}\\ Contrast &= \sum_{n=0}^{Ng-1} n^{2} \left \{ \sum_{i=1}^{Ng} \sum_{j=1}^{Ng} p[i,j] \right \} \text{, where } |i-j|=n\\ Correlation &= \frac{\sum_{i=1}^{Ng} \sum_{j=1}^{Ng}(ij)p[i,j] - \mu_x \mu_y}{\sigma_x \sigma_y} \\ Entropy &= -\sum_{i}\sum_{j} p[i,j] log(p[i,j])\\ \end{align}''')

# +
digits = load_digits()

# Augment each digit's 64 raw pixel values with its flattened Haralick
# texture features.
# FIX: the original loop called np.append(X[i], haralick(...)) and discarded
# the return value -- np.append never operates in place, so the texture
# features were silently dropped and X stayed identical to digits.data.
# Build the augmented feature matrix explicitly instead.
X = np.array([
    np.append(img.ravel(),
              mh.features.haralick(img.astype(np.uint8)).ravel())
    for img in digits.images
])

X_train, X_test, y_train, y_test = train_test_split(
    X, digits.target, train_size=0.75)
# -

tpot = TPOT(generations=6, population_size=101, random_state=46, verbosity=2)
tpot.fit(X_train, y_train)

# %matplotlib inline
context = dl.nb.Context('extracting_texture')
dl.nb.RcWidget(context)

# +
print('Score {:.2f}'.format(tpot.score(X_train, y_train, X_test, y_test)))

dl.plotting.img_show(plt.gca(), digits.images[0])
plt.title('Original Image')
plt.figure()
dl.plotting.img_show(plt.gca(), digits.data[0].reshape((8, 8)))
plt.title('Core Features')
plt.figure()
dl.plotting.img_show(plt.gca(), mh.features.haralick(
    digits.images[0].astype(np.uint8)))
plt.title('Haralick Features')
# -
Module2/Python_Data_Analysis_code/Chapter 11/extracting_texture.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data Science and Business Analytics Internship-Dec20
# #### GRIP @ The Sparks Foundation
# ### Create the Decision Tree classifier and visualize it graphically.
# ### Task-6 Prediction using Decision Tree Algorithm
# ### Author: <NAME>
# ##### Batch: December-2020

# +
# Import necessary dependencies
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
import mglearn
from sklearn.tree import export_graphviz
from IPython.display import Image
import pydotplus
from IPython.display import display
from io import StringIO
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
# -

# Load the dataset
ds = pd.read_csv('IRIS.csv')

# First five rows
ds.head()

# Dataset information
ds.info()

# Summary statistics
ds.describe()

# Correlation matrix
# NOTE(review): on pandas >= 2.0 `corr()` raises on the string Species
# column -- use ds.corr(numeric_only=True) there; verify the installed
# pandas version.
corr = ds.corr()
corr

sns.heatmap(corr, annot=True, cmap='magma_r')

# Null-value check
ds.isnull().sum()

# Pairwise feature relationships, colored by species
sns.pairplot(data=ds, hue="Species", hue_order=None, palette=None,
             kind='scatter', diag_kind='hist', markers=None, height=2.5)
plt.show()

plt.figure(figsize=(25, 5))
part = ds.iloc[:, 1:]  # drop the Id column
part.groupby('Species').plot.bar()
plt.show()

part.hist(figsize=(15, 15))
plt.show()

feature_v = ds[["SepalLengthCm", "SepalWidthCm", "PetalLengthCm", "PetalWidthCm"]].values
target_v = ds["Species"].values
feature_v
target_v

feature_names = ["SepalLengthCm", "SepalWidthCm", "PetalLengthCm", "PetalWidthCm"]
target_name = ['setosa', 'versicolor', 'virginica']

# Select feature data and target data
X = part.iloc[:, :-1]
y = part.iloc[:, -1]

# Split the dataset into train and test portions
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.35, random_state=42)

# Fit the DecisionTreeClassifier and predict the held-out test set
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
c_pred = dtc.predict(X_test)

# FIX: sklearn's metric signatures are (y_true, y_pred). The original code
# passed (c_pred, y_test), which transposes the confusion matrix and swaps
# the per-class precision/recall in the report (accuracy is symmetric and
# was unaffected).
print("Confusion Matrix:", confusion_matrix(y_test, c_pred))
print("Classification Report:", classification_report(y_test, c_pred))
print("Accuracy of Test data :", accuracy_score(y_test, c_pred) * 100, "%")

# Create DataFrame comparing actual vs predicted labels
df = pd.DataFrame({'Actual': y_test, 'Predicted': c_pred})
df.reset_index()

sns.heatmap(confusion_matrix(y_test, c_pred), annot=True, cmap='RdGy_r')

from sklearn import tree
plt.figure(figsize=(18, 18))
tree.plot_tree(dtc, filled=True, rounded=True, proportion=True,
               node_ids=True, feature_names=feature_names)
plt.show()

# Predict the class of one new sample
dtc.predict([[5.1, 3.5, 1.4, 0.2]])

# # Thank you
Task-6 Prediction using Decision Tree Algorithm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="CpOIFrRwECAM" # # Arquitectura de Una Red Neuronal # # En resumen, la arquitectura de una ANN está definida por el número y la forma de sus capas. En el estado del arte de estas ANN esas arquitecturas suelen ser un poco complejas, con algunas capas saltando sobre otras capas o capas que retornan información de regreso a capas anteriores. # # En este notebook veremos un ejemplo sencillo de una ANN y cual es la función de cada una de las capas que la componen. Para diseñar esta ANN usaremos el framework [PyTorch](https://pytorch.org/) y el dataset estándar para la clasificación de tres especies de flores [IRIS](https://archive.ics.uci.edu/ml/datasets/Iris). # + [markdown] id="ZwjzcjdKVRVs" # # Ejercicio # # En este notebook construiremos una ANN sencilla para hacer un ejercicio de clasificación usando el dataset [IRIS](https://archive.ics.uci.edu/ml/datasets/Iris). 
# + id="Ge75HTieVNKa"
#--- Import essential packages
import numpy as np
import matplotlib.pyplot as plt

# + colab={"base_uri": "https://localhost:8080/"} id="wJ1_uc7xWEh6" outputId="a7a7f0a8-9b47-4207-909d-828ec7b73e5b"
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

iris = load_iris()
iris.keys()

# + colab={"base_uri": "https://localhost:8080/"} id="S_bpHB4MUkup" outputId="d39a5f90-09bb-45fb-e1bc-8247cff50147"
#-- Extract the relevant pieces of the dataset
X = iris['data']
Y = iris['target']
names = iris['target_names']
feature_names = iris['feature_names']

#-- Standardize the features to zero mean and unit variance
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

#-- Split into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split(
    X_scaled, Y, test_size=0.2, random_state=42)

#-- Inspect the size of the training data
np.shape(X_train), np.shape(Y_train)

# + colab={"base_uri": "https://localhost:8080/", "height": 334} id="EP2LguctW7bh" outputId="2ed8f015-5d2d-4cbf-de7e-23e57a0134ef"
#-- Visualize the data, one panel per pair of features
class0 = Y == 0
class1 = Y == 1
class2 = Y == 2

fig = plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.plot(X_scaled[:, 0][class0], X_scaled[:, 1][class0], 'o', c='red', label=names[0])
plt.plot(X_scaled[:, 0][class1], X_scaled[:, 1][class1], 'o', c='blue', label=names[1])
plt.plot(X_scaled[:, 0][class2], X_scaled[:, 1][class2], 'o', c='green', label=names[2])
plt.grid()
plt.legend()
plt.xlabel(feature_names[0])
plt.ylabel(feature_names[1])

plt.subplot(1, 2, 2)
plt.plot(X_scaled[:, 2][class0], X_scaled[:, 3][class0], 'o', c='red', label=names[0])
plt.plot(X_scaled[:, 2][class1], X_scaled[:, 3][class1], 'o', c='blue', label=names[1])
plt.plot(X_scaled[:, 2][class2], X_scaled[:, 3][class2], 'o', c='green', label=names[2])
plt.grid()
plt.legend()
plt.xlabel(feature_names[2])
plt.ylabel(feature_names[3])
plt.show()

# + [markdown] id="hTOprWXNmrTR"
# # Artificial Neural Network with PyTorch
#
# We build an ANN with three linear layers: the first two use ReLU
# activations and the output layer produces one raw logit per class
# (CrossEntropyLoss applies the softmax internally).

# + colab={"base_uri": "https://localhost:8080/"} id="cJ9KyXnnbZkh" outputId="948211b8-2f10-4ba2-95fd-0596137227cc"
#--- Import torch
import torch

#--- Define the network as a sequence of layers
input_dim = X_train.shape[1]   # 4 input features
n_classes = len(names)         # 3 iris species

# FIX: the original output layer was Linear(50, input_dim), sizing the
# output by the number of *inputs* (4) instead of the number of classes (3).
# FIX: the trailing nn.Softmax layer was removed -- torch.nn.CrossEntropyLoss
# already applies log-softmax internally, so training on softmax outputs
# squashes the loss/gradients. argmax-based predictions are unchanged,
# because softmax is monotonic.
model = torch.nn.Sequential(
    torch.nn.Linear(input_dim, 50),
    torch.nn.ReLU(),
    torch.nn.Linear(50, 50),
    torch.nn.ReLU(),
    torch.nn.Linear(50, n_classes),
)
model

# + id="bv8lkGEWjpbK"
#-- Define the optimizer and the evaluation criterion
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = torch.nn.CrossEntropyLoss()

# + colab={"base_uri": "https://localhost:8080/"} id="E7-Fq-bBjEOM" outputId="ff022449-6aa0-436a-8cdc-dc4aa11cd4e2"
#--- Number of training epochs
n_epoch = 100

#-- Convert the numpy arrays to tensors. torch.autograd.Variable has been
#-- deprecated since PyTorch 0.4; plain tensors are sufficient here.
X_train_tensor = torch.from_numpy(X_train).float()
Y_train_tensor = torch.from_numpy(Y_train).long()
X_test_tensor = torch.from_numpy(X_test).float()
Y_test_tensor = torch.from_numpy(Y_test).long()

#-- Storage for per-epoch training loss and accuracy
loss_list = np.zeros(n_epoch)
accuracy_list = np.zeros(n_epoch)

#-- Training loop (full-batch gradient descent on the training set)
for epoch in range(n_epoch):
    Y_pred = model(X_train_tensor)
    loss = criterion(Y_pred, Y_train_tensor)
    loss_list[epoch] = loss.item()

    # Zero gradients, backpropagate, take an optimizer step
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    correct = (torch.argmax(Y_pred, dim=1) == Y_train_tensor).type(torch.FloatTensor)
    accuracy_list[epoch] = correct.mean()
    print('Epoch [{}/{}], loss: {}, acc: {}'.format(
        epoch + 1, n_epoch, loss_list[epoch], accuracy_list[epoch]))
# -

torch.argmax(Y_pred, dim=1)

# + colab={"base_uri": "https://localhost:8080/", "height": 280} id="_KcUwOAdkKL-" outputId="d1665674-c593-4a61-82eb-c81793b2f424"
#-- Plot training accuracy and loss
fig = plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(accuracy_list)
plt.ylabel("training accuracy")
plt.xlabel("epochs")  # FIX: was misspelled "epcohs"
plt.grid()
plt.subplot(1, 2, 2)
plt.plot(loss_list)
plt.ylabel("training loss")
plt.xlabel("epochs")
plt.grid()
plt.show()

# + colab={"base_uri": "https://localhost:8080/"} id="ty8WY5PuDDKb" outputId="d81b1c91-3d05-4e26-df39-d26f4133cfa2"
#-- Evaluate on the held-out test set
Y_pred = model(X_test_tensor)
loss = criterion(Y_pred, Y_test_tensor)
correct = (torch.argmax(Y_pred, dim=1) == Y_test_tensor).type(torch.FloatTensor)
print('Loss: {}, Acc:{}'.format(loss, correct.mean()))
notebooks/07_Arquitectura_RN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # How to work with openpyxl # # A lot of times we need to use python to either create Excel reports with fancy formating and formulas or update existing reports with new data. Up to now there is no perfect python library that does a good job working with Excel files # # ### Introduction on openpyxl # # #### Installation # - The installation is pretty straight forward: `pip install openpyxl` # - For more details, go to this page: https://openpyxl.readthedocs.io/en/stable/index.html # # #### Example setup # - `sample.xlsx` has two worksheets # - `PFC.csv` file is a csv file. Data in this file will be inserted into `sample.xlsx` file # # #### Example 1: Insert rows # - This example shows how to insert rows into an existing worksheet. # - New rows will be inserted under the header row and on top of existing data rows. # - The screenshot before inserting new rows # # ![title](img/openpyxl-1.png) # # - The screenshot after new rows are inserted # # ![title](img/openpyxl-2.png) # # #### Example 2: Replace with new data # - This example shows how to first clear contents in an worksheet and then push new data to the worksheet # - First data in a worksheet will be cleared; Then new data will be pushed to the worksheet, but the formatting will be kept. 
# - The screenshot before replacing worksheet with new data
#
# ![title](img/openpyxl-3.png)
#
# - The screenshot after replacing worksheet with new data
#
# ![title](img/openpyxl-4.png)
#
# ### Limitations
# - one big limitation is this package can't read password protected Excel files though it can create password protected excel files

# load libraries
import pandas as pd
from openpyxl import Workbook, load_workbook
from openpyxl.utils.dataframe import dataframe_to_rows

# read sample excel file
wb = load_workbook(filename='sample.xlsx')
print(wb.sheetnames)

# read new data file
df_new = pd.read_csv('PFE.csv')
df_new.shape
df_new.head()

# #### Example 1: Insert rows

# Demonstrates how to insert new rows on top (below the header row) and
# fill them with the first rows of df_new.
ws = wb['PFE']
n_insert = 5  # number of blank rows to insert below the header
ws.insert_rows(idx=2, amount=n_insert)
# openpyxl ranges are inclusive, so rows 2..(n_insert + 1) are the new rows.
i = 0
for row in ws['A2:G' + str(n_insert + 1)]:
    j = 0
    for cell in row:
        cell.value = df_new.iloc[i, j]
        j = j + 1
    i = i + 1

# #### Example 2: Replace with new data

# +
# first clear the existing contents (values only -- formatting is kept),
# then assign the new values
ws = wb['PFE2']
print(ws.max_row, ws.max_column)
for row in ws['A2:G' + str(ws.max_row)]:
    for cell in row:
        cell.value = None
print(ws.max_row)

# FIX: data starts in row 2, so the last data row is df_new.shape[0] + 1.
# The original range 'A2:G' + str(df_new.shape[0]) covered one row too few
# and silently dropped the final row of the DataFrame.
i = 0
for row in ws['A2:G' + str(df_new.shape[0] + 1)]:
    j = 0
    for cell in row:
        cell.value = df_new.iloc[i, j]
        j = j + 1
    i = i + 1
# -

wb.save(filename='sample.xlsx')
openpyxl.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <span style="color:orange">Association Rule Mining Tutorial (ARUL101)</span> # **Date Updated: Feb 25, 2020** # # # 1.0 Objective of Tutorial # Welcome to Association Rule Mining Tutorial (#ARUL101). This tutorial assumes that you are new to PyCaret and looking to get started with Association Rule Mining using `pycaret.arules` Module. # # In this tutorial we will learn: # # # * **Getting Data:** How to import data from PyCaret repository? # * **Setting up Environment:** How to setup experiment in PyCaret to get started with association rule mining? # * **Create Model:** How to create a model to evaluate results? # * **Plot Model:** How to analyze model using various plots? # # Read Time : Approx. 15 Minutes # # # ## 1.1 Installing PyCaret # First step to get started with PyCaret is to install pycaret. Installing pycaret is easy and take few minutes only. Follow the instructions below: # # #### Installing PyCaret in Local Jupyter Notebook # `pip install pycaret` <br /> # # #### Installing PyCaret on Google Colab or Azure Notebooks # `!pip install pycaret` # # # ## 1.2 Pre-Requisites # - Python 3.x # - Latest version of pycaret # - Internet connection to load data from pycaret's repository # - Basic Knowledge of Association Rule Mining # # ## 1.3 For Google colab users: # If you are running this notebook on Google colab, below code of cells must be run at top of the notebook to display interactive visuals.<br/> # <br/> # `from pycaret.utils import enable_colab` <br/> # `enable_colab()` # # 2.0 What is Association Rule Mining? # Association rule learning is a rule-based machine learning method for discovering interesting relations between variables in large databases. 
It is intended to identify strong rules discovered in databases using some measures of interestingness. For example, the rule {onions, potatoes} --> {burger} found in the sales data of a supermarket would indicate that if a customer buys onions and potatoes together, they are likely to also buy a burger. Such information can be used as the basis for decisions about marketing activities such as, e.g., promotional pricing or product placements.
#
# __[Learn More about Association Rule Mining](https://en.wikipedia.org/wiki/Association_rule_learning)__

# # 3.0 Overview of Association Rule Module in PyCaret
# PyCaret's association rule module (`pycaret.arules`) is an unsupervised machine learning module which is used for discovering interesting relations between variables in a dataset. This module automatically transforms any transactional database into a shape that is acceptable for the apriori algorithm. Apriori is an algorithm for frequent item set mining and association rule learning over relational databases.

# # 4.0 Dataset for the Tutorial
# For this tutorial we will use a small sample from a UCI dataset called **Online Retail Dataset**. This is a transactional dataset which contains transactions occurring between 01/12/2010 and 09/12/2011 for a UK-based and registered non-store online retail. The company mainly sells unique all-occasion gifts. Many customers of the company are wholesalers. A short description of the attributes is as follows:
#
# - **InvoiceNo:** Invoice number. Nominal, a 6-digit integral number uniquely assigned to each transaction. If this code starts with letter 'c', it indicates a cancellation.
# - **StockCode:** Product (item) code. Nominal, a 5-digit integral number uniquely assigned to each distinct product.
# - **Description:** Product (item) name. Nominal.
# - **Quantity:** The quantities of each product (item) per transaction. Numeric.
# - **InvoiceDate:** Invoice date and time. Numeric, the day and time when each transaction was generated.
# - **UnitPrice:** Unit price. Numeric, Product price per unit in sterling. # - **CustomerID:** Customer number. Nominal, a 5-digit integral number uniquely assigned to each customer. # - **Country:** Country name. Nominal, the name of the country where each customer resides. # # #### Dataset Acknowledgement: # Dr <NAME>, Director: Public Analytics group. <EMAIL>, School of Engineering, London South Bank University, London SE1 0AA, UK. # # # The original dataset and data dictionary can be __[found here.](http://archive.ics.uci.edu/ml/datasets/online+retail)__ # # 5.0 Getting the Data # You can download the data from the original source __[found here](http://archive.ics.uci.edu/ml/datasets/online+retail)__ and load it using pandas __[(Learn How)](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html)__ or you can use PyCaret's data respository to load the data using `get_data()` function (This will require internet connection). from pycaret.datasets import get_data data = get_data('france') # **Note:** If you are downloading the data from original source, you have to filter `Country` for 'France' only, if you wish to reproduce the results in this experiment. #check the shape of data data.shape # # 6.0 Setting up Environment in PyCaret # `setup()` function initializes the environment in pycaret and transforms the transactional dataset into shape that is acceptable to Apriori algorithm. It requires two mandatory parameters: `transaction_id` which is the name of column representing transaction id and will be used to pivot the matrix; and `item_id` which is the name of column used for creation of rules. Normally, this will be the variable of interest. You can also pass an optional parameter `ignore_items` to ignore certain values for creation of rule. 
from pycaret.arules import * exp_arul101 = setup(data = data, transaction_id = 'InvoiceNo', item_id = 'Description') # Once the setup is succesfully executed it prints the information grid that contains few important information: # # - **# Transactions :** Unique number of transactions in the dataset. In this case unique `InvoiceNo`. <br/> # <br/> # - **# Items :** Unique number of items in the dataset. In this case `Description`. <br/> # <br/> # - **Ignore Items :** Items to be ignored in rule mining. Many times there are relations which are too obvious and you might want to ignore them for this analysis. For example: many transactional dataset will contain shipping cost which is very obvious relationship that can be ignored in `setup()` using `ignore_items` parameter. In this tutorial, we will run the `setup()` twice, first without ignoring any items and later with ignoring items. <br/> # # 7.0 Create a Model # Creating a association rule model is simple. `create_model()` requires no mandatory parameter. It has 4 optional parameters which are as follows: # # - **metric:** Metric to evaluate if a rule is of interest. Default is set to confidence. Other available metrics include 'support', 'lift', 'leverage', 'conviction'. <br/> # <br/> # - **threshold:** Minimal threshold for the evaluation metric, via the `metric` parameter, to decide whether a candidate rule is of interest. Default is set to `0.5`. <br/> # <br/> # - **min_support:** A float between 0 and 1 for minumum support of the itemsets returned. The support is computed as the fraction `transactions_where_item(s)_occur / total_transactions`. Default is set to `0.05`. <br/> # <br/> # - **round:** Number of decimal places metrics in score grid will be rounded to. <br/> # # Let's create an association rule model with all default values. model1 = create_model() #model created and stored in model1 variable. print(model1.shape) #141 rules created. 
model1.head() #see the rules # ___ # # 8.0 Setup with `ignore_items` # In `model1` created above, notice that the number 1 rule is of `JUMBO BAG WOODLAND ANIMALS` with `POSTAGE` which is very obvious. In example below, we will use `ignore_items` parameter in `setup()` to ignore `POSTAGE` from the dataset and re-create the association rule model. exp_arul101 = setup(data = data, transaction_id = 'InvoiceNo', item_id = 'Description', ignore_items = ['POSTAGE']) model2 = create_model() print(model2.shape) #notice how only 45 rules are created vs. 141 above. model2.head() # # 9.0 Plot Model plot_model(model2) plot_model(model2, plot = '3d')
Tutorials/Association Rule Mining Tutorial - ARUL01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import random

# # Old way

# +
from Gillespie_SEIR_function_diffnetwork import single_model_run_SEIR

n = 20
final_timepoint = 10
total_pop = 10**4

b = 0.8
# gamma: probability of becoming infectious
g = 1
# mu: probability of recovery
m = 1/5
# lambda: probability of losing immunity
l = 2/365
# p: probability of moving
p = 0.01

# NOTE(review): `edges_mat` is only defined further down in this notebook
# (by create_edge_matrix), so this cell raises NameError unless cells are
# executed out of order -- confirm intended execution order.
t,x,em = single_model_run_SEIR( final_timepoint, total_pop, n, [b,g,m,l,p], edges_mat )
# -

# # Splitting it up into functions (although I don't really understand what they do)


# +
def create_edge_matrix(n_nodes, min_edges):
    # Build a symmetric 0/1 adjacency matrix for a random network whose node
    # degrees are drawn from a truncated power-law (p(k) ~ k**-3) between
    # min_edges and floor(sqrt(n_nodes)), using stub matching.
    n = n_nodes
    max_edges = np.floor(np.sqrt(n)) # maximum number of edges for a node
    max_edges = max_edges.astype(int)
    # get poisson distribution of degrees
    pdf = [x**(-3) for x in range(min_edges,max_edges+1)]
    cdf = np.cumsum(pdf)
    n_edges_per_node = np.random.uniform(0,cdf[-1],n)
    list_of_stubs = [] # get number of edges per node, save and save in list of stubs
    for i in range(n_nodes):
        c_edges = 0
        # find minimum number of degrees for which U(0,1) is more than poisson prob.
        while n_edges_per_node[i] > cdf[c_edges]:
            c_edges += 1
        n_edges_per_node[i] = c_edges + min_edges # save number of edges
        list_of_stubs.extend([i]*n_edges_per_node[i].astype(int)) # save number of stubs for network building
    if len(list_of_stubs)%2 != 0: #if the number of edges is not even, we need to fix that
        r_int = random.randint(0,n-1)
        # NOTE(review): r_int is drawn from node ids (0..n-1) but is used both
        # as an INDEX into list_of_stubs (in the count check) and as a VALUE
        # to remove (remove(r_int)). The degree check and the removal can
        # therefore refer to different nodes -- presumably both were meant to
        # refer to the same node; confirm and make them consistent.
        while list_of_stubs.count(list_of_stubs[r_int]) <= min_edges: # check that we don't decrease degree belwo minimum
            r_int = random.randint(0,n-1)
        list_of_stubs.remove(r_int)
    edges_mat = np.zeros((n,n)) # initiate edges matrix
    list_of_edges = [] # initiate list of all edges in network
    while len(list_of_stubs)>0:
        if (len(list_of_stubs) == 2) & (list_of_stubs[0] == list_of_stubs[1]): # cannot connect to own node
            # break up previously made edge, and make two new ones
            edges_mat[last_edge[0]][last_edge[1]] = 0
            edges_mat[last_edge[1]][last_edge[0]] = 0
            edges_mat[last_edge[0]][list_of_stubs[0]] = 1
            edges_mat[last_edge[1]][list_of_stubs[1]] = 1
            edges_mat[list_of_stubs[0]][last_edge[0]]= 1
            edges_mat[list_of_stubs[1]][last_edge[1]] = 1
            break
        edge = random.sample(list_of_stubs,2) # create edge from two stubs
        # NOTE(review): `~` is bitwise NOT, not logical `not`. This happens to
        # work because Python bools are 0/1 (1 & ~True == 0), but `and not`
        # would be the correct, readable operator here.
        if (edge[0] != edge[1]) & ~(edge in list_of_edges) & ~([edge[1], edge[0]] in list_of_edges): # check if not connecting to self and edge doesn't already exist
            edges_mat[edge[0]][edge[1]] = 1 # connect nodes in edges matrix
            edges_mat[edge[1]][edge[0]] = 1
            list_of_stubs.remove(edge[0]) # remove stubs from list
            list_of_stubs.remove(edge[1])
            last_edge = edge
            list_of_edges.append(edge)
    return edges_mat


def create_stochiometric_matrix( n_compartments, n_reactions_per_compartment, edges_mat):
    # Build the stoichiometric matrix: one row per possible reaction
    # (within-patch S->E->I->R->S transitions plus per-compartment movement
    # along every edge), one column per (node, compartment) state variable.
    n = edges_mat.shape[0]
    c = n_compartments
    n_rxn = n_reactions_per_compartment
    rxn = np.zeros((n*(n_rxn+n*c), n*c))
    for i in range(n):
        # compartment reactions
        StoE = np.repeat(0,n*c)
        StoE[i*c] = -1
        StoE[i*c+1] = 1
        rxn[i*(n_rxn+(n*c))] = StoE
        EtoI = np.repeat(0,n*c)
        EtoI[i*c+1] = -1
        EtoI[i*c+2] = 1
        rxn[i*(n_rxn+(n*c))+1] = EtoI
        ItoR = np.repeat(0,n*c)
        ItoR[i*c+2] = -1
        ItoR[i*c+3] = 1
        rxn[i*(n_rxn+(n*c))+2] = ItoR
        RtoS = np.repeat(0,n*c)
        RtoS[i*c+3] = -1
        RtoS[i*c] = 1
        rxn[i*(n_rxn+(n*c))+3] = RtoS
        # movement reactions
        #count = 0
        for j in range(n):
            if edges_mat[i][j] == 1:
                Sitoj = np.repeat(0,n*c)
                Sitoj[i*c] = -1
                Sitoj[j*c] = 1
                rxn[i*(n_rxn+(n*c)) + c + j*c] = Sitoj
                Eitoj = np.repeat(0,n*c)
                Eitoj[i*c + 1] = -1
                Eitoj[j*c + 1] = 1
                rxn[i*(n_rxn+(n*c)) + c + j*c + 1] = Eitoj
                Iitoj = np.repeat(0,n*c)
                Iitoj[i*c + 2] = -1
                Iitoj[j*c + 2] = 1
                rxn[i*(n_rxn+(n*c)) + c + j*c + 2] = Iitoj
                Ritoj = np.repeat(0,n*c)
                Ritoj[i*c + 3] = -1
                Ritoj[j*c + 3] = 1
                rxn[i*(n_rxn+(n*c)) + c + j*c + 3] = Ritoj
    return rxn


def calculate_propensities(n_nodes, n_compartments, n_edges_per_node, x, parameters):
    # Compute the Gillespie propensity of every reaction, in the same order
    # as the rows produced by create_stochiometric_matrix.
    b = parameters.rate_infection
    g = parameters.rate_infectious
    m = parameters.rate_recovery
    l = parameters.rate_waning
    p = parameters.rate_moving
    n = n_nodes
    c = n_compartments
    prop = np.array([])
    for i in range(n):
        # patch 1: S to E, E to I, I to R, R to S, move 1 to 2 (for each comp), move 1 to 3 (for each comp)
        N = np.sum(x[i*c:(i+1)*c])
        if N == 0:
            pinfect = np.zeros(c)
            pmove = np.zeros(n*c)
        else:
            pinfect = np.array([(1-(1-b/N)**x[i*c+2])*x[i*c], g*x[i*c+1], m*x[i*c+2], l*x[i*c+3]])
            pmove = np.zeros(n*c)
            for j in range(n):
                # NOTE(review): this reads the module-level `edges_mat`
                # (defined by a later cell) rather than a parameter --
                # fragile; consider passing edges_mat in explicitly.
                if edges_mat[i][j] == 1:
                    first_index = j*c
                    pmove[first_index:first_index+c] = p/n_edges_per_node[i] *np.array([x[i*c], x[i*c+1], x[i*c+2], x[i*c+3]])
            pmove = np.array(pmove)
        p_temp = np.concatenate((pinfect, pmove))
        prop = np.concatenate((prop, p_temp))
    return prop


class Parameters():
    # Simple container for the five SEIRS rate constants.
    def __init__(self, rate_infection, rate_infectious, rate_recovery, rate_waning, rate_moving):
        self.rate_infection = rate_infection    # beta: per-contact infection rate
        self.rate_infectious = rate_infectious  # gamma: E -> I rate
        self.rate_recovery = rate_recovery      # mu: I -> R rate
        self.rate_waning = rate_waning          # lambda: R -> S rate
        self.rate_moving = rate_moving          # p: per-capita movement rate


def run_model( t_final, total_pop, parameters, edges_mat, n_compartments, n_reactions_within_compartments):
    # Run one Gillespie (SSA) realization of the SEIRS metapopulation model
    # on the network given by edges_mat until time t_final.
    # Returns (event times, state trajectory, edges matrix).
    c = n_compartments
    n_rxn = n_reactions_within_compartments
    n = edges_mat.shape[0]
    approx_total_number_individuals = total_pop
    avg_pop_size = approx_total_number_individuals / n
    x = np.repeat(0,n*c) # initialized compartments for all nodes
    x[0:-1:c] = np.rint(np.random.normal(1,0.5,n)*avg_pop_size) # initial all susceptible populations
    # infect random node
    init_node = random.randint(0,n-1)*c
    x[init_node:init_node+c] = [x[init_node]*0.95, 0, x[init_node]*0.05, 0] # initial infections in first node
    x[x < 0] = 0
    x_store = [x]
    t_store = [0]
    t = 0
    # Pre-draw a large pool of U(0,1) numbers; refilled when exhausted.
    rand_to_generate = 10**6
    random_numbers = np.random.uniform(0,1,rand_to_generate)
    random_number_count = 0
    n_edges_per_node = [np.sum(x) for x in edges_mat]
    rxn = create_stochiometric_matrix( n_compartments, n_reactions_within_compartments, edges_mat)
    tracker = 0  # progress printout roughly once per unit of simulated time
    while t <= t_final:
        if tracker > 1:
            print(t)
            tracker = 0
        prop = calculate_propensities(n, c, n_edges_per_node, x, parameters)
        prop_cumsummed = np.cumsum(prop)
        if random_number_count + 1 >= rand_to_generate:
            random_numbers = np.random.uniform(0,1,rand_to_generate)
            random_number_count = 0
        # compute time step (exponential waiting time with rate = total propensity)
        dt = - np.log(random_numbers[random_number_count])*(1/prop_cumsummed[-1])
        random_number_count += 1
        t = t + dt
        tracker += dt
        t_store.append(t)
        # choose reaction proportionally to its propensity
        rn = random_numbers[random_number_count]
        random_number_count += 1
        prop_cumsummed = np.divide(prop_cumsummed,prop_cumsummed[-1])
        for count in range(len(prop)):
            if rn <= prop_cumsummed[count]:
                break
        # find reaction in list from above
        current_rxn = rxn[count]
        # add reaction to population vector
        x = [sum(temp) for temp in zip(x, current_rxn)]
        x_store.append(x)
    return t_store, np.array(x_store), edges_mat
# -

# +
edges_mat = create_edge_matrix(20, 2)

parameters = Parameters( rate_infection=0.8, rate_infectious=1, rate_recovery=1/5, rate_waning=2/365, rate_moving=0.01)

n_compartments = 4
n_reactions_within_compartments = 4

t,x,em = run_model( 10, 10**4, parameters, edges_mat, n_compartments, n_reactions_within_compartments)
# -

# Open question: can we use available software to make an Erdos-Renyi graph?

# +
import networkx as nx

n = 10 # 10 nodes
m = 20 # 20 edges
seed = 20160 # seed random number generators for reproducibility

# Use seed for reproducibility
G = nx.gnm_random_graph(n, m, seed=seed)

# Makes adjacency matrix
A = nx.adjacency_matrix(G)
A[1,0]
# -
s_test_run.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.6.10 64-bit (''tensorflow2_p36'': conda)' # name: python3 # --- # + [markdown] id="EQFg6BDHKcc9" # ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) # # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://githubtocolab.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/KEYPHRASE_EXTRACTION.ipynb) # + [markdown] id="iOqOdIKkECq1" # # **Extract keyphrases from documents** # + [markdown] id="99Qqhw7sEFyx" # You can look at the example outputs stored at the bottom of the notebook to see what the model can do, or enter your own inputs to transform in the "Inputs" section. Find more about this keyphrase extraction model in another notebook [here](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/9.Keyword_Extraction_YAKE.ipynb). # + [markdown] id="mX0gNZv8MRtQ" # ## 1. Colab setup # + [markdown] id="MuXQkGilKWu7" # Install dependencies # + colab={"base_uri": "https://localhost:8080/"} id="C-Nz_2A8Jos8" outputId="33006a91-2566-4858-ce81-922b82d9629d" # Install PySpark and Spark NLP # ! pip install -q pyspark==3.1.2 spark-nlp # + [markdown] id="vb1y9TT8Ke_U" # Import dependencies # + id="WGMK0q_IIO_I" import json import pandas as pd import numpy as np # Import pyspark from pyspark.sql import SparkSession from pyspark.ml import PipelineModel from pyspark.sql import functions as F # Import SparkNLP import sparknlp from sparknlp.annotator import * from sparknlp.base import * # Start Spark session spark = sparknlp.start() # + [markdown] id="JYDxs7_CMtpf" # ## 2. Inputs # + [markdown] id="CLk2XcmDMwTI" # Enter inputs as strings in this list. Later cells of the notebook will extract keyphrases from whatever inputs are entered here. 
# + id="c_L-3nRuM0d4" input_list = [ """Extracting keywords from texts has become a challenge for individuals and organizations as the information grows in complexity and size. The need to automate this task so that text can be processed in a timely and adequate manner has led to the emergence of automatic keyword extraction tools. Yake is a novel feature-based system for multi-lingual keyword extraction, which supports texts of different sizes, domain or languages. Unlike other approaches, Yake does not rely on dictionaries nor thesauri, neither is trained against any corpora. Instead, it follows an unsupervised approach which builds upon features extracted from the text, making it thus applicable to documents written in different languages without the need for further knowledge. This can be beneficial for a large number of tasks and a plethora of situations where access to training corpora is either limited or restricted.""", """Iodine deficiency is a lack of the trace element iodine, an essential nutrient in the diet. It may result in metabolic problems such as goiter, sometimes as an endemic goiter as well as cretinism due to untreated congenital hypothyroidism, which results in developmental delays and other health problems. Iodine deficiency is an important global health issue, especially for fertile and pregnant women. It is also a preventable cause of intellectual disability. Iodine is an essential dietary mineral for neurodevelopment among offsprings and toddlers. The thyroid hormones thyroxine and triiodothyronine contain iodine. In areas where there is little iodine in the diet, typically remote inland areas where no marine foods are eaten, iodine deficiency is common. It is also common in mountainous regions of the world where food is grown in iodine-poor soil. Prevention includes adding small amounts of iodine to table salt, a product known as iodized salt. 
Iodine compounds have also been added to other foodstuffs, such as flour, water and milk, in areas of deficiency. Seafood is also a well known source of iodine.""", """The Prague Quadrennial of Performance Design and Space was established in 1967 to bring the best of design for performance, scenography, and theatre architecture to the front line of cultural activities to be experienced by professional and emerging artists as well as the general public. The quadrennial exhibitions, festivals, and educational programs act as a global catalyst of creative progress by encouraging experimentation, networking, innovation, and future collaborations. PQ aims to honor, empower and celebrate the work of designers, artists and architects while inspiring and educating audiences, who are the most essential element of any live performance. The Prague Quadrennial strives to present performance design as an art form concerned with creation of active performance environments, that are far beyond merely decorative or beautiful, but emotionally charged, where design can become a quest, a question, an argument, a threat, a resolution, an agent of change, or a provocation. Performance design is a collaborative field where designers mix, fuse and blur the lines between multiple artistic disciplines to search for new approaches and new visions. The Prague Quadrennial organizes an expansive program of international projects and activities between the main quadrennial events – performances, exhibitions, symposia, workshops, residencies, and educational initiatives serve as an international platform for exploring the practice, theory and education of contemporary performance design in the most encompassing terms.""", """Author <NAME> explained that the "approach to the sheer physicality of sound" integral to dream pop was "arguably pioneered in popular music by figures such as <NAME> and <NAME>". 
The music of the Velvet Underground in the 1960s and 1970s, which experimented with repetition, tone, and texture over conventional song structure, was also an important touchstone in the genre's development George Harrison's 1970 album All Things Must Pass, with its Spector-produced Wall of Sound and fluid arrangements, led music journalist <NAME> to credit it as a progenitor of the genre. Reynolds described dream pop bands as "a wave of hazy neo-psychedelic groups", noting the influence of the "ethereal soundscapes" of bands such as Cocteau Twins. Rolling Stone's Kory Grow described "modern dream pop" as originating with the early 1980s work of Cocteau Twins and their contemporaries, while PopMatters' AJ Ramirez noted an evolutionary line from gothic rock to dream pop. Grow considered Julee Cruise's 1989 album Floating into the Night, written and produced by <NAME> and <NAME>, as a significant development of the dream pop sound which "gave the genre its synthy sheen." The influence of Cocteau Twins extended to the expansion of the genre's influence into Cantopop and Mandopop through the music of Faye Wong, who covered multiple Cocteau Twins songs, including tracks featured in Chungking Express, in which she also acted. Cocteau Twins would go on to collaborate with Wong on original songs of hers, and Wong contributed vocals to a limited release of a late Cocteau Twins single. In the early 1990s, some dream pop acts influenced by My Bloody Valentine, such as Seefeel, were drawn to techno and began utilizing elements such as samples and sequenced rhythms. Ambient pop music was described by AllMusic as "essentially an extension of the dream pop that emerged in the wake of the shoegazer movement", distinct for its incorporation of electronic textures. Much of the music associated with the 2009-coined term "chillwave" could be considered dream pop. 
In the opinion of Grantland's David Schilling, when "chillwave" was popularized, the discussion that followed among music journalists and bloggers revealed that labels such as "shoegaze" and "dream pop" were ultimately "arbitrary and meaningless".""", """North Ingria was located in the Karelian Isthmus, between Finland and Soviet Russia. It was established 23 January 1919. The republic was first served by a post office at the Rautu railway station on the Finnish side of the border. As the access across the border was mainly restricted, the North Ingrian postal service was finally launched in the early 1920. The man behind the idea was the lieutenant colonel <NAME>, head of the governing council of North Ingria. He was also known as an enthusiastic stamp collector. The post office was opened at the capital village of Kirjasalo. The first series of North Ingrian stamps were issued in 21 March 1920. They were based on the 1917 Finnish "Model Saarinen" series, a stamp designed by the Finnish architect <NAME>. The first series were soon sold to collectors, as the postage stamps became the major financial source of the North Ingrian government. The second series was designed for the North Ingrian postal service and issued 2 August 1920. The value of both series was in Finnish marks and similar to the postal fees of Finland. The number of letters sent from North Ingria was about 50 per day, most of them were carried to Finland. They were mainly sent by the personnel of the Finnish occupying forces. Large number of letters were also sent in pure philatelic purposes. With the Treaty of Tartu, the area was re-integrated into Soviet Russia and the use of the North Ingrian postage stamps ended in 4 December 1920. Stamps were still sold in Finland in 1921 with an overprinting "Inkerin hyväksi" (For the Ingria), but they were no longer valid. 
Funds of the sale went for the North Ingrian refugees.""" ] # Change these to wherever you want your inputs and outputs to go INPUT_FILE_PATH = "inputs" OUTPUT_FILE_PATH = "outputs" # + [markdown] id="nF1kBJ9vA9kx" # Write the example inputs to the input folder. # + id="UITSel_Yr4IC" # ! mkdir -p $INPUT_FILE_PATH for i, text in enumerate(input_list): open(f'{INPUT_FILE_PATH}/Example{i + 1}.txt', 'w') \ .write(text[:min(len(text) - 10, 100)] + '... \n' + text) # + [markdown] id="J0zS5R_7MV7T" # ## 3. Pipeline creation # + [markdown] id="8z7wW-eIMoN2" # Create the NLP pipeline. # + id="bpIz2L-bIO_Y" # Transforms the raw text into a document readable by the later stages of the # pipeline document_assembler = DocumentAssembler() \ .setInputCol('text') \ .setOutputCol('document') # Separates the document into sentences sentence_detector = SentenceDetector() \ .setInputCols(['document']) \ .setOutputCol('sentences')# \ #.setDetectLists(True) # Separates sentences into individial tokens (words) tokenizer = Tokenizer() \ .setInputCols(['sentences']) \ .setOutputCol('tokens') \ .setContextChars(['(', ')', '?', '!', '.', ',']) # The keyphrase extraction model. Change MinNGrams and MaxNGrams to set the # minimum and maximum length of possible keyphrases, and change NKeywords to # set the amount of potential keyphrases identified per document. keywords = YakeKeywordExtraction() \ .setInputCols('tokens') \ .setOutputCol('keywords') \ .setMinNGrams(2) \ .setMaxNGrams(5) \ .setNKeywords(100) \ .setStopWords(StopWordsCleaner().getStopWords()) # Assemble all of these stages into a pipeline, then fit the pipeline on an # empty data frame so it can be used to transform new inputs. 
pipeline = Pipeline(stages=[
    document_assembler,
    sentence_detector,
    tokenizer,
    keywords
])

# Fitting on an empty data frame only initializes the annotators -- YAKE is
# unsupervised, so no actual training happens here.
empty_df = spark.createDataFrame([[""]]).toDF('text')
pipeline_model = pipeline.fit(empty_df)

# LightPipeline is faster than Pipeline for small datasets
light_pipeline = LightPipeline(pipeline_model)

# + [markdown] id="NZsaT1M_Mapv"
# ## 4. Output creation

# + [markdown] id="InE4U2Fq-ih4"
# Utility functions to create more useful sets of keyphrases from the raw data frame produced by the model.

# + id="xR7dJVA53qKK"
def adjusted_score(row, pow=2.5):
    """This function adjusts the scores of potential key phrases to give
    better scores to phrases with more words (which will naturally have
    worse scores due to the nature of the model). You can change the
    exponent to reward longer phrases more or less. Higher exponents reward
    longer phrases."""
    # YAKE scores are "lower is better", hence the inversion; the +0.1
    # guards against division by zero when the raw score is 0.
    return ((row.result.count(' ') + 1) ** pow /
            (float(row.metadata['score']) + 0.1))


def get_top_ranges(phrases, input_text):
    """Combine phrases that overlap.

    `phrases` is a list of dicts carrying 'begin'/'end' character offsets;
    overlapping [begin, end] intervals are merged and each merged range is
    mapped back to the covered substring of the input text.
    """
    starts = sorted([row['begin'] for row in phrases])
    ends = sorted([row['end'] for row in phrases])
    ranges = [[starts[0], None]]
    for i in range(len(starts) - 1):
        # A gap between the current end and the next start closes the
        # current merged range and opens a new one.
        if ends[i] < starts[i + 1]:
            ranges[-1][1] = ends[i]
            ranges.append([starts[i + 1], None])
    ranges[-1][1] = ends[-1]
    return [{
        'begin': range[0],
        'end': range[1],
        'phrase': input_text[4][range[0]:range[1] + 1]  # [4] for last text
    } for range in ranges]


def remove_duplicates(phrases):
    """Remove phrases that appear multiple times, keeping the first
    occurrence. The list is modified in place and also returned."""
    i = 0
    while i < len(phrases):
        j = i + 1
        while j < len(phrases):
            if phrases[i]['phrase'] == phrases[j]['phrase']:
                # Delete in place and do NOT advance j: the next element has
                # just shifted into position j. (Advancing j after a removal
                # skipped that element, so back-to-back duplicates could
                # survive.)
                del phrases[j]
            else:
                j += 1
        i += 1
    return phrases


def get_output_lists(df_row):
    """Returns a tuple with two lists of five phrases each.

    The first combines key phrases that overlap to create longer key
    phrases, which is best for highlighting key phrases in text, and the
    second is simply the keyphrases with the highest scores, which is best
    for summarizing a document."""
    keyphrases = []
    for row in df_row.keywords[4]:  # [4] for last text
        keyphrases.append({
            'begin': row.begin,
            'end': row.end,
            'phrase': row.result,
            'score': adjusted_score(row)
        })
    # Higher adjusted score = better candidate.
    keyphrases = sorted(keyphrases, key=lambda x: x['score'], reverse=True)
    return (
        get_top_ranges(keyphrases[:20], df_row.text)[:5],
        remove_duplicates(keyphrases[:10])[:5]
    )

# + [markdown] id="u9EswUu9-vPo"
# Transform the example inputs to create a data frame storing the identified keyphrases.

# + id="4YAV9JFfIO_f"
df = spark.createDataFrame(pd.DataFrame({'text': input_list}))
result = light_pipeline.transform(df).toPandas()

# + [markdown] id="sPQG2x_--3Ik"
# For each example, create two JSON files containing selections of the best keyphrases for the document. See the docstring of `get_output_lists` two cells above to learn more about the two JSON files produced. These JSON files are used directly in the public demo app for this model.

# + id="X-y9D7PRCiS8"
# ! mkdir -p $OUTPUT_FILE_PATH
for i in range(len(result)):
    top_ranges, top_summaries = get_output_lists(result.iloc[i])
    with open(f'{OUTPUT_FILE_PATH}/Example{i + 1}.json', 'w') as ranges_file:
        json.dump(top_ranges, ranges_file)
    with open(f'{OUTPUT_FILE_PATH}/Example{i + 1}_summaries.json', 'w') \
            as summaries_file:
        json.dump(top_summaries, summaries_file)

# + [markdown] id="PIJtDyaRI6hh"
# ## 5.
Visualize outputs # + [markdown] id="LaZpx6SaJCBN" # The raw pandas data frame containing the outputs # + colab={"base_uri": "https://localhost:8080/", "height": 293} id="mzgZwqv9I6Cl" outputId="7ab459e5-9bb7-424b-c4f0-0ac377359917" result # + [markdown] id="6EbaXFSRJX8s" # The list of the top keyphrases (with overlapping keyphrases merged) for the last example # - top_ranges, top_summaries = get_output_lists(result.loc[4:,["keywords","text"]]) # + colab={"base_uri": "https://localhost:8080/"} id="a5j9LTWLJI_L" outputId="a9f77026-30f6-4b87-abb1-19ba4ce093de" top_ranges # + [markdown] id="3SrNEDaOJl6V" # The list of the best summary kephrases (with duplicates removed) for the last example # + colab={"base_uri": "https://localhost:8080/"} id="C0I68Le1JM_8" outputId="ebbd510c-da97-4d9d-f461-d07e6732535d" top_summaries
tutorials/streamlit_notebooks/KEYPHRASE_EXTRACTION.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Avancée dans les structures itératives # # Dans ce *notebook*, nous allons compléter les données compilées du fichier CSV avec des moyennes actualisées pour chaque discpline ainsi qu’une moyenne générale tout aussi actualisée. # # Par exemple, l’élève que nous suivons obtient sa première note de l’année, un 13, en français, le 26 septembre. Ses moyennes générale et dans la discipline sont donc toutes deux de 13 en date du 26 septembre. Le 2 octobre il reçoit un 12 en Histoire-Géographie puis le lendemain un 8 en français. Nous pouvons en déduire que : # - en date du 2 octobre : sa moyenne générale est passée de 13 à 12,5. # - en date du 3 octobre : sa moyenne générale est passée de 12,5 à 11 et sa moyenne en français a chuté de 13 à 10,5 # # En termes pythonesques, pour chaque nouvel enregistrement, nous allons calculer automatiquement les moyennes générale et dans la discipline afin de rajouter l’information dans la structure de données. 
Si l’état de notre structure est la suivante : # # ```py # { # 'Français': [ # ('2020-09-26', '13'), # ('2020-12-13', '7'), # ('2020-12-25', '6') # ], # 'Anglais': [ # ('2020-09-03', '6'), # ('2020-09-29', '9'), # ('2020-10-14', '9') # ] # } # ``` # # L’objectif est de l’enrechir de cette manière : # # ```py # { # 'Français': [ # ('2020-09-26', '13', 13), # ('2020-12-13', '7', 10), # ('2020-12-25', '6', 8.66) # ], # 'Anglais': [ # ('2020-09-03', '6', 6), # ('2020-09-29', '9', 7.5), # ('2020-10-14', '9', 8) # ], # 'Générale': { # '2020-09-03': 6, # '2020-09-26': 9.5, # '2020-09-29': 9.33, # '2020-10-14': 9.25, # '2020-12-13': 8.8, # '2020-12-25': 8.33 # } # } # ``` # # Commençons par récupérer le dictionnaire de nos données : # + # Importation des modules import csv from collections import defaultdict # Chargement de la ressource dans une variable 'fichier' with open('notes.csv') as fichier: # Création d'un lecteur de fichier lecteur = csv.DictReader(fichier, delimiter='\t') # Sauvegarde des lignes du fichier data = [ ligne for ligne in lecteur ] # Une liste de tuples par discipline data_dict = defaultdict(list) for d in data: data_dict[d['discipline']].append( ( d['date'], d['note'] ) ) # Tri en place des dates par discpline for discipline, dates in data_dict.items(): dates.sort() # - # ## Focus sur les boucles # # Les structures du type `for … in … :` sont appelées *structures itératives*, ou *boucles*. Elles mobilisent un itérateur afin d’interroger successivement et de manière systématique chaque élément de la structure. # # Le langage Python mobilise un mécanisme interne qui consiste à initialiser un compteur à 0 qui s’incrémentera à chaque passage jusqu’à atteindre le nombre total d’éléments dans la structure, moins une unité. # # Pour illustrer le mécanisme, imaginons la liste des ingrédients d’une pizza : pizza = ['tomate', 'poivrons', 'mozzarella', 'thon'] # Cette pizza dispose de quatre ingrédients au total. 
Python attribue un indice à chacun de ces ingrédients, en commençant à 0 : # - indice 0 : tomate # - indice 1 : poivrons # - indice 2 : mozzarella # - indice 3 : thon # # Pour te convaincre, il suffit d’accéder de manière atomique à chaque élément de la liste : print(pizza[0]) print(pizza[1]) print(pizza[2]) print(pizza[3]) # Revenons à notre boucle `for … in … :`. # # Au premier passage dans la boucle, un compteur est initialisé à 0. Python traite la donnée enregistrée à l’indice de la valeur du compteur, à savoir 0, qui contient la tomate. À la fin du traitement, le compteur est incrémenté, ce qui signifie que Python lui ajoute 1. # # Au second passage dans la boucle, le compteur vaut 1, aussi Python traite l’information à l’indice 1, à savoir les poivrons, et ainsi de suite jusqu’au moment où le dernier ingrédient, à l’indice 3 aura été traité. À ce moment, le compteur interne passe à 4, qui est au-dessus de l’indice maximal de la liste des ingrédients de notre pizza (3). Python sort alors de la boucle. # # Pour révéler le compteur, Python met à disposition une fonction `enumerate()` : for compteur, ingredient in enumerate(pizza): print(compteur, ingredient) # Un exemple un peu plus complexe, qui se rapproche de notre projet, est celui d’une liste de courses avec, pour chaque élément, une quantité : courses = [ ('lait', 6), ('riz', 2), ('eau', 6), ('pois cassés', 1) ] # Avec une boucle simple, nous pouvons lister les tuples : for produit in courses: print(produit) # Pour accéder indépendamment à chaque composant d’un élément (le nom du produit et la quantité à acheter), nous devons déplier le tuple. 
Comme chaque tuple est composé de deux composants, on utilise deux variables : for produit, quantite in courses: print(quantite, produit) # Si maintenant on veut en plus révéler l’itérateur interne de l’objet avec la fonction `enumerate()`, on doit procéder de manière plus subtile : for i, (produit, quantite) in enumerate(courses): print(i, quantite, produit) # La raison est assez simple à comprendre : la fonction `enumerate()` renvoie une structure composée de deux éléments (l’itérateur et l’objet, peu importe sa complexité). Dans le cas d’un objet complexe, ici un tuple, on a besoin de révéler sa structure dans l’expression de la boucle. # ## Calculer la moyenne actualisée d’une discipline # # Avant d’opérer sur le fichier complet, concentrons-nous sur une discipline, les mathématiques : # + # Sélection des notes en maths maths = data_dict["Mathématiques"] # Pour chaque date et note… for date, note in maths: # … afficher la date : note print(f"{date} : {note}") # - # En t’aidant de la fonction `enumerate()` et de ce que nous avons expliqué à la section précédente, tu devrais être en mesure de révéler l’itérateur de la variable `maths` tout en accédent de manière individuelle à ses composants : # + # Pour chaque élément de "maths" # Imprimer l'état du compteur, la date de la note et la note # - # Attardons-nous sur les deux premières lignes : # # ``` # 0 2020-09-04 10 # 1 2020-09-05 10 # ``` # # En date du 4 septembre, notre élève a obtenu sa première note en mathématique, un 10. Sa moyenne vaut donc 10. 
Sachant que le calcul d’une moyenne arithmétique s’obtient en faisant la somme des notes que l’on divise ensuite par le nombre de notes, pour obtenir une moyenne de 10, nous avons effectué l’opération suivante : # # $10 \div 1 = 10$ # # Au tour suivant de la boucle, lorsque le compteur vaudra 1, le calcul deviendra : # # $(10 + 10) \div 2 = 10$ # # On remarque une règle linéaire entre le compteur et le nombre de tours dans la boucle, que l’on peut traduire dans un tableau : # # |Compteur|Nb de tours| # |-|-| # |0|1| # |1|2| # |2|3| # |…|…| # |$c$|$c+1$| # # Maintenant, essaie d’afficher côte-à-côte le compteur et le nombre de tours dans la boucle : # + # Pour chaque élément de "maths" # Imprimer l'état du compteur et le nombre de tours # - # À ce stade, nous avons le nombre de notes obtenues à une date précise (égal au nombre de tours dans la boucle) et la note du jour. Il nous manque un calcul : la somme des notes obtenues jusque-là. # # Le plus simple est de créer une variable, en dehors de la boucle, qui vaut 0 au départ et à laquelle on additionne chaque valeur analysée. Un exemple simple avec une liste de notes : # + # Une liste de notes notes = [8, 12, 15, 9] # Total initialisé à 0 total = 0 # Pour chaque… for i, note in enumerate(notes): # On ajoute au total la note actuelle # Syntaxe abrégée : total += note total = total + note # Affichage du total à chaque étape print(i, total) # - # À toi d’inclure cette astuce dans ton algorithme pour calculer à chaque étape le total et pour le diviser par le nombre de tours pour obtenir la moyenne. On voudrait obtenir un affichage tel que : # # ``` # 2020-09-04 10 10 # 2020-09-05 10 10 # 2020-09-30 8 9.33 # 2020-10-09 10 9.5 # … # ``` # # **Attention !** Souviens-toi de la fonction `int()` qui permet d’effectuer une conversion de type d’une chaîne de caractères vers un entier numérique. 
# # **Attention !** Si tu obtiens une erreur résultat d’une division par zéro, rappelle-toi aussi qu’elle est réputée indéfinie en mathématiques. Pour contourner le problème, mets en application la règle PEMDAS que tu as dû voir en cours, relative à la priorité des opérations. # + # Total initialisé à 0 # Pour chaque élément de "maths" # Ajouter la note, convertie en entier numérique, au total # Imprimer la date, la note et la moyenne # - # Il ne reste plus qu’à modifier la structure de données pour intégrer le calcul de la moyenne. Faisons cette opération ensemble : # + # Total initialisé à 0 total = 0 # Pour chaque tour dans la boucle for i, (date, note) in enumerate(maths): # Calcul du total total += int(note) # Moyenne moyenne = total / (i + 1) # La ligne courante (date, note), devient (date, note, moyenne) maths[i] = (date, note, moyenne) # - # ## Une moyenne générale # # L’idée maintenant est de calculer la moyenne générale. La seule difficulté réside dans le fait qu'il est possible d’avoir plusieurs notes le même jour ! Utilisons une structure minimale pour observer les étapes à accomplir : d = { 'Français': [ ('2020-09-26', '13', 13), ('2020-12-13', '7', 10), ('2020-12-25', '6', 8.66) ], 'Anglais': [ ('2020-09-03', '6', 6), ('2020-09-29', '9', 7.5), ('2020-10-14', '9', 8), ('2020-12-13', '12', 9) ] } # Si l’on a calculé une moyenne pour chaque date dans une même discipline, il est naturel de poursuivre la logique à la moyenne générale. Mettons au point les grandes lignes de notre algorithme : # # 1. Ajouter une clé `Générale` au dictionnaire, qui décrira un sous-dictionnaire avec autant de clés que de dates ; # 2. initialiser un compteur et une variable pour le total ; # 3. pour chaque date : # - trouver les notes à date dans les disciplines ; # - incrémenter le compteur ; # - calculer la moyenne ; # - ajouter un enregistrement dans la moyenne générale. 
# ### 1e étape : compléter le dictionnaire # # Le dictionnaire sur lequel nous travaillons pour cet exercice est constitué de deux clés : `Français` et `Anglais`. Nous pouvons en rajouter une troisième, nommée `Générale`, chargée d'enregistrer un dictionnaire de `dates: moyennes` : # Insertion d'une clé "Générale" d['Générale'] = dict() # Il s’agit maintenant de la remplir avec les dates trouvées dans le reste du dictionnaire et, pour chacune, une valeur nulle. Essayons avec ce que nous connaissons : # Pour chaque résultat dans le dictionnaire for resultat in d.values(): # Pour chaque triplet de date, note et moyenne for date, note, moyenne in resultat: # Ajouter la date à la liste de dates d['Générale'][date] = float() # Python lève une exception `ValueError` pour la simple et bonne raison qu’il s’attend, pour chaque clé, à trouver un triplet de données. Or, rappelle-toi que nous venons juste d’insérer une nouvelle clé `Générale` qui, elle, est vierge ! # # On a besoin d’ajouter une nouvelle instruction, qui vérifie que la discipline que l’on est en train de parcourir ne correspond à la clé `Générale`. Note bien au passage la méthode `items()` sur le dictionnaire qui permet de récupérer aussi les clés : # Pour chaque résultat dans le dictionnaire for discipline, resultat in d.items(): # Si la discipline est différente de "Générale" if discipline != 'Générale': # Pour chaque triplet de date, note et moyenne for date, note, moyenne in resultat: # Ajouter une clé date avec pour valeur une structure float vide d['Générale'][date] = float() # Pourquoi une structure `float()` ? La structure `int()` que nous avons utilisé jusqu’ici permet de décrire des entiers numériques. Un `float()` peut quant à lui enregistrer des nombres décimaux, ce qui est la caractéristique d’une moyenne. # ### 2e étape : initialiser les variables compteur et total # # Il n’y a pas plus simple, à toi d’initialiser les variables à 0 ! 
# Compteur et total initialisés à 0 compteur, total = # ### 3e étape : calculer la moyenne générale # # Pour cette étape, opérons ensemble. Attention au piège qui survient dès le début : un dictionnaire ne conserve pas l’ordre d’insertion de ses éléments. Il y a une raison à cela, bien entendu, mais ne nous éparpillons pas. On doit par conséquent bien penser à trier les dates pour la clé `Générale` : # Pour chaque date unique for date_unique in sorted(d['Générale']): # Parcourir le dictionnaire for discipline, resultat in d.items(): # Seulement si la discipline n'est pas "Générale" if discipline != 'Générale': # On déplie le triplet for date, note, moyenne in resultat: # On vérifie que la date du résultat correspond à la date analysée if date == date_unique: # Incrémentation du nombre de notes compteur += 1 # Addition au total total += int(note) # Modification de la moyenne pour le jour d['Générale'][date] = total / compteur # Il ne nous reste plus qu’à vérifier si le résultat est cohérent. Un calcul à la main nous permet de trouver une moyenne de : # # $(13+7+6+6+9+9+12) \div 7 = 8.857142857142858$ # # Quand l’enregistrement `'2020-12-25'` correspondant à la dernière date de la clé `Générale` vaut : print(d['Générale']['2020-12-25']) # ## Application # # À présent, essaie de répéter les étapes pour calculer la moyenne générale de l‘élève que nous suivons ! # + # Ton code ici !
5. Boucles.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _cell_guid="9d7d41b4-3445-6941-79c1-bbdfc85b254a" # # House price prediction using multiple regression analysis # # # Part 1: Exploratory Data Analysis # # The following notebook presents a thought process of predicting a continuous variable through Machine Learning methods. More specifically, we want to predict house prices based on multiple features using regression analysis. # # As an example, we will use a dataset of house sales in King County, where Seattle is located. # # In this notebook we will first apply some Exploratory Data Analysis (EDA) techniques to summarize the main characteristics of the dataset. # # ## 1. Preparation # # ### 1.1 Load the librairies # + _cell_guid="17660125-75d4-0b6e-b977-674fb1c72abf" import numpy as np # NumPy is the fundamental package for scientific computing import pandas as pd # Pandas is an easy-to-use data structures and data analysis tools pd.set_option('display.max_columns', None) # To display all columns import matplotlib.pyplot as plt # Matplotlib is a python 2D plotting library # %matplotlib inline # A magic command that tells matplotlib to render figures as static images in the Notebook. import seaborn as sns # Seaborn is a visualization library based on matplotlib (attractive statistical graphics). sns.set_style('whitegrid') # One of the five seaborn themes import warnings warnings.filterwarnings('ignore') # To ignore some of seaborn warning msg from scipy import stats, linalg import folium # for map visualization from folium import plugins # + [markdown] _cell_guid="0319e255-0292-2bc9-871a-d3607c6b8afc" # humm, looks like folium isn't available on kaggle. 
We'll work around it # # ### 1.2 Load the dataset # # Let's load the data from CSV file using pandas and convert some columns to category type (for better summarization). # + _cell_guid="577a53cd-5839-ea43-7933-63764d071432" data = pd.read_csv("../input/kc_house_data.csv", parse_dates = ['date']) data['waterfront'] = data['waterfront'].astype('category',ordered=True) data['view'] = data['view'].astype('category',ordered=True) data['condition'] = data['condition'].astype('category',ordered=True) data['grade'] = data['grade'].astype('category',ordered=False) data['zipcode'] = data['zipcode'].astype(str) data = data.sort('date') data.head(2) # Show the first 2 lines # + _cell_guid="ef6feb79-6c8a-f5df-c4b7-84dd19205ca9" data.dtypes # + [markdown] _cell_guid="d60bcaa0-f3e9-723f-94a1-a1dc8999c62a" # ## 2. Descriptive statistics # # The initial dimension of the dataset # + _cell_guid="6d069cae-99f1-2811-626d-3ac248c9cf69" data.shape # + [markdown] _cell_guid="58a56c75-ca10-dc76-d874-ed985de813af" # Let's summarize the main statistics of each parameters # + _cell_guid="7c63e82d-a7ac-9b38-e874-d324b132a41a" data.describe(include='all') # + [markdown] _cell_guid="e1944485-3c92-6f5e-1d92-95061b0c1e55" # ## 3. Setting the context (map visualization) # # Before we dive into exploring the data, we’ll want to set the context of the analysis. One good way to do this is with exploratory charts or maps. In this case, we’ll map out the positions of the houses, which will help us understand the problem we’re exploring. # # In the below code, we: # # * Setup a map centered on King County. # * Add a marker to the map for each house sold in the area. # * Display the map. 
# + _cell_guid="1aea0afe-26d6-1d45-e165-2af81a65d690"
# Center the map on the mean coordinates of the dataset.
houses_map = folium.Map(location = [data['lat'].mean(), data['long'].mean()], zoom_start = 10)
# MarkerCluster lives in folium.plugins in current folium releases.
marker_cluster = plugins.MarkerCluster().add_to(houses_map)
# Only plot the first MAX_RECORDS sales: one marker per row for the full
# dataset makes the map huge and slow. (The constant was previously defined
# but never applied to the loop.)
MAX_RECORDS = 100
for name, row in data.iloc[:MAX_RECORDS].iterrows():
    folium.Marker([row["lat"], row["long"]],
                  popup="Sold for {0}$ on: {1}. Features: {2} sqft, {3} bedrooms, {4} bathrooms, year built: {5}"\
                  .format(row["price"], row["date"], row['sqft_living'], row['bedrooms'], row['bathrooms'], row['yr_built'])).add_to(marker_cluster)
# Map.create_map() no longer exists; save() writes the HTML file.
houses_map.save('houses.html')
houses_map

# + [markdown] _cell_guid="9cf4c7f6-94e9-9fe9-ec6e-f0960d96a6e5"
# ![houses map][1]
#
# Interactive map is available [here][2]
#
# The map is helpful but it's hard to see where the houses in our dataset are located. Instead, we could make a heatmap:
#
#
# [1]: https://harlfoxem.github.io/img/King_County_House_Prediction_files/housesmap.png
# [2]: https://harlfoxem.github.io/houses.html

# + _cell_guid="0bcfeb82-b3f3-9fa3-02ec-e4b9cbd44f35"
houses_heatmap = folium.Map(location = [data['lat'].mean(), data['long'].mean()], zoom_start = 9)
# add_children() was renamed add_child() in folium.
houses_heatmap.add_child(plugins.HeatMap([[row["lat"], row["long"]] for name, row in data.iterrows()]))
houses_heatmap.save("heatmap.html")
houses_heatmap

# + [markdown] _cell_guid="cffd81db-8569-eaa5-a206-219f5bb77191"
# ![heatmap][1]
#
# (again, map: [here][2])
#
# Heatmaps are good for mapping out gradients, but we’ll want something with more structure to plot out differences in house sale across the county. Zip codes are a good way to visualize this information.
#
# We could for example compute the mean house price by zip code, then plot this out on a map.
In the below code, we'll: # # * group the dataframe by zipcode, # * Compute the average price of each column # * add a column with the total number of observations (i.e., house sales) per zipcode # # # [1]: https://harlfoxem.github.io/img/King_County_House_Prediction_files/heatmap.png # [2]: https://harlfoxem.github.io/heatmap.html # + _cell_guid="55f8c188-65e7-c96e-e0f5-c0d4be0f6acf" zipcode_data = data.groupby('zipcode').aggregate(np.mean) zipcode_data.reset_index(inplace=True) data['count'] = 1 count_houses_zipcode = data.groupby('zipcode').sum() count_houses_zipcode.reset_index(inplace=True) count_houses_zipcode = count_houses_zipcode[['zipcode','count']] data.drop(['count'], axis = 1, inplace = True) zipcode_data = pd.merge(zipcode_data, count_houses_zipcode, how='left', on=['zipcode']) zipcode_data.head(2) # + [markdown] _cell_guid="1fbfce22-f225-a4be-56f4-1fc668c45340" # We’ll now be able to plot the average value of a specific attribute for each zip code. In order to do this, we’ll read data in GeoJSON format to get the shape of each zip code, then match each zip code shape with the attribute score. Let's first create a function. # # GeoJSON file available [here][1] # # # [1]: https://github.com/harlfoxem/House_Price_Prediction/blob/master/zipcode_king_county.geojson # + _cell_guid="2a3500bf-8349-f7cb-7545-c248e17edf53" def show_zipcode_map(col): geo_path = 'zipcode/zipcode_king_county.geojson' zipcode = folium.Map(location=[data['lat'].mean(), data['long'].mean()], zoom_start=9) zipcode.geo_json(geo_path = geo_path, data=zipcode_data, columns = ['zipcode', col], key_on = 'feature.properties.ZCTA5CE10', fill_color='OrRd', fill_opacity=0.9,line_opacity=0.2) zipcode.save(col + '.html') return zipcode # + [markdown] _cell_guid="bc3f2631-1be4-eccf-c7c8-1a95e3918d23" # Now that we have our function ready, let's make a plot using the variable count. 
# + _cell_guid="45a3c326-ac91-1274-71a0-3c247dd98a23" show_zipcode_map('count') # + [markdown] _cell_guid="8b8af262-e34b-0cad-acee-782f855325a5" # ![count map][1] # # Again, map [here][2] # # The map helps us understand a few things about the dataset. First, we can see that we don't have data for every zip code in the county. This is especially true for the inner suburbs of Seattle. Second, some zipcodes have a lot more house sales recorded than others. The number of observations range from ~50 to ~600. # Let's show a few more maps: # # # [1]: https://harlfoxem.github.io/img/King_County_House_Prediction_files/count.png # [2]: https://harlfoxem.github.io/count.html # + _cell_guid="f833cc2a-8d00-622b-e436-3a75ab674eaa" show_zipcode_map('price') show_zipcode_map('sqft_living') show_zipcode_map('yr_built') # + [markdown] _cell_guid="68448977-1558-21e9-32e4-47440e3a5239" # ![price map][1] # # ![sqft map][2] # # ![yr built map][3] # # The three interactive maps are available [here][4], [here][5] and [here][6] # # We can see that on average, the houses on the eastern suburbs of Seattle are more expensive. They are also bigger in sqft. # # The houses close to the metropolitan are of Seattle are relatively old compare to the houses in the rural area. # # # [1]: https://harlfoxem.github.io/img/King_County_House_Prediction_files/price.png # [2]: https://harlfoxem.github.io/img/King_County_House_Prediction_files/sqft.png # [3]: https://harlfoxem.github.io/img/King_County_House_Prediction_files/yrbuilt.png # [4]: https://harlfoxem.github.io/price.html # [5]: https://harlfoxem.github.io/sqft_living.html # [6]: https://harlfoxem.github.io/yr_built.html # + [markdown] _cell_guid="40f1b1fc-1706-59ad-ce7d-9caf4eac76b4" # ## 3. The Output Variable # # Now that we've set the context by plotting out where the houses in our dataset are located, we can move into exploring different angles for our regression analysis. 
# # Let's first display the distribution of the target column (price) using a boxplot. # ![boxplot definition][1] # # # [1]: https://harlfoxem.github.io/img/King_County_House_Prediction_files/boxplot.png # + _cell_guid="b1a34fe9-9083-6352-fc93-5da9650eb9e3" fig, ax = plt.subplots(figsize=(12,4)) sns.boxplot(x = 'price', data = data, orient = 'h', width = 0.8, fliersize = 3, showmeans=True, ax = ax) plt.show() # + [markdown] _cell_guid="76f81f25-cee2-0e31-db2d-f9cd65a79e58" # There seems to be a lot of outliers at the top of the distribution, with a few houses above the 5000000`$` value. If we ignore outliers, the range is illustrated by the distance between the opposite ends of the whiskers (1.5 IQR) - about 1000000`$` here. # Also, we can see that the right whisker is slightly longer than the left whisker and that the median line is gravitating towards the left of the box. The distribution is therefore slightly skewed to the right. # # ## 4. Associations and Correlations between Variables # # Let's analyze now the relationship between the independent variables available in the dataset and the dependent variable that we are trying to predict (i.e., price). # These analysis should provide some interesting insights for our regression models. # # We'll be using scatterplots and correlations coefficients (e.g., Pearson, Spearman) to explore potential associations between the variables. # # ### 4.1 Continuous Variables # # For example, let's analyze the relationship between the square footage of a house (sqft_living) and its selling price. Since the two variables are measured on a continuous scale, we can use Pearson's coefficient r to measures the strength and direction of the relationship. 
# + _cell_guid="0c97e787-980e-b54c-aeb9-d30abe3a1cfc" # A joint plot is used to visualize the bivariate distribution sns.jointplot(x="sqft_living", y="price", data=data, kind = 'reg', size = 7) plt.show() # + [markdown] _cell_guid="b19cec98-5e1e-b076-b8d2-5d449fa9a75b" # There is a clear linear association between the variables (r = 0.7), indicating a strong positive relationship. sqft_living should be a good predicator of house price. # (note: sqft_living distribution is also skewed to the right) # # Let's do the same with the 7 remaining continuous variables: # # * sqft_lot # * sqft_above (i.e., sqft_above = sqft_living - sqft_basement) # * sqft_basement # * sqft_living15, the average house square footage of the 15 closest neighbours # * sqft_lot15, the average lot square footage of the 15 closest neighbours # * yr_built # * yr_renovated # * lat # * long # + _cell_guid="b49c42f4-83c4-11a8-0912-afb4b4390155" language="javascript" # IPython.OutputArea.auto_scroll_threshold = 9999; # //First, a simple command to increase the maximum size of the output cells in the notebook # + _cell_guid="0ceadfd0-34de-7d92-f093-7c5d527923d4" sns.jointplot(x="sqft_lot", y="price", data=data, kind = 'reg', size = 5) sns.jointplot(x="sqft_above", y="price", data=data, kind = 'reg', size = 5) sns.jointplot(x="sqft_basement", y="price", data=data, kind = 'reg', size = 5) sns.jointplot(x="sqft_living15", y="price", data=data, kind = 'reg', size = 5) sns.jointplot(x="sqft_lot15", y="price", data=data, kind = 'reg', size = 5) sns.jointplot(x="yr_built", y="price", data=data, kind = 'reg', size = 5) sns.jointplot(x="yr_renovated", y="price", data=data, kind = 'reg', size = 5) sns.jointplot(x="lat", y="price", data=data, kind = 'reg', size = 5) sns.jointplot(x="long", y="price", data=data, kind = 'reg', size = 5) plt.show() # + [markdown] _cell_guid="7f45dd21-d1f9-8fb9-6611-7d5b758d222e" # sqft_lot, sqft_lot15 and yr_built seem to be poorly related to price. 
#
# We can see that there is a lot of zeros in the sqft_basement distribution (i.e., no basement). Similarly, there is a lot of zeros in the yr_renovated variable.
#
# Let's rerun the association tests for these two variables without the zeros.

# + _cell_guid="1bf80e55-06c8-2b3e-2f08-c86db5e2f7e7"
# Create 2 new columns for the analysis.
# Series.where() keeps values matching the condition and puts NaN elsewhere;
# it is vectorised, unlike the previous row-by-row apply(lambda ...), and
# yields the same float/NaN result.
data['sqft_basement2'] = data['sqft_basement'].where(data['sqft_basement'] > 0)
data['yr_renovated2'] = data['yr_renovated'].where(data['yr_renovated'] > 0)

# Show the new plots with Pearson correlation
sns.jointplot(x="sqft_basement2", y="price", data=data, kind = 'reg', dropna=True, size = 5)
sns.jointplot(x="yr_renovated2", y="price", data=data, kind = 'reg', dropna=True, size = 5)
plt.show()

# + [markdown] _cell_guid="331345eb-511d-09e7-ad0f-08c51892d98f"
# The house price is moderately correlated with the size of the basement (if basement present). There is also a small correlation with the year of the renovation (if renovated).
#
# It might be more interesting for our analysis to classify basement and renovation as dichotomous variables (e.g., 0 for no basement, 1 for basement present). Let's create two new columns in our dataset.

# + _cell_guid="ac0fd0d8-aec4-a2cf-eb22-cfbdbecd6853"
# Dichotomous indicators: 1 when a basement exists / the house was renovated.
# The vectorised comparison replaces the element-wise apply(lambda ...), and
# astype('category') already produces an UNORDERED categorical by default, so
# the `ordered=False` keyword (removed from astype in modern pandas) is
# unnecessary.
data['basement_present'] = (data['sqft_basement'] > 0).astype(int)
data['basement_present'] = data['basement_present'].astype('category')
data['renovated'] = (data['yr_renovated'] > 0).astype(int)
data['renovated'] = data['renovated'].astype('category')

# + [markdown] _cell_guid="13ec8ff3-b979-8911-5894-2332de74d79f"
# We will analyse these new variables as categorical (see in few cells below).
#
# But first, let's go back to the plots above and the two variables: sqft_above and sqft_living15. They seem to be strongly related to price. Let's analyse their associations (along with sqft_living) using the PairGrid() function from seaborn. 
This function creates a matrix of axes and shows the relationship for each pair of the selected variables.
#
# We will draw the univariate distribution of each variable on the diagonal Axes, and the bivariate distributions using scatterplots on the upper diagonal and kernel density estimation on the lower diagonal. We will create a function to display the Pearson coefficient of each pair.

# + _cell_guid="efc4a54c-2498-d430-f863-f683e489f7ff"
# define a function to display pearson coefficients on the lower graphs
def corrfunc(x, y, **kws):
    '''Annotate the current axes with the Pearson r of x and y.

    Designed to be passed to PairGrid.map_lower(); seaborn calls it with the
    two data vectors of each off-diagonal panel (extra kwargs are ignored).
    '''
    r, _ = stats.pearsonr(x, y)
    ax = plt.gca()
    ax.annotate("pearsonr = {:.2f}".format(r),
                xy=(.1, .9), xycoords=ax.transAxes)

g = sns.PairGrid(data, vars = ['sqft_living', 'sqft_living15', 'sqft_above'], size = 3.5) # define the pairgrid
g.map_upper(plt.scatter)
g.map_diag(sns.distplot)
g.map_lower(sns.kdeplot, cmap="Blues_d")
g.map_lower(corrfunc)
plt.show()

# + [markdown] _cell_guid="713441e9-ce25-513f-40c5-06138e1ec1fb"
# As envisaged, there is a strong positive relationship between the 3 variables (r>0.7). It was kind of obvious for sqft_above which is equal to sqft_living - sqft_basement. So we know that they both have an impact on price.
#
# For sqft_living15 however, we are not sure if the relationship with house price is actually due to the average square footage of the 15 closest houses. This is because of the high correlation between sqft_living15 and sqft_living.
#
# To assess the true relationship between price and sqft_living15, we can use the Pearson Partial Correlation test. The correlation can assess the association between two continuous variables whilst controlling for the effect of other continuous variables called covariates. In our example, we will test the relationship between price and sqft_living15 using sqft_living as covariate. 
# + _cell_guid="c969b479-c3a2-78da-a7d5-65f310c24039" # a Function to returns the sample linear partial correlation coefficients between pairs of variables in C, controlling # for the remaining variables in C (clone of Matlab's partialcorr). def partial_corr(C): C = np.asarray(C) p = C.shape[1] P_corr = np.zeros((p, p), dtype=np.float) for i in range(p): P_corr[i, i] = 1 for j in range(i+1, p): idx = np.ones(p, dtype=np.bool) idx[i] = False idx[j] = False beta_i = linalg.lstsq(C[:, idx], C[:, j])[0] beta_j = linalg.lstsq(C[:, idx], C[:, i])[0] res_j = C[:, j] - C[:, idx].dot( beta_i) res_i = C[:, i] - C[:, idx].dot(beta_j) corr = stats.pearsonr(res_i, res_j)[0] P_corr[i, j] = corr P_corr[j, i] = corr return P_corr # Convert pandas dataframe to a numpy array using only three columns partial_corr_array = data.as_matrix(columns = ['price', 'sqft_living', 'sqft_living15']) # Calculate the partial correlation coefficients partial_corr(partial_corr_array) # + [markdown] _cell_guid="c98b9d20-1636-d6e0-64b2-6c505c7c2493" # We can see now that the average house size of the surrounding # houses has no effect on the sell price when controlling for the size of the house (r = 0.06). # # ### 4.2 Categorical Variables # # Let's now analyze the relationship between house price and the categorical variables. # # As a first example, we will try to assess if having a waterfront is related to a higher house value. waterfront is a dichotomous variable with underlying continuous distribution (having a waterfront is better that not having a waterfront). We can use a point-biserial correlation coefficient to highlight the relationship between the two variables. 
# + _cell_guid="504bf137-81cb-06eb-a94d-2947347efc22" # Let's show boxplots first fig, ax = plt.subplots(figsize=(12,4)) sns.boxplot(y = 'waterfront', x = 'price', data = data,width = 0.8,orient = 'h', showmeans = True, fliersize = 3, ax = ax) plt.show() # Calculate the correlation coefficient r, p = stats.pointbiserialr(data['waterfront'], data['price']) print ('point biserial correlation r is %s with p = %s' %(r,p)) # + [markdown] _cell_guid="5520809d-adec-ccf4-dcbb-e2f04ef278a2" # Comments: # # * The no waterfront box plot is comparatively short. This suggests that overall, house prices in this group are very close to each other. # * The waterfront box plot is comparatively tall. This suggests that house prices differ greatly in this group. # * There is obvious shape differences between the two distributions, suggesting a higher sell price, in general, for houses with a waterfront. This is validated by a positive value of the point-biserial correlation. # * The correlation if however small (r<0.3). Note that we haven't test here the 3 main assumptions of the point-biserial correlation and can't rely too much on the result (1: There should be no significant outliers in the two groups of the dichotomous variable in terms of the continuous variable, 2: There should be homogeneity of variances, 3: The continuous variable should be approximately normally distributed for each group of the dichotomous variable). # # We can run the same test on the basement_present variable and whether or not the house had been renovated in the past. 
# + _cell_guid="abecaa5d-1949-77fb-d9e3-1303ef343981" # basement_present variable fig, ax = plt.subplots(figsize=(12,4)) sns.boxplot(y = 'basement_present', x = 'price', data = data,width = 0.8,orient = 'h', showmeans = True, fliersize = 3, ax = ax) plt.show() r, p = stats.pointbiserialr(data['basement_present'], data['price']) print ('point biserial correlation r between price and basement_present is %s with p = %s' %(r,p)) # renovated variable fig, ax = plt.subplots(figsize=(12,4)) sns.boxplot(y = 'renovated', x = 'price', data = data,width = 0.8,orient = 'h', showmeans = True, fliersize = 3, ax = ax) print ('') plt.show() r, p = stats.pointbiserialr(data['renovated'], data['price']) print ('point biserial correlation r between price and renovated is %s with p = %s' %(r,p)) # + [markdown] _cell_guid="4a0eb56a-6bac-093f-4c8e-542b48776526" # Associations exist but they are fairly small (0.1 < r < 0.3). # # Let's move on to our ordinal variables and asses their association with house price. We will show the distribution of the categories of each variable using boxplots. 
# + _cell_guid="6e84b35a-1a54-d8b7-d439-03eb940ca9a6" fig, axarr = plt.subplots(6, figsize=(12,40)) sns.boxplot(y = 'bedrooms', x = 'price', data = data,width = 0.8,orient = 'h', showmeans = True, fliersize = 3, ax = axarr[0]) sns.boxplot(y = 'bathrooms', x = 'price', data = data,width = 0.8,orient = 'h', showmeans = True, fliersize = 3, ax = axarr[1]) sns.boxplot(y = 'floors', x = 'price', data = data,width = 0.8,orient = 'h', showmeans = True, fliersize = 3, ax = axarr[2]) sns.boxplot(y = 'view', x = 'price', data = data,width = 0.8,orient = 'h', showmeans = True, fliersize = 3, ax = axarr[3]) sns.boxplot(y = 'condition', x = 'price', data = data,width = 0.8,orient = 'h', showmeans = True, fliersize = 3, ax = axarr[4]) sns.boxplot(y = 'grade', x = 'price', data = data,width = 0.8,orient = 'h', showmeans = True, fliersize = 3, ax = axarr[5]) plt.show() # + [markdown] _cell_guid="49f72411-db5b-b615-d65e-7d88e8286786" # As expected, they all seem to be related to the house price. # # We can use the Spearman's rank-order correlation to measure the strength and direction of the relationships between house price and these variables. 
# + _cell_guid="eb2ba64b-cd50-9086-5f00-f07dc09efd7f" r, p = stats.spearmanr(data['bedrooms'], data['price']) print ('spearman correlation r between price and bedrooms is %s with p = %s' %(r,p)) r, p = stats.spearmanr(data['bathrooms'], data['price']) print ('spearman correlation r between price and bathrooms is %s with p = %s' %(r,p)) r, p = stats.spearmanr(data['floors'], data['price']) print ('spearman correlation r between price and floors is %s with p = %s' %(r,p)) r, p = stats.spearmanr(data['view'], data['price']) print ('spearman correlation r between price and view is %s with p = %s' %(r,p)) r, p = stats.spearmanr(data['condition'], data['price']) print ('spearman correlation r between price and condition is %s with p = %s' %(r,p)) r, p = stats.spearmanr(data['grade'], data['price']) print ('spearman correlation r between price and grade is %s with p = %s' %(r,p)) # + [markdown] _cell_guid="c133f524-aa1c-94c9-449a-5318d572e5f9" # There is indeed associations between these variables and the house price (except for condition). grade seems to be the best indicator. # # ## Conclusion # # In this post, we analyzed the relationship between the output variable (house price) and the dependent variables in our dataset. # # More specifically, we highlighted that: # # * sqft_living, sqft_above and sqft_basement were moderately/strongly associated with price. Paerson r was equal to 0.70, 0.61 and 0.41, respectively. The 3 variables were also strongly related to each other as sqft_living = sqft_above and sqft_basement. # * sqft_living15, the average house square footage of the 15 closest neighbors, was also strongly related to price (r = 0.59) . However, when controlling for sqft_living, the relationship disappeared ($r = 0.06$). # * sqft_lot, sqft_lot15 (average lot size of the 15 closest houses) and *yr_built* were poorly related to price. # * The three dichotomous variables (waterfront, basement_present, renovated) were associated with price. 
The associations were small (r < 0.3)
# * Five of the ordinal parameters (bedrooms, bathrooms, floors, view, grade) were also moderately to strongly associated with price.
#
# Our multiple regression analysis models in Part 2 will be built on these results.

# + _cell_guid="eaf103b9-e702-7045-4134-68a354e51581"
downloaded_kernels/house_sales/kernel_33.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib -l # Lets try notebook instead of inline # %matplotlib notebook import pandas as pd import matplotlib.pyplot as plt # Read a csv file. df stands for DataFrame df = pd.read_csv('../height_weight.csv') df # if emtpy shows up, restart kernel, because only one is possible to have df.plot() # zoom in , zoom out, select part of the figure, expand the image etc. # ONLY one graph in a jupyter notebook can be interacted with at a time
class8_plots/matplotlib_backends.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] papermill={"duration": 0.022601, "end_time": "2021-06-11T17:53:33.841961", "exception": false, "start_time": "2021-06-11T17:53:33.819360", "status": "completed"} tags=[] # # # <img src="https://i.ytimg.com/vi/yjprpOoH5c8/maxresdefault.jpg" width="300" height="300" align="center"/> # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 6.460871, "end_time": "2021-06-11T17:53:40.324878", "exception": false, "start_time": "2021-06-11T17:53:33.864007", "status": "completed"} tags=[] import numpy as np import tensorflow as tf seed=1234 np.random.seed(seed) tf.random.set_seed(seed) # %config IPCompleter.use_jedi = False # + [markdown] papermill={"duration": 0.020574, "end_time": "2021-06-11T17:53:40.367126", "exception": false, "start_time": "2021-06-11T17:53:40.346552", "status": "completed"} tags=[] # ## Tensors # # What is a `Tensor` anyway?<br> # Although the meaning of `Tensor` is much diverse than what we typically use in ML, whenever we say `tensor` in ML, we mean that it is a **`multi-dimensional array`** where all the values have a uniform `dtype`. There are many ways to create a TF tensor. We will take a look at a few of them, a few important ones. # + [markdown] papermill={"duration": 0.020817, "end_time": "2021-06-11T17:53:40.409141", "exception": false, "start_time": "2021-06-11T17:53:40.388324", "status": "completed"} tags=[] # `tf.constant(..)`: This is the simplest way yet with some `gotchas` to create a tensor object. First, let's try to create a tensor with it, and then we will look at the gotchas later on. 
# + papermill={"duration": 0.043196, "end_time": "2021-06-11T17:53:40.473622", "exception": false, "start_time": "2021-06-11T17:53:40.430426", "status": "completed"} tags=[] # A zero rank tensor. A zero rank tensor is nothing but a single value x = tf.constant(5.0) print(x) # + [markdown] papermill={"duration": 0.021783, "end_time": "2021-06-11T17:53:40.520939", "exception": false, "start_time": "2021-06-11T17:53:40.499156", "status": "completed"} tags=[] # As you can see above, that the tensor object has a `shape` and a `dtype`. There are other attributes/properties as well that are associated with a tensor object. # # # 1. Shape: The length (number of elements) of each of the axes of a tensor. # 2. Rank: Number of axes. For example, a matrix is a tensor of rank 2. # 3. Axis or Dimension: A particular dimension of a tensor. # 4. Size: The total number of items in the tensor. # + papermill={"duration": 0.049282, "end_time": "2021-06-11T17:53:40.592483", "exception": false, "start_time": "2021-06-11T17:53:40.543201", "status": "completed"} tags=[] # We can convert any tensor object to `ndarray` by calling the `numpy()` method y = tf.constant([1, 2, 3], dtype=tf.int8).numpy() print(f"`y` is now a {type(y)} object and have a value == {y}") # + [markdown] papermill={"duration": 0.021817, "end_time": "2021-06-11T17:53:40.639060", "exception": false, "start_time": "2021-06-11T17:53:40.617243", "status": "completed"} tags=[] # **A few important things along with some gotchas**<br> # 1. People confuse `tf.constant(..)` with an operation that creates a `constant` tensor. There is no such relation. This is related to how we embed a node in a `tf.Graph` # 2. Any tensor in TensorFlow is **immutable** by default i.e. you cannot change the values of a tensor once created. You always create a new one. This is different from `numpy` and `pytorch` where you can actually modify the values. We will see an example on this in a bit # 3. 
One of the closest members to `tf.constant` is the `tf.convert_to_tensor()` method with a few differences which we will see later on
# 4. `tf.constant(..)` is just one of the many ways to create a tensor. There are many other methods as well

# +
# Immutability check
# Rank-1 tensor
x = tf.constant([1, 2], dtype=tf.int8)

# Try to modify the values -- item assignment is expected to raise.
try:
    x[1] = 3
except Exception as ex:
    print(type(ex).__name__, ex)

# +
# tf.constant(..) is no special. Let's create a tensor using a diff method
x = tf.ones(2, dtype=tf.int8)
print(x)
try:
    x[0] = 3
except Exception as ex:
    print("\n", type(ex).__name__, ex)

# +
# Check all the properties of a tensor object
print(f"Shape of x : {x.shape}")
print(f"Another method to obtain the shape using `tf.shape(..)`: {tf.shape(x)}")
print(f"\nRank of the tensor: {x.ndim}")
print(f"dtype of the tensor: {x.dtype}")
print(f"Total size of the tensor: {tf.size(x)}")
print(f"Values of the tensor: {x.numpy()}")

# + [markdown]
# Not able to do assignment in Tensor objects is a bit (more than bit TBH) frustrating. What's the solution then?<br>
# The best way that I have figured out, that has always worked for my use case is to create a mask or to use [tf.tensor_scatter_nd_update](https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update). 
Let's look at an example.<br>
#
# Original tensor -> `[1, 2, 3, 4, 5]` <br>
# Output tensor we want -> `[1, 200, 3, 400, 5]`<br>

# +
# Create a tensor first. Here is another way
x = tf.cast([1, 2, 3, 4, 5], dtype=tf.float32)
print("Original tensor: ", x)

# Boolean mask of the positions (even values) we want to overwrite.
mask = x%2 == 0
print("Original mask: ", mask)

mask = tf.cast(mask, dtype=x.dtype)
print("Mask casted to original tensor type: ", mask)

# Some kind of operation on an tensor that is of same size
# or broadcastable to the original tensor. Here we will simply
# use the range object to create that tensor
temp = tf.cast(tf.range(1, 6) * 100, dtype=x.dtype)

# Output tensor: keep x where the mask is 0, take temp where it is 1.
# Input tensor -> [1, 2, 3, 4, 5]
# Mask -> [0, 1, 0, 1, 0]
out = x * (1-mask) + mask * temp
print("Output tensor: ", out)

# +
# Another way to achieve the same thing
indices_to_update = tf.where(x % 2 == 0)
print("Indices to update: ", indices_to_update)

# Update the tensor values at the selected indices.
updates = [200., 400.]
out = tf.tensor_scatter_nd_update(x, indices_to_update, updates)
print("\nOutput tensor")
print(out)

# + [markdown]
# Let's take a look at another interesting thing now.

# +
# This works! 
arr = np.random.randint(5, size=(5,), dtype=np.int32)
print("Numpy array: ", arr)
print("Accessing numpy array elements based on a condition with irregular strides", arr[[1, 4]])

# +
# This doesn't work: tf.Tensor does not support fancy (list-of-indices) indexing.
try:
    print("Accessing tensor elements based on a condition with irregular strides", x[[1, 4]])
except Exception as ex:
    print(type(ex).__name__, ex)

# + [markdown]
# What now? If you want to extract multiple elements from a tensor with irregular strides, or not so well defined strides, then [tf.gather](https://www.tensorflow.org/api_docs/python/tf/gather) and [tf.gather_nd](https://www.tensorflow.org/api_docs/python/tf/gather_nd) are your friend. Let's try it again!

# +
print("Original tensor: ", x.numpy())

# Using the indices that we used for mask
print("\nIndices to update: ", indices_to_update.numpy())

# This works!  (typo fix: the printed label previously read "Accesing")
print("\n Accessing tensor elements using gather")
print("\n", tf.gather(x, indices_to_update).numpy())

# + [markdown]
# There is another method `tf.convert_to_tensor(..)` to create a tensor. This is very similar to `tf.constant(..)` but with a few subtle differences:<br>
# 1. Whenever you pass a non tf.Tensor object like a Python list or a ndarray to an op, `convert_to_tensor(..)` is always called automatically
# 2. It doesn't take `shape` as an input argument.
# 3. 
It allows to pass even `symbolic tensors`. We will take a look at it in a bit.
#
# When to use `tf.convert_to_tensor(..)`? It's up to your mental model!

# +
# An example with a python list
y = tf.convert_to_tensor([1, 2, 3])
print("Tensor from python list: ", y)

# An example with a ndarray
y = tf.convert_to_tensor(np.array([1, 2, 3]))
print("Tensor from ndarray: ", y)

# An example with symbolic tensors (graph-mode placeholders via the v1 API)
with tf.compat.v1.Graph().as_default():
    y = tf.convert_to_tensor(tf.compat.v1.placeholder(shape=[None, None, None], dtype=tf.int32))
    print("Tensor from python list: ", y)

# + [markdown]
# ### Other kind of Tensor objects available

# + [markdown]
# #### String tensors

# +
# String as a tensor object with dtype==tf.string
string = tf.constant("abc", dtype=tf.string)
print("String tensor: ", string)

# String tensors are atomic and non-indexable.
# This doesn't work as expected!
print("\nAccessing second element of the string")
try:
    print(string[1])
except Exception as ex:
    print(type(ex).__name__, ex)

# + [markdown]
# #### Ragged tensors
# In short, a tensor with variable numbers of elements along some axis. 
# + papermill={"duration": 0.035471, "end_time": "2021-06-11T17:53:41.724525", "exception": false, "start_time": "2021-06-11T17:53:41.689054", "status": "completed"} tags=[] # This works! y = [[1, 2, 3], [4, 5], [6] ] ragged = tf.ragged.constant(y) print("Creating ragged tensor from python sequence: ", ragged) # + papermill={"duration": 0.035841, "end_time": "2021-06-11T17:53:41.787304", "exception": false, "start_time": "2021-06-11T17:53:41.751463", "status": "completed"} tags=[] # This won't work print("Trying to create tensor from above python sequence\n") try: z = tf.constant(y) except Exception as ex: print(type(ex).__name__, ex) # + [markdown] papermill={"duration": 0.02553, "end_time": "2021-06-11T17:53:41.838752", "exception": false, "start_time": "2021-06-11T17:53:41.813222", "status": "completed"} tags=[] # #### Sparse tensors # + papermill={"duration": 0.043587, "end_time": "2021-06-11T17:53:41.908750", "exception": false, "start_time": "2021-06-11T17:53:41.865163", "status": "completed"} tags=[] # Let's say you have a an array like this one # [[1 0 0] # [0 2 0] # [0 0 3]] # If there are too many zeros in your `huge` tensor, then it is wise to use `sparse` # tensors instead of `dense` one. Let's say how to create this one. We need to specify: # 1. Indices where our values are # 2. The values # 3. The actual shape sparse_tensor = tf.SparseTensor(indices=[[0, 0], [1, 1], [2, 2]], values=[1, 2, 3], dense_shape=[3, 3] ) print(sparse_tensor) # You can convert sparse tensors to dense as well print("\n", tf.sparse.to_dense(sparse_tensor)) # + [markdown] papermill={"duration": 0.026559, "end_time": "2021-06-11T17:53:41.963403", "exception": false, "start_time": "2021-06-11T17:53:41.936844", "status": "completed"} tags=[] # **Exercise for readers**: # 1. Create a random 10x10 sparse tensor # 2. Gather the elements that are > 5 # 3. 
Update these elements with a value of 500 # + [markdown] papermill={"duration": 0.027099, "end_time": "2021-06-11T17:53:42.017512", "exception": false, "start_time": "2021-06-11T17:53:41.990413", "status": "completed"} tags=[] # That's it for part 1! We will be looking at other things in the next tutorial!<br> # # # **References**: # 1. https://www.tensorflow.org/guide/tensor # 2. https://keras.io/getting_started/intro_to_keras_for_researchers/ # + papermill={"duration": 0.026699, "end_time": "2021-06-11T17:53:42.070942", "exception": false, "start_time": "2021-06-11T17:53:42.044243", "status": "completed"} tags=[]
src/notebooks/tensorflow_tutorials/chapter_1_tensors.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import tensorflow as tf
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# %matplotlib inline

# Array of image-filename pairs; each entry holds the two file names of a pair
# fed jointly through the network below.
imgs = np.load("imgs.npy")

def openTensor(imgStr):
    '''Load a sequence of image files and stack them into one array.

    Parameters
    ----------
    imgStr : iterable of str -- image file names.

    Returns
    -------
    ndarray of the decoded images, rescaled from [0, 255] to [0, 1].
    '''
    img = []
    for imgName in imgStr:
        img.append(np.array(Image.open(imgName)))
    return np.array(img)/255

# Placeholders: a batch of 112x92 single-channel images and one binary label.
X = tf.placeholder(dtype=tf.float32, shape = ((None, 112, 92, 1)))
Y = tf.placeholder(dtype=tf.float32, shape = ((None, 1)))

# +
# Five conv blocks (stride-2 conv + stride-2 max-pool each), 16 -> 256 channels.
# Each pre-activation is divided by its own maximum before the ReLU.
# BUG FIX: the original used np.max(Zn) on the symbolic graph tensors; NumPy
# cannot reduce a placeholder-derived TF1 tensor, so the graph-level
# tf.reduce_max is used instead.
Z1 = tf.nn.conv2d(input = X, filter = tf.Variable(initial_value=tf.random_normal(shape = [3, 3, 1, 16])), strides=[1, 2, 2, 1], padding="SAME")
A1 = tf.nn.relu(Z1/tf.reduce_max(Z1))
A1 = tf.nn.max_pool(A1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

Z2 = tf.nn.conv2d(input= A1, filter = tf.Variable(initial_value=tf.random_normal(shape = [3, 3, 16, 32])), strides=[1, 2, 2, 1], padding="SAME")
A2 = tf.nn.relu(Z2/tf.reduce_max(Z2))
A2 = tf.nn.max_pool(A2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

Z3 = tf.nn.conv2d(input= A2, filter = tf.Variable(initial_value=tf.random_normal(shape = [3, 3, 32, 64])), strides=[1, 2, 2, 1], padding="SAME")
A3 = tf.nn.relu(Z3/tf.reduce_max(Z3))
A3 = tf.nn.max_pool(A3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

Z4 = tf.nn.conv2d(input= A3, filter = tf.Variable(initial_value=tf.random_normal(shape = [3, 3, 64, 128])), strides=[1, 2, 2, 1], padding="SAME")
A4 = tf.nn.relu(Z4/tf.reduce_max(Z4))
A4 = tf.nn.max_pool(A4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

Z5 = tf.nn.conv2d(input= A4, filter = tf.Variable(initial_value=tf.random_normal(shape = [3, 3, 128, 256])), strides=[1, 2, 2, 1], padding="SAME")
A5 = tf.nn.relu(Z5/tf.reduce_max(Z5))
A5 = tf.nn.max_pool(A5, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
# -

# Flatten the (batch, 1, 1, 256) conv output to (batch, 256).
F = tf.contrib.layers.flatten(inputs= A5)

# +
# Dense head: 256 -> 256 -> 200 -> 128 per image, same max-normalised ReLU.
Z10 = tf.matmul(F, tf.Variable(initial_value= tf.random_normal(shape = [256, 256])))
A10 = tf.nn.relu(Z10/tf.reduce_max(Z10))

Z11 = tf.matmul(A10, tf.Variable(initial_value= tf.random_normal(shape = [256, 200])))
A11 = tf.nn.relu(Z11/tf.reduce_max(Z11))

Z12 = tf.matmul(A11, tf.Variable(initial_value= tf.random_normal(shape = [200, 128])))
A12 = tf.nn.relu(Z12/tf.reduce_max(Z12))

# The two 128-d embeddings (one per image of the pair) are concatenated into a
# single 256-d vector and mapped to one sigmoid "same person" score.
Z13 = tf.matmul(tf.reshape(tf.concat(values = [A12[0], A12[1]], axis = 0), [1, 256]),
                tf.Variable(initial_value= tf.random_normal(shape = [256, 1]))) \
      + tf.Variable(initial_value=tf.random_normal(shape = [1, 1]))
A13 = tf.nn.sigmoid(Z13)
# -

def lossFn(Y, y_):
    '''Mean binary cross-entropy between labels Y and predictions y_.

    BUG FIX: the original body ignored the y_ argument and read the global
    A13 directly; it now uses the parameter it is given (the call site passes
    A13, so the built graph is unchanged).
    '''
    return tf.reduce_mean(tf.keras.backend.binary_crossentropy(target = Y, output= y_))

LOSS = lossFn(Y, A13)

# NOTE(review): 0.09 is a very aggressive learning rate for Adam (typical is
# ~1e-3) -- confirm this was intentional.
opt = tf.train.AdamOptimizer(learning_rate= 0.09)
train_step = opt.minimize(LOSS)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

def train(epochs = 100):
    '''Run `epochs` passes over the first 1000 image pairs, one pair per step.

    Prints the accumulated epoch loss every 5 epochs. Uses the module-level
    session, graph and `imgs` array.
    '''
    for e in range(epochs):
        loss = 0.0
        for img in imgs[:1000]:
            # Label is 1 when both filenames share the same 2-character
            # prefix -- assumed to identify the subject; TODO confirm the
            # file-naming scheme.
            y = np.array([0]).reshape(-1, 1)
            if img[0][:2] == img[1][:2]:
                y = np.array([1]).reshape(-1, 1)
            _, L = sess.run(fetches = [train_step, LOSS],
                            feed_dict={X: openTensor(img).reshape(-1, 112, 92, 1), Y: y})
            loss += L
        if e % 5 == 0:
            print("epoch {}, loss {}".format(e, loss))

train(200)

# BUG FIX: the original fetched A9, which is never defined (NameError).
# Inspect the final 128-d embedding A12 instead.
# NOTE(review): confirm which activation was actually intended here.
l = sess.run(A12, feed_dict={X: openTensor(imgs[0]).reshape(-1, 112, 92, 1)})

l[0].shape

A11[0, :]
Projects/Face-Recognition/.ipynb_checkpoints/face-recognition-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     name: python2
# ---

# + [markdown] id="Z5FDRhEUfxn3" colab_type="text"
# # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
#
# > https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

# + [markdown] id="3DdgocStf_61" colab_type="text"
# # Image Preprocessing
#
# In this tutorial, we are going to use the [Pillow python library](https://pillow.readthedocs.io/en/5.1.x/) to show how to apply basic transformations on images. You can safely skip this tutorial if you are already familiar with Pillow.

# + [markdown] id="vMWi70VigmN3" colab_type="text"
# First of all, let's import all the libraries we need.

# + id="oguRARzJgraZ" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 17} outputId="bde4a787-623b-4d0c-93c0-1c2e1610db53" executionInfo={"status": "ok", "timestamp": 1529607392583, "user_tz": 240, "elapsed": 554, "user": {"displayName": "Jie Fan", "photoUrl": "//lh4.googleusercontent.com/-geDEcPJTfKo/AAAAAAAAAAI/AAAAAAAAACw/JdrMykxPN5o/s50-c-k-no/photo.jpg", "userId": "104480808619944829617"}}
# Colab helper for uploading files from the local machine.
from google.colab import files
from io import BytesIO

# Display images.
from IPython.display import display

from PIL import Image, ImageEnhance

# + [markdown] id="VzlKSDlXg2Yb" colab_type="text"
# Next, let's upload a PNG image which we will apply all kinds of transformations on, and resize it to 500x500.

# + id="BOyg9qB3g1Vt" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Please assign the real file name of the image to image_name.
image_name = ''

uploaded_files = files.upload()

size = (500, 500)  # (width, height)
image = Image.open(BytesIO(uploaded_files[image_name])).resize(size)
display(image)

# + [markdown] id="1i5TdJN-h8vg" colab_type="text"
# Now that we have the image uploaded, let's try rotating the image by 90 degrees counter-clockwise.

# + id="y8DWcmkViFTL" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
image = image.transpose(Image.ROTATE_90)
display(image)

# + [markdown] id="_WGQeeIfnGjv" colab_type="text"
# Now let's flip the image horizontally.

# + id="7Ipoi7qUnpzG" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
image = image.transpose(Image.FLIP_LEFT_RIGHT)
display(image)

# + [markdown] id="V6YBqaCRpuaC" colab_type="text"
# As a next step, let's adjust the contrast of the image. The base value is 1 and here we are increasing it by 20%.

# + id="27Gs_k5Zp78X" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
contrast = ImageEnhance.Contrast(image)
image = contrast.enhance(1.2)
display(image)

# + [markdown] id="5m_WO1rVqaxb" colab_type="text"
# And brightness and sharpness.

# + id="s5YB4rjWqiMJ" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
brightness = ImageEnhance.Brightness(image)
image = brightness.enhance(1.1)
display(image)

# + id="XdS_Gdkzq2w6" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
sharpness = ImageEnhance.Sharpness(image)
image = sharpness.enhance(1.2)
display(image)

# + [markdown] id="djc4oiIFtkKO" colab_type="text"
# There are a whole lot more transformations we can make on images, please take a look at the [official documentation](https://pillow.readthedocs.io/en/5.1.x/) if you'd like to know more.
datathon/nusdatathon18/tutorials/image_preprocessing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Scenario C - Peak Number Variation (multiple runs)
#
# In this scenario the number of peaks in a generated dataset is varied from low to high,
# the rest of the parameters is kept constant (noise level = 1%). The number of peaks
# expected by the probabilistic model is varied between the low and high peak number.
#
# The model used in the inference of the parameters is formulated as follows:
#
# \begin{equation}
# \large y = f(x) = \sum\limits_{m=1}^M \big[A_m \cdot e^{-\frac{(x-\mu_m)^2}{2\cdot\sigma_m^2}}\big] + \epsilon
# \end{equation}
#
# This file runs a series of inference runs for a set of generated spectra. New spectra
# are generated for each run and stored. After running inference, only the summary
# statistics are stored and the next run is started.

# +
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pymc3 as pm
import arviz as az

#az.style.use('arviz-darkgrid')

print('Running on PyMC3 v{}'.format(pm.__version__))
# -

# ## Import local modules

import os
import sys
sys.path.append('../../modules')
import datagen as dg
import models as mdl
import results as res
import figures as fig
import settings as cnf

# ## Local configuration

# +
# output for results and images
out_path = './output_mruns_5x5'
file_basename = out_path + '/scenario_peaks'
# if dir does not exist, create it
if not os.path.exists(out_path):
    os.makedirs(out_path)

conf = {}
# scenario name
conf['scenario'] = 'peak variation'
# initialization method for sampler
conf['init_mode'] = 'adapt_diag'
# probabilistic model (priors)
conf['prior_model'] = 'lognormal'
# provide peak positions to the model as testvalues ('yes'/'no')
conf['peak_info'] = 'yes'
# data mode ('generate'/'preload')
conf['data_mode'] = 'generate'
# number of runs
conf['nruns'] = 4
# number of cores to run sampling chains on
conf['ncores'] = 2
# number of samples per chain
conf['nsamples'] = 2000
# -

conf

# ## Save configuration

cnf.save(out_path, conf)

# # Generate data and plot

# +
# list of wavelengths (x-values)
xval = [i for i in range(200, 400, 2)]
# number of spectra per peak number
nsets = 4
# number of peaks in the spectrum
peak_numbers = [2,3,4,5,6]
# total number of datasets
tsets = nsets * len(peak_numbers)
# total number of inference runs (per run)
truns = nsets * len(peak_numbers)**2

# generate nruns sets of spectra
for r in range(conf['nruns']):
    print("Generating dataset {0} of {1}".format(r+1,conf['nruns']))
    ldata, lpeaks, lpeakdata = [], [], []
    # create output directory for data
    out_dir = out_path + '/run_{0:02d}'.format(r+1)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    for pn in peak_numbers:
        for i in range(nsets):
            df, peaks, df_peakinfo = dg.data_generator(xvalues=xval, nsamples=15, npeaks=pn)
            ldata.append(df)
            lpeaks.append(peaks)
            lpeakdata.append(df_peakinfo)
    # save data and peak information to disk
    for i in range(len(ldata)):
        ldata[i].to_csv(out_dir + '/dataset_{0:02d}.csv'.format(i+1), index=False)
        lpeakdata[i].to_csv(out_dir + '/peakdata_{0:02d}.csv'.format(i+1), index=False)
    dg.data_save(out_dir + '/peakinfo.csv', lpeaks)
    # plot datasets
    filen = out_dir + '/scenario_peaks'
    fig.plot_datasets(ldata, lpeaks, dims=(int(tsets/2),2), figure_size=(12,int(tsets*(1.8))), savefig='yes', fname=filen)
# -

print("total number of multiple runs : {0}".format(conf['nruns']))
print("total number of peak numbers : {0}".format(len(peak_numbers)))
print("total number of datasets per peak number : {0}".format(nsets))
print("total number of datasets per model : {0}".format(tsets))
print("total number of inference runs (per single loop) : {0}".format(truns))

# # Load data, run inference, visualize, collect results and save

# convert pandas data to numpy arrays
x_val = np.array(xval, dtype='float32')

# +
# dataframe to hold multiple run results
res_df = pd.DataFrame()

# run the whole loop of inference, posterior sampling, results collection and saving
for r in range(conf['nruns']):
    print("starting loop {0}/{1}".format(r+1,conf['nruns']))
    models, traces, lmodpeak = [], [], []
    # load datasets from disk
    data_dir = out_path + '/run_{0:02d}'.format(r+1)
    ldata, lpeaks, lpeakdata = dg.data_load(tsets, data_dir)
    # store dataset y-values in list
    cols = ldata[0].columns
    y_val = [ldata[i][cols].values for i in range(len(ldata))]
    # actual inference run number
    inf_run = 1
    for pn in peak_numbers:
        print("running {0}-peak model".format(pn))
        for i in range(len(ldata)):
            # Number of peaks actually present in spectrum i.
            # BUGFIX: this value was previously taken from len(plist), a name
            # that is only bound inside the peak_info == 'yes' branch below,
            # so the 'no' branch would raise a NameError at the print/bookkeeping
            # statements. len(lpeaks[i]) is identical in the 'yes' case.
            npeaks_real = len(lpeaks[i])
            if conf['peak_info'] == 'yes':
                # Get the peak numbers from the list. If the actual peak number in the spectrum is
                # lower than what the model is expecting, then expand the list to the expected size,
                # duplicating the existing peak mu values, else truncate the list (taking the peaks
                # with the highest amplitude).
                plist = sorted(lpeaks[i])
                if len(plist) < pn:
                    pl = sorted(np.resize(plist, (1,pn)).flatten())
                else:
                    # sort peak info dataframe on amplitude value
                    l1 = lpeakdata[i].sort_values('amp', ascending=False)
                    # truncate list to expected peak number
                    pl = l1['mu'].values[:pn]
                model_g = mdl.model_pvoigt(xvalues=x_val, observations=y_val[i],
                                           npeaks=pn, mu_peaks=pl, pmodel=conf['prior_model'])
            else:
                model_g = mdl.model_pvoigt(xvalues=x_val, observations=y_val[i],
                                           npeaks=pn, pmodel=conf['prior_model'])
            models.append(model_g)
            with model_g:
                print("({6}:{2}/{3}) running inference on dataset #{0}/{1} [{4}-peak model:{5}-peak spectrum]"
                      .format(i+1,len(ldata),inf_run,truns,pn,npeaks_real,r+1))
                lmodpeak += [(pn,npeaks_real)]
                trace_g = pm.sample(conf['nsamples'], init=conf['init_mode'], cores=conf['ncores'])
                traces.append(trace_g)
            inf_run += 1
    # save model figure as image (once)
    if r == 0:
        img = pm.model_to_graphviz(models[0])
        img.render(filename=file_basename + '_model', format='png');
    # posterior predictive traces
    ppc = [pm.sample_posterior_predictive(traces[i], samples=500, model=models[i]) for i in range(len(traces))]
    # collect the results, concat single run result to overall result
    varnames = ['amp', 'mu', 'sigma', 'epsilon']
    lruns = ['{0}'.format(r+1) for i in range(truns)]
    df = res.get_results_summary(varnames, traces, ppc, y_val, epsilon_real=0.05,
                                 sets=tsets, labels=lmodpeak, runlist=lruns)
    res_df = res_df.append(df, ignore_index=True)
# -

# ## Show results and save

res_df

# save results to .csv
res_df.to_csv(out_path + '/scenario_peaks_mruns.csv', index=False)

cnf.close(out_path)
code/scenarios/scenario_c/scenario_peaks_mruns.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # <NAME>
# ## Research question/interests
# I want to examine the point distributions through the years and evaluate the impact that nationality may have on scores.

import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import seaborn as sns
import csv

# + jp-MarkdownHeadingCollapsed=true tags=[]
# Load the raw Formula 1 season-results data (one row per driver per year).
f1data = pd.read_csv("../data/raw/f1data.csv")
# -

# ## Milestone 3, Task 1

f1data.shape

# This lets me know that our data has 1573 rows of data with 6 keys (unique columns).

f1data.columns

# + tags=[]
# Summary statistics; format(x, 'f') keeps the numbers in plain decimal
# notation instead of scientific notation.
f1data.describe().apply(lambda s: s.apply(lambda x: format(x, 'f')))
# -

# Hexbin joint plot of points scored versus year.
hex=sns.jointplot(x=f1data["Year"], y=f1data["PTS"], kind="hex")
hex.fig.suptitle("Frequency of Points Scored")
hex.fig.tight_layout()
hex.ax_joint.set_ylabel("Points")
hex.fig.subplots_adjust(top=0.95)

# + [markdown] tags=[]
# This graph dramatically illustrates that the majority of drivers in F1 are very low scoring, and that rarely there are multiple high scoring drivers in a year. We can also see that before around 1990, there was no frequency of points scored over 100 but after 1990, it becomes much more frequent.
# -

p= sns.barplot(x="Year", y="PTS", data=f1data)
p.set(ylabel="Points", title="Point Distribution")
p.figure.set_size_inches(35,10)

# + [markdown] tags=[]
# This graph clearly shows that in recent years, there have been more total points rewarded in series. This agrees with indications in our initial research showing that there have been more races added in recent years. It also shows that the biggest jump is between 2009 and 2010, where the average points increases drastically. We can also see that 2020 appears to be an outlier; this can be due to the fact that at the time of this dataset, the 2020 F1 season had not yet concluded.
# -

s=sns.scatterplot(data=f1data, x="Nationality", y="PTS")
s.set(ylabel="Points", title="Points by Nationality")
s.figure.set_size_inches(18,5)

# + [markdown] tags=[]
# This graph breaks down the all time points of all nationalities in F1 pointing towards those which have scored the most points. We can see that GBR (Great Britain), GER (Germany), and FIN (Finland) are the top three highest scoring nationalities but they also appear to have the most data points.
# -

c = sns.countplot(x="Nationality", data=f1data)
c.set(ylabel="Number of Drivers", title="Frequency of Drivers by Nationality")
c.figure.set_size_inches(15,5)

# Expanding on the previous graph, I wanted to explore how many data points existed for each Nationality. This is important as it indicates how many drivers raced for each nationality. We can see that similarly, Great Britain has the highest number of drivers with over 250 but Germany and Finland have a much lower number of drivers. Instead ITA (Italy), FRA (France), and USA (United States) have a much higher number of drivers in comparison.

# Strip plot of points per finishing position; the explicit order keeps the
# positions sorted numerically, with the 'DQ' (disqualified) category last.
s2=sns.stripplot(data=f1data, x="Pos", y="PTS",order=['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','DQ'])
s2.set(xlabel="Position", ylabel="Points", title="Points per Position")
s2.figure.set_size_inches(10,5)

# + [markdown] tags=[]
# This graph displays the total points scored by the varying positions each year. It should be noted that since there is not a fixed number of racers every year, there are some positions that will not have scores each year. There is also notably a special consideration of a DQ, disqualification, one year. As expected, we can see that there are a higher number of points distributed to the higher positions which makes sense considering it is the reward for earning the position. It is interesting however, that there almost appears to be 2 unique patterns between position 1 and 10. We can see the lower portion which is much more saturated and 1 is around 150, and an upper portion which is less saturated and 1 is around 400. Based on our earlier graph which showed that after 2010, there was a higher point average, we can theorize that the lower portion of this graph is the earlier years of F1, and the upper portion is years after 2010.
# -

# ## Milestone 3, Task 2

# Renaming columns to be a better description of the data.

# + tags=[]
f1data.rename(columns={"Car": "Team", "Pos": "Position", "PTS": "Points"}, inplace=True)
# -

# Checking to see if there are any null values.

f1data.isnull().any(axis=None)

print("Preview of data with null values:")
print(f1data[f1data.isnull().any(axis=1)])
plt.show()

# Since we can see that the only null values are in the Team column, we know that these drivers raced independently and therefore did not belong to a team. We replace these null values instead with "Independant" to accurately represent the data.

f1data["Team"].fillna("Independant", inplace=True)

# We then check an example of an entry that previously had a null value to confirm that it worked as expected.

f1data.loc[16,:]

# Checking to see if there are any duplicate entries in our dataset.

duplicateRowsDF = f1data[f1data.duplicated()]
if duplicateRowsDF.empty:
    print("There are no duplicates."),
else:
    print("Duplicate Rows except first occurrence based on all columns are:")
    print(duplicateRowsDF)

# Checking to see what the current delimiter for our data is.

sniffer = csv.Sniffer()
dialect = sniffer.sniff("first, second, third, fourth")
print(dialect.delimiter)

# Since in our initial scan of the dataset, we could see that there was an outstanding error with the name of one driver. We corrected the formatting of his name throughout the dataset.

f1data["Driver"] = f1data["Driver"].replace(["Kimi Räikkönen RAI"],"Kimi Räikkönen RAI")

# We want to separate the Driver ID from the Driver column as they are currently grouped.
# Add a new column for the Driver ID's.

# The Driver column ends with a three-letter driver code (e.g.
# "Lewis Hamilton HAM"); split that code into its own Driver_ID column and
# strip it from the Driver name.
f1data["Driver_ID"] = f1data["Driver"].str[-3:]
f1data["Driver2"] = f1data["Driver"].str[:-3]
f1data = f1data.drop(columns=["Driver"])
f1data.rename(columns={"Driver2": "Driver"}, inplace=True)

# Reorganize columns to be in the most logical order.

f1data = f1data[["Driver", "Driver_ID", "Team", "Nationality", "Points", "Position", "Year"]]

# + [markdown] tags=[]
# ## Milestone 3, Task 3

# +
def load_and_process(csv_file):
    """Load the raw F1 results CSV and return the cleaned dataframe.

    Reproduces the Task 2 cleaning steps as two method chains: renames the
    columns, fills missing teams, fixes the mis-formatted driver name,
    splits the trailing three-letter driver code into its own Driver_ID
    column and reorders the columns.

    Parameters
    ----------
    csv_file : str
        Path to the raw f1data csv file.

    Returns
    -------
    pandas.DataFrame
        Cleaned data with columns Driver, Driver_ID, Team, Nationality,
        Points, Position, Year.
    """
    # Method Chain 1 (Load data and deal with missing data)
    df1 = (
        pd.read_csv(csv_file)
        .rename(columns={"Car": "Team", "Pos": "Position", "PTS": "Points"})
        # BUGFIX: fillna() was previously applied to the entire dataframe,
        # which would write the string "Independant" into any numeric column
        # that happened to contain a NaN. Task 2 established that only the
        # Team column has missing values, so fill only that column (the
        # spelling is kept for consistency with the values written in Task 2).
        .assign(Team=lambda d: d["Team"].fillna("Independant"))
        .replace(["Kimi Räikkönen RAI"],"Kimi Räikkönen RAI")
    )

    # Method Chain 2 (Create new columns, drop others, and do processing)
    df2 = (
        df1
        .assign(Driver_ID = df1["Driver"].str[-3:],Driver2 = df1["Driver"].str[:-3])
        .drop(columns=["Driver"])
        .rename(columns={"Driver2": "Driver"})
        [["Driver", "Driver_ID", "Team", "Nationality", "Points", "Position", "Year"]]
    )

    # Return the latest dataframe
    return df2


load_and_process("../data/raw/f1data.csv")
notebooks/analysis2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # API Interfaces # # One short example, different approaches of using APIs. # ## Curl & Friends # ! curl -s "https://geocoder.api.here.com/6.2/geocode.json"\ # ?searchtext=Invalidenstr.%20116%2C%2010115%20Berlin%2C%20Germany\ # \&app_id=$(python -c 'from credentials import *; print(APP_ID)')\ # \&app_code=$(python -c 'from credentials import *; print(APP_CODE)') # | jq . # ! http "https://geocoder.api.here.com/6.2/geocode.json"\ # ?searchtext=Invalidenstr.%20116%2C%2010115%20Berlin%2C%20Germany\ # \&app_id=$(python -c 'from credentials import *; print(APP_ID)')\ # \&app_code=$(python -c 'from credentials import *; print(APP_CODE)') # ## Requests # + from urllib.parse import quote import requests from credentials import APP_ID, APP_CODE url = 'https://geocoder.api.here.com/6.2/geocode.json' params = dict( searchtext=quote('Invalidenstr. 116, 10115 Berlin, Germany'), app_id=APP_ID, app_code=APP_CODE ) query = '&'.join(f'{k}={v}' for (k, v) in params.items()) requests.get(f'{url}?{query}').json() # - # ## Geopy # # https://github.com/geopy/geopy # + from geopy.geocoders import Here from credentials import APP_ID, APP_CODE geocoder = Here(APP_ID, APP_CODE) geocoder.geocode('Invalidenstr. 116, 10115 Berlin, Germany') # - # ## Ipyrest # # https://github.com/deeplook/ipyrest # + from urllib.parse import quote from ipyrest import Api from credentials import APP_ID, APP_CODE url = 'https://geocoder.api.here.com/6.2/geocode.json' params = dict( searchtext=quote('Invalidenstr. 116, 10115 Berlin, Germany'), app_id=APP_ID, app_code=APP_CODE ) Api(url, params=params) # - # ## Other # # - [Postman](https://getpostman.com) # - [Insomnia](https://insomnia.rest) (!)
apiinterfaces.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.5 (''.venv'': venv)'
#     name: pythonjvsc74a57bd067b393f23005f5647497c50fa99fb25b525d8642232b1bdc07a39bdb19f3ee4f
# ---

# Compare two coke-combustion reaction-surface models ("diffuse interface"
# vs. "random pore") from OpenFOAM post-processing output: reaction rate /
# conversion histories, temperature histories, and field contour plots.

# +
import os
WM_PROJECT_USER_DIR=os.environ['WM_PROJECT_USER_DIR']
import sys
# project-local post-processing helpers live under $WM_PROJECT_USER_DIR/utilities
sys.path.append(f"{WM_PROJECT_USER_DIR}/utilities")
import numpy as np
import pandas as pd
import postProcess.polyMesh2d as mesh2d
import postProcess.pyResconstruct as pyResconstruct
import postProcess.pyFigure as pyFigure
import postProcess.pyCompute as pyCompute
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import json
import proplot as plot
import concurrent.futures

# %load_ext autoreload
# %autoreload 2
# -

# Reaction-rate / burning-fraction histories for both cases, sorted by time.
df_rate_external=pd.read_csv("../T673_Pe1e-3_ExternalSurface/postProcess/others/ReactionRateAndBurningRate.csv")
df_rate_external.sort_values(by="time",inplace=True)

df_rate_rpm=pd.read_csv("../T673_Pe1e-3_modifiedRPM/postProcess/others/ReactionRateAndBurningRate.csv")
df_rate_rpm.sort_values(by="time",inplace=True)

# +
# Twin-axis plot: volume-averaged reaction rate (left) and conversion (right).
fig, ax = plot.subplots( aspect=(4, 3), axwidth=4)
c1 = plot.scale_luminance('cerulean', 0.5)
c2 = plot.scale_luminance('red', 0.5)
lns1=ax.plot(df_rate_external["time"],df_rate_external["vol_averaged_reaction_rate"],color=c1, label="diffuse interface model",ls="--")
lns1_2=ax.plot(df_rate_rpm["time"],df_rate_rpm["vol_averaged_reaction_rate"],color=c1, label="random pore model")
max_rate=df_rate_rpm["vol_averaged_reaction_rate"].max()
ax.format(xlabel="Time (s)",ylabel="Volume-averaged coke reaction rate (kg/m$^3$/s)", ycolor=c1,ylim=(-0.1,max_rate*1.1))
ax2 = ax.twinx()
lns2=ax2.plot(df_rate_external["time"],df_rate_external["burning_fraction"]*100,color=c2,label="diffuse interface model",ls="--",marker="o",ms=4)
lns2_2=ax2.plot(df_rate_rpm["time"],df_rate_rpm["burning_fraction"]*100,color=c2,label="random pore model",marker="^",ms=4)
ax2.format(xlabel="Time (s)",ylabel="Conversion (%)",ycolor=c2, ylim=(-1,100))
# combine the line handles from both axes into one legend
lns = lns1+lns1_2+lns2+lns2_2
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc="upper right", ncol=2,fancybox=True)
# -

fig.savefig("ReactionRateAndBurningRate2.jpg",bbox_inches='tight')

# Max transverse-averaged temperature and outlet O2 concentration histories.
file_path_external="/home/anoldfriend/OpenFOAM/anoldfriend-7/run/cokeCombustion/T673_Pe1e-3_ExternalSurface/postProcessing/minMaxComponents2/32.51/fieldMinMax.dat"
transverse_data_folder_external="/home/anoldfriend/OpenFOAM/anoldfriend-7/run/cokeCombustion/T673_Pe1e-3_ExternalSurface/postProcess/transverseAveragedData"
df_combined_external=pyCompute.computeMaxTemperatureAndOutletO2ConcHistory(file_path_external,transverse_data_folder_external)
df_combined_external.head()

file_path_rpm="/home/anoldfriend/OpenFOAM/anoldfriend-7/run/cokeCombustion/T673_Pe1e-3_modifiedRPM/postProcessing/minMaxComponents2/22.65/fieldMinMax.dat"
transverse_data_folder_rpm="/home/anoldfriend/OpenFOAM/anoldfriend-7/run/cokeCombustion/T673_Pe1e-3_modifiedRPM/postProcess/transverseAveragedData"
df_combined_rpm=pyCompute.computeMaxTemperatureAndOutletO2ConcHistory(file_path_rpm,transverse_data_folder_rpm)
df_combined_rpm.head()

# +
# Twin-axis plot: max transverse-averaged temperature vs. outlet O2 concentration.
fig, ax = plot.subplots( aspect=(4, 3), axwidth=4)
lns1=ax.plot(df_combined_external["Time"],df_combined_external["Transverse_Tmax"],color=c1,label="diffuse interface model",linestyle="--")
lns1_2=ax.plot(df_combined_rpm["Time"],df_combined_rpm["Transverse_Tmax"],color=c1,label="random pore model",linestyle="-")
ax.format(xlabel="Time (s)",ylabel="Max transverged-averaged temperature (K)",ycolor=c1,ylim=(672,720))
ax2 = ax.twinx()
lns2= ax2.plot(df_combined_external["Time"],df_combined_external["O2ConcAtOutlet"],color=c2,linestyle="--",label="diffuse interface model",marker="o",ms=4)
lns2_2= ax2.plot(df_combined_rpm["Time"],df_combined_rpm["O2ConcAtOutlet"],color=c2,linestyle="-",label="random pore model",marker="^",ms=4)
max_O2=df_combined_external["O2ConcAtOutlet"].max()
ax2.format(xlabel="Time (s)",ylabel="O$_2$ mole concentration At Outlet (mol/m$^3$)", ycolor=c2)
lns = lns1+lns1_2+lns2+lns2_2
labs = [l.get_label() for l in lns]
ax.legend(lns, labs,loc="upper right", ncol=2, fancybox=False)
# -

fig.savefig("MaxTransverseTemperature_OutletO2ConcHistory2.jpg",bbox_inches='tight')

# +
# Point-wise maximum temperature histories from the fieldMinMax output.
maxT_file_external="/home/anoldfriend/OpenFOAM/anoldfriend-7/run/cokeCombustion/T673_Pe1e-3_ExternalSurface/postProcessing/minMaxComponents/0.01/fieldMinMax.dat"
maxT_file_rpm="/home/anoldfriend/OpenFOAM/anoldfriend-7/run/cokeCombustion/T673_Pe1e-3_modifiedRPM/postProcessing/minMaxComponents/0.15/fieldMinMax_0.1500012.dat"
maxT_external=pyFigure.read_min_max_field(maxT_file_external,100,"T")
maxT_rpm=pyFigure.read_min_max_field(maxT_file_rpm,100,"T")
# -

# +
# Twin-axis plot: maximum temperature vs. outlet O2 concentration.
fig, ax = plot.subplots( aspect=(4, 3), axwidth=4)
lns1=ax.plot(maxT_external["Time"],maxT_external["max"],color=c1,label="diffuse interface model",linestyle="--")
lns1_2=ax.plot(maxT_rpm["Time"],maxT_rpm["max"],color=c1,label="random pore model",linestyle="-")
ax.format(xlabel="Time (s)",ylabel="Maximum temperature (K)",ycolor=c1,ylim=(672,720))
ax2 = ax.twinx()
lns2= ax2.plot(df_combined_external["Time"],df_combined_external["O2ConcAtOutlet"],color=c2,linestyle="--",label="diffuse interface model",marker="o",ms=4)
lns2_2= ax2.plot(df_combined_rpm["Time"],df_combined_rpm["O2ConcAtOutlet"],color=c2,linestyle="-",label="random pore model",marker="^",ms=4)
max_O2=df_combined_external["O2ConcAtOutlet"].max()
ax2.format(xlabel="Time (s)",ylabel="O$_2$ mole concentration At Outlet (mol/m$^3$)", ycolor=c2)
lns = lns1+lns1_2+lns2+lns2_2
labs = [l.get_label() for l in lns]
ax.legend(lns, labs,loc="upper right", ncol=2, fancybox=False)
# -

fig.savefig("MaxTemperature_OutletO2ConcHistory2.jpg",bbox_inches='tight')

# ## plot contours

# ### RPM

# +
# Contour plots for the random pore model case at one time instant.
rawData_rpm="/home/anoldfriend/OpenFOAM/anoldfriend-7/run/cokeCombustion/T673_Pe1e-3_modifiedRPM/postProcess/rawdata"
savefolder_rpm="/home/anoldfriend/OpenFOAM/anoldfriend-7/run/cokeCombustion/reactionSurfaceModel/rpm_images2"
fields=["eps","O2Conc","Qdot","T"]
# fixed colour ranges so both cases are directly comparable
xranges={"T":{"vmin":673,"vmax":701},
         "Qdot":{"vmin":0,"vmax":2.8e9}}
time_instant="10.15"
pyFigure.read_plot_multiple_field_contourf_save(rawData_rpm,fields,time_instant,savefolder_rpm,xranges)
# -

# +
# Contour plots for the diffuse interface (external surface) case.
rawData_rpm="/home/anoldfriend/OpenFOAM/anoldfriend-7/run/cokeCombustion/T673_Pe1e-3_ExternalSurface/postProcess/rawdata"
savefolder_rpm="/home/anoldfriend/OpenFOAM/anoldfriend-7/run/cokeCombustion/reactionSurfaceModel/external_images"
fields=["eps","O2Conc","Qdot","T"]
xranges={"T":{"vmin":673,"vmax":701},
         "Qdot":{"vmin":0,"vmax":2.8e9}}
time_instant="10.01"
pyFigure.read_plot_multiple_field_contourf_save(rawData_rpm,fields,time_instant,savefolder_rpm,xranges)
run/cokeCombustion/reactionSurfaceModel/reactionSurfaceModel2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Sensitivity analysis
# ============
#
# __Goal__:
#  - run sensitivity analysis to show the impact of a given parameter on the SMRT output
#
# __Learning__:
#
#
# SMRT is able to iterate on several arguments when it is unambiguous. For instance, a sensor with multiple frequencies, angles or polarizations is automatically understood. The `result` contains all the values which can be accessed with arguments in TbV() and similar functions. E.g. TbV(frequency=37e9)
#
# This is similar when a list of snowpacks is given to `run`. The `result` contains all the computations. The 'snowpack' dimension is automatically added but we can also propose a custom name for this dimension.
#
# In the following, we show different approaches to conduct sensitivity studies that you can run and then apply to a study case of your choice:
#  - take the Dome C snowpack and study the sensitivity of TbH 55° to superficial density
#  - take any snowpack previously defined and investigate the sensitivity to liquid_water
#  - etc
#

# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib notebook

from smrt import make_model, make_snowpack, sensor_list
# -

# Build a list of snowpack
# --------------------------------
#
# The key idea is to build a list of snowpack. E.g. we want to test the sensitivity of TB's to the radius. We first build a list of snowpack with different radius.

# +
# prepare the snowpack
density = 300.0
radius = np.arange(0.05, 0.5, 0.01) *1e-3  # radii from 0.05 to 0.5 mm

# the NAIVE APPROACH:

snowpack = list()
for x in radius:
    sp = make_snowpack([1000.0], "sticky_hard_spheres",
                       density=density, temperature=265, radius=x, stickiness=0.15)
    snowpack.append(sp)
# -

# In simple cases (as this one), it is easier to use "list comprehension", a nice python feature to create list.

# with list comprehension
snowpack = [make_snowpack([1000.0], "sticky_hard_spheres",
                          density=density, temperature=265, radius=x, stickiness=0.15) for x in radius]

# +
# prepare the sensor and model

model = make_model("iba", "dort")
sensor = sensor_list.passive(37e9, 55)

#run!
# -

# Now we have a list of snowpacks, we want to call the model for each snowpack. We can use list comprehension again.

results = [model.run(sensor, sp) for sp in snowpack]

# This returns a list of results. To extract the TB V for each result can be done with another list comprehension. And then we plot the results.

tbv = [res.TbV() for res in results]
plt.figure()
plt.plot(radius, tbv)

# Nice? We can do much better because `Model` can directly run on a list of snowpacks. It does not return a list of results, but **a unique result with a new coordinate** which is much more convenient.

results = model.run(sensor, snowpack, snowpack_dimension=('radius', radius))
print(type(results))  # look results is a Result, not a list
print(results.coords)  # look, we have several coordinates; one is called 'radius', the dimension name given above

# This is more compact and nicer, `results` explicitly show the radius dimension. Plotting is thus easier:

plt.figure()
plt.plot(results.radius, results.TbV())

# And it is easy to save the result to disk:

results.save("radius-sensitivity.nc")

# Recap:
# ---------

# +
snowpack = [make_snowpack([1000.0], "sticky_hard_spheres",
                          density=density, temperature=265, radius=x, stickiness=0.15) for x in radius]

model = make_model("iba", "dort")
sensor = sensor_list.passive([19e9, 37e9], 55)

results = model.run(sensor, snowpack, snowpack_dimension=('radius', radius))

plt.figure()
plt.plot(results.radius, results.TbV(frequency=19e9), label="19 GHz")
plt.plot(results.radius, results.TbV(frequency=37e9), label="37 GHz")
plt.legend()
# -

results.TbV()
02_using_smrt/03_sensitivity_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [Link to this document's Jupyter Notebook](./0202--BASH_pre-class-assignment.ipynb) # In order to successfully complete this assignment you must do the required reading, watch the provided videos and complete all instructions. The embedded survey form must be entirely filled out and submitted on or before **11:59pm on Tuesday February 2**. Students must come to class the next day prepared to discuss the material covered in this assignment. # --- # # # # Pre-Class Assignment: BASH Scripting # # # <a href="https://www.theurbanpenguin.com/scripting-power-repetition/shell-scripting-bash/"><img src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBw8SBhUSEhMWFhIVGCAWFxYVFRoYGRYXGh8XHiEeGBkgICkiGh8nHRgXIjEiJykuLi4uGh82ODUtOSgtLysBCgoKDg0OGhAQGiseIB8tLS4uLSs3OC0rLTAtLisrLS0uKy0xLS0tLSstLS0tLy03LS0yLS0vKy03LS0uKzcrLf/AABEIAJgBTAMBIgACEQEDEQH/xAAcAAEAAQUBAQAAAAAAAAAAAAAABgMEBQcIAQL/xABMEAABAwICBQgGBQgGCwAAAAABAAIDBBEFEgYHITFRExRBU4GRktEiUmFxcrEVMjZCsiM3YnN0gqHBFjTC0uHwFyczNUdUg4SUw/H/xAAZAQEAAwEBAAAAAAAAAAAAAAAAAQIEAwX/xAAtEQEAAQIFA<KEY>EREBERAREQERE<KEY>" alt="Urban Penguin"></a> # # <a href="https://swcarpentry.github.io/shell-novice/"><img src="https://software-carpentry.org/assets/img/logo-blue.svg" alt="software carpentry"></a> # # Goals for today's pre-class assignment # # 1. [BASH Whitespace sensitivity](#BASH-Whitespace-sensitivity) # 2. [Running BASH Scripts](#Running-BASH-Scripts) # 3. [Executable files in Linux](#Executable-files-in-Linux) # 4. [ Explore Online Resources](#-Explore-Online-Resources) # 5. [Assignment wrap up](#Assignment-wrap-up) # # # --- # <a name=BASH-Whitespace-sensitivity></a> # # 1. 
BASH Whitespace sensitivity # # # **WARNING** Bash is highly "Whitespace sensitive" (Much worse than Python and Python is bad enough). This means that adding spaces and tabs can change how the programming language works. A common example is variable assignment. Consider the following command which creates a variable called ```num_of_files``` which is the number of (non-hidden) files in the current directory: # # num_of_files=`ls -la * | wc -l` # # If you add a few spaces to make it "easier" to read you can break the command. For example adding spaces around the ```=``` will cause a "Command not found error": # # num_of_files = `ls -la * | wc -l` # # This sensitivity to whitespace can be quite annoying. For this reason, pay close attention when you copy/paste examples from the internet. # # # --- # <a name=Running-BASH-Scripts></a> # # 2. Running BASH Scripts # # # # There are multiple ways to run a BASH script. # # ### Option 1: Run bash directly # In this option you run a new bash command inside of your bash terminal and then pass the bash command the name of your script file. For example: # # bash ./myscript.sh # # ### Option 2: Source the Bash File # You can also use the ```source``` command to run your script: # # source ./myscript.sh # # The difference between the ```source``` command and the ```bash``` command is that ```source``` runs your script inside your current instance of bash as if you typed the commands on the command line. In contrast, ```bash``` starts a new bash interpreter, runs the commands in that new interpreter and then returns control back to your bash command. The ```source``` command can be extremely useful if your bash script changes some environment variables and you want to keep the variables. # # If you use the first, ```bash``` option, the bash command creates a new interpreter, your script would set some variables and then the interpreter would be closed before returning to your main bash prompt. 
# # If you use the second, ```source``` option, the source command will execute in your current bash environment and change the variables in the scope of that environment. # # ### Option 3: Make your script an executable # For each file in Linux there is a "flag" indicating if the file is an executable. If a file's executable flag is set (see below on how to set a files executable bit), you can run the file just by type the file name. For Example: # # ./myscript.sh # # If the file is in your PATH, then you can just type the name without the writing the path to the file. For example: # # myscript.sh # # **NOTE:** Since we are running our scripts form inside BASH the Linux operating system assumes they are BASH scripts. However, we may not want to count on this assumption. The first line of a executable script can be used to indicate what program Linux should use to run the script. For example: # # # #!/bin/bash # # This line tells Linux to run this ASCII file inside of the ```/bin/bash``` program. # # --- # <a name=Executable-files-in-Linux></a> # # 3. Executable files in Linux # # # # You can check if a file's executable bit is set by running the "ls -la" command which has output similar to the following: # # >ls -la # total 48K # drwxrwxr-x 2 colbrydi cmse 5 Jan 8 09:29 . # drwxrwxr-x 14 colbrydi cmse 35 Jan 24 08:34 .. # -rwxrwxr-x 1 colbrydi cmse 6.8K Jan 8 09:29 example # -rw-rw-r-- 1 colbrydi cmse 747 Jan 8 09:29 example_calc_e.c # -rw-rw-r-- 1 colbrydi cmse 209 Jan 8 09:29 README # # The first cryptic looking string with 10 letters is the properties string for each file and looks something like that looks like ```drwxrwxr-x```. The first character is the directory "bit which has the following meaning: # # - ```d``` or ```-``` : Indicates if the file is a directory (or not) # # The first set of three letters (after the directory bit) are the permissions for the **user** of the file (in the above file the owner is ```colbrydi```). 
The second set of three letters are the permissions for the **group** (in the above example the group is ```cmse```). The final set of three letters are permissions for everyone or **all**. # # Each set of three letters has the following meanings: # # - ```r``` or ```-``` : indicates if the file is readable (or not) # - ```w``` or ```-``` : Indicates if the file is writable (or not) # - ```x``` or ```-``` : indicates if the file is executable (or not) # # **NOTE:** if the file is a directory (i.e. the directory bit is set to ```d```) then the executable bit indicates the category of users that can change into that directory. # &#9989; **<font color=red>QUESTION:</font>** What category of users can execute a file with -rwxrwxr-- permissions? # Put your answer to the above question here. # You can use the ```chmod``` (aka change mode) command to change permission bits. For example, the following command takes the file ```myfile``` and sets the (u)ser's e(x)ecutable flag to true: # # chmod u+x myfile # # You can also "turn off" the (u)ser's e(x)ecutable flag by using the following command: # # chmod u-x myfile # # The format of the ```chmod``` command is quite simple. You type ```chmod``` followed by the user category you want to change ( **u**ser, **g**roup, **a**ll). Then use a ```+``` to add that permission or ```-``` to remove the permission. Then the combination of letters ```rwx``` that you want to change. # &#9989; **<font color=red>QUESTION:</font>** What command would you write to change the permissions of the ```myscript.sh``` file in your current directory so that the owner (i.e. user) has all of the permission bits set (read, write and execute)? # Put your answer to the above question here. # # # --- # <a name=-Explore-Online-Resources></a> # # 4. Explore Online Resources # # # BASH has been around for many decades. There are hundreds of tutorials available online to help you learn how to use BASH effectively. 
For example, the above images have links too two tutorials that you may find helpful. # # BASH is a programming language in it's own right. So far in class we have learned: # # - BASH environment variables (env and export) # - Comments in BASH (hint same as python) # - How BASH finds commands to run (i.e. PATH and which) # - Navigating the file system in BASH (ex. ls, cd, ., ~) # # In class, we will be building more complex BASH scripts. To do more complex stuff in bash we need more complex commands. Specifically you need to know how to build BASH "if" statements and loops. # # &#9989; **<font color=red>DO THIS:</font>** Explore the web to teach yourself how to use BASH ```if``` statements and ```for``` loops. Hint, use the keyword ```BASH``` when doing searches. This will narrow the search down to examples you can use. Test what you learn by writing a submission script with an ```if``` example and a ```for``` example. # &#9989; **<font color=red>QUESTION:</font>** What is the keyword used to end an ```if``` statement in bash? # Put your answer to the above question here. # &#9989; **<font color=red>QUESTION:</font>** Did you find any additional and helpful BASH tutorials not mentioned above? Please provide a URL link to that tutorial. # Put your answer to the above question here. # --- # <a name=Assignment-wrap-up></a> # # 5. Assignment wrap up # # # Please fill out the form that appears when you run the code below. **You must completely fill this out in order to receive credits for the assignment!** # # [Direct Link to Google Form](https://cmse.msu.edu/cmse401-pc-survey) # # # If you have trouble with the embedded form, please make sure you log on with your MSU google account at [googleapps.msu.edu](https://googleapps.msu.edu) and then click on the direct link above. # &#9989; **<font color=red>Assignment-Specific QUESTION:</font>** Did you find any additional and helpful BASH tutorials not mentioned above? Please provide a URL link to that tutorial. 
# Put your answer to the above question here # &#9989; **<font color=red>QUESTION:</font>** Summarize what you did in this assignment. # Put your answer to the above question here # &#9989; **<font color=red>QUESTION:</font>** What questions do you have, if any, about any of the topics discussed in this assignment after working through the jupyter notebook? # Put your answer to the above question here # &#9989; **<font color=red>QUESTION:</font>** How well do you feel this assignment helped you to achieve a better understanding of the above mentioned topic(s)? # Put your answer to the above question here # &#9989; **<font color=red>QUESTION:</font>** What was the **most** challenging part of this assignment for you? # Put your answer to the above question here # &#9989; **<font color=red>QUESTION:</font>** What was the **least** challenging part of this assignment for you? # Put your answer to the above question here # &#9989; **<font color=red>QUESTION:</font>** What kind of additional questions or support, if any, do you feel you need to have a better understanding of the content in this assignment? # Put your answer to the above question here # &#9989; **<font color=red>QUESTION:</font>** Do you have any further questions or comments about this material, or anything else that's going on in class? # Put your answer to the above question here # &#9989; **<font color=red>QUESTION:</font>** Approximately how long did this pre-class assignment take? # Put your answer to the above question here from IPython.display import HTML HTML( """ <iframe src="https://cmse.msu.edu/cmse401-pc-survey" width="100%" height="500px" frameborder="0" marginheight="0" marginwidth="0"> Loading... </iframe> """ ) # # # --------- # ### Congratulations, we're done! # # To get credit for this assignment you must fill out and submit the above survey from on or before the assignment due date. 
# ### Course Resources: # # # - [Website](https://msu-cmse-courses.github.io/cmse802-f20-student/) # - [ZOOM](https://msu.zoom.us/j/98207034052) # - [JargonJar](https://docs.google.com/document/d/1ahg48CCFhRzUL-QIHzlt_KEf1XqsCasFBU4iePHhcug/edit#) # - [GIT](https://gitlab.msu.edu/colbrydi/cmse401-s21.git) # # # Written by Dr. <NAME>, Michigan State University # <a rel="license" href="http://creativecommons.org/licenses/by-nc/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc/4.0/">Creative Commons Attribution-NonCommercial 4.0 International License</a>. # ---- # ----
assignments/0202--BASH_pre-class-assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Before your start: # - Read the README.md file # - Comment as much as you can and use the resources in the README.md file # - Happy learning! # # Challenge - Passing a Lambda Expression to a Function # # In the next excercise you will create a function that returns a lambda expression. Create a function called `modify_list`. The function takes two arguments, a list and a lambda expression. The function iterates through the list and applies the lambda expression to every element in the list. # + def modify_list(lst, lmbda): """ Input: list and lambda expression Output: the transformed list """ for i in range(len(lst)): lst[i] = lmbda(lst[i]) return lst l = [0, 10, 20, 30, 40, 50] lam = lambda x: x*3 print(modify_list(l, lam)) # - # #### Now we will define a lambda expression that will transform the elements of the list. # # In the cell below, create a lambda expression that converts Celsius to Kelvin. Recall that 0°C + 273.15 = 273.15K # your code here celsius_to_kelvin = lambda x: x + 273.15 # Finally, convert the list of temperatures below from Celsius to Kelvin. # + temps = [12, 23, 38, -55, 24] celsius_to_kelvin = lambda x: x + 273.15 # your code here kelvin_temps = [celsius_to_kelvin(t) for t in temps] print(kelvin_temps) # - # #### In this part, we will define a function that returns a lambda expression # # In the cell below, write a lambda expression that takes two numbers and returns 1 if one is divisible by the other and zero otherwise. Call the lambda expression `mod`. # your code here mod = lambda x, y=1: 1 if x % y == 0 else 0 print(mod(4, 4)) # #### Now create a function that returns mod. The function only takes one argument - the first number in the `mod` lambda function. 
# # Note: the lambda function above took two arguments, the lambda function in the return statement only takes one argument but also uses the argument passed to the function.

def divisor(b):
    """
    Input: a number
    Output: a function that returns 1 if the number is divisible by another
    number (to be passed later) and zero otherwise.
    """
    # BUG FIX: the original returned mod(b) — an int (mod's second argument
    # silently fell back to its default of 1), not a function.  Returning a
    # lambda that closes over b gives callers the one-argument divisibility
    # test this docstring and the exercise describe.
    return lambda a: mod(a, b)


print(divisor(5))

# Finally, pass the number 5 to `divisor`. Now the function will check whether a number is divisible by 5. Assign this function to `divisible5`

# your code here
# Per the instructions, divisible5 comes from divisor(5); it returns 1 or 0.
divisible5 = divisor(5)

# Test your function with the following test cases:
print(divisible5(10))
print(divisible5(8))

# # Bonus Challenge - Using Lambda Expressions in List Comprehensions
#
# In the following challenge, we will combine two lists using a lambda expression in a list comprehension.
#
# To do this, we will need to introduce the `zip` function. The `zip` function returns an iterator of tuples.

# +
# Here is an example of passing one list to the zip function.
# Since the zip function returns an iterator, we need to evaluate the iterator by using a list comprehension.
l = [1,2,3,4,5]
[x for x in zip(l)]
# -

# Using the `zip` function, let's iterate through two lists and add the elements by position.

# +
list1 = ['Green', 'cheese', 'English', 'tomato']
list2 = ['eggs', 'cheese', 'cucumber', 'tomato']

# your code here
f = lambda x, y: list(zip(x, y))
print([x for x in f(list1, list2)])
# -

# # Bonus Challenge - Using Lambda Expressions as Arguments
#
# #### In this challenge, we will zip together two lists and sort by the resulting tuple.
#
# In the cell below, take the two lists provided, zip them together and sort by the first letter of the second element of each tuple. Do this using a lambda function.
# +
list1 = ['Engineering', 'Computer Science', 'Political Science', 'Mathematics']
list2 = ['Lab', 'Homework', 'Essay', 'Module']

# your code here
import operator

# Pair each course with its deliverable, then order the pairs by the second
# element of each tuple (sorted() is stable, same result as an in-place sort).
sort_list = sorted(zip(list1, list2), key=operator.itemgetter(1))
print(sort_list)
# -

# # Bonus Challenge - Sort a Dictionary by Values
#
# Given the dictionary below, sort it by values rather than by keys. Use a lambda function to specify the values as a sorting key.

# +
d = {'Honda': 1997, 'Toyota': 1995, 'Audi': 2001, 'BMW': 2005}
d_values = d.values()

# your code here
# Sort the (make, year) pairs by year first, breaking ties alphabetically.
print(sorted(d.items(), key=lambda kv: (kv[1], kv[0])))
# -
module-1/Lambda-Functions/your-code/main.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Scala (2.13) // language: scala // name: scala213 // --- // # Scalameta Tree Guide // // *This notebook is based on the [Scalameta Tree Guide](https://scalameta.org/docs/trees/guide.html) from the Scalameta documenation*. // // A core functionality of Scalameta is syntax trees, which enable you to read, // analyze, transform and generate Scala programs at a level of abstraction. In // this guide, you will learn how to // // - parse source code into syntax trees // - construct new syntax trees // - pattern match syntax trees // - traverse syntax trees // - transform syntax trees // // ## Installation // // Add a dependency to Scalameta in your build to get started. Scalameta supports // Scala 2.11, Scala 2.12, Scala.js and Scala Native. // // ### sbt // ```scala // // build.sbt // libraryDependencies += "org.scalameta" %% "scalameta" % "4.2.3" // // // For Scala.js, Scala Native // libraryDependencies += "org.scalameta" %%% "scalameta" % "4.2.3" // ``` // // [![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.scalameta/scalameta_2.13/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.scalameta/scalameta_2.13) // // All code examples assume you have the following import // // ```scala // import scala.meta._ // // import scalafix.v1._ // ``` // ### Ammonite REPL or Jupyter Notebook with Almond // // A great way to experiment with Scalameta is to use the // [Ammonite REPL](http://ammonite.io/#Ammonite-REPL) or [Almond](https://almond.sh). // + attributes={"classes": ["scala"], "id": ""} // Ammonite REPL import $ivy.`org.scalameta::scalameta:4.2.3`, scala.meta._ // - // ### ScalaFiddle // // You can try out Scalameta online with the // [ScalaFiddle playground](scalafiddle.html). // // ## What is a syntax tree? 
// // Syntax trees are a representation of source code that makes it easier to // programmatically analyze programs. Scalameta has syntax trees that represent // Scala programs. // // ![](assets/tree.svg) // // Scalameta trees are **lossless**, meaning that they represent Scala programs in // sufficient detail to go from text to trees and vice-versa. Lossless syntax trees are // great for fine-grained analysis of source code, which is useful for a range of // applications including formatting, refactoring, linting and documentation tools. // // ## Parse trees // // Scalameta comes with a parser to produce syntax trees from Scala source code. // You can parse trees from a variety of sources into different kinds of tree // nodes. // // ### From strings // // The simplest way to parse source code is from a string. As long as you have // `import scala.meta._` in your scope, you can use the `parse[Source]` extension // method val program = """object Main extends App { print("Hello!") }""" val tree = program.parse[Source].get // Once parsed, you can print the tree back into its original source code println(tree.syntax) // The problem with parsing from strings is that error messages don't include a // filename println( "object Main {".parse[Source] ) // To make error messages more helpful it's recommended to always use virtual files // when possible, as explained below. // // ### From files // // To parse a file into a tree it's recommended to first read the file contents // into a string and then construct a virtual file val path = java.nio.file.Paths.get("example.scala") val bytes = java.nio.file.Files.readAllBytes(path) val text = new String(bytes, "UTF-8") val input = Input.VirtualFile(path.toString, text) val exampleTree = input.parse[Source].get print(exampleTree.syntax) // The difference between `text.parse[Source]` and `input.parse[Source]` is that // the filename appears in error messages for `Input.VirtualFile`. 
println( Input.VirtualFile("example.scala", "object Main {").parse[Source] ) // ### From expressions // // To parse a simple expressions such as `a + b` use `parse[Stat]` The name `Stat` // stands for "statement". println("a + b".parse[Stat].get.structure) // If we try to parse an expression with `parse[Source]` we get an error because // `a + b` is not valid at the top-level for Scala programs println("a + b".parse[Source]) // The same solution can be used to parse other tree nodes such as types println("A with B".parse[Type].get.structure) // If we use `parse[Stat]` to parse types we get an error println("A with B".parse[Stat]) // ### From programs with multiple top-level statements // // To parse programs with multiple top-level statements such as `build.sbt` files // or Ammonite scripts we use the `Sbt1` dialect. By default, we get an error when // using `parse[Source]`. val buildSbt = """ val core = project val cli = project.dependsOn(core) """ println(buildSbt.parse[Source]) // This error happens because vals are not allowed as top-level statements in // normal Scala programs. To fix this problem, wrap the input with `dialects.Sbt1` println(dialects.Sbt1(buildSbt).parse[Source].get.stats) // The same solution works for virtual files println( dialects.Sbt1( Input.VirtualFile("build.sbt", buildSbt) ).parse[Source].get.stats ) // The difference between `dialects.Sbt1(input)` and `parse[Stat]` is that // `parse[Stat]` does not allow multiple top-level statements println(buildSbt.parse[Stat]) // Note that `dialects.Sbt1` does not accept programs with package declarations println( dialects.Sbt1("package library; object Main").parse[Source] ) // ## Construct trees // // Sometimes we need to dynamically construct syntax trees instead of parsing them // from source code. There are two primary ways to construct trees: normal // constructors and quasiquotes. 
// // ### With normal constructors // // Normal tree constructors as plain functions println(Term.Apply(Term.Name("function"), List(Term.Name("argument")))) // Although normal constructors are verbose, they give most flexibility when // constructing trees. // // To learn tree node names you can use `.structure` on existing tree nodes println("function(argument)".parse[Stat].get.structure) // The output of structure is safe to copy-paste into programs. // // Another good way to learn the structure of trees is // [AST Explorer](http://astexplorer.net/#/gist/ec56167ffafb20cbd8d68f24a37043a9/97da19c8212688ceb232708b67228e3839dadc7c). // // ### With quasiquotes // // Quasiquotes are string interpolators that expand at compile-time into normal // constructor calls println(q"function(argument)".structure) // You can write multiline quasiquotes to construct large programs println( q""" object Example extends App { println(42) } """.structure ) // > It's important to keep in mind that quasiquotes expand at compile-time into // > the same program as if you had written normal constructors by hand. This means // > for example that formatting details or comments are not preserved println(q"function ( argument ) // comment") // Quasiquotes can be composed together like normal string interpolators with // dollar splices `$` val left = q"Left()" val right = q"Right()" println(q"$left + $right") // A list of trees can be inserted into a quasiquote with double dots `..$` val arguments = List(q"arg1", q"arg2") println(q"function(..$arguments)") // A curried argument argument lists can be inserted into a quasiquotes with triple // dots `...$` val arguments2 = List(q"arg3", q"arg4") val allArguments = List(arguments, arguments2) println(q"function(...$allArguments)") // A common mistake is to splice an empty type parameter list into type application // nodes . 
Imagine we have a list of type arguments that happens to be empty val typeArguments = List.empty[Type] // If we directly splice the lists into a type application we get a cryptic error // message "invariant failed" q"function[..$typeArguments]()" // The quasiquote above is equivalent to calling the normal constructor // `Type.ApplyType(.., typeArguments)`. Scalameta trees perform strict runtime // validation for invariants such as "type application arguments must be // non-empty". To fix this problem, guard the splice against the length of the list println( (if (typeArguments.isEmpty) q"function()" else q"function[..$typeArguments]()").structure ) // To learn more about quasiquotes, consult the // [quasiquote spec](quasiquotes.html). // // ## Pattern match trees // // Use pattern matching to target interesting tree nodes and deconstruct them. A // core design principle of Scalameta trees is that tree pattern matching is the // dual of tree construction. If you know how to construct a tree, you know how to // de-construct it. // // ### With normal constructors // // Normal constructors work in pattern position the same way they work in regular // term position. "function(arg1, arg2)".parse[Term].get match { case Term.Apply(function, List(arg1, arg2)) => println("1 " + function) println("2 " + arg1) println("3 " + arg2) } // Repeated fields are always `List[T]`, so you can safely deconstruct trees with // the `List(arg1, arg2)` syntax or if you prefer the `arg1 :: arg2 :: Nil` syntax. // There is no need to use `Seq(arg1, arg2)` or `arg1 +: arg2 +: Nil`. // // ### With quasiquotes // // Quasiquotes expand at compile-time and work the same way in pattern position as // in term position. 
Term.Apply( Term.Name("function"), List(Term.Name("arg1"), Term.Name("arg2")) ) match { case q"$function(..$args)" => println("1 " + function) println("2 " + args) } // Use triple dollar splices `...$` to extract curried argument lists "function(arg1, arg2)(arg3, arg4)".parse[Term].get match { case q"$function(...$args)" => println("1 " + function) println("2 " + args) } // > Pattern matching with quasiquotes is generally discouraged because it's easy // > to write patterns that result in unintended match errors. q"final val x = 2" match { case q"val x = 2" => // boom! } // To fix this pattern, we specify that the `final` modifier should be ignored // using `$_` q"final val x = 2" match { case q"$_ val x = 2" => println("OK") } // ## Compare trees for equality // // Scalameta trees use reference equality by default, which may result in // surprising behavior. A common mistake is to use `==` between parsed syntax trees // and quasiquotes "true".parse[Term].get == q"true" // Comparing trees by `==` is the same as comparing them with `eq`. Even identical // quasiquotes produce different references q"true" == q"true" // Equality checks with `==` will only return true when the reference is the same.j { val treeReference = q"true" treeReference == treeReference } // The idiomatic way to compare trees for structural equality is to use pattern // matching q"true" match { case q"true" => println("YAY!") } // If you can't use pattern matching to compare trees by structural equality, you // can use `.structure` q"true".structure == q"true".structure // The `.structure` method produces large strings for large programs, which may // become prohibitively slow. The Scalameta contrib module contains a more // efficient `isEqual` helper method to compare trees structurally. import scala.meta.contrib._ q"true".isEqual(q"true") // ## Traverse trees // // Scalameta includes utilities to recursively visit tree nodes for both simple and // advanced use-cases. 
Simple use-cases have high-level APIs that require minimal // ceremony while advanced use-cases use lower-level APIs that typically involve // more side-effects. // // ### Simple traversals // // Use `.traverse` to visit every tree node and perform a side-effect, similarly to // `.foreach` q"val x = 2".traverse { case node => println(s"${node.productPrefix}: $node") } // Use `.collect` to visit every tree node and collect a value instead of // performing a side-effect q"val x = 2".collect { case node => node.productPrefix -> node.toString } // The methods `.traverse` and `.collect` don't support customizing the recursion. // For more fine-grained control over which tree nodes to visit implement a custom // `Traverser`. // // ### Custom traversals // // Extend `Traverser` if you need to implement a custom tree traversal val traverser = new Traverser { override def apply(tree: Tree): Unit = tree match { case Pat.Var(name) => println(s"stop: $name") case node => println(s"${node.productPrefix}: $node") super.apply(node) } } // The `super.apply(node)` call continues the recursion, so in this case we will // recursively visit all nodes except children of `Pat.Var` nodes. traverser(q"val x = 2") // There is no `.collect` equivalent for custom traversals. To collect a value, // it's recommended to use `List.newBuilder[T]` for the type you are interested in // and append values inside the `apply` method. // // ## Transform trees // // Scalameta includes utilities to transform trees for simple and advanced // use-cases. // // > Transformed trees do not preserve comments and formatting details when // > pretty-printed. Look into [Scalafix](https://scalacenter.github.io/scalafix/) // > if you need to implement fine-grained refactorings that preserve comments and // > formatting details. // // ### Simple transformations // // Use `.transform` to visit every tree node and transform interesting tree nodes. 
println( q"val x = 2".transform { case q"2" => q"42" } ) // The contract of `.transform` is that it will recursively visit all tree nodes, // including the transformed trees. Due to this behavior, a common mistake is to // introduce infinite recursion in `.transform` // + attributes={"classes": ["scala"], "id": ""} q"a + b".transform { case name @ Term.Name("b") => q"function($name)" }.toString // [error] java.lang.StackOverflowError // at scala.meta.transversers.Api$XtensionCollectionLikeUI$transformer$2$.apply(Api.scala:10) // at scala.meta.transversers.Transformer.apply(Transformer.scala:4) // - // The best solution to fix this problem is to implement a custom transformer to // gain fine-grained control over the recursion. // // ### Custom transformations // // Extend `Transformer` if you need to implement a custom tree transformation val transformer = new Transformer { override def apply(tree: Tree): Tree = tree match { case name @ Term.Name("b") => q"function($name)" case node => super.apply(node) } } // By avoiding the call to `super.transform` in the first case, we prevent a stack // overflow. println( transformer(q"a + b") )
notebooks/scalameta/tree-guide.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#import os
import numpy.ma as ma
from netCDF4 import Dataset, date2index, num2date
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import datetime as dt
import xarray as xr
#from math import atan2, log
from palettable import colorbrewer
from copy import copy
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.mlab as mlaba

# Colormap with explicit over/under/bad colours for masked SST fields.
palette = copy(plt.cm.jet)
palette.set_over('r', 1.0)
palette.set_under('g', 1.0)
palette.set_bad(alpha=0.0)

from bs4 import BeautifulSoup
import requests


def listFD(url, ext=''):
    """Scrape an OPeNDAP directory listing and return the links ending in *ext*.

    Granule names begin with the year, hence the startswith('2') filter that
    drops navigation/parent links.
    """
    page = requests.get(url).text
    #print(page)
    soup = BeautifulSoup(page, 'html.parser')
    return [url + node.get('href')
            for node in soup.find_all('a')
            if (node.get('href').endswith(ext)
                and node.get('href').startswith('2'))]
# -

filename_in = 'F:/data/cruise_data/saildrone/baja-2018/daily_files/sd-1002/saildrone-gen_4-baja_2018-EP-sd1002-ALL-1_min-v1.nc'
#dataset=xr.open_dataset(temp_out,decode_times=False)
dataset = xr.open_dataset(filename_in)

dataset

dataset.TIME.min()

# Convert the cruise start/end timestamps to day-of-year numbers.
istart_data = 1
minday = dataset.TIME.min()
maxday = dataset.TIME.max()
x = pd.to_datetime(minday.data)
minday = x.timetuple().tm_yday
x = pd.to_datetime(maxday.data)
maxday = x.timetuple().tm_yday
print(minday, maxday)

print(dt.datetime(2018, 1, 1) + dt.timedelta(minday - 1))

# +
# NOTE(review): indentation below was reconstructed from a collapsed export;
# Basemap, lons_usv, lats_usv and tem_dyr are never defined in this notebook
# (presumably imported/created in an earlier session) — verify before running.
istart_data = 1
for days in range(minday, maxday + 1):
    inew_data = 1
    # Use a 3-day window (day-1, day, day+1) to fill cloud gaps in the swaths.
    for incr_day in range(-1, 2):
        d = dt.datetime(2018, 1, 1) + dt.timedelta(days - 1) + timedelta(days=incr_day)
        day_of_year = d.timetuple().tm_yday
        url = 'https://opendap.jpl.nasa.gov/opendap/OceanTemperature/ghrsst/data/GDS2/L3U/VIIRS_NPP/OSPO/v2.41/' \
              + str(d.year) + '/' + str(day_of_year).zfill(3) + '/'
        ext = 'nc'
        filenames = listFD(url, ext)
        ilen = len(filenames)
        # NOTE(review): starts at index 1, so filenames[0] is never read —
        # confirm this skip is intentional.
        for ic in range(1, ilen):
            file = filenames[ic]
            print(file)
            nc = Dataset(file)
            # Subset to the Baja study region and flip to ascending order.
            sst = nc.variables['sea_surface_temperature'][:, 2500:3250, 2500:3500]
            sst = np.flip(sst, 2)
            cnt = ma.count(sst)
            lat = nc.variables['lat'][2500:3250]
            lat = np.flip(lat, 0)
            lon = nc.variables['lon'][2500:3500]
            nc.close()
            if istart_data == 1:
                sst_new_all = ma.copy(sst)
                istart_data = 0
            if inew_data == 1:
                sst_new = ma.copy(sst)
                inew_data = 0
            if cnt < 5:
                continue
            print(ic, cnt, file)
            # Fill still-empty (below-valid) pixels from this granule, both in
            # the daily composite and the whole-cruise composite.
            mask = sst_new < -10
            sst_new[mask] = sst[mask]
            sst_new_all[mask] = sst[mask]
    # Daily regional map with the Saildrone track overlaid.
    sst_new2 = sst_new[0, :, :]
    sstx = ma.masked_values(sst_new2, -9999.)
    sst3x = np.flip(sstx, 1)
    sst4x = np.flip(sst3x, 0)
    sst5x = ma.swapaxes(sst4x, 0, 1)
    cmap = colorbrewer.get_map('Spectral', 'diverging', 11, reverse=True).mpl_colormap
    fig = plt.figure(figsize=(11.7, 8.3))
    m = Basemap(projection='merc', lat_0=27, lon_0=-120, resolution='l',
                area_thresh=0.1, llcrnrlon=-126.0, llcrnrlat=25.0,
                urcrnrlon=-114.0, urcrnrlat=38.)
    m.bluemarble()
    # m.fillcontinents(color='grey',lake_color='white')
    lat_grid, lon_grid = np.meshgrid(lat, lon)
    x, y = m(lon_grid, lat_grid)
    cs = m.pcolormesh(x, y, sst5x - 273.15, cmap=cmap, vmin=12, vmax=19)
    sub_lons = lons_usv[tem_dyr == day_of_year]
    sub_lats = lats_usv[tem_dyr == day_of_year]
    x, y = m(sub_lons, sub_lats)
    m.plot(x, y, 'w', linewidth=3)
    m.plot(x, y, color='magenta', linewidth=1)
    m.colorbar(location="bottom", label='SST [C]')  # draw colorbar
    plt.show()
    filename_png = 'F:/data/cruise_data/saildrone/baja-2018/figs/cruise_sst_' + str(day_of_year) + '.png'
    fig.savefig(filename_png, transparent=False, format='png')
    print('done')
    # Zoomed-in figure centred on the day's Saildrone positions.
    sub_lons = lons_usv[tem_dyr == day_of_year]
    sub_lats = lats_usv[tem_dyr == day_of_year]
    min_lon = min(sub_lons) - 3
    # NOTE(review): max_lon is derived from min(sub_lons) — probably intended
    # max(sub_lons); preserved as written.
    max_lon = min(sub_lons) + 3
    min_lat = min(sub_lats) - 3
    max_lat = min(sub_lats) + 3
    center_lon = .5 * (max_lon - min_lon) + min_lon
    center_lat = .5 * (max_lat - min_lat) + min_lat
    print(min_lon, max_lon, min_lat, max_lat, center_lon, center_lat)
    fig = plt.figure(figsize=(11.7, 8.3))
    m = Basemap(projection='merc', lat_0=center_lat, lon_0=center_lon, resolution='l',
                area_thresh=0.1, llcrnrlon=min_lon, llcrnrlat=min_lat,
                urcrnrlon=max_lon, urcrnrlat=max_lat)
    m.bluemarble()
    # m.fillcontinents(color='grey',lake_color='white')
    lat_grid, lon_grid = np.meshgrid(lat, lon)
    x, y = m(lon_grid, lat_grid)
    cs = m.pcolormesh(x, y, sst5x - 273.15, cmap=cmap, vmin=12, vmax=19)
    x, y = m(sub_lons, sub_lats)
    m.plot(x, y, 'w', linewidth=3)
    m.plot(x, y, color='magenta', linewidth=1)
    m.colorbar(location="bottom", label='SST [C]')  # draw colorbar
    plt.show()
    filename_png = 'F:/data/cruise_data/saildrone/baja-2018/figs/cruise_sst_' + str(day_of_year) + '_zoom.png'
    fig.savefig(filename_png, transparent=False, format='png')
    print('done')

# Final figure: whole-cruise SST composite with the complete vehicle track.
sst_new2 = sst_new_all[0, :, :]
sstx = ma.masked_values(sst_new2, -9999.)
sst3x = np.flip(sstx, 1)
sst4x = np.flip(sst3x, 0)
sst5x = ma.swapaxes(sst4x, 0, 1)
sub_lons = lons_usv
sub_lats = lats_usv
fig = plt.figure(figsize=(11.7, 8.3))
m = Basemap(projection='merc', lat_0=27, lon_0=-120, resolution='l',
            area_thresh=0.1, llcrnrlon=-126.0, llcrnrlat=25.0,
            urcrnrlon=-114.0, urcrnrlat=38.)
m.bluemarble()
#m.fillcontinents(color='grey',lake_color='white')
lat_grid, lon_grid = np.meshgrid(lat, lon)
x, y = m(lon_grid, lat_grid)
cs = m.pcolormesh(x, y, sst5x - 273.15, cmap=cmap, vmin=12, vmax=19)
x, y = m(sub_lons, sub_lats)
m.plot(x, y, 'w', linewidth=3)
m.plot(x, y, color='magenta', linewidth=1)
m.colorbar(location="bottom", label='SST [C]')  # draw colorbar
plt.show()
filename_png = 'F:/data/cruise_data/saildrone/baja-2018/figs/cruise_all_sst_track.png'
fig.savefig(filename_png, transparent=False, format='png')
print('done')
# -
.ipynb_checkpoints/SST images from Saildrone cruise-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## <NAME>, Data Scientist - Who has fun LEARNING, EXPLORING & GROWING # ## Predictive Analytics Capstone # ## Task 1: Determine Store Formats for Existing Stores # # #### 1. What is the optimal number of store formats? How did you arrive at that number? # # <img src="https://user-images.githubusercontent.com/14093302/29742321-a7db0f68-8aaf-11e7-9059-ec3533bb10cd.png" alt="Figure 1" width="400" height="250" /> # <p style="text-align: center;">Figure 1: K-Means Cluster Assessment Report</p> # # <img src="https://user-images.githubusercontent.com/14093302/29742320-a7d84616-8aaf-11e7-902d-9ef5af7b5d07.png" alt="Figure 2" width="400" height="250" /> # <p style="text-align: center;">Figure 2: Adjusted Rand Indices and Calinski-Harabasz Indices</p> # # Based on the K-means report and the Adjusted Rand and Calinski-Harabasz indices below, the optimal number of store formats is **3**, where both indices registered the highest median value. # # #### 2. How many stores fall into each store format? # # Cluster 1 has 23 stores, cluster 2 has 29 stores while cluster 3 has 33 stores. # # <img src="https://user-images.githubusercontent.com/14093302/29742767-4fd833ba-8ab7-11e7-9b91-436443ee22d0.png" alt="Figure 3" width="600" height="400" /> # <p style="text-align: center;">Figure 3: Cluster Information</p> # # #### 3. Based on the results of the clustering model, what is one way that the clusters differ from one another? # # Cluster 1 stores sold more General Merchandise in terms of percentage while Cluster 2 stores sold more Produce. # # Cluster 1 stores have the highest median total sales when compared to the other 2. Its range of total sales and most of other categorical sales are also the largest. 
Cluster 3 stores are the most similar in terms of sales due to more compact range. # # ![Figure 4](https://user-images.githubusercontent.com/14093302/29742323-a7e3c37e-8aaf-11e7-89f4-bf3aeb4ea1b7.png) # <p style="text-align: center;">Figure 4: Tableau Visualization</p> # # #### 4. Please provide a Tableau visualization (saved as a Tableau Public file) that shows the location of the stores, uses color to show cluster, and size to show total sales. # # <img src="https://user-images.githubusercontent.com/14093302/29742322-a7dc48ba-8aaf-11e7-80c9-b62329cd72f0.png" alt="Figure 5" width="800" height="600" /> # <p style="text-align: center;">Figure 4: Location of the stores</p> # # **Tableau Profile** # https://public.tableau.com/profile/r221609#!/vizhome/Task1_39/Task1 # # ## Task 2: Formats for New Stores # # #### 1. What methodology did you use to predict the best store format for the new stores? Why did you choose that methodology? (Remember to Use a 20% validation sample with Random Seed = 3 to test differences in models.) # # The model comparison report below shows comparison matrix of Decision Tree, Forest Model and Boosted Model. # **Boosted Model** is chosen despite having same accuracy as Forest Model due to higher F1 value. # # ![Figure 6](https://user-images.githubusercontent.com/14093302/29742378-d0f2cbce-8ab0-11e7-8c3d-ec18ffa1425d.png) # <p style="text-align: center;">Figure 6: Model Comparison Report</p> # # #### 2. What are the three most important variables that help explain the relationship between demographic indicators and store formats? Please include a visualization. # # _Ave0to9_, _HVal750KPlus_ and _EdHSGrad_ are the three most important variables. # # <img src="https://user-images.githubusercontent.com/14093302/29742379-d0f46b1e-8ab0-11e7-92f5-edafa326ffea.png" alt="Figure 7" width="400" height="300" /> # <p style="text-align: center;">Figure 7: Variance Importance Plot</p> # # #### 3. What format do each of the 10 new stores fall into? 
Please fill in the table below. # # | Store Number | Segment | # | :----------: | :-----: | # | S0086 | 1 | # | S0087 | 2 | # | S0088 | 3 | # | S0089 | 2 | # | S0090 | 2 | # | S0091 | 1 | # | S0092 | 2 | # | S0093 | 1 | # | S0094 | 2 | # | S0095 | 2 | # <p style="text-align: center;">Table 1: Store Number and Segment</p> # ## Task 3: Predicting Produce Sales # # #### 1. What type of ETS or ARIMA model did you use for each forecast? Use ETS(a,m,n) or ARIMA(ar, i, ma) notation. How did you come to that decision? # # **ETS(M,N,M) with no dampening** is used for ETS model. # # The seasonality shows increasing trend and should be applied multiplicatively. The trend is not clear and nothing should be applied. Its error is irregular and should be applied multiplicatively. # # ![p9 f8](https://user-images.githubusercontent.com/14093302/29742639-7758a020-8ab5-11e7-8437-d4eb95191404.png) # # **ARIMA(0,1,2)(0,1,0)** is used as seasonal difference and seasonal first difference were performed. There is a lag-2. # # ![p9 f9](https://user-images.githubusercontent.com/14093302/29742636-773891fe-8ab5-11e7-85e9-21eb855612ac.png) # ![p9 f10](https://user-images.githubusercontent.com/14093302/29742635-7735a372-8ab5-11e7-86dc-302f96d967ba.png) # ![p9 f11](https://user-images.githubusercontent.com/14093302/29742638-7748231c-8ab5-11e7-9452-0a718c71a6fc.png) # # **ETS model’s accuracy is higher** when compared to ARIMA model. A holdout sample of 6 months data is used. Its RMSE of **1,020,597** is lower than ARIMA’s **1,429,296** while its MASE is **0.45** compared to ARIMA’s **0.53**. ETS also has a higher AIC at **1,283** while ARIMA’s AIC is **859**. 
# # <img src="https://user-images.githubusercontent.com/14093302/29742637-7746ff32-8ab5-11e7-8cde-3f3b9abd8b6c.png" alt="Figure 12" width="400" height="200" /> # <img src="https://user-images.githubusercontent.com/14093302/29742634-77314e08-8ab5-11e7-9649-a1f5f33c43c3.png" alt="Figure 13" width="400" height="200" /> # # The graph and table below show the actual and forecast values with 80% & 95% confidence level intervals. # # <img src="https://user-images.githubusercontent.com/14093302/29742633-772bd6f8-8ab5-11e7-88ff-c7ede725fb02.png" alt="Figure 14" width="400" height="300" /> # <img src="https://user-images.githubusercontent.com/14093302/29742641-77815a7e-8ab5-11e7-9d43-e88b97c65809.png" alt="Figure 15" width="600" height="400" /> # # #### 2. Please provide a Tableau Dashboard (saved as a Tableau Public file) that includes a table and a plot of the three monthly forecasts; one for existing, one for new, and one for all stores. Please name the tab in the Tableau file "Task 3". # # The table below shows the forecast sales for existing stores and new stores. New store sales are obtained by using **ETS(M,N,M)** analysis with all 3 individual clusters to obtain the average sales per store. The average sales values (x3 cluster 1, x6 cluster 2, x1 cluster 3) are added up to produce New Store Sales. 
# # | Year | Month | New Store Sales | Existing Store Sales | # | :--: | :---: | :-------------: | :------------------: | # | 2016 | 1 | 2,626,198 | 21,539,936 | # | 2016 | 2 | 2,529,186 | 20,413,771 | # | 2016 | 3 | 2,940,264 | 24,325,953 | # | 2016 | 4 | 2,774,135 | 22,993,466 | # | 2016 | 5 | 3,165,320 | 26,691,951 | # | 2016 | 6 | 3,203,286 | 26,989,964 | # | 2016 | 7 | 3,244,464 | 26,948,631 | # | 2016 | 8 | 2,871,488 | 24,091,579 | # | 2016 | 9 | 2,552,418 | 20,523,492 | # | 2016 | 10 | 2,482,837 | 20,011,749 | # | 2016 | 11 | 2,597,780 | 21,177,435 | # | 2016 | 12 | 2,591,815 | 20,855,799 | # <p style="text-align: center;">Table 2: Sales for Existing and New Stores</p> # # <img src="https://user-images.githubusercontent.com/14093302/29742640-77752754-8ab5-11e7-96d5-471b40422992.png" alt="Figure 16" width="800" height="600" /> # # The chart above shows the historical and forecast sales for existing stores and new stores over the period from Mar-12 to Dec-16. # # **Tableau Profile** # https://public.tableau.com/profile/r221609#!/vizhome/Task3_53/Task3 # # Alteryx Workflow # # <img src="https://user-images.githubusercontent.com/14093302/29742800-1d5c433a-8ab8-11e7-99ac-c42492ddbc11.png" alt="Workflow 1" width="800" height="600" /> # <p style="text-align: center;">Workflow 1: Workflow for Task 1</p> # # <img src="https://user-images.githubusercontent.com/14093302/29742799-1d5b748c-8ab8-11e7-9732-c926af3cdaf7.png" alt="Workflow 2" width="800" height="600" /> # <p style="text-align: center;">Workflow 2: Workflow for Task 2</p> # # <img src="https://user-images.githubusercontent.com/14093302/29742798-1d5672de-8ab8-11e7-80f2-52005789acac.png" alt="Workflow 3" width="800" height="600" /> # <p style="text-align: center;">Workflow 3: Workflow for Task 3</p>
Predictive Analytics/Project 7 - Segmentation & Clustering/7.1-Combining-Predictive-Techniques.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Implementation of K-means clustering algorithm
# K-means clustering from scratch: numpy for vectorised maths, csv to read
# the input file, matplotlib to visualise the results.

# step1 import libraries
import csv

import numpy as np

# %matplotlib inline


def file_reader():
    """Read k.csv from the working directory and return its rows as lists of strings."""
    # FIX: use a context manager so the file is closed even if csv parsing fails.
    with open(r"k.csv") as file:
        return list(csv.reader(file))


def randomize_centroids(data, centroids, k):
    """Append k centroids drawn at random from `data` to `centroids` and return it."""
    for _ in range(0, k):
        centroids.append(data[np.random.randint(0, len(data), size=1)].flatten().tolist())
    return centroids


def has_converged(centroids, old_centroids, iterations):
    """Return True when the centroids stopped moving or the iteration cap is hit."""
    MAX_ITERATIONS = 1000  # hard stop so an oscillating assignment cannot loop forever
    if iterations > MAX_ITERATIONS:
        return True
    return old_centroids == centroids


def euclidean_dist(data, centroids, clusters):
    """Assign every point in `data` to the cluster of its nearest centroid.

    `clusters` is a list of k lists (one per centroid); the filled list is
    returned.  Empty clusters are re-seeded with a random data point so that
    no cluster mean is undefined.
    """
    for instance in data:
        # Index of the centroid with the smallest Euclidean distance.
        mu_index = min([(i, np.linalg.norm(instance - centroid))
                        for i, centroid in enumerate(centroids)],
                       key=lambda t: t[1])[0]
        # FIX: the original wrapped this in `except KeyError`, but `clusters`
        # is a list (lists raise IndexError) and is always pre-sized to k with
        # mu_index < k, so the guard was dead code and its fallback broken.
        clusters[mu_index].append(instance)

    # If any cluster is empty, assign one random data point so there are no
    # empty clusters and no 0-element means.
    for cluster in clusters:
        if not cluster:
            cluster.append(data[np.random.randint(0, len(data), size=1)].flatten().tolist())
    return clusters


def kmeans(data, k):
    """Cluster `data` (an n x d array) into k groups.

    Prints a summary of the run and returns the final centroids as a
    (k, d) float array.
    """
    centroids = randomize_centroids(data, [], k)
    old_centroids = [[] for _ in range(k)]
    iterations = 0
    while not has_converged(centroids, old_centroids, iterations):
        iterations += 1
        # Assign data points to the nearest current centroid.
        clusters = euclidean_dist(data, centroids, [[] for _ in range(k)])
        # Recalculate each centroid as the mean of its assigned points.
        for index, cluster in enumerate(clusters):
            old_centroids[index] = centroids[index]
            centroids[index] = np.mean(cluster, axis=0).tolist()

    centroids = np.asarray(centroids, dtype=float)
    print("The total number of data points is: " + str(len(data)))
    print("The total number of iterations necessary is: " + str(iterations))
    print("The means of each cluster are: " + str(centroids))
    print("\n The clusters are as follows:\n")
    for cluster in clusters:
        print(cluster)
    return centroids


# Script section guarded so the functions above can be imported without
# triggering file I/O, input() or plotting; inside a notebook __name__ is
# "__main__", so interactive behaviour is unchanged.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    # read in our file and find out how many clusters are needed :)
    data = np.asarray(file_reader(), dtype=float)
    print(data.shape)
    k = int(input('enter number of clusters: '))  # number of clusters

    # run the method
    centroids = kmeans(data, k)

    # plot the results: data points plus the cluster centres as red crosses
    # +
    fig = plt.figure()
    plt.scatter(data[:, 0], data[:, 1])
    plt.scatter(centroids[:, 0], centroids[:, 1], s=100, marker='x', c='r')
    plt.show()
K-Means clustering.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Jupyter notebook to explore AMR++ results
#
# Explores results from the Noyes laboratory's research in collaboration
# with the National Pork Board.  Relies on "staging_script.R", which has to
# be filled out manually before this notebook is run.

# Sourcing staging_script.R loads the R libraries, reads the AMR++ results,
# writes exploratory figures for the a-priori variables, and creates the R
# objects used throughout the rest of this notebook.
source('staging_script.R')

# Recently-added script that builds the phyloseq objects (still experimental,
# so please excuse any errors).
source('scripts/load_phyloseq_data.R')

# ## Exploratory figures
#
# Exploratory figures are written to the "graphs" directory.  Objects
# available for custom analysis, all holding counts in "melted" format:
#   * metadata / microbiome_metadata -- sample metadata
#   * amr_melted_analytic / amr_melted_raw_analytic -- resistome counts
#     ("RequiresSNPConfirmation" counts removed)
#   * microbiome_melted_analytic -- microbiome counts
#   * AMR_analytic_data / microbiome_analytic_data -- metagenomeSeq objects
#   * amr.ps / kraken_microbiome.ps -- experimental phyloseq objects

# +
print("kraken phyloseq object")
kraken_microbiome.ps

print("AMR phyloseq object")
amr.ps
# +
### Start of code for figures: key each count table and both metadata tables
### on ID, then attach the metadata via data.table keyed joins.
setkey(amr_melted_raw_analytic, ID)
setkey(amr_melted_analytic, ID)
setkey(microbiome_melted_analytic, ID)

# Set keys for both metadata files
setkey(metadata, ID)
setkey(microbiome_metadata, ID)

microbiome_melted_analytic <- microbiome_melted_analytic[microbiome_metadata]
amr_melted_raw_analytic <- amr_melted_raw_analytic[metadata]
amr_melted_analytic <- amr_melted_analytic[metadata]
# -

head(amr_melted_analytic)

# # Example of how to make figures using phyloseq

# +
# Agglomerate to phylum level, then convert counts to per-sample relative
# abundances before melting to a long data frame for ggplot.
phylum_kraken.ps <- tax_glom(kraken_microbiome.ps, "phylum")
phylum_kraken.ps.rel <- transform_sample_counts(phylum_kraken.ps, function(x) x / sum(x))
phylum_kraken.ps.rel.melt <- psmelt(phylum_kraken.ps.rel)

# Mean relative abundance per phylum, used below to pool the rare phyla.
phylum_kraken.ps.rel.melt <- phylum_kraken.ps.rel.melt %>%
  group_by(phylum) %>%
  mutate(mean_phylum_rel_abundance = mean(Abundance))

phylum_kraken.ps.rel.melt$phylum <- as.character(phylum_kraken.ps.rel.melt$phylum)
phylum_kraken.ps.rel.melt$mean_phylum_rel_abundance <- as.numeric(phylum_kraken.ps.rel.melt$mean_phylum_rel_abundance)

# Collapse phyla whose mean relative abundance is under 0.5% into one label.
phylum_kraken.ps.rel.melt$phylum[phylum_kraken.ps.rel.melt$mean_phylum_rel_abundance < 0.005] <- "Low abundance phyla (< 0.5%)"
# -

##### Plot phyla relative abundances
ggplot(phylum_kraken.ps.rel.melt, aes(x = Sample, y = Abundance, fill = phylum)) +
  geom_bar(stat = "identity") +
  facet_wrap(~TreatmentGroup, scales = "free_x") +
  labs(x = "Treatment group", y = "Relative abundance", col = "Phylum",
       title = "Relative abundance of phyla by treatment group") +
  theme(axis.text.x = element_blank(),
        axis.title.x = element_text(size = 10),
        axis.text.y = element_text(size = 10),
        axis.title.y = element_text(size = 10),
        panel.background = element_rect(fill = "white")) +
  scale_fill_brewer(palette="Set3")
Jupyter_metagenomic_analysis.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # **By: <NAME>** # # # # The World Happiness Report: *An Exploratory Data Analysis Project* # ## Introduction # As a global civilization, we are awakening to the realization of just how much happiness in our day-to-day lives really matters. The positive effects of a happy mind not only propagate throughout the various dimensions of a person's life, but they furthermore ripple throughout society in a stone-in-the-pond fashion. Corporations, governments and organizations are becoming increasingly aware that its employees, citizens and members are better able to contribute to their respective tasks when minimum happiness levels are met, and that it is therefore in their best interests to contribute to the well-beings of these people. As a result, happiness metrics are becoming increasingly utilized in making policy decisions - a win for everyone. # # In 2012 the first iteration of the World Happiness Report was produced, a survey that looks at nationally representative happiness levels across countries as a measure of national progress. It was an impressive feat that required the contributions of leading experts in fields such as statistics, data science, economics, health, psychology and public policy, among many others. To evaluate happiness levels, the report used surveys provided by the Gallup World Poll, the gold standard of worldwide polling. More specifically, happiness scores are based on the Cantril Ladder, a subjective metric that asks each participant to evaluate his or her quality of life on a scale from 0 to 10, with 10 being the dream life. 
The poll furthermore includes geographical data and data for six characteristics that are hypothesized to be correlated with national happiness levels; it is these features and how they relate to happiness that will be the main focus of this project. Since 2012 a new updated report has been published each year excluding 2014, and in this project we will look specifically at the years 2015-2017. # # **Credits:** The data from the World Happiness Report was borrowed from a reviewed Kaggle Kernel of the same name. Although this dataset is relatively clean in isolation, once merged with the second dataset of this project (world map data), further cleaning will be necessary. # # **THE SIX CHARACTERISTICS** # # The values for these features are to be understood as ratings for each of the characteristics, not the true values for the characteristics, with higher values corresponding to countries that are more favorable in those characteristics. # # 1) GDP per capita <br /> # 2) Family quality of citizens <br /> # 3) Health of citizens <br /> # 4) Freedom of citizens <br /> # 5) Generosity of citizens <br /> # 6) Trust in government of citizens <br /> # # **THE TARGET** # # The mean happiness score of each country # # **THE CONVENTIONS** # # • Dystopia is a hypothetical nation where happiness levels are lower than they are in any other country: 2.10, 2.33 and 1.85 out of 10 in 2015, 2016 and 2017 respectively. The purpose of defining Dystopia is to provide a baseline against which the other nations can be compared. <br /> # • Dystopia is defined to have the minimum value for each of the characteristics, which by convention is 0. <br /> # • Each characteristic represents how much more value that characteristic contributes to the happiness score (0-10) of a nation than it does for Dystopia. The values are always in the interval [0,2]. 
<br /> # • An estimate for the happiness score of a nation in a given year is found by summing the Dystopia happiness score of that year with the six characteristics of the nation in that year. # # **THE GOAL** # # This is an EDA project. However, the data is split into a training set and a test set and suggestions for what changes could be made in order to use the analysis in a machine learning problem are provided along the way. # ## Exploratory Data Analysis # # While exploring the data, appropriate changes will be made as the needs for them are found. After we have cleaned the data as best as possible we will then move on to descriptive analytics to further analyze the data. # # ### EDA Part 1: *Data Preparation* # # **Introducing The World Happiness Report Data** # # We will begin by loading the relevant libraries and the three World Happiness Report datasets, one for each year in 2015-2017. <br \> (For organizational purposes we will now also load the world map dataset that will soon be used.) # + #Load the libraries library(ggplot2) library(ggmap) library(maps) library(FSelector) #For information.gain() library(gridExtra) #For grid.arrange() library(ggthemes) #For theme_few() #load the data whr2015=read.csv("2015.csv") whr2016=read.csv("2016.csv") whr2017=read.csv("2017.csv") map = map_data("world") # - # To get an idea of what data we have access to we can inspect each of the datasets. #Inspect the data #2015 data str(whr2015) #2016 data str(whr2016) #2017 data str(whr2017) # In looking at the above datasets, notice that *whr2015*, *whr2016* and *whr2017* do not have a year variable, which we will add so that the observations can be distinguished from one another when these datasets are joined. Furthermore, the feature sets are not the same for these datasets: *Standard.Error* is unique to *whr2015*, *Lower.Confidence.Interval* and *Upper.Confidence.Interval* are unique to *whr2016*, and *Whisker.high* and *Whisker.low* are unique to *whr2017*. 
We will remove these variables so that these datasets can be more readily joined, and because they are not relevant for the purposes of this project. # + #Add year feature whr2015$year=2015 whr2016$year=2016 whr2017$year=2017 #Eliminate irrelevant features whr2015$Standard.Error=NULL whr2016$Lower.Confidence.Interval=NULL whr2016$Upper.Confidence.Interval=NULL whr2017$Whisker.high=NULL whr2017$Whisker.low=NULL # - # Notice furthermore that *whr2017* does not have the *Region* variable. Let's temporarily drop *Region* from *whr2015* and *whr2016* so that we can combine *whr2015*, *whr2016* and *whr2017* into a single dataset called *whr*. # + region2015=whr2015$Region #Save whr2015$Region into a variable so that we can add this information back in in a moment regionLevels=levels(whr2015$Region) #Save the region levels as names because they will be temporarily be converted to numerical form, which we will want to change back whr2015$Region=NULL #Temporarily drop whr2015$Region region2016=whr2016$Region #Save whr2016$Region into a variable so that we can add this information back in in a moment whr2016$Region=NULL #Temporarily drop whr2016$Region whr = rbind(whr2015,whr2016,whr2017) #We can bind the rows because these three datasets now have the same feature sets # - # Next we will modify *whr* for aesthetic reasons, add the 2015 and 2016 region data back in, and finally impute the 2017 *region* data using the 2015 *region* data. 
# + #Modify the presentation of whr colnames(whr) = c("country", "rank", "h.score", "gdp", "family", "health", "freedom", "govt", "generosity", "residual", "year") #Give the features more reader-friendly names whr=whr[,c(11,1:3,10,4:9)] #Rearrange the features in a more reader-friendly order #Add the 2015 and 2016 region data back in whr$region=rep(999,470) #Recreate the region variable in whr; the 470 values between 2015, 2016 and 2017 are initialized as 999 but will be changed in a moment whr$region[1:315]=c(region2015,region2016) #For the 315 values between 2015 and 2016 replace them with the original region values stored above #Impute the 2017 region data (observations 316 to 470 in whr) for (i in 316:470) {country=whr$country[i]; j=which(whr$country==country)[1]; whr$region[i]=whr$region[j]} # - # When we inspect *region* we see that some of the values corresponding to the observations in *whr* were not updated from their initialization values of 999. which(whr$region==999) # Why might that be? Let's manually inspect why the 2017 regions for these points were not added. # + #These two observations occurs in 2017 whr$country[348] whr$country[386] whr2015$Country[38] whr2015$Country[72] # - # We see that these country names are not exactly the same in *whr2015*, and *whr2017*, hence the two 999 values that remained. # We will first convert the names to 'Taiwan' and 'Hong Kong'. whr$country[c(348,386)] = c("Taiwan","Hong Kong") # Next we will **impute** the corresponding regions using the regions for Taiwan and Hong Kong in *whr2015*. #We will momentarily read the regions as numbers, which is the current form of the region variable in whr as.numeric(region2015[c(38,72)]) whr$region[c(348,386)]=3 # Now we can convert the regions back from numbers to their original names. 
# + whr$region=as.factor(whr$region) #Before this command this variable is numerical levels(whr$region) = regionLevels #Convert the levels from numbers back to their original names ("Western Europe", etc.) whr$region = factor(whr$region) #Get rid of the extra (now-empty) level that corresponded to the 999 values # - # Furthermore, in looking through the region levels we see that both 'Somaliland region' and 'Somaliland Region' occur in *whr*. whr$country[91] whr$country[255] # It is fair to assume that these are the same region, and so only one version of the name should exist. We will keep the 'Somaliland Region' version. whr$country[91]="Somaliland Region" # #### Introducing The World Map Data # # Now that we've done some cleaning on the main dataset of this project it's time to introduce the second dataset that will be used. Eventually we will merge these two datasets so that we can obtain a visualization of national happiness levels using a world map. #World map data str(map) # Notice that the country variable in map is called *region* and is in string format. To be consistent with *whr* so that we can eventually merge the two, we will fix this. colnames(map)[5]="country" map$country = as.factor(map$country) map$subregion=NULL #We don't need this variable for this problem # Let's take a look at which countries occur in *whr* but not in *map*. 
which(!(levels(whr$country)%in%levels(map$country))) levels(whr$country)[c(32,33,57,105,109,127,143,150,151,163,165,166)] # These country levels from *whr* fail to appear in *map* for two reasons: # # i) *They are written differently between the two datasets* <br /> # ii) *They fail to appear in map altogether* # # ###### The countries in whr that are written differently in map w1= which(levels(whr$country)=="Congo (Brazzaville)") w2= which(levels(whr$country)=="Palestinian Territories") w3= which(levels(whr$country)=="United Kingdom") w4= which(levels(whr$country)=="United States") w5= which(levels(whr$country)=="Taiwan Province of China") c(w1,w2,w3,w4,w5) # In looking through *map* we see that the above countries are written as seen below. We will update the country names from *whr* to match those in *map*. #Update level names levels(whr$country)[32]="Democratic Republic of the Congo" #Also known as Congo (Brazzaville) levels(whr$country)[109]="Palestine" levels(whr$country)[150]="UK" levels(whr$country)[151]="USA" levels(whr$country)[166]="Taiwan" # ###### The countries in whr that fail to appear in map altogether # We have already taken care of the 'Hong Kong S.A.R., China' and 'Taiwan Province of China' levels; they are now empty levels in *whr* that require no further attention. Now we must handle the levels in *whr* that do not appear in *map*: 'Congo (Kinshasa)', 'North Cyprus', 'Hong Kong', 'Somaliland Region' and 'Trinidad and Tobago'. Fortunately this is a small list, and these countries are geographically small, so we can remove them without having much of a negative effect on the aesthetics of the world map that is to come. # # For the specific case of 'Trinidad and Tobago', note that the *map* dataset treats these as two different countries, 'Trinidad' and 'Tobago'. We will remove 'Trinidad and Tobago' from *whr* but we will impute values for 'Trinidad' and 'Tobago' in *map* using the 'Trinidad and Tobago' values from *whr*. 
#Identifying the observations that need to be removed
which(whr$country=="Congo (Kinshasa)")
which(whr$country=="North Cyprus")
which(whr$country=="Hong Kong")
which(whr$country=="Somaliland Region")
which(whr$country=="Trinidad and Tobago")

#Before we remove these observations we extract the mean happiness rating
#Identifying the happiness score of Trinidad and Tobago in whr for 2015, 2016 and 2017
c(whr$h.score[41], whr$h.score[201], whr$h.score[353])

# That was unexpected: the happiness score for Trinidad and Tobago in *whr* was the same in 2015, 2016 and 2017. Let's take a quick look at two other countries that are geographically close to Trinidad and Tobago: Venezuela and Suriname.

which(whr$country=="Venezuela") #The observations in 2015-2017 corresponding to Venezuela
which(whr$country=="Suriname") #The observations in 2015-2016 corresponding to Suriname; 2017 is missing
round(c(whr$h.score[23], whr$h.score[202], whr$h.score[397]),2) #Venezuela's scores
round(c(whr$h.score[40], whr$h.score[198]),2) #Suriname's scores

# We see that the happiness levels in Venezuela and Suriname are comparable to those in Trinidad and Tobago, which is encouraging. Furthermore, we see that the 2015 and 2016 happiness scores in Suriname are the same. It is likely that the organizers of this dataset imputed some happiness ratings for example by taking a mean or by duplicating one year's value for the other year(s).

# Now we can remove the unwanted observations.

#NOTE(review): the row numbers below are hard-coded (collected from the which() calls
#above); re-running this cell after whr has changed will delete the wrong rows.
whr=whr[-c(120,283,441,66,220,376,41,201,353,72,233,386,91,255),] #Remove the undesired rows
whr$country = factor(whr$country) #Remove empty factor levels from whr$country
whr = whr[,c(1:2,12,3:11)] #Rearrange columns of whr to make it more reader-friendly

# **Merge The Data**

# We will perform an SQL-style left-join between *map* and *whr*; that is, we will merge the datasets in such a way that all of the information from *map* is retained. We do this so that we can make a world map visualization without holes in it corresponding to missing countries.
#SQL-style left join: keep every row of map, attach whr columns where the country matches.
#NOTE(review): the result is named 'merge', which shadows base::merge(); the call still
#resolves because R looks for a function in call position, but a different name would be clearer.
merge = merge(map,whr,by="country", all.x=TRUE)
merge = merge[order(merge$order),] #merge must be ordered by the order variable so that the map will be drawn appropriately

# Let's **impute** the happiness score for both *Trinidad* and *Tobago* using 6.168.

#6.168 is the Trinidad and Tobago score extracted before those rows were removed from whr
merge$h.score[merge$country=="Trinidad"] = 6.168
merge$h.score[merge$country=="Tobago"] = 6.168

# Let's quickly verify that there are no missing countries in this new dataset.

#NOTE(review): with all.x=TRUE the country key itself can never be NA here, so this check
#is vacuous; sum(is.na(merge$h.score)) would be the more informative completeness check.
sum(is.na(merge$country))

# This means that our *whr* and *map* datasets have been successfully joined in such a way that no missing values for countries occur in *merge*, as desired. This dataset will be used in the bivariate analysis section to create a world map that colors countries according to their happiness levels.

# **Split The Data: *train* And *test***

# For now let's return to the *whr* dataset so that we can group the data by year. A training set will be created by combining the 2015 and 2016 data, while the 2017 data will be preserved for the test set.

# +
whr2015=subset(whr,year==2015)
whr2016=subset(whr,year==2016)
whr2017=subset(whr,year==2017)
train=rbind(whr2015,whr2016) #2015 + 2016 observations form the training set
test=whr2017 #2017 observations are held out as the test set
# -

# **Train**
#
# Before moving onto the second part of the EDA section let's quickly remind ourselves of how the training set currently looks.

str(train)

# In the upcoming sections of this project we can refer to the output above any time we are dealing with train. This concludes the first part of the exploratory data analysis section, and we are now ready to move on to the second part.
# ### EDA Part 2: *Descriptive Analytics*
#
# This part can be divided into three sections that will be explored independently:
#
# i) **Univariate Analysis** <br />
# ii) **Bivariate Analysis** <br />
# iii) **Multivariate Analysis**
#
# Each section will furthermore be divided into two subsections to accommodate the fact that there are two types of features:
#
# i) ***Region*** (categorical) <br />
# ii) ***The Six Characteristics*** (numerical)

# **Univariate Analysis: *Region***
#
# Let's see how many observations there are in each region.

table(train$region) #Observation counts per region (2015 and 2016 combined)

# *Note: this table includes data from both 2015 and 2016, hence why there are for example 4 observations in 'Australia and New Zealand' and not 2.
#
# Unsurprisingly, the number of countries in each region varies widely. It will be of value to keep this detail in mind in the bivariate section, where boxplots representing national happiness levels by region will be displayed.

# **Univariate Analysis: *The Six Characteristics***
#
# Let's first plot these characteristics to get an idea of the distributions of the features, as well as to see whether or not there is some kind of ordering among each characteristic.

# +
par(mfrow=c(3,2)) #This allows 6 plots in base R to be plotted simultaneously in a 3x2 manner
plot(train$gdp, xlab="Observation Number", ylab="GDP Per Capita", main="GDP Observations", col="brown")
abline(v=157.5) #The first 157 rows in train are for year 2015; the next 158 rows are for year 2016. We add a line to each plot to separate these points
plot(train$health, xlab="Observation Number", ylab="Health", main="Health Observations", col="brown")
abline(v=157.5)
plot(train$family, xlab="Observation Number", ylab="Family", main="Family Observations", col="brown")
abline(v=157.5)
plot(train$freedom, xlab="Observation Number", ylab="Freedom", main="Freedom Observations", col="brown")
abline(v=157.5)
plot(train$govt, xlab="Observation Number", ylab="Trust In Government", main="Trust In Government Observations", col="brown")
abline(v=157.5)
plot(train$generosity, xlab="Observation Number", ylab="Generosity", main="Generosity Observations", col="brown")
abline(v=157.5)
# -

# Notice that each plot can be divided into two similar-looking plots that have been separated by a vertical line. This is because *train* is ordered by year, the data in each year is ordered by happiness scores, and the data doesn't change much from one year to the next. We might consider randomizing this order when applying machine learning.
#
# **Summary statistics** for the six characteristics can be computed.

#NOTE(review): the object 'summary' shadows base::summary(); the summary(...) calls below
#still resolve to the function, but a different object name would be safer.
summary = matrix(rep(0,42), nrow=6) #6x7 matrix of 0's for initialization; 1 row for each characteristic, 1 column for each summary statistic
summary[1,] = c(as.numeric(summary(train$gdp)),sd(train$gdp)) #The first row of summary are the summary statistics of gdp
summary[2,] = c(as.numeric(summary(train$health)),sd(train$health))
summary[3,] = c(as.numeric(summary(train$family)),sd(train$family))
summary[4,] = c(as.numeric(summary(train$freedom)),sd(train$freedom))
summary[5,] = c(as.numeric(summary(train$govt)),sd(train$govt))
summary[6,] = c(as.numeric(summary(train$generosity)),sd(train$generosity))
summary = as.data.frame(summary)
colnames(summary) = c("Min", "Q1", "Q2", "Mean", "Q3", "Max", "SD") #Rename the columns to a more reader-friendly format
summary = summary[,c(1,6,2,3,5,4,7)] #Display the 7 stats in a more reader-friendly order.
rownames(summary) = c("GDP", "Health", "Family", "Freedom", "Government", "Generosity") #Rename the rows to a more reader-friendly format
summary = round(summary,2)
summary

# At a quick glance it might look suspicious that the minimum value for each of the six characteristics is 0. However, it is important to remember that these values correspond to the values in Dystopia, which by convention were chosen to be 0, so there is no problem here.
#
# Let's now take a look at the **histogram** for each of the six characteristics.

# +
par(mfrow=c(3,2)) #3x2 grid of histograms, one per characteristic
hist(train$gdp, xlab=NULL, main="GDP Per Capita (2015-2016)", col="brown")
hist(train$health, xlab=NULL, main="Health (2015-2016)", col="brown")
hist(train$family, xlab=NULL, main="Family (2015-2016)", col="brown")
hist(train$freedom, xlab=NULL, main="Freedom (2015-2016)", col="brown")
hist(train$govt, xlab=NULL, main="Trust In Government (2015-2016)", col="brown")
hist(train$generosity, xlab=NULL,main="Generosity (2015-2016)", col="brown")
# -

# We see that the features are somewhat Gaussian, although most are skewed. For this reason it would be a good idea when applying machine learning to apply Box-Cox transformations to these variables.

# **Bivariate Analysis: *Happiness Vs. Region***
#
# To get a general idea of how happiness looks in each region we can start by creating **boxplots**.

ggplot(train, aes(y=h.score, x=region, fill=region)) + geom_boxplot() + labs(title="Happiness Boxplots By Region (2015-2016)", y="Happiness Score") + theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank()) + scale_fill_discrete(name="Region") #Boxplot of happiness scores grouped by region

# People in Australia and New Zealand, North America and Western Europe report having the highest happiness levels. These regions correspond to the most developed parts of the world, so there is not much surprise here.
# On the other hand people in Sub-Saharan Africa and Southern Asia are the least happy people, and these regions correspond to the least developed parts of the world, so again - there is not much surprise here. Returning to the univariate section, some of these boxplots are constructed from small sample sizes (e.g. 'Australia and New Zealand' has a sample size of only 4). Therefore, if building predictive models by geography it would likely be a better idea to make predictions based on individual countries in such regions rather than on their geographical regions themselves.
#
# To better visualize how happiness scores vary from country to country we are now ready to look at a **world map**.

#Choropleth built from the merged map+whr data; geom_polygon draws each country outline,
#filled on a yellow-to-red gradient by happiness score. Axis decorations are stripped
#since longitude/latitude ticks add nothing here.
ggplot(merge,aes(x=long,y=lat,group=group,fill=h.score)) + geom_polygon() + scale_fill_gradient(high="red", low="yellow", name="Happiness Score") + theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank(), axis.title.y=element_blank(), axis.text.y=element_blank(), axis.ticks.y=element_blank()) + labs(title="World Happiness By Country")

# **Grey areas correspond to countries with missing data from merge. Darker colored areas correspond to happier countries.*
#
# Once again we can see that the more developed regions of the world - North America in the top left, Australia and New Zealand in the bottom right and Western Europe in the top middle - are darker in the above heat map. Meanwhile endemic poverty in Africa and Southern Asia result in their corresponding parts of the map being of a lighter color.

# **Bivariate Analysis: *Happiness Vs. The Characteristics***
#
# We will start by creating the **line of best fit** between each characteristic and the happiness score so that we can extract the equation of the line of best fit and R-squared value.

# +
#Create the lines of best fit and keep their summaries.
#Fit a univariate linear regression of happiness on each characteristic and keep the summaries.
lm.gdp.summary = summary(lm(h.score~gdp, data=train))
lm.health.summary = summary(lm(h.score~health, data=train))
lm.family.summary = summary(lm(h.score~family, data=train))
lm.freedom.summary = summary(lm(h.score~freedom, data=train))
lm.govt.summary = summary(lm(h.score~govt, data=train))
lm.generosity.summary = summary(lm(h.score~generosity, data=train))

#Extract the coefficients for the lines of best fit
#First the constant coefficient (the intercept, row 1 of the coefficient table)
b0 = lm.gdp.summary$coefficients[1,1]
b0[2] = lm.health.summary$coefficients[1,1]
b0[3] = lm.family.summary$coefficients[1,1]
b0[4] = lm.freedom.summary$coefficients[1,1]
b0[5] = lm.govt.summary$coefficients[1,1]
b0[6] = lm.generosity.summary$coefficients[1,1]
b0 = round(b0,2)

#Second the feature coefficient (the slope, row 2 of the coefficient table)
b1 = lm.gdp.summary$coefficients[2,1]
b1[2] = lm.health.summary$coefficients[2,1]
b1[3] = lm.family.summary$coefficients[2,1]
b1[4] = lm.freedom.summary$coefficients[2,1]
b1[5] = lm.govt.summary$coefficients[2,1]
b1[6] = lm.generosity.summary$coefficients[2,1]
b1 = round(b1,2)

#Write the equations as strings so they can be added to ggplot.
#BUGFIX: the slope term previously had no "x" appended, so the labels rendered as
#e.g. "y=3.2+2.18" instead of the intended line equation "y=3.2+2.18x".
eqn = paste0("y=",b0[1],"+",b1[1],"x")
eqn[2] = paste0("y=",b0[2],"+",b1[2],"x")
eqn[3] = paste0("y=",b0[3],"+",b1[3],"x")
eqn[4] = paste0("y=",b0[4],"+",b1[4],"x")
eqn[5] = paste0("y=",b0[5],"+",b1[5],"x")
eqn[6] = paste0("y=",b0[6],"+",b1[6],"x")

#Extract the R-squared values
R2 = lm.gdp.summary$r.squared
R2[2] = lm.health.summary$r.squared
R2[3] = lm.family.summary$r.squared
R2[4] = lm.freedom.summary$r.squared
R2[5] = lm.govt.summary$r.squared
R2[6] = lm.generosity.summary$r.squared
R2 = round(R2,3)
# -

# Let's furthermore add in **feature importances** according to entropy, which capture how relevant each feature is in predicting the target variable.
# +
#Entropy-based feature importance (FSelector::information.gain) of each predictor for h.score
weights=information.gain(h.score~gdp+family+health+freedom+govt+generosity+region,data=train)
weights=(round(weights,4))
weights$Feature = c("GDP Per Capita", "Family Quality", "Health Quality", "Freedom", "Trust in Government", "Generosity", "Region")
colnames(weights)[1]="Importance"
weights = weights[,c(2,1)] #Put the Feature column first
weights = weights[order(-weights$Importance),] #Order the rows by decreasing order of importance of the features.
rownames(weights)=NULL
weights
# -

# We have already seen that a country's geographical region has a large impact on quality of life. It comes as no shock that GDP and health are also among the most important predictors of happiness; a high GDP suggests that basic financial needs are met for a large percentage of citizens, and good health allows people to better enjoy the present while being able to more optimistically plan for the future.
#
# Now we are ready to build the **scatterplots** that compare the six characteristics to happiness ratings.

# +
#Create the plots with the equation of the line of best fit, R-squared and feature importance displayed.
#NOTE(review): the "Importance: ..." labels below are hard-coded copies of the weights
#table computed above; they will silently go stale if the data or the importance
#computation changes -- consider building them from weights programmatically.
plot.gdp = ggplot(train, aes(x=gdp,y=h.score)) + geom_point() + stat_smooth(method="lm") + annotate("text", label=eqn[1], x=1.68, y=4, size=3) + annotate("text", label=paste("R-squared:",R2[1]), x=1.58, y=3.5, size=3) + annotate("text", label="Importance: 0.487", x=1.58, y=4.5, size=3) + scale_x_continuous(lim=c(0,2)) + labs(x="GDP Per Capita", y="Happiness Score", title="Happiness Vs. GDP") + theme(axis.title=element_text(size=7))
plot.health = ggplot(train, aes(x=health,y=h.score)) + geom_point() + stat_smooth(method="lm") + annotate("text", label=eqn[2], x=1.28, y=3.75, size=3) + annotate("text", label=paste("R-squared:",R2[2]), x=1.19, y=3.25, size=3) + annotate("text", label="Importance: 0.464", x=1.19, y=4.25, size=3) + scale_x_continuous(lim=c(0,1.5)) + labs(x="Health", y="Happiness Score", title="Happiness Vs. Health") + theme(axis.title=element_text(size=7))
plot.family = ggplot(train, aes(x=family,y=h.score)) + geom_point() + stat_smooth(method="lm") + annotate("text", label=eqn[3], x=1.63, y=3.5, size=3) + annotate("text", label=paste("R-squared:",R2[3]), x=1.54, y=3, size=3) + annotate("text",label="Importance: 0.211", x=1.53, y=4, size=3) + scale_x_continuous(lim=c(0,2)) + labs(x="Family", y="Happiness Score", title="Happiness Vs. Family") + theme(axis.title=element_text(size=7))
plot.freedom = ggplot(train, aes(x=freedom,y=h.score)) + geom_point() + stat_smooth(method="lm") + annotate("text", label=eqn[4], x=1.06, y=3.75, size=3) + annotate("text",label=paste("R-squared:",R2[4]), x=0.995, y=3.25, size=3) + annotate("text", label="Importance: 0.164", x=1, y=4.25, size=3) + scale_x_continuous(lim=c(0,1.25)) + labs(x="Freedom", y="Happiness Score", title="Happiness Vs. Freedom") + theme(axis.title=element_text(size=7))
plot.govt = ggplot(train, aes(x=govt,y=h.score)) + geom_point() + stat_smooth(method="lm") + annotate("text", label=eqn[5], x=0.89, y=3.5, size=3) + annotate("text", label=paste("R-squared:",R2[5]), x=0.84, y=3, size=3) + annotate("text", label="Importance: 0.138", x=0.83, y=4, size=3) + scale_x_continuous(lim=c(0,1.22)) + labs(x="Trust in Government", y="Happiness Score", title="Happiness Vs. Trust In Government") + theme(axis.title=element_text(size=7))
plot.generosity = ggplot(train, aes(x=generosity,y=h.score)) + geom_point() + stat_smooth(method="lm") + annotate("text", label=eqn[6], x=0.85, y=3.5, size=3) + annotate("text", label=paste("R-squared:",R2[6]), x=0.81, y=3, size=3) + annotate("text", label="Importance: 0.064", x=0.8, y=4, size=3) + scale_x_continuous(lim=c(0,1.05)) + labs(x="Generosity", y="Happiness Score", title="Happiness Vs. Generosity") + theme(axis.title=element_text(size=7))
grid.arrange(plot.gdp, plot.health, plot.family, plot.freedom, plot.govt, plot.generosity)
# -

# We see the first 3 plots are relatively linear, the fourth is somewhat linear and the fifth and sixth are not linear at all. It therefore appears that GDP, health and family would be useful predictors for happiness while faith in government and the generosity of a nation are of negligible consequence. Freedom also positively correlates with happiness, but it is far from being the top priority in determining overall life quality.

# Now let's create a **correlation matrix** for some more bivariate visualizations. Here we will include the six characteristics and happiness score variables.

# +
#Long-format grid of pairwise correlations for the heat map: index1/index2 are the cell
#coordinates, cor is the correlation value.
df.cor = data.frame(index1=c(rep(1,7),rep(2,7),rep(3,7),rep(4,7),rep(5,7),rep(6,7),rep(7,7)), index2=rep(c(1:7),7), cor=as.numeric(cor(train[,c(5,7:12)]))) #Columns 5,7:12 are h.score, gdp, family, health, freedom, govt, generosity in this order
heat.cor=ggplot(df.cor, aes(x=index1, y=index2)) + geom_tile(aes(fill=cor)) + geom_text(aes(label=round(cor,2))) +theme_few() +labs(y=NULL,x=NULL, title="Correlation Matrix: Happiness And The Characteristics") + scale_fill_gradient(low="white", high="purple", name="Cor") + scale_x_continuous(breaks=1:7,labels=c("Happiness", "GDP", "Family", "Health", "Freedom", "Government", "Generosity")) + scale_y_continuous(breaks=1:7, labels=c("Happiness", "GDP", "Family", "Health", "Freedom", "Government", "Generosity")) + theme(axis.text.x = element_text(angle=45, vjust=0.5)) + theme(legend.title=element_text(size=9, face="bold")) #First column (and first row) correspond to target
heat.cor
# -

# **Ignoring the first row and first column gives the correlation matrix for the six characteristics.*
#
# We can see that with the exception of GDP and generosity, all of the variables are positively correlated in varying degrees.
# The largest correlation among the characteristics is 0.78 and occurs between GDP and health; in more prosperous countries people have better access to nutrition and health care, as well as a better education regarding these subjects. The two next largest coefficients among the six characteristics are only 0.58 and 0.48, so while the relationships among characteristics are somewhat linear, they are not strong enough to plausibly make multicollinearity a big issue in predicting happiness scores.

# **Multivariate Analysis: *Two Characteristics Vs. Happiness***

# For the first part of our multivariate analysis we will look at how pairs among the three most important characteristics - GDP, family and health - relate to happiness. The characteristics will be bucketed into intervals of length 0.2 so that appropriate heat maps can be constructed. Any characteristic value of 0 will by default be excluded from the buckets, as the first interval for each characteristic is (0,0.2], so we will update these buckets to include the 0 values.

# +
#Discretize each characteristic into 0.2-wide buckets for the heat maps.
breaks.gdp = cut(train$gdp, seq(0,2,0.2)) #A factor variable with levels "(0,0.2]", "(0.2,0.4]", ... , "(1.8,2]"
breaks.health = cut(train$health, seq(0,1.2,0.2))
breaks.family = cut(train$family, seq(0,1.6,0.2))

#Update the level names so that 0 can be assigned to an appropriate interval
levels(breaks.gdp)[1]="[0,0.2]" #Before the update this level is "(0,0.2]"
levels(breaks.health)[1]="[0,0.2]"
levels(breaks.family)[1]="[0,0.2]"

#The characteristic values that were 0 were replaced with NA because they did not fit in any interval before the level names were updated. Let's identify them so that we can manually update their intervals to "[0,0.2]".
which(is.na(breaks.gdp))
which(is.na(breaks.health))
which(is.na(breaks.family))
# -

# Now that we have identified the characteristics that have a value of 0 we can manually add in the [0,0.2] intervals for the 0 observations. We will then create the appropriate data frames of aggregated data that will allow us to construct our multivariate heat maps.

# +
#NOTE(review): these indices are hard-coded from the which() output above and depend on
#the current row order of train.
breaks.gdp[226] = "[0,0.2]"
breaks.health[c(118,260)] = "[0,0.2]"
breaks.family[c(143,303)] = "[0,0.2]"

#Create the data frame from which the data will be aggregated.
breaks.df = data.frame(gdp = breaks.gdp, health=breaks.health, family=breaks.family, h.score=train$h.score, region=train$region)

#Creating the aggregated data frames
df.gdp.health = aggregate(h.score~gdp+health,data=breaks.df,mean) #Takes the average happiness score for each (gdp,health) pair in breaks.
df.gdp.family = aggregate(h.score~gdp+family,data=breaks.df,mean)
df.health.family= aggregate(h.score~health+family,data=breaks.df,mean)

#Creating the heat maps: one tile per bucket pair, colored by mean happiness (green=low, red=high)
heat.gdp.health = ggplot(df.gdp.health, aes(x=gdp,y=health)) + geom_tile(aes(fill=h.score)) + theme_few() + labs(x="GDP Per Capita", y="Health", title="Happiness By GDP And Health") + scale_fill_gradient(name="Happiness Score", low="green", high="red") + theme(axis.text.x = element_text(angle=45, vjust=0.5), axis.title=element_text(size=9, face="bold"), legend.title=element_text(size=9, face="bold"))
heat.gdp.family = ggplot(df.gdp.family, aes(x=gdp,y=family)) + geom_tile(aes(fill=h.score)) + theme_few() + labs(x="GDP Per Capita", y="Family", title="Happiness By GDP And Family") + scale_fill_gradient(name="Happiness Score", low="green", high="red") + theme(axis.text.x = element_text(angle=45, vjust=0.5), axis.title=element_text(size=9, face="bold"), legend.title=element_text(size=9, face="bold"))
heat.health.family = ggplot(df.health.family, aes(x=health,y=family)) + geom_tile(aes(fill=h.score)) + theme_few() + labs(x="Health", y="Family", title="Happiness By Health And Family") + scale_fill_gradient(name="Happiness Score", breaks=3:7, low="green", high="red") + theme(axis.text.x = element_text(angle=45, vjust=0.5), axis.title=element_text(size=9, face="bold"), legend.title=element_text(size=9, face="bold"))
#Display the graphs
grid.arrange(heat.gdp.health, heat.gdp.family, heat.health.family)
# -

# A common theme occurs amongst pairs of the top three characteristics in predicting happiness scores. There is the upwards-diagonal trend that when two of the three characteristic values are low the happiness score is low as well, and as they increase simultaneously, happiness rises until it reaches its peak near the top-right corner of the respective heat map, where the characteristic values are near their maximums. As can be seen in the heat maps above, we don't have access to any observations where the maximum bucket of any two of GDP, family and health occur. If we did it is plausible that it is at these observations, or at least near them, that happiness would be maximized.
#
# When building machine learning models one approach worth trying would be to feature engineer three new variables, each one corresponding to one of the pairs of the top three characteristics. Each feature could be constructed in such a way that it has a large value when both of its characteristics have large values, a small value when both of its characteristics have small values, and a value in between when one characteristic has a small value and the other is large. For example they can be summed, or more generally, any linear combination with positive coefficients can be constructed.

# **Multivariate Analysis: *One Characteristic And Region Vs. Happiness***
#
# Now let's see how happiness varies by region according to the values of the three most important characteristics.

# +
#The regions in order of increasing h.score; we will use this to order our factors in the heat maps.
h.sort.region=c("Sub-Saharan Africa", "Southern Asia", "Southeastern Asia", "Central and Eastern Europe", "Middle East and Northern Africa", "Eastern Asia", "Latin America and Caribbean", "Western Europe", "North America", "Australia and New Zealand")

#Make a data frame that groups h.score by region and gdp
df.region.gdp = aggregate(h.score~region+gdp,data=breaks.df,mean)
df.region.gdp$region = factor(df.region.gdp$region, ordered=TRUE, levels=h.sort.region) #Convert region to an ordered factor so the heat-map x-axis follows increasing happiness

#Make a data frame that groups h.score by region and health
df.region.health = aggregate(h.score~region+health,data=breaks.df,mean)
df.region.health$region = factor(df.region.health$region, ordered=TRUE, levels=h.sort.region)

#Make a data frame that groups h.score by region and family
df.region.family = aggregate(h.score~region+family,data=breaks.df,mean)
df.region.family$region = factor(df.region.family$region, ordered=TRUE, levels=h.sort.region)

#Create the heat maps; region names are abbreviated on the axis (full names in the legend table below)
heat.region.gdp = ggplot(df.region.gdp, aes(x=region,y=gdp)) + geom_tile(aes(fill=h.score)) + theme_few() + labs(x=NULL, y="GDP Per Capita", title="Happiness By GDP And Region") + scale_x_discrete(breaks=h.sort.region, labels=c("SSA", "SA", "SEA", "CEE", "MENA", "EA", "LAC", "WE", "NA", "ANZ")) + theme(axis.text.x = element_text(angle=45, vjust=0.5)) + scale_fill_gradient(name="Happiness Score", high="red", low="yellow") + theme(axis.text.x = element_text(angle=45, vjust=0.5), axis.title=element_text(size=9, face="bold"), legend.title=element_text(size=9, face="bold"))
heat.region.health = ggplot(df.region.health, aes(x=region,y=health)) + geom_tile(aes(fill=h.score)) + theme_few() + labs(x=NULL, y="Health", title="Happiness By Health And Region") + scale_x_discrete(breaks=h.sort.region, labels=c("SSA", "SA", "SEA", "CEE", "MENA", "EA", "LAC", "WE", "NA", "ANZ")) + scale_fill_gradient(name="Happiness Score", high="red", low="yellow") + theme(axis.text.x = element_text(angle=45, vjust=0.5), axis.title=element_text(size=9, face="bold"), legend.title=element_text(size=9, face="bold"))
heat.region.family = ggplot(df.region.family, aes(x=region,y=family)) + geom_tile(aes(fill=h.score)) + theme_few() + labs(x=NULL, y="Family", title="Happiness By Family And Region") + scale_x_discrete(breaks=h.sort.region, labels=c("SSA", "SA", "SEA", "CEE", "MENA", "EA", "LAC", "WE", "NA", "ANZ")) + theme(axis.text.x = element_text(angle=45)) + scale_fill_gradient(name="Happiness Score", high="red", low="yellow") + theme(axis.text.x = element_text(angle=45, vjust=0.5), axis.title=element_text(size=9, face="bold"), legend.title=element_text(size=9, face="bold"))

#Create abbreviated names for the regions that can be used for the plots (to save space)
#NOTE(review): 'legend' shadows graphics::legend(), and the "NA" abbreviation for North
#America is only a display string (not the missing-value NA) -- both work but are easy to trip over.
legend = data.frame(Region=h.sort.region, Abbreviation = c("SSA", "SA", "SEA", "CEE", "MENA", "EA", "LAC", "WE", "NA", "ANZ"))
legend
grid.arrange(heat.region.gdp, heat.region.health, heat.region.family)
# -

# We have already seen that there is a general upward trend in happiness for countries as any of GDP per capita, health quality or family quality increase. What we can see in the above heatmaps is that this trend exists not only globally but also within each region. In a machine learning project it would therefore be worth considering building a predictive model for each region, as we have seen throughout this project that there is a lot of variation between the different regions.

# ## Conclusion
#
#
# Happiness matters. Its primary value is found not within the subjective enjoyment of the feeling itself but in the enthusiasm for creativity, production and problem-solving that it instills in people. We have seen that the national economy, family life, health quality, general freedom, generosity of citizens and trust in government are positively correlated with national happiness levels in varying degrees.
# GDP, health and family in particular show the strongest correlations with happiness; when a person is healthy, has tight personal relationships and is not perpetually burdened by financial concerns, then his or her views on life will likely be more optimistic. The need for all three of these factors to be ranked highly to ensure a high probability of happiness can be relaxed to a need for just two of them to be ranked highly to ensure a high probability of happiness. Prosperous countries with great health care, prosperous countries with great familial relationships, and countries with great health care and great familial relationships can expect the average citizen to report a high quality of life. It would therefore be wise that nations make it a priority to provide their citizens with the opportunities and support required to develop these crucial life pillars, and in return for their efforts, to reap the benefits on a national level.
WorldHappinessReportEDA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # High-throughput Full-Length Single-Cell RNA-Seq Automation # ## Abstract # # Existing protocols for full-length single-cell RNA sequencing (scRNA-seq) produce libraries of high complexity (thousands of distinct genes) with outstanding sensitivity and specificity of transcript quantification. These full-length libraries have the advantage of allowing probing of transcript isoforms, are informative regarding single nucleotide polymorphisms, and allow assembly of the VDJ region of the T- and B-cell receptor sequences. Since full length protocols are mostly plate-based at present, they are also suited to profiling cell types where cell numbers are limiting, such as rare cell types during development for instance. A disadvantage of these methods has been the scalability and cost of the experiments, which has limited their popularity as compared to droplet-based and nanowell approaches. Here, we describe an automated protocol for full-length scRNA-seq, including both an in-house automated SMART-seq2 protocol, and a commercial kit-based workflow. We discuss these two protocols in terms of ease-of-use, equipment requirements, running time, cost per sample and sequencing quality. By benchmarking the lysis buffers, reverse transcription enzymes and their combinations, we propose an optimized in-house automated protocol with dramatically reduced cost. These pipelines have been employed successfully for several research projects allied with the Human Cell Atlas initiative (www.humancellatlas.org) and are available on protocols.io. 
# +
# %pylab inline
import warnings
warnings.filterwarnings("ignore")
from SCQUA import *
from readquant import *
from glob import iglob
import scanpy as sc
import scipy
# -

# # Read in data
# > raw data
# ## salmon
# Here we read in the salmon mapped data and save as AnnData (https://anndata.readthedocs.io/en/stable/anndata.AnnData.html) objects.


def _load_salmon_anndata(pattern):
    """Build an AnnData from the salmon quantifications matching glob *pattern*.

    The main matrix holds the TPM table (cells x genes, sparse); raw counts are
    kept in ``.layers['counts']`` and per-cell QC metrics from read_qcs in ``.obs``.
    Refactored from four copy-pasted cells (full depth, e4, e5, e6); the original
    also contained a no-op ``qc.index = qc.index``, removed here.
    """
    tpm, cts = read_quants(pattern)
    qc = read_qcs(pattern)
    ad = sc.AnnData(scipy.sparse.csr_matrix(tpm.T.values))
    ad.var_names = tpm.index
    ad.obs_names = tpm.columns
    ad.layers['counts'] = scipy.sparse.csr_matrix(cts.T.values)
    ad.obs = qc.loc[ad.obs_names]
    ad.obs['n_counts'] = ad.layers['counts'].sum(1)
    ad.obs['n_genes'] = (ad.layers['counts'] > 0).sum(1)
    # The sequencing lane is the part of the cell id before the '#'.
    ad.obs['lane'] = ad.obs_names.str.split('#').str[0]
    return ad


def _append_spikein_metrics(ad):
    """Attach the ERCC spike-in QC columns from get_result_ad to ad.obs (in place)."""
    df = get_result_ad(ad, ercc, sirv=None, spike=None)
    ad.obs = pd.concat([ad.obs, df], axis=1)


# Full-depth data.
ad = _load_salmon_anndata('salmon/*')
ad.write("anndata.h5")

ad = sc.read("anndata.h5")  # round-trip through disk, as in the original workflow
_append_spikein_metrics(ad)
ad.write("anndata.h5")
ad.shape

# > downsample to e4

# +
ad = _load_salmon_anndata('salmon/lira_head_e4/*')
# -

_append_spikein_metrics(ad)
ad.write("anndata_e4.h5")
ad.shape

ad.obs_names.str.startswith("31617").sum()

# > downsample to e5

# +
ad = _load_salmon_anndata('salmon/lira_head_e5/*')
# -

_append_spikein_metrics(ad)
ad.write("anndata_e5.h5")
ad.shape

ad.obs_names.str.startswith("31617").sum()

# > downsample to e6

# +
ad = _load_salmon_anndata('salmon/lira_head_e6/*')
# -

_append_spikein_metrics(ad)
ad.write("anndata_e6.h5")
ad.shape

# Collect the per-cell tables of the three downsampling depths into one frame,
# prefixing each cell id with its depth label.
dfs = []
for depth in ['e4', 'e5', 'e6']:  # renamed from `id`, which shadowed the builtin
    ad = sc.read("anndata_%s.h5" % depth)
    df = ad.obs  # NOTE: a reference, not a copy -- the mutations below also touch ad.obs
    df['name'] = df.index
    df.index = '%s=' % depth + df.index
    dfs.append(df)
df = pd.concat(dfs)
df.shape

df.to_csv("downsample.csv")

df = pd.read_csv("downsample.csv", index_col=0)

ad = sc.read("anndata.h5")  # return to the full-depth object for the sections below
ad.shape
names = [] metas = [] for f in iglob('cram/*.imeta'): name = f.split('/')[1].split('.')[0] xx = open(f,'r').read().split('----\n') for x in xx: if x.startswith('attribute: sample_supplier_name'): meta = x.split('\n')[1] break names.append(name) metas.append(meta) df = pd.DataFrame({'meta':metas},index=names) df.meta = df.meta.str.split(' ').str[1] # + df['Enzyme'] = df['meta'].str.split('_').str[0] df['Buffer'] = df['meta'].str.split('_').str[1] ad.obs['Enzyme'] = df.loc[ad.obs_names]['Enzyme'] ad.obs['Buffer'] = df.loc[ad.obs_names]['Buffer'] # - ad.write("anndata.h5") # ## qualimap # Add quality assessment metrics derived from qualimap2 (https://academic.oup.com/bioinformatics/article/32/2/292/1744356). dfs = [] for f in tqdm(iglob('qualimap_res/*/rnaseq_qc_results.txt')): name = f.split('/')[1] df = pd.read_csv(f, sep='=|:', skiprows=7, comment='>>', header=None,index_col=0) df.index = df.index.str.strip() df.columns = ['val'] df.val = df.val.str.strip() df['val'] = df['val'].str.replace(',','') df = df.iloc[2:19,:] df.loc['exonic_no'] = df.loc['exonic']['val'].split(' ')[0] df.loc['intronic_no'] = df.loc['intronic']['val'].split(' ')[0] df.loc['intergenic_no'] = df.loc['intergenic']['val'].split(' ')[0] df.loc['overlapping exon_no'] = df.loc['overlapping exon']['val'].split(' ')[0] df['val'] = df['val'].str.split('(').str[-1].str.split(')').str[0].replace('\%','') df.columns = [name] dfs.append(df) df = pd.concat(dfs, axis=1).T df['exonic'] = df['exonic'].str.replace('%','') df['intronic'] = df['intronic'].str.replace('%','') df['intergenic'] = df['intergenic'].str.replace('%','') df['overlapping exon'] = df['overlapping exon'].str.replace('%','') df['rRNA'] = df['rRNA'].str.replace('%','') df.to_csv("qualimap_res.csv") # ## rRNA # Read in the rRNA data from featureCounts (http://bioinf.wehi.edu.au/featureCounts/) result. 
dfs = []
names = []
# Pull the featureCounts "Successfully assigned alignments : N (P%)"
# summary line out of each per-cell rRNA counting log.
for f in tqdm(iglob('rRNA/*.txt')):
    names.append(f.replace('rRNA/','').replace('.txt',''))
    flag = 0
    for i in open(f).read().split('\n'):
        if i.startswith('|| Successfully assigned alignments :'):
            dfs.append(i)
            flag = 1
            break
    if flag == 0:
        # placeholder keeps dfs aligned with names when the line is missing
        dfs.append('(NA)')

# Keep the percentage between the parentheses and drop missing cells.
# (The original ran this Series construction + parsing twice back to back;
# the redundant duplicate statements have been removed.)
df = pd.Series(dfs, index=names)
df = df.str.split('(').str[1].str.split(')').str[0]
df = df[df != 'NA']
df.to_csv("rRNA.csv")

ad = sc.read("anndata.h5")
ad.obs['rRNA%'] = df.loc[ad.obs_names].str.replace('%','').fillna(0).astype(float)

df = pd.read_csv("qualimap_res.csv", index_col=0)
ad.obs = pd.concat([ad.obs, df.loc[ad.obs_names].fillna(0)], axis=1)
ad.write("anndata.h5")

# # ERCC+MT
# Calculate percentages of ERCC contents and mitochondrial contents.

# +
# gene id -> symbol table for the reference, extended with the ERCC
# spike-in ids (which map to themselves)
df = pd.read_csv("/nfs/leia/research/saraht/chichau/Ref/input/GRCm38.cdna.all_ERCC.symbol.tsv", index_col=0, sep=' ', header=None)
df.columns = ['symbol']
dd = ad.var[ad.var_names.str.startswith("ERCC")]
dd = pd.DataFrame({'symbol':dd.index.tolist()}, index=dd.index.tolist())
df = pd.concat([df,dd])
ad.var['symbol'] = df.loc[ad.var_names]['symbol']

# flag mitochondrial transcripts listed in the reference MT table
df = pd.read_csv("/nfs/leia/research/saraht/chichau/Ref/input/GRCm38.cdna.MT.tsv", \
                 index_col=0, sep=' ', header=None)
ad.var['MT'] = ad.var_names.isin(df.index)
# -

# fraction of expression coming from mitochondrial transcripts
ad.obs['percent_mito'] = np.sum(
    ad[:, ad.var['MT']].X, axis=1).A1 / np.sum(ad.X, axis=1).A1
ad.shape

# switch var names from Ensembl ids to gene symbols (ids kept in 'ENS')
ad.var['ENS'] = ad.var_names.tolist()
ad.var_names = ad.var['symbol'].astype(str)
ad.var_names_make_unique()
ad.raw = sc.pp.log1p(ad, copy=True)
ad.write("anndata.h5")

ad = sc.read("anndata.h5")

# gene body coverage profiles (*.geneBodyCoverage.txt), one row per cell
dfs = []
for f in iglob('gbc/*.geneBodyCoverage.txt'):
    name = f.replace('gbc/','').replace('.geneBodyCoverage.txt','')
    df = pd.read_csv(f, sep='\t', index_col=0).T
    if df.shape[1] < 1:
        continue
    df = df.iloc[:,0].to_frame()
    df.columns = [name]
    dfs.append(df)
df = pd.concat(dfs, axis=1).T.astype(int)
df.shape
ad.obsm['genebodycoverage'] = df.loc[ad.obs_names].fillna(0).values
ad.write("anndata.h5")

# per-cell count of rows in each outrigger summary
# (presumably the number of alternative splicing events — confirm)
dfs = []
ns = []
for f in tqdm(iglob('SJ/*/psi/outrigger_summary.csv')):
    df = pd.read_csv(f, index_col=0)
    name = f.replace('SJ/','').replace('/psi/outrigger_summary.csv','')
    dfs.append(df.shape[0])
    ns.append(name)
df = pd.DataFrame({'ASE':dfs}, index=ns)
ad.obs['ASE'] = df.loc[ad.obs_names].fillna(0).values
ad.write("anndata.h5")

ad = sc.read("anndata.h5")
# show cells with more than 1e6 processed reads
ad.obs[ad.obs['num_processed'].astype(int)>1000000]

# # End
examples/SmartSeqAutomation_dataprocessing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # prerequisite
#
# * language: Python3.7
# * Python lib
#   * scikit-learn-0.20.2
#   * pandas-0.24.1
#   * matplotlib-3.0.2

# %matplotlib inline
import PIL.Image
import IPython.display
import numpy as np
from sklearn import datasets
from sklearn import linear_model
import pandas as pd
import matplotlib.pyplot as plt


# helper function
def showarray(a):
    """Render a numeric array inline in the notebook as an image."""
    a = np.uint8(a)
    IPython.display.display(PIL.Image.fromarray(a))


# +
# TODO replace the sampled 8*8 digits dataset with full MNIST
# -

# ## load dataset
data, label = datasets.load_digits(return_X_y=True)
instance, feature_num = data.shape
# fixed seed so the shuffle (and therefore the bandit runs) is reproducible
np.random.seed(2019)
randomize = np.random.permutation(instance)
data = data[randomize]
label = label[randomize]
print("%d instance with %d features" % (instance, feature_num))
print('data shape', data.shape)
print('label shape', label.shape)

# ## check loaded data
debug_data = data.reshape((1797, 8, 8))
idx = 4
# invert for display; digits pixels are 0..16, so 255 - x stays in uint8
# range (256 - x previously wrapped 0-valued pixels around via overflow)
img = 255 - debug_data[idx]
l = label[idx]
print("label is %d" % l)
print("image is ")
showarray(img)


# ## bandit algorithm with epsilon greedy
def run_agent(epsilon, epochs):
    """Run an epsilon-greedy contextual bandit over the digits dataset.

    Each digit class 0-9 is an arm; the context is the image's pixel
    features plus a constant bias term.  Reward is 1 when the pulled arm
    equals the true label, 0 otherwise.  Each arm's reward model is refit
    by linear regression as observations accumulate.

    Parameters
    ----------
    epsilon : float
        Exploration probability in [0, 1].
    epochs : int
        Number of passes over the (shuffled) dataset.

    Returns
    -------
    (trials, avg_actual_reward)
        Trial indices and the running average reward after each trial.
    """
    arms = list(range(0, 10))
    # arm -> ([feature0, feature1, ...], [0, 1, ...])
    train_set = {}
    # arm -> theta_array: np array of per-feature weights (last = bias)
    theta_set = {}
    for arm in arms:
        train_set[arm] = ([], [])
        # start with a flat model whose bias weight predicts reward 0.5
        v = np.zeros(feature_num + 1)
        v[-1] = 0.5
        theta_set[arm] = v

    avg_actual_reward = []
    actual_reward_sum = 0
    trials = []
    idx = 0
    for epoch in range(epochs):
        for t in range(instance):
            # append the constant bias feature
            append_feature = np.append(data[t], [1])
            if np.random.rand() < epsilon:
                # exploration: pick an arm uniformly at random
                pickup_arm = arms[np.random.randint(len(arms))]
            else:
                # exploitation: pick the arm with the highest predicted
                # reward, breaking ties uniformly at random.  Linear-model
                # predictions can be negative, so start below any possible
                # value (the previous -1 sentinel could mask such arms).
                max_arm = []
                max_reward = -float('inf')
                for arm in arms:
                    expected_reward = theta_set[arm].dot(append_feature)
                    if expected_reward > max_reward:
                        max_reward = expected_reward
                        max_arm = [arm]
                    elif expected_reward == max_reward:
                        max_arm.append(arm)
                pickup_arm = max_arm[np.random.randint(len(max_arm))]

            actual_reward = int(pickup_arm == label[t])
            train_set[pickup_arm][0].append(append_feature)
            train_set[pickup_arm][1].append(actual_reward)
            trials.append(idx)
            actual_reward_sum += actual_reward
            avg_actual_reward.append(actual_reward_sum / len(trials))
            idx += 1

            # refit theta_set[pickup_arm] since we observed a new instance
            if len(train_set[pickup_arm][1]) > 10:
                # the bias column is already part of the features, so fit
                # without a separate intercept; otherwise the intercept
                # learned by the model would be silently dropped when only
                # coef_ is copied into theta_set
                reg = linear_model.LinearRegression(fit_intercept=False)
                reg.fit(train_set[pickup_arm][0], train_set[pickup_arm][1])
                theta_set[pickup_arm] = reg.coef_
    return trials, avg_actual_reward


# compare exploration rates on one plot
legends = []
fig, num_ax = plt.subplots(nrows=1, ncols=1)
for epsilon in [0.5, 0.2, 0.1, 0.01, 0]:
    trials_agent, avg_actual_reward_agent = run_agent(epsilon, epochs=20)
    line = num_ax.plot(trials_agent, avg_actual_reward_agent, '--')
    legends.append("ℇ = %.2f" % epsilon)
num_ax.set(title="average reward for agent", xlabel="trial", ylabel="average reward")
num_ax.legend(legends, loc='lower right')
fig.savefig("plot.pdf")
Bandit_guessing_number.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # Lasagne Visualizer in Action # ============================ # The underlying example is borrowed from <NAME>'s tutorials at PyData 2015: https://github.com/ebenolson/pydata2015 # Finetuning a pretrained network # ================= # # We can take a network which was trained on the ImageNet dataset and adapt it to our own image classification problem. This can be a useful technique when training data is too limited to train a model from scratch. # # Here we try to classify images as either pancakes or waffles. # + import numpy as np import theano import theano.tensor as T import lasagne # %matplotlib inline import matplotlib.pyplot as plt import skimage.transform import sklearn.cross_validation import pickle import os import collections from lasagne_visualizer import lasagne_visualizer # - # Seed for reproducibility np.random.seed(42) CLASSES = ['pancakes', 'waffles'] LABELS = {cls: i for i, cls in enumerate(CLASSES)} # Dataset # -------- # # Images were downloaded from Google Image Search, and placed in the directories `./images/pancakes' and './images/waffles'. # # There are approximately 1300 images with a roughly even split. 
# Download and unpack dataset
# !wget -N https://s3.amazonaws.com/emolson/pydata/images.tgz
# !tar -xf images.tgz

# Read a few images and display
im = plt.imread('./images/pancakes/images?q=tbn:ANd9GcQ1Jtg2V7Me2uybx1rqxDMV58Ow17JamorQ3GCrW5TUyT1tcr8EMg')
plt.imshow(im)

im = plt.imread('./images/waffles/images?q=tbn:ANd9GcQ-0-8U4TAw6fn4wDpj8V34AwbhkpK9SNKwobolotFjNcgspX8wmA')
plt.imshow(im)

# +
# Model definition for VGG-16, 16-layer model from the paper:
# "Very Deep Convolutional Networks for Large-Scale Image Recognition"
# Original source: https://gist.github.com/ksimonyan/211839e770f7b538e2d8
# More pretrained models are available from
# https://github.com/Lasagne/Recipes/blob/master/modelzoo/
from lasagne.layers import InputLayer, DenseLayer, NonlinearityLayer
from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
from lasagne.layers import Pool2DLayer as PoolLayer
from lasagne.nonlinearities import softmax
from lasagne.utils import floatX


def build_model():
    """Build the VGG-16 architecture.

    Returns an OrderedDict mapping layer names to Lasagne layers; the
    final entry 'prob' is the softmax output over 1000 ImageNet classes.
    """
    net = collections.OrderedDict()
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc7'] = DenseLayer(net['fc6'], num_units=4096)
    net['fc8'] = DenseLayer(net['fc7'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
# -

# Download a pickle containing the pretrained weights
# !wget -N https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/vgg16.pkl

# Load model weights and metadata.
# 'param values' holds the weight arrays; 'mean value' the per-channel
# input mean (presumably BGR order, since it is subtracted after the BGR
# swap in prep_image — confirm against the model zoo notes).
d = pickle.load(open('vgg16.pkl'))

# Build the network and fill with pretrained weights
net = build_model()
lasagne.layers.set_all_param_values(net['prob'], d['param values'])

# +
# The network expects input in a particular format and size.
# We define a preprocessing function to load a file and apply the necessary transformations
IMAGE_MEAN = d['mean value'][:, np.newaxis, np.newaxis]


def prep_image(fn, ext='jpg'):
    """Load an image and preprocess it for VGG-16.

    Returns (rawim, im): the uint8 224x224 crop for display, and the
    mean-subtracted, BGR, channel-first float batch for the network.
    """
    im = plt.imread(fn, ext)

    # Resize so smallest dim = 256, preserving aspect ratio
    # NOTE(review): relies on Python 2 integer division for the target size
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (256, w*256/h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h*256/w, 256), preserve_range=True)

    # Central crop to 224x224
    h, w, _ = im.shape
    im = im[h//2-112:h//2+112, w//2-112:w//2+112]

    rawim = np.copy(im).astype('uint8')

    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

    # discard alpha channel if present
    im = im[:3]

    # Convert to BGR
    im = im[::-1, :, :]

    im = im - IMAGE_MEAN
    return rawim, floatX(im[np.newaxis])
# -


# Test preprocessing and show the cropped input
rawim, im = prep_image('./images/waffles/images?q=tbn:ANd9GcQ-0-8U4TAw6fn4wDpj8V34AwbhkpK9SNKwobolotFjNcgspX8wmA')
plt.imshow(rawim)

# +
# Load and preprocess the entire dataset into numpy arrays
X = []
y = []
for cls in CLASSES:
    for fn in os.listdir('./images/{}'.format(cls)):
        _, im = prep_image('./images/{}/{}'.format(cls, fn))
        X.append(im)
        y.append(LABELS[cls])

X = np.concatenate(X)
y = np.array(y).astype('int32')

# +
# Split into train, validation and test sets.
# NOTE(review): the second split is over range(len(train_ix)) — i.e. fresh
# positions into the FULL arrays, not a subset of train_ix — so validation
# indices can overlap test_ix.  Confirm whether this is intended.
train_ix, test_ix = sklearn.cross_validation.train_test_split(range(len(y)))
train_ix, val_ix = sklearn.cross_validation.train_test_split(range(len(train_ix)))

X_tr = X[train_ix]
y_tr = y[train_ix]

X_val = X[val_ix]
y_val = y[val_ix]

X_te = X[test_ix]
y_te = y[test_ix]
# -

# We'll connect our output classifier to the last fully connected layer of the network
output_layer = DenseLayer(net['fc7'], num_units=len(CLASSES), nonlinearity=softmax)

# +
# Define loss function and metrics, and get an updates dictionary
X_sym = T.tensor4()
y_sym = T.ivector()

prediction = lasagne.layers.get_output(output_layer, X_sym)
loss = lasagne.objectives.categorical_crossentropy(prediction, y_sym)
loss = loss.mean()

acc = T.mean(T.eq(T.argmax(prediction, axis=1), y_sym), dtype=theano.config.floatX)

params = lasagne.layers.get_all_params(output_layer, trainable=True)
updates = lasagne.updates.nesterov_momentum(
    loss, params, learning_rate=0.0001, momentum=0.9)
# -

# Compile functions for training, validation and prediction
train_fn = theano.function([X_sym, y_sym], loss, updates=updates)
val_fn = theano.function([X_sym, y_sym], [loss, acc])
pred_fn = theano.function([X_sym], prediction)


# generator splitting an iterable into chunks of maximum length N
def batches(iterable, N):
    """Yield successive chunks (lists) of at most N items from iterable."""
    chunk = []
    for item in iterable:
        chunk.append(item)
        if len(chunk) == N:
            yield chunk
            chunk = []
    if chunk:
        yield chunk


# We need a fairly small batch size to fit a large network like this in GPU memory
BATCH_SIZE = 16


# +
def train_batch():
    """Run one SGD step on a random training batch; return the loss."""
    # NOTE(review): shuffling range(...) in place only works on Python 2,
    # where range returns a list
    ix = range(len(y_tr))
    np.random.shuffle(ix)
    ix = ix[:BATCH_SIZE]
    return train_fn(X_tr[ix], y_tr[ix])


def val_batch():
    """Evaluate (loss, accuracy) on a random validation batch."""
    ix = range(len(y_val))
    np.random.shuffle(ix)
    ix = ix[:BATCH_SIZE]
    return val_fn(X_val[ix], y_val[ix])
# +


NO_EPOCHS = 5

# %matplotlib notebook
f = plt.figure(figsize=(8,30))
# per-layer y-axis ranges for the live weight-distribution plots
custom_ranges = {'fc6':[-.01,.01], 'fc7':[-.05,.05], 'fc8':[-.05,.05]}
weight_supervisor = lasagne_visualizer.weight_supervisor(net, NO_EPOCHS, mode='currently_trainable',\
                                                        custom_weight_ranges=custom_ranges)
weight_supervisor.initialize_grid() for epoch in range(NO_EPOCHS): for batch in range(25): loss = train_batch() ix = range(len(y_val)) np.random.shuffle(ix) weight_supervisor.accumulate_weight_stats() weight_supervisor.live_plot() f.tight_layout() f.canvas.draw() loss_tot = 0. acc_tot = 0. for chunk in batches(ix, BATCH_SIZE): loss, acc = val_fn(X_val[chunk], y_val[chunk]) loss_tot += loss * len(chunk) acc_tot += acc * len(chunk) loss_tot /= len(ix) acc_tot /= len(ix) print(epoch, loss_tot, acc_tot * 100)
examples/Finetuning for Image Classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Custom Observers # Observers are at the heart of PyBN, but unfortunately it is not possible to define a recipe for everyones needs, but we built the system flexible enough that anybody can design its own observer. For simplicity of reading of this section we will recurr to a Jupyter Notebook trick to define a class along multiple code blocks. All observers derive from the class Observer. from pybn.observers import Observer import numpy as np # For this tutorial we will create an observer that just adds the state of each node in the network at each timestep and the observation is the average of such quantity. The first step is thus making our custom observer to derive from it and define the __init__ method. We will add the tag REQUIRED when the observer must have an implementation of that function. class MyCustomObserver(Observer): #REQUIRED. def __init__(self, nodes=1, runs=1): # Always call the constructor for the parent class. super().__init__(nodes=nodes, runs=runs) # Observations is used to declare the number and the name of the observations the observer will return. # This will also be the name used for the file within the execution module. # Thus, it is important to not repeat the names for the observartions. self.observations = ['average_sum'] # Data is where the observer will be storing all the data. # This variable is defined in the super class but may be modified if needed, # but the first dimension of this variable must be number of runs. # self.data = np.zeros((self.runs, self.nodes)) # Aditional variables may be declared if needed. self.table = np.zeros((self.nodes)) self.table_requires_update = False # We also need to declare a custom method to build this observer from a configuration file. 
class MyCustomObserver(MyCustomObserver):
    #REQUIRED.
    @classmethod
    def from_configuration(cls, configuration):
        # Build the observer from the execution-module configuration dictionary.
        return cls(
            nodes=configuration['parameters']['nodes'],
            runs=configuration['execution']['samples'])

# We need to declare methods to clear the temporal variables after each run and to reset the observer to its default value. Only custom variables need to be modified.


class MyCustomObserver(MyCustomObserver):
    #REQUIRED.
    def clear(self):
        # This function is called each time the network starts a new run.
        # Its purpose is to reset all temporal variables.
        self.table = np.zeros((self.nodes))
        # Always call post_clear() at the end of clear.
        self.post_clear()

    #REQUIRED.
    def reset(self):
        # This function is called by the user to reset the observer to default.
        # Most of the time it will be called from network.reset_observers().
        self.table = np.zeros((self.nodes))
        # Always call post_reset() at the end of reset.
        # self.data is reset here.
        self.post_reset()

# The most important method is update. This is the data entry point.


class MyCustomObserver(MyCustomObserver):
    #REQUIRED.
    def update(self, state):
        # Iterate through all nodes at time step t.
        for i in range(len(state)):
            # Add the value of that state.
            self.table[i] += state[i]
        # Always call post_update() at the end of update.
        self.post_update()

# We also need to define a method that processes all the data; in this case we just divide the sum by the number of states the observer has seen (self.counter).


class MyCustomObserver(MyCustomObserver):
    #REQUIRED.
    def process_data(self):
        # Average all the values and store them in self.data.
        self.data[self.current_run] = self.table / self.counter

# Finally we just need to define the method observations_to_data to recover the data the observer is calculating. This method helps the observer parse data and format it back to you in a readable form. It needs to handle two cases: the network average and the per-node case.
# We will also add a few auxiliary methods that help the calculations. You may declare as many of these as necessary.


class MyCustomObserver(MyCustomObserver):
    #REQUIRED.
    def observations_to_data(self, observation_name, per_node=False):
        if (observation_name == self.observations[0]):
            return self.observation_0(per_node=per_node)
        # Add as many cases as necessary
        # elif(observation_name == self.observations[1]):
        #     return self.observation_1(per_node=per_node)

    #AUXILIAR.
    # Here we are returning the mean and the std of the observation, but in general you may define as many different quantities as you need.
    # Just notice that they will be stored / printed as:
    # (Network average) [network_quantity_0, ... ,network_quantity_k]
    # (Per node) [node_0_quantity_0, ... ,node_0_quantity_k, node_1_quantity_0, ..., node_n_quantity_k]
    def observation_0(self, per_node=False):
        if (per_node):
            return np.mean(self.data, axis=0), np.std(self.data, axis=0)
        else:
            return np.mean(self.data), np.std(self.data)

# That's it, our custom observer is properly defined.

# # Testing the custom observer

from pybn.graphs import uniform_graph
from pybn.networks import BooleanNetwork

# Let's define a small network that only performs a few steps, so we can visually compare the result of the observer with the state evolution of the network.

# +
nodes = 8
steps = 5
average_connectivity = 3.1

graph = uniform_graph(nodes, average_connectivity)
network = BooleanNetwork(nodes, graph)
# -

# Display-only cell: construct an observer to show its repr.
MyCustomObserver(nodes=nodes)

# Instantiate and attach the observer.
observers = [MyCustomObserver(nodes=nodes)]
network.attach_observers(observers)

# Perform one small execution and print the states.

# Set a random initial state.
network.set_initial_state(observe=True)
print(network.state)
# Perform several steps.
for _ in range(steps):
    network.step(observe=True)
    print(network.state)

# Print the observer's summary.

# Get observer's summary.
network.observers_summary()
Tutorials/4_PyBN_Custom_Observers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "skip"} # <table> # <tr align=left><td><img align=left src="./images/CC-BY.png"> # <td>Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) <NAME></td> # </table> # # Note: This material largely follows the text "Numerical Linear Algebra" by Trefethen and Bau (SIAM, 1997) and is meant as a guide and supplement to the material presented there. # + init_cell=true slideshow={"slide_type": "skip"} # %matplotlib inline # %precision 3 import numpy import matplotlib.pyplot as plt # + [markdown] slideshow={"slide_type": "slide"} # # Numerical Linear Algebra # # Numerical methods for linear algebra problems lies at the heart of many numerical approaches and is something we will spend some time on. Roughly we can break down problems that we would like to solve into two general problems, solving a system of equations # # $$A \mathbf{x} = \mathbf{b}$$ # # and solving the eigenvalue problem # # $$A \mathbf{v} = \lambda \mathbf{v}.$$ # # We examine each of these problems separately and will evaluate some of the fundamental properties and methods for solving these problems. We will be careful in deciding how to evaluate the results of our calculations and try to gain some understanding of when and how they fail. # + [markdown] slideshow={"slide_type": "slide"} # ## General Problem Specification # # The number and power of the different tools made available from the study of linear algebra makes it an invaluable field of study. Before we dive in to numerical approximations we first consider some of the pivotal problems that numerical methods for linear algebra are used to address. 
# # For this discussion we will be using the common notation $m \times n$ to denote the dimensions of a matrix $A$. The $m$ refers to the number of rows and $n$ the number of columns. If a matrix is square, i.e. $m = n$, then we will use the notation that $A$ is $m \times m$. # + [markdown] slideshow={"slide_type": "subslide"} # ### Systems of Equations # # The first type of problem is to find the solution to a linear system of equations. If we have $m$ equations for $m$ unknowns it can be written in matrix/vector form, # # $$A \mathbf{x} = \mathbf{b}.$$ # # For this example $A$ is an $m \times m$ matrix, denoted as being in $\mathbb{R}^{m\times m}$, and $\mathbf{x}$ and $\mathbf{b}$ are column vectors with $m$ entries, denoted as $\mathbb{R}^m$. # + [markdown] slideshow={"slide_type": "subslide"} # #### Example: Vandermonde Matrix # # We have data $(x_i, y_i), ~~ i = 1, 2, \ldots, m$ that we want to fit a polynomial of order $m-1$. Solving the linear system $A \mathbf{p} = \mathbf{y}$ does this for us where # # $$A = \begin{bmatrix} # 1 & x_1 & x_1^2 & \cdots & x_1^{m-1} \\ # 1 & x_2 & x_2^2 & \cdots & x_2^{m-1} \\ # \vdots & \vdots & \vdots & & \vdots \\ # 1 & x_m & x_m^2 & \cdots & x_m^{m-1} # \end{bmatrix} \quad \quad \mathbf{y} = \begin{bmatrix} # y_1 \\ y_2 \\ \vdots \\ y_m # \end{bmatrix}$$ # # and $\mathbf{p}$ are the coefficients of the interpolating polynomial $\mathcal{P}_N(x) = p_0 + p_1 x + p_2 x^2 + \cdots + p_m x^{m-1}$. The solution to this system satisfies $\mathcal{P}_N(x_i)=y_i$ for $i=1, 2, \ldots, m$. # + [markdown] slideshow={"slide_type": "subslide"} # #### Example: Linear least squares 1 # # In a similar case as above, say we want to fit a particular function (could be a polynomial) to a given number of data points except in this case we have more data points than free parameters. 
In the case of polynomials this could be the same as saying we have $m$ data points but only want to fit a $n - 1$ order polynomial through the data where $n - 1 \leq m$. One of the common approaches to this problem is to minimize the "least-squares" error between the data and the resulting function: # $$ # E = \left( \sum^m_{i=1} |y_i - f(x_i)|^2 \right )^{1/2}. # $$ # + [markdown] slideshow={"slide_type": "subslide"} # But how do we do this if our matrix $A$ is now $m \times n$ and looks like # $$ # A = \begin{bmatrix} # 1 & x_1 & x_1^2 & \cdots & x_1^{n-1} \\ # 1 & x_2 & x_2^2 & \cdots & x_2^{n-1} \\ # \vdots & \vdots & \vdots & & \vdots \\ # 1 & x_m & x_m^2 & \cdots & x_m^{n-1} # \end{bmatrix}? # $$ # # Turns out if we solve the system # # $$A^T A \mathbf{x} = A^T \mathbf{b}$$ # # we can guarantee that the error is minimized in the least-squares sense[<sup>1</sup>](#footnoteRegression). (Although we will also show that this is not the most numerically stable way to solve this problem) # # # + [markdown] slideshow={"slide_type": "subslide"} # #### Example: Linear least squares 2 # # Fitting a line through data that has random noise added to it. # + slideshow={"slide_type": "fragment"} # Linear Least Squares Problem # First define the independent and dependent variables. N = 20 x = numpy.linspace(-1.0, 1.0, N) y = x + numpy.random.random((N)) # Define the Vandermonde matrix based on our x-values A = numpy.array([ numpy.ones(x.shape), x]).T A = numpy.array([ numpy.ones(x.shape), x, x**2]).T A # + slideshow={"slide_type": "subslide"} # Determine the coefficients of the polynomial that will # result in the smallest sum of the squares of the residual. p = numpy.linalg.solve(numpy.dot(A.T, A), numpy.dot(A.T, y)) print("Error in slope = %s, y-intercept = %s" % (numpy.abs(p[1] - 1.0), numpy.abs(p[0] - 0.5))) print(p) # + hide_input=true slideshow={"slide_type": "-"} # Plot it out, cuz pictures are fun! 
fig = plt.figure(figsize=(8,6)) axes = fig.add_subplot(1, 1, 1) f = numpy.zeros(x.shape) for i in range(len(p)): f += p[i] * x**i axes.plot(x, y, 'ko') axes.plot(x, f, 'r') axes.set_title("Least Squares Fit to Data") axes.set_xlabel("$x$") axes.set_ylabel("$f(x)$ and $y_i$") axes.grid() plt.show() # + [markdown] slideshow={"slide_type": "slide"} # ### Eigenproblems # # Eigenproblems come up in a variety of contexts and often are integral to many problem of scientific and engineering interest. It is such a powerful idea that it is not uncommon for us to take a problem and convert it into an eigenproblem. Here we introduce the idea and give some examples. # # As a review, if $A \in \mathbb{C}^{m\times m}$ (a square matrix with complex values), a non-zero vector $\mathbf{v}\in\mathbb{C}^m$ is an **eigenvector** of $A$ with a corresponding **eigenvalue** $\lambda \in \mathbb{C}$ if # # $$A \mathbf{v} = \lambda \mathbf{v}.$$ # # One way to interpret the eigenproblem is that we are attempting to ascertain the "action" of the matrix $A$ on some subspace of $\mathbb{C}^m$ where this action acts like scalar multiplication. This subspace is called an **eigenspace**. # + [markdown] slideshow={"slide_type": "subslide"} # ### General idea of EigenProblems # # Rewriting the standard Eigen problem $A\mathbf{v}=\lambda\mathbf{v}$ for $A \in \mathbb{C}^{m\times m}$, $\mathbf{v}\in\mathbb{C}^m$ as # # $$ # (A - \lambda I)\mathbf{v} = 0 # $$ # # it becomes clear that for $\mathbf{v}$ to be non-trivial (i.e. $\neq \mathbf{0}$), requires that the matrix $(A-\lambda I)$ be singular, # # This is equivalent to finding all values of $\lambda$ such that $|A-\lambda I| = 0$ (the determinant of singular matrices is always zero). However, it can also be shown that # # $$ # | A-\lambda I| = P_m(\lambda) # $$ # # which is a $m$th order polynomial in $\lambda$. 
Thus $P_m(\lambda)=0$ implies the eigenvalues are the $m$ roots of $P$, and the **eigenspace** corresponding to $\lambda_i$ is just $N(A-\lambda_i I)$ # # + [markdown] slideshow={"slide_type": "subslide"} # ### Solving EigenProblems # # The temptation (and what) we usually teach in introductory linear algebra is to simply find the roots of $P_m(\lambda)$. However that would be **wrong**. The best algorithms for finding Eigenvalues are completely unrelated to rootfinding as we shall see. # + [markdown] slideshow={"slide_type": "skip"} # #### Example # # Compute the eigenspace of the matrix # $$ # A = \begin{bmatrix} # 1 & 2 \\ # 2 & 1 # \end{bmatrix} # $$ # # Recall that we can find the eigenvalues of a matrix by computing $\det(A - \lambda I) = 0$. # + [markdown] slideshow={"slide_type": "skip"} # In this case we have # $$\begin{aligned} # A - \lambda I &= \begin{bmatrix} # 1 & 2 \\ # 2 & 1 # \end{bmatrix} - \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix} \lambda\\ # &= \begin{bmatrix} # 1 - \lambda & 2 \\ # 2 & 1 - \lambda # \end{bmatrix}. # \end{aligned}$$ # The determinant of the matrix is # $$\begin{aligned} # \begin{vmatrix} # 1 - \lambda & 2 \\ # 2 & 1 - \lambda # \end{vmatrix} &= (1 - \lambda) (1 - \lambda) - 2 \cdot 2 \\ # &= 1 - 2 \lambda + \lambda^2 - 4 \\ # &= \lambda^2 - 2 \lambda - 3. # \end{aligned}$$ # This result is sometimes referred to as the characteristic equation of the matrix, $A$. # + [markdown] slideshow={"slide_type": "skip"} # Setting the determinant equal to zero we can find the eigenvalues as # $$\begin{aligned} # & \\ # \lambda &= \frac{2 \pm \sqrt{4 - 4 \cdot 1 \cdot (-3)}}{2} \\ # &= 1 \pm 2 \\ # &= -1 \mathrm{~and~} 3 # \end{aligned}$$ # + [markdown] slideshow={"slide_type": "skip"} # The eigenvalues are used to determine the eigenvectors. The eigenvectors are found by going back to the equation $(A - \lambda I) \mathbf{v}_i = 0$ and solving for each vector. 
# A trick that works some of the time is to normalize each vector such that the first entry is 1 ($v_1 = 1$):
#
# $$
# \begin{bmatrix}
#     1 - \lambda & 2 \\
#     2 & 1 - \lambda
# \end{bmatrix} \begin{bmatrix} 1 \\ v_2 \end{bmatrix} = 0
# $$
#
# $$\begin{aligned}
#     1 - \lambda + 2 v_2 &= 0 \\
#     v_2 &= \frac{\lambda - 1}{2}
# \end{aligned}$$

# + [markdown] slideshow={"slide_type": "skip"}
# We can check this by substituting $v_2$ into the second row,
# $$\begin{aligned}
#     2 + \left(1 - \lambda\right) \frac{\lambda - 1}{2} & = 0\\
#     (\lambda - 1)^2 - 4 &=0
# \end{aligned}$$
#
# which by design is satisfied by our eigenvalues.  Another sometimes easier approach is to plug-in the eigenvalues to find the Null space of $A-\lambda I$ where the eigenvectors will be a basis for $N(A-\lambda I)$.  The eigenvectors are therefore
#
# $$\mathbf{v} = \begin{bmatrix}1 \\ -1 \end{bmatrix}, \begin{bmatrix}1 \\ 1 \end{bmatrix}.$$
#
# Note that these are linearly independent (and because $A^T = A$, also orthogonal)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Fundamentals

# + [markdown] slideshow={"slide_type": "slide"}
# ### Matrix-Vector Multiplication
#
# One of the most basic operations we can perform with matrices is to multiply them be a vector.  This matrix-vector product $A \mathbf{x} = \mathbf{b}$ is defined as
# $$
# b_i = \sum^n_{j=1} a_{ij} x_j \quad \text{where}\quad i = 1, \ldots, m
# $$

# + [markdown] slideshow={"slide_type": "subslide"}
# ### row picture
# In addition to index form, we can consider matrix-vector as a sequence of inner products (dot-products between the rows of $A$ and the vector $\mathbf{x}$).
# \begin{align} # \mathbf{b} &= A \mathbf{x}, \\ # &= # \begin{bmatrix} \mathbf{a}_1^T \mathbf{x} \\ \mathbf{a}_2^T \mathbf{x} \\ \vdots \\ \mathbf{a}_m^T \mathbf{x}\end{bmatrix} # \end{align} # where $\mathbf{a}_i^T$ is the $i$th **row** of $A$ # + [markdown] slideshow={"slide_type": "subslide"} # #### Operation Counts # This view is convenient for calculating the **Operation counts** required for to compute $A\mathbf{x}$. If $A\in\mathbb{C}^{m\times n}$ and $\mathbf{x}\in\mathbb{C}^n$. Then just counting the number of multiplications involved to compute $A\mathbf{x}$ is $O(??)$ # # + [markdown] slideshow={"slide_type": "subslide"} # ### Column picture # # An alternative (and entirely equivalent way) to write the matrix-vector product is as a linear combination of the columns of $A$ where each column's weighting is $x_j$. # # $$ # \begin{align} # \mathbf{b} &= A \mathbf{x}, \\ # &= # \begin{bmatrix} & & & \\ & & & \\ \mathbf{a}_1 & \mathbf{a}_2 & \cdots & \mathbf{a}_n \\ & & & \\ & & & \end{bmatrix} # \begin{bmatrix} x_1 \\ x_2 \\ \vdots \\ x_n \end{bmatrix}, \\ # &= x_1 \mathbf{a}_1 + x_2 \mathbf{a}_2 + \cdots + x_n \mathbf{a}_n. # \end{align} # $$ # # This view will be useful later when we are trying to interpret various types of matrices. # + [markdown] slideshow={"slide_type": "subslide"} # One important property of the matrix-vector product is that is a **linear** operation, also known as a **linear operator**. This means that the for any $\mathbf{x}, \mathbf{y} \in \mathbb{C}^n$ and any $c \in \mathbb{C}$ we know that # # 1. $A (\mathbf{x} + \mathbf{y}) = A\mathbf{x} + A\mathbf{y}$ # 1. $A\cdot (c\mathbf{x}) = c A \mathbf{x}$ # + [markdown] slideshow={"slide_type": "subslide"} # #### Example: Vandermonde Matrix # # In the case where we have $m$ data points and want $m - 1$ order polynomial interpolant the matrix $A$ is a square, $m \times m$, matrix as before. 
# Using the above interpretation the polynomial coefficients $p$ are the weights for each of the monomials that give exactly the $y$ values of the data.

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example: Numerical matrix-vector multiply
#
# Write a matrix-vector multiply function and check it with the appropriate `numpy` routine.  Also verify the linearity of the matrix-vector multiply.

# + slideshow={"slide_type": "subslide"}
# A x = b
# (m x n) (n x 1) = (m x 1)
def matrix_vector_product(A, x):
    """Compute the matrix-vector product b = A x with explicit loops.

    Uses the "row picture": entry b_i is the inner product of the
    i-th row of A with x.

    Parameters
    ----------
    A : numpy array of shape (m, n)
    x : numpy array of shape (n,)

    Returns
    -------
    b : numpy array of shape (m,)
    """
    n_rows, n_cols = A.shape
    b = numpy.zeros(n_rows)
    for i, row in enumerate(A):
        # accumulate the inner product of this row with x
        total = 0.0
        for j in range(n_cols):
            total += row[j] * x[j]
        b[i] = total
    return b


# check against numpy and demonstrate linearity on random data
m = 4
n = 3
A = numpy.random.uniform(size=(m,n))
x = numpy.random.uniform(size=(n))
y = numpy.random.uniform(size=(n))
c = numpy.random.uniform()

b = matrix_vector_product(A, x)

print(numpy.allclose(b, numpy.dot(A, x)))
print(numpy.allclose(matrix_vector_product(A, (x + y)),
                     matrix_vector_product(A, x) + matrix_vector_product(A, y)))
print(numpy.allclose(matrix_vector_product(A, c * x),
                     c * matrix_vector_product(A, x)))

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Matrix-Matrix Multiplication
#
# The matrix product with another matrix $A B = C$ is defined as
# $$
# c_{ij} = \sum^m_{k=1} a_{ik} b_{kj} = \mathbf{a}_i^T\mathbf{b}_j
# $$
#
# i.e.
each component of $C$ is a dot-product between the $i$th row of $A$ and the $j$th column of $B$ # + [markdown] slideshow={"slide_type": "subslide"} # As with matrix-vector multiplication, Matrix-matrix multiplication can be thought of multiple ways # # * $m\times p$ dot products # * $A$ multiplying the columns of $B$ # $$ # C = AB = \begin{bmatrix} # A\mathbf{b}_1 & A\mathbf{b}_2 & \ldots & A\mathbf{b}_p\\ # \end{bmatrix} # $$ # * Linear combinations of the rows of $B$ # $$ # C = AB = \begin{bmatrix} # \mathbf{a}_1^T B \\ \mathbf{a}_2^T B \\ \vdots \\ \mathbf{a}_m^T B\\ # \end{bmatrix} # $$ # + [markdown] slideshow={"slide_type": "subslide"} # ### Questions # * What are the dimensions of $A$ and $B$ so that the multiplication works? # * What are the Operations Counts for Matrix-Matrix Multiplication? # * Comment on the product $\mathbf{c}=(AB)\mathbf{x}$ vs. $\mathbf{d} = A(B\mathbf{x})$ # + [markdown] slideshow={"slide_type": "subslide"} # #### Example: Outer Product # # The product of two vectors $\mathbf{u} \in \mathbb{C}^m$ and $\mathbf{v} \in \mathbb{C}^n$ is a $m \times n$ matrix where the columns are the vector $u$ multiplied by the corresponding value of $v$: # $$ # \begin{align} # \mathbf{u} \mathbf{v}^T &= # \begin{bmatrix} u_1 \\ u_2 \\ \vdots \\ u_n \end{bmatrix} # \begin{bmatrix} v_1 & v_2 & \cdots & v_n \end{bmatrix}, \\ # & = \begin{bmatrix} v_1u_1 & \cdots & v_n u_1 \\ \vdots & & \vdots \\ v_1 u_m & \cdots & v_n u_m \end{bmatrix}. 
# \end{align} # $$ # + [markdown] slideshow={"slide_type": "subslide"} # It is useful to think of these as operations on the column vectors, and an equivalent way to express this relationship is # $$ # \begin{align} # \mathbf{u} \mathbf{v}^T &= # \begin{bmatrix} \\ \mathbf{u} \\ \\ \end{bmatrix} # \begin{bmatrix} v_1 & v_2 & \cdots & v_n \end{bmatrix}, \\ # &= # \begin{bmatrix} & & & \\ & & & \\ \mathbf{u}v_1 & \mathbf{u} v_2 & \cdots & \mathbf{u} v_n \\ & & & \\ & & & \end{bmatrix}, \\ # & = \begin{bmatrix} v_1u_1 & \cdots & v_n u_1 \\ \vdots & & \vdots \\ v_1 u_m & \cdots & v_n u_m \end{bmatrix}. # \end{align} # $$ # + [markdown] slideshow={"slide_type": "subslide"} # ### rank 1 updates # # We call any matrix of the form $\mathbf{u}\mathbf{v}^T$ a "rank one matrix" ( because its rank r=?). These sort of matrix operations are very common in numerical algorithms for orthogonalization, eigenvalues and the original page-rank algorithm for google. Again, the order of operations is critical. # + [markdown] slideshow={"slide_type": "fragment"} # Comment on the difference in values and operation counts between # # $$ # \mathbf{y} = (\mathbf{u}\mathbf{v}^T)\mathbf{x} # $$ # # and # $$ # \mathbf{y}' = \mathbf{u}(\mathbf{v}^T\mathbf{x}) # $$ # for $\mathbf{u}$, $\mathbf{v}$, $\mathbf{x}$, $\mathbf{y}$, $\mathbf{y}'\in\mathbb{R}^n$, # + [markdown] slideshow={"slide_type": "skip"} # #### Example: Upper Triangular Multiplication # # Consider the multiplication of a matrix $A \in \mathbb{C}^{m\times n}$ and the **upper-triangular** matrix $R$ defined as the $n \times n$ matrix with entries $r_{ij} = 1$ for $i \leq j$ and $r_{ij} = 0$ for $i > j$. The product can be written as # $$ # \begin{bmatrix} \\ \\ \mathbf{b}_1 & \cdots & \mathbf{b}_n \\ \\ \\ \end{bmatrix} = \begin{bmatrix} \\ \\ \mathbf{a}_1 & \cdots & \mathbf{a}_n \\ \\ \\ \end{bmatrix} \begin{bmatrix} 1 & \cdots & 1 \\ & \ddots & \vdots \\ & & 1 \end{bmatrix}. 
# $$
#
# The columns of $B$ are then
# $$
# \mathbf{b}_j = A \mathbf{r}_j = \sum^j_{k=1} \mathbf{a}_k
# $$
# so that $\mathbf{b}_j$ is the sum of the first $j$ columns of $A$.

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example: Write Matrix-Matrix Multiplication
#
# Write a function that computes matrix-matrix multiplication and demonstrate the following properties:
# 1. $A (B + C) = AB + AC$ (for square matrices)
# 1. $A (cB) = c AB$ where $c \in \mathbb{C}$
# 1. $AB \neq BA$ in general

# + hide_input=false slideshow={"slide_type": "subslide"}
def matrix_matrix_product(A, B):
    """Compute the matrix-matrix product C = A B with an explicit triple loop.

    Entry C[i, j] is the inner product of row i of A with column j of B.

    Parameters
    ----------
    A : numpy array of shape (m, n)
    B : numpy array of shape (n, p)

    Returns
    -------
    C : numpy array of shape (m, p)

    Raises
    ------
    ValueError
        If the inner dimensions do not agree (A.shape[1] != B.shape[0]).
        Without this check, extra rows of B would be silently ignored,
        producing a wrong product.
    """
    m, inner = A.shape
    inner_B, p = B.shape
    if inner != inner_B:
        raise ValueError(
            "incompatible shapes for matrix product: {} and {}".format(A.shape, B.shape))
    C = numpy.zeros((m, p))
    for i in range(m):
        for j in range(p):
            # inner product of row i of A with column j of B
            for k in range(inner):
                C[i, j] += A[i, k] * B[k, j]
    return C


m = 4
n = 4
p = 4
A = numpy.random.uniform(size=(m, n))
B = numpy.random.uniform(size=(n, p))
C = numpy.random.uniform(size=(m, p))
c = numpy.random.uniform()

print(numpy.allclose(matrix_matrix_product(A, B), numpy.dot(A, B)))
print(numpy.allclose(matrix_matrix_product(A, (B + C)),
                     matrix_matrix_product(A, B) + matrix_matrix_product(A, C)))
print(numpy.allclose(matrix_matrix_product(A, c * B), c*matrix_matrix_product(A, B)))
# in general A B != B A, so this comparison is expected to print False
print(numpy.allclose(matrix_matrix_product(A, B), matrix_matrix_product(B, A)))

# + [markdown] slideshow={"slide_type": "subslide"}
# #### NumPy Products
#
# NumPy and SciPy contain routines that are optimized to perform matrix-vector and matrix-matrix multiplication.  Given two `ndarray`s you can take their product by using the `dot` function.
# + slideshow={"slide_type": "subslide"} n = 10 m = 5 # Matrix vector with identity A = numpy.identity(n) x = numpy.random.random(n) print(numpy.allclose(x, numpy.dot(A, x))) print(x-A.dot(x)) print(A*x) # Matrix vector product A = numpy.random.random((m, n)) print(numpy.dot(A, x)) # Matrix matrix product B = numpy.random.random((n, m)) print(numpy.dot(A, B)) # + [markdown] slideshow={"slide_type": "slide"} # ### Range and Null-Space # # #### Range # - The **range** of a matrix $A \in \mathbb R^{m \times n}$ (similar to any function), denoted as $\text{range}(A)$, is the set of vectors that can be expressed as $A x$ for $x \in \mathbb R^n$. # - We can also then say that that $\text{range}(A)$ is the space **spanned** by the columns of $A$. In other words the columns of $A$ provide a basis for $\text{range}(A)$, also called the **column space** of the matrix $A$. # - $C(A)$ controls the **existence** of solutions to $A\mathbf{x}=\mathbf{b}$ # + [markdown] slideshow={"slide_type": "subslide"} # #### Null-Space # - Similarly the **null-space** of a matrix $A$, denoted $\text{null}(A)$ is the set of vectors $x$ that satisfy $A x = 0$. # - $N(A)$ controls the **uniqueness** of solutions to $A\mathbf{x}=\mathbf{b}$ # - A similar concept is the **rank** of the matrix $A$, denoted as $\text{rank}(A)$, is the dimension of the column space. A matrix $A$ is said to have **full-rank** if $\text{rank}(A) = \min(m, n)$. This property also implies that the matrix mapping is **one-to-one**. # + [markdown] slideshow={"slide_type": "slide"} # ### Inverse # # A **non-singular** or **invertible** matrix is characterized as a matrix with full-rank. This is related to why we know that the matrix is one-to-one, we can use it to transform a vector $x$ and using the inverse, denoted $A^{-1}$, we can map it back to the original matrix. The familiar definition of this is # \begin{align*} # A \mathbf{x} &= \mathbf{b}, \\ # A^{-1} A \mathbf{x} & = A^{-1} \mathbf{b}, \\ # x &=A^{-1} \mathbf{b}. 
# \end{align*} # Since $A$ has full rank, its columns form a basis for $\mathbb{R}^m$ and the vector $\mathbf{b}$ must be in the column space of $A$. # + [markdown] slideshow={"slide_type": "subslide"} # There are a number of important properties of a non-singular matrix A. Here we list them as the following equivalent statements # 1. $A$ has an inverse $A^{-1}$ # 1. $\text{rank}(A) = m$ # 1. $\text{range}(A) = \mathbb{C}^m$ # 1. $\text{null}(A) = {0}$ # 1. 0 is not an eigenvalue of $A$ # 1. $\text{det}(A) \neq 0$ # + [markdown] slideshow={"slide_type": "subslide"} # #### Example: Properties of invertible matrices # # Show that given an invertible matrix that the rest of the properties hold. Make sure to search the `numpy` packages for relevant functions. # + slideshow={"slide_type": "subslide"} m = 3 for n in range(100): A = numpy.random.uniform(size=(m, m)) if numpy.linalg.det(A) != 0: break print(numpy.dot(numpy.linalg.inv(A), A)) print(numpy.linalg.matrix_rank(A)) print("N(A)= {}".format(numpy.linalg.solve(A, numpy.zeros(m)))) print("Eigenvalues = {}".format(numpy.linalg.eigvals(A))) # + [markdown] slideshow={"slide_type": "slide"} # ### Orthogonal Vectors and Matrices # # Orthogonality is a very important concept in linear algebra that forms the basis of many of the modern methods used in numerical computations. # + [markdown] slideshow={"slide_type": "subslide"} # Two vectors are said to be *orthogonal* if their **inner-product** or **dot-product** defined as # $$ # < \mathbf{x}, \mathbf{y} > \equiv (\mathbf{x}, \mathbf{y}) \equiv \mathbf{x}^T\mathbf{y} \equiv \mathbf{x} \cdot \mathbf{y} = \sum^m_{i=1} x_i y_i = 0 # $$ # Here we have shown the various notations you may run into (the inner-product is in-fact a general term for a similar operation for mathematical objects such as functions). # + [markdown] slideshow={"slide_type": "subslide"} # If $\langle \mathbf{x},\mathbf{y} \rangle = 0$ then we say $\mathbf{x}$ and $\mathbf{y}$ are orthogonal. 
The reason we use this terminology is that the inner-product of two vectors can also be written in terms of the angle between them where # $$ # \cos \theta = \frac{\langle \mathbf{x}, \mathbf{y} \rangle}{||\mathbf{x}||_2~||\mathbf{y}||_2} # $$ # and $||\mathbf{x}||_2$ is the Euclidean ($\ell^2$) norm of the vector $\mathbf{x}$. # + [markdown] slideshow={"slide_type": "subslide"} # We can write this in terms of the inner-product as well as # $$ # ||\mathbf{x}||_2^2 = \langle \mathbf{x}, \mathbf{x} \rangle = \mathbf{x}^T\mathbf{x} = \sum^m_{i=1} |x_i|^2. # $$ # # $$ # ||\mathbf{x}||_2 = \sqrt{\langle \mathbf{x}, \mathbf{x} \rangle} # $$ # + [markdown] slideshow={"slide_type": "subslide"} # The generalization of the inner-product to complex spaces is defined as # $$ # \langle x, y \rangle = \sum^m_{i=1} x_i^* y_i # $$ # where $x_i^*$ is the complex-conjugate of the value $x_i$. # + [markdown] slideshow={"slide_type": "subslide"} # #### Orthonormality # # Taking this idea one step further we can say a set of vectors $\mathbf{x} \in X$ are orthogonal to $\mathbf{y} \in Y$ if $\forall \mathbf{x},\mathbf{y}$ $< \mathbf{x}, \mathbf{y} > = 0$. If $\forall \mathbf{x},\mathbf{y}$ $||\mathbf{x}|| = 1$ and $||\mathbf{y}|| = 1$ then they are also called orthonormal. Note that we dropped the 2 as a subscript to the notation for the norm of a vector. Later we will explore other ways to define a norm of a vector other than the Euclidean norm defined above. # + [markdown] slideshow={"slide_type": "subslide"} # Another concept that is related to orthogonality is linear-independence. A set of vectors $\mathbf{x} \in X$ are **linearly independent** if $\forall \mathbf{x} \in X$ that each $\mathbf{x}$ cannot be written as a linear combination of the other vectors in the set $X$. 
# # # # An equivalent statement is that given a set of $n$ vectors $\mathbf{x}_i$, the only set of scalars $c_i$ that satisfies # $$ # \sum_{i=1}^n c_i\mathbf{x}_i = \mathbf{0} # $$ # is if $c_i=0$ for all $i\in[1,n]$ # # + [markdown] slideshow={"slide_type": "subslide"} # This can be related directly through the idea of projection. If we have a set of vectors $\mathbf{x} \in X$ we can project another vector $\mathbf{v}$ onto the vectors in $X$ by using the inner-product. This is especially powerful if we have a set of **orthogonal** vectors $X$, which are said to **span** a space (or provide a **basis** for a space), s.t. any vector in the space spanned by $X$ can be expressed as a linear combination of the basis vectors $X$ # $$ # \mathbf{v} = \sum^n_{i=1} \, \langle \mathbf{v}, \mathbf{x}_i \rangle \, \mathbf{x}_i. # $$ # Note if $\mathbf{v} \in X$ that # $$ # \langle \mathbf{v}, \mathbf{x}_i \rangle = 0 \quad \forall \mathbf{x}_i \in X \setminus \mathbf{v}. # $$ # + [markdown] slideshow={"slide_type": "subslide"} # Looping back to matrices, the column space of a matrix is spanned by its linearly independent columns. Any vector $v$ in the column space can therefore be expressed via the equation above. A special class of matrices are called **unitary** matrices when complex-valued and **orthogonal** when purely real-valued if the columns of the matrix are orthonormal to each other. Importantly this implies that for a unitary matrix $Q$ we know the following # # 1. $Q^* = Q^{-1}$ # 1. $Q^*Q = I$ # # where $Q^*$ is called the **adjoint** of $Q$. The adjoint is defined as the transpose of the original matrix with the entries being the complex conjugate of each entry as the notation implies. 
# + [markdown] slideshow={"slide_type": "subslide"} # As an example if we have the matrix # $$ # \begin{aligned} # Q &= \begin{bmatrix} q_{11} & q_{12} \\ q_{21} & q_{22} \\ q_{31} & q_{32} \end{bmatrix} \quad \text{then} \\ # Q^* &= \begin{bmatrix} q^*_{11} & q^*_{21} & q^*_{31} \\ q^*_{12} & q^*_{22} & q^*_{32} \end{bmatrix} # \end{aligned} # $$ # The important part of being an unitary matrix is that the projection onto the column space of the matrix $Q$ preserves geometry in an Euclidean sense, i.e. preserves the Cartesian distance. # + [markdown] hide_input=true slideshow={"slide_type": "slide"} # ### Vector Norms # # Norms (and also measures) provide a means for measure the "size" or distance in a space. In general a norm is a function, denoted by $||\cdot||$, that maps $\mathbb{C}^m \rightarrow \mathbb{R}$. In other words we stick in a multi-valued object and get a single, real-valued number out the other end. All norms satisfy the properties: # # 1. $~~~~||\mathbf{x}|| \geq 0$ # 1. $~~~~||\mathbf{x}|| = 0$ only if $\mathbf{x} = \mathbf{0}$ # 1. $$||\mathbf{x} + \mathbf{y}||\leq ||\mathbf{x}|| + ||\mathbf{y}||$$ (triangle inequality) # 1. $~~~||c \mathbf{x}|| = |c| ~ ||\mathbf{x}||$ where $c \in \mathbb{C}$ # + [markdown] slideshow={"slide_type": "subslide"} # There are a number of relevant norms that we can define beyond the Euclidean norm, also know as the 2-norm or $\ell_2$ norm: # # 1. $\ell_1$ norm: # $$ # ||\mathbf{x}||_1 = \sum^m_{i=1} |x_i|, # $$ # 1. $\ell_2$ norm: # $$ # ||\mathbf{x}||_2 = \left( \sum^m_{i=1} |x_i|^2 \right)^{1/2}, # $$ # + [markdown] slideshow={"slide_type": "subslide"} # 3. $\ell_p$ norm: # $$ # ||\mathbf{x}||_p = \left( \sum^m_{i=1} |x_i|^p \right)^{1/p}, \quad \quad 1 \leq p < \infty, # $$ # 1. $\ell_\infty$ norm: # $$ # ||\mathbf{x}||_\infty = \max_{1\leq i \leq m} |x_i|, # $$ # 1. 
# weighted $\ell_p$ norm:
# $$
# ||\mathbf{x}||_{W_p} = \left( \sum^m_{i=1} |w_i x_i|^p \right)^{1/p}, \quad \quad 1 \leq p < \infty,
# $$
#
# These are also related to other norms denoted by capital letters ($L_2$ for instance).  In this case we use the lower-case notation to denote finite or discrete versions of the infinite dimensional counterparts.

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example: Comparisons Between Norms
#
# Compute the norms given some vector $\mathbf{x}$ and compare their values.  Verify the properties of the norm for one of the norms.

# + slideshow={"slide_type": "subslide"}
def pnorm(x, p):
    """Return the vector p-norm of x.

    Parameters
    ----------
    x : numpy array
        Vector whose norm is computed.
    p : float or numpy.inf
        Order of the norm.  For finite p the norm is
        ||x||_p = (sum_i |x_i|^p)^(1/p); for p = numpy.inf it is
        max_i |x_i|.

    Returns
    -------
    float
        The p-norm of x.
    """
    magnitudes = numpy.abs(x)
    if p == numpy.inf:
        return magnitudes.max()
    return numpy.sum(magnitudes**p)**(1.0 / p)


# + slideshow={"slide_type": "subslide"}
m = 10
p = 4
x = numpy.random.uniform(size=m)

ell_1 = pnorm(x, 1)
ell_2 = pnorm(x, 2)
ell_p = pnorm(x, p)
ell_infty = pnorm(x, numpy.inf)

print('x = {}'.format(x))
print()
print("L_1 = {}\nL_2 = {}\nL_{} = {}\nL_inf = {}".format(ell_1, ell_2, p, ell_p, ell_infty))

y = numpy.random.uniform(size=m)
print()
print("Properties of norms:")
print('y = {}\n'.format(y))
p = 2
print('||x+y||_{p} = {nxy}\n||x||_{p} + ||y||_{p} = {nxny}'.format(
    p=p,nxy=pnorm(x+y, p), nxny=pnorm(x, p) + pnorm(y, p)))
c = 0.1
print('||c x||_{} = {}'.format(p,pnorm(c * x, p)))
print(' c||x||_{} = {}'.format(p,c * pnorm(x, p)))

# + [markdown] slideshow={"slide_type": "slide"}
# ### Matrix Norms
#
# The most direct way to consider a matrix norm is those induced by a vector-norm.  Given a vector norm, we can define a matrix norm as the smallest number $C$ that satisfies the inequality
# $$
# ||A \mathbf{x}||_{m} \leq C ||\mathbf{x}||_{n}.
# $$ # or as the supremum of the ratios so that # $$ # C = \sup_{\mathbf{x}\in\mathbb{C}^n ~ \mathbf{x}\neq\mathbf{0}} \frac{||A \mathbf{x}||_{m}}{||\mathbf{x}||_n}. # $$ # + [markdown] slideshow={"slide_type": "subslide"} # Noting that $||A \mathbf{x}||$ lives in the column space and $||\mathbf{x}||$ on the domain we can think of the matrix norm as the "size" of the matrix that maps the domain to the range. Also noting that if $||\mathbf{x}||_n = 1$ we also satisfy the condition we can write the induced matrix norm as # $$ # ||A||_{(m,n)} = \sup_{\mathbf{x} \in \mathbb{C}^n ~ ||\mathbf{x}||_{n} = 1} ||A \mathbf{x}||_{m}. # $$ # # This definition has a **geometric interpretation**. The set of all $\mathbf{x}$ such that $||\mathbf{x}||_n = 1$ is the "unit sphere" in $\mathbb{C}^n$. So the induced matrix norm is the largest vector in the deformed "sphere" and measures how much the matrix distorts the unit sphere. # + [markdown] slideshow={"slide_type": "subslide"} # #### Example: Induced Matrix Norms # # Consider the matrix # $$ # A = \begin{bmatrix} 1 & 2 \\ 0 & 2 \end{bmatrix}. # $$ # Compute the induced-matrix norm of $A$ for the vector norms $\ell_2$ and $\ell_\infty$. # + [markdown] slideshow={"slide_type": "subslide"} # $\ell^2$: For both of the requested norms the unit-length vectors $[1, 0]$ and $[0, 1]$ can be used to give an idea of what the norm might be and provide a lower bound. # # $$ # ||A||_2 = \sup_{x \in \mathbb{R}^n} \left( ||A \cdot [1, 0]^T||_2, ||A \cdot [0, 1]^T||_2 \right ) # $$ # # computing each of the norms we have # # $$\begin{aligned} # \begin{bmatrix} 1 & 2 \\ 0 & 2 \end{bmatrix} \cdot \begin{bmatrix} 1 \\ 0 \end{bmatrix} &= \begin{bmatrix} 1 \\ 0 \end{bmatrix} \\ # \begin{bmatrix} 1 & 2 \\ 0 & 2 \end{bmatrix} \cdot \begin{bmatrix} 0 \\ 1 \end{bmatrix} &= \begin{bmatrix} 2 \\ 2 \end{bmatrix} # \end{aligned}$$ # # which translates into the norms $||A \cdot [1, 0]^T||_2 = 1$ and $||A \cdot [0, 1]^T||_2 = 2 \sqrt{2}$. 
This implies that the $\ell_2$ induced matrix norm of $A$ is at least $||A||_{2} = 2 \sqrt{2} \approx 2.828427125$. # + [markdown] slideshow={"slide_type": "skip"} # The exact value of $||A||_2$ can be computed using the spectral radius defined as # $$ # \rho(A) = \max_{i} |\lambda_i|, # $$ # where $\lambda_i$ are the eigenvalues of $A$. With this we can compute the $\ell_2$ norm of $A$ as # $$ # ||A||_2 = \sqrt{\rho(A^\ast A)} # $$ # # Computing the norm again here we find # $$ # A^\ast A = \begin{bmatrix} 1 & 0 \\ 2 & 2 \end{bmatrix} \begin{bmatrix} 1 & 2 \\ 0 & 2 \end{bmatrix} = \begin{bmatrix} 1 & 2 \\ 2 & 8 \end{bmatrix} # $$ # which has eigenvalues # $$ # \lambda = \frac{1}{2}\left(9 \pm \sqrt{65}\right ) # $$ # so $||A||_2 \approx 2.9208096$. # + [markdown] slideshow={"slide_type": "subslide"} # The actual induced 2-norm of a matrix can be derived using the Singular Value Decomposition (SVD) and is simply the largest singular value $\sigma_1$. # # **Proof**: # Given that every Matrix $A\in\mathbb{C}^{m\times n}$ can be factored into its SVD (see notebook 10.1): # # $$ # A = U\Sigma V^* # $$ # # where $U\in\mathbb{C}^{m\times m}$ and $V\in\mathbb{C}^{n\times n}$ are unitary matrices with the property $U^*U=I$ and $V^*V=I$ (of their respective sizes) and $\Sigma$ is a real diagonal matrix of singular values $\sigma_1 \geq\sigma_2\geq...\sigma_n\geq 0$. # + [markdown] slideshow={"slide_type": "subslide"} # Then the 2-norm squared of a square matrix is # $$ # ||A||^2_2 = \sup_{\mathbf{x} \in \mathbb{C}^n ~ ||\mathbf{x}||_2 = 1} ||A \mathbf{x}||_2^2 = \mathbf{x}^TA^*A\mathbf{x} # $$ # but $A^*A = V\Sigma^2V^*$ so # # \begin{align} # ||A \mathbf{x}||_2^2 &= \mathbf{x}^*V\Sigma^2V^*\mathbf{x} \\ # &= \mathbf{y}^*\Sigma^2\mathbf{y} \quad\mathrm{where}\quad \mathbf{y}=V^*\mathbf{x}\\ # &= \sum_{i=1}^n \sigma_i^2|y_i|^2\\ # &\leq \sigma_1^2\sum_{i=1}^n |y_i|^2 = \sigma_i^2||\mathbf{y}||_2\\ # \end{align} # # but if $||\mathbf{x}||_2 = 1$ (i.e. 
# is a unit vector), then so is $\mathbf{y}$ because unitary matrices don't change the length of vectors. So it follows that
# $$
# ||A||_2 = \sigma_1
# $$
#

# + slideshow={"slide_type": "subslide"}
A = numpy.array([[1, 2], [0, 2]])

# calculate the SVD(A): the induced 2-norm of A is its largest singular value
U, S, Vt = numpy.linalg.svd(A)
print('Singular_values = {}'.format(S))
print('||A||_2 = {}'.format(S.max()))
# cross-check against numpy's built-in induced 2-norm
print('||A||_2 = {}'.format(numpy.linalg.norm(A, ord=2)))

# more fun facts about the SVD
#print(U.T.dot(U))
#print(Vt.T.dot(Vt))
#print(A - numpy.dot(U,numpy.dot(numpy.diag(S),Vt)))

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Other useful norms of a Matrix
#
# The 2-norm of a matrix can be expensive to compute, however there are other norms that are equivalent that can be directly computed from the components of $A$.  For example
#
# * The induced 1-norm is simply max of the 1-norm of the **columns** of $A$
#
# $$
# ||A \mathbf{x}||_1 = || \sum^n_{j=1} x_j \mathbf{a}_j ||_1 \leq \sum^n_{j=1} |x_j| ||\mathbf{a}_j||_1 \leq \max_{1\leq j\leq n} ||\mathbf{a}_j||_1 ||\mathbf{x}||_1 = \max_{1\leq j\leq n} ||\mathbf{a}_j||_1
# $$
#
# * The induced $\infty$-norm is simply the max of the 1-norm of **rows** of $A$
#
# $$
# ||A \mathbf{x}||_\infty = \max_{1 \leq i \leq m} | \mathbf{a}^*_i \mathbf{x} | \leq \max_{1 \leq i \leq m} ||\mathbf{a}^*_i||_1
# $$
# because the largest unit vector on the unit sphere in the $\infty$ norm is a vector of 1's.
#

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example:
#
# $$
# A = \begin{bmatrix} 1 & 2 \\ 0 & 2 \end{bmatrix}.
# $$
#
# $$ ||A||_1 = 4, \quad ||A||_\infty = 3$$
#
# -
# Calculate the 1-norm of A: max over the column sums of |A|
normA_1 = numpy.max(numpy.sum(numpy.abs(A), axis=0))
print('||A||_1 = {}'.format(normA_1))
print('||A||_1 = {}'.format(numpy.linalg.norm(A, ord=1)))

# calculate the infinity norm of A: max over the row sums of |A|
normA_inf = numpy.max(numpy.sum(numpy.abs(A), axis=1))
print('||A||_inf = {}'.format(normA_inf))
print('||A||_inf = {}'.format(numpy.linalg.norm(A, ord=numpy.inf)))

# + [markdown] slideshow={"slide_type": "subslide"}
# One of the most useful ways to think about matrix norms is as a transformation of a unit-ball to an ellipse.  Depending on the norm in question, the norm will be some combination of the resulting ellipse.
# -

A = numpy.array([[1, 2], [0, 2]])

# + [markdown] slideshow={"slide_type": "subslide"}
# #### 2-Norm

# + hide_input=false slideshow={"slide_type": "-"}
# ============
#  2-norm
# Unit-ball
# NOTE(review): this cell calls draw_unit_vectors and reads head_width /
# head_length, all of which are defined in the 1-norm cell below -- the
# notebook relies on that later cell having been executed first.
fig = plt.figure()
fig.suptitle("2-Norm: $||A||_2 = ${:3.4f}".format(numpy.linalg.norm(A,ord=2)),fontsize=16)
fig.set_figwidth(fig.get_figwidth() * 2)
axes = fig.add_subplot(1, 2, 1, aspect='equal')
axes.add_artist(plt.Circle((0.0, 0.0), 1.0, edgecolor='r', facecolor='none'))
draw_unit_vectors(axes, numpy.eye(2))
axes.set_title("Unit Ball")
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-1.1, 1.1))
axes.grid(True)

# Image
# Compute some geometry: the image of the unit ball under A is an ellipse
# whose semi-axes are the singular values of A
u, s, v = numpy.linalg.svd(A)
theta = numpy.empty(A.shape[0])
ellipse_axes = numpy.empty(A.shape)
theta[0] = numpy.arccos(u[0][0]) / numpy.linalg.norm(u[0], ord=2)
theta[1] = theta[0] - numpy.pi / 2.0
for i in range(theta.shape[0]):
    ellipse_axes[0, i] = s[i] * numpy.cos(theta[i])
    ellipse_axes[1, i] = s[i] * numpy.sin(theta[i])

axes = fig.add_subplot(1, 2, 2, aspect='equal')
axes.add_artist(patches.Ellipse((0.0, 0.0), 2 * s[0], 2 * s[1], theta[0] * 180.0 / numpy.pi, edgecolor='r', facecolor='none'))
for i in range(A.shape[0]):
    axes.arrow(0.0, 0.0, ellipse_axes[0, i] - head_length * numpy.cos(theta[i]),
               ellipse_axes[1, i] - head_length * numpy.sin(theta[i]),
               head_width=head_width, color='k')
draw_unit_vectors(axes, A, head_width=0.2)
axes.set_title("Images Under A")
axes.set_xlim((-s[0] + 0.1, s[0] + 0.1))
axes.set_ylim((-s[0] + 0.1, s[0] + 0.1))
axes.grid(True)
plt.show()

# + [markdown] slideshow={"slide_type": "subslide"}
# #### 1-Norm

# + hide_input=true slideshow={"slide_type": "-"}
# Note: that this code is a bit fragile to angles that go beyond pi
# due to the use of arccos.
import matplotlib.patches as patches

def draw_unit_vectors(axes, A, head_width=0.1):
    # Draw the images of the unit vectors e_1 and e_2 under the matrix A
    # as arrows on the given axes.
    head_length = 1.5 * head_width
    image_e = numpy.empty(A.shape)
    angle = numpy.empty(A.shape[0])
    image_e[:, 0] = numpy.dot(A, numpy.array((1.0, 0.0)))
    image_e[:, 1] = numpy.dot(A, numpy.array((0.0, 1.0)))
    for i in range(A.shape[0]):
        angle[i] = numpy.arccos(image_e[0, i] / numpy.linalg.norm(image_e[:, i], ord=2))
        axes.arrow(0.0, 0.0, image_e[0, i] - head_length * numpy.cos(angle[i]),
                   image_e[1, i] - head_length * numpy.sin(angle[i]),
                   head_width=head_width, color='b', alpha=0.5)

head_width = 0.2
head_length = 1.5 * head_width

# ============
#  1-norm
# Unit-ball (the 1-norm unit ball is the diamond |x| + |y| = 1)
fig = plt.figure()
fig.set_figwidth(fig.get_figwidth() * 2)
fig.suptitle("1-Norm: $||A||_1 = {}$".format(numpy.linalg.norm(A,ord=1)), fontsize=16)
axes = fig.add_subplot(1, 2, 1, aspect='equal')
axes.plot((1.0, 0.0, -1.0, 0.0, 1.0), (0.0, 1.0, 0.0, -1.0, 0.0), 'r')
draw_unit_vectors(axes, numpy.eye(2))
axes.set_title("Unit Ball")
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-1.1, 1.1))
axes.grid(True)

# Image
axes = fig.add_subplot(1, 2, 2, aspect='equal')
axes.plot((1.0, 2.0, -1.0, -2.0, 1.0), (0.0, 2.0, 0.0, -2.0, 0.0), 'r')
draw_unit_vectors(axes, A, head_width=0.2)
axes.set_title("Images Under A")
axes.grid(True)
plt.show()

# + [markdown] slideshow={"slide_type": "subslide"}
# #### $\infty$-Norm

# + hide_input=true slideshow={"slide_type": "-"}
# ============
#  infty-norm
# Unit-ball (the infinity-norm unit ball is the square max(|x|, |y|) = 1)
fig = plt.figure()
fig.suptitle("$\infty$-Norm: $||A||_\infty = {}$".format(numpy.linalg.norm(A,ord=numpy.inf)),fontsize=16)
fig.set_figwidth(fig.get_figwidth() * 2)
axes = fig.add_subplot(1, 2, 1, aspect='equal')
axes.plot((1.0, -1.0, -1.0, 1.0, 1.0), (1.0, 1.0, -1.0, -1.0, 1.0), 'r')
draw_unit_vectors(axes, numpy.eye(2))
axes.set_title("Unit Ball")
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-1.1, 1.1))
axes.grid(True)

# Image
# Geometry - Corners are A * ((1, 1), (1, -1), (-1, 1), (-1, -1))
# Symmetry implies we only need two.  Here we just plot two
u = numpy.empty(A.shape)
u[:, 0] = numpy.dot(A, numpy.array((1.0, 1.0)))
u[:, 1] = numpy.dot(A, numpy.array((-1.0, 1.0)))
theta[0] = numpy.arccos(u[0, 0] / numpy.linalg.norm(u[:, 0], ord=2))
theta[1] = numpy.arccos(u[0, 1] / numpy.linalg.norm(u[:, 1], ord=2))

axes = fig.add_subplot(1, 2, 2, aspect='equal')
axes.plot((3, 1, -3, -1, 3), (2, 2, -2, -2, 2), 'r')
for i in range(A.shape[0]):
    axes.arrow(0.0, 0.0, u[0, i] - head_length * numpy.cos(theta[i]),
               u[1, i] - head_length * numpy.sin(theta[i]),
               head_width=head_width, color='k')
draw_unit_vectors(axes, A, head_width=0.2)
axes.set_title("Images Under A")
axes.set_xlim((-4.1, 4.1))
axes.set_ylim((-3.1, 3.1))
axes.grid(True)
plt.show()

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Cauchy-Schwarz and Hölder Inequalities
#
# Computing matrix norms where $p \neq 1$ or $\infty$ is more difficult unfortunately.  We have a couple of tools that can be useful however.
#
# - **Cauchy-Schwarz Inequality**: For the special case where $p=q=2$, for any vectors $\mathbf{x}$ and $\mathbf{y}$
# $$
# |\mathbf{x}^*\mathbf{y}| \leq ||\mathbf{x}||_2 ||\mathbf{y}||_2
# $$
# - **Hölder's Inequality**: Turns out this holds in general if given a $p$ and $q$ that satisfy $1/p + 1/q = 1$ with $1 \leq p, q \leq \infty$
#
# $$
# |\mathbf{x}^*\mathbf{y}| \leq ||\mathbf{x}||_p ||\mathbf{y}||_q.
# $$ # # **Note**: this is essentially what we used in the proof of the $\infty-$norm with $p=1$ and $q=\infty$ # + [markdown] slideshow={"slide_type": "subslide"} # #### General Matrix Norms (induced and non-induced) # # In general matrix-norms have the following properties whether they are induced from a vector-norm or not: # 1. $||A|| \geq 0$ and $||A|| = 0$ only if $A = 0$ # 1. $||A + B|| \leq ||A|| + ||B||$ (Triangle Inequality) # 1. $||c A|| = |c| ||A||$ # + [markdown] slideshow={"slide_type": "subslide"} # The most widely used matrix norm not induced by a vector norm is the **Frobenius norm** defined by # $$ # ||A||_F = \left( \sum^m_{i=1} \sum^n_{j=1} |A_{ij}|^2 \right)^{1/2}. # $$ # + [markdown] slideshow={"slide_type": "subslide"} # #### Invariance under unitary multiplication # # One important property of the matrix 2-norm (and Frobenius norm) is that multiplication by a unitary matrix does not change the product (kind of like multiplication by 1). In general for any $A \in \mathbb{C}^{m\times n}$ and unitary matrix $Q \in \mathbb{C}^{m \times m}$ we have # \begin{align*} # ||Q A||_2 &= ||A||_2 \\ ||Q A||_F &= ||A||_F. # \end{align*} # + [markdown] slideshow={"slide_type": "slide"} # <sup>1</sup><span id="footnoteRegression"> http://www.utstat.toronto.edu/~brunner/books/LinearModelsInStatistics.pdf</span>
10_LA_intro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="l67DSMuqgT-e" # # Language Classification # + [markdown] colab_type="text" id="D3Pr76NJgouv" # ## NLP to classify Languages # + [markdown] colab_type="text" id="WsZze7CR2SHl" # Our goal is to create a model (Tensorflow) that can classify the language (English, Spanish, etc.) of a given sentence. # + [markdown] colab_type="text" id="zAjv_ORf3RWn" # We will be mainly using Tensorflow framework along with Wikipedia, NumPy, Matplotlib, Pandas and Scikit-learn. # + [markdown] colab_type="text" id="Sstkqg1Q3A7y" # ## Platform # + [markdown] colab_type="text" id="qXv0Lnzp3HaG" # I am running this notebook on Google Colaboratory platform. # # Since it will be a simple model, so I am running this on CPU. # + [markdown] colab_type="text" id="ZOIrI1SL36YN" # # Importing Libraries # + colab={"base_uri": "https://localhost:8080/", "height": 292} colab_type="code" id="zqRh-cuHaTY4" outputId="be095445-73d1-4d8e-b8f8-a861a3546c74" # You may not have Wikipedia module. You can install it using this cell # !pip install wikipedia # + colab={} colab_type="code" id="9dzf5Y50ZzrN" # For files and directories import pickle import os # For data generation import wikipedia import pandas as pd # For basic operations import numpy as np # For image (confusion matrix) visualization import matplotlib.pyplot as plt # For model creation import tensorflow as tf from sklearn import utils, model_selection, preprocessing # For model evaluation from sklearn import metrics # + [markdown] colab_type="text" id="KP8MAYwm5Al2" # # Creating Dataset # + [markdown] colab_type="text" id="XfPlpfbq5HwO" # In this notebook, I will be classifying four languages, namely English, French, Spanish and Italian. 
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="Zx9K81oGbzwH" outputId="ba810e2f-f662-4520-c163-59f1606323ac"
# Get the codes of the languages
wikipedia.languages()

# + colab={} colab_type="code" id="cnAS0Iy3d8HN"
# Human-readable labels for the language codes used in this notebook.
# Any other Wikipedia code also works: it falls back to the native
# language name reported by wikipedia.languages().
LANGUAGE_NAMES = {'it': 'Italian', 'fr': 'French', 'en': 'English', 'es': 'Spanish'}


# This function generates sentences on random topics using the Wikipedia module in the given languages
def generate_data(languages_code, each_sample):
    """Build a labelled sentence dataset and save it to 'all_data.csv'.

    Parameters
    ----------
    languages_code : list of str
        Wikipedia language codes (see wikipedia.languages()).
    each_sample : int
        Fetch attempts per language. Pages raising DisambiguationError
        are skipped, so a language may yield slightly fewer samples.
    """
    X = []
    Y = []
    print('Generating data for the following languages: ', languages_code)
    for language in languages_code:
        # Set the language for all subsequent wikipedia calls
        wikipedia.set_lang(language)
        # Resolve the label once per language (network call), not per sample
        label = LANGUAGE_NAMES.get(language, wikipedia.languages()[language])
        # Generate the sentences
        for i in range(0, each_sample):
            try:
                X.append([wikipedia.summary(wikipedia.random(1), sentences=1)])
                Y.append(label)
            except wikipedia.exceptions.DisambiguationError:
                # The random page was ambiguous -- skip this sample so that
                # X and Y stay in sync
                pass
        print('Language ', wikipedia.languages()[language], ' (', language, ') completed.')
    X = np.array(X).reshape(-1, 1)
    Y = np.array(Y).reshape(-1, 1)
    X = np.append(X, Y, axis = 1)
    # Converting the data into a dataframe.
    # BUG FIX: the column was previously named 'Sentences', but every
    # downstream cell reads df['Sentence'] -- use the singular form
    # consistently so the notebook runs end to end.
    df = pd.DataFrame(X, columns = ['Sentence', 'Language'])
    # Saving this data into a '.csv' file in the current directory
    df.to_csv('all_data.csv', index = False)
    print('The data is generated.')


# + [markdown] colab_type="text" id="imeM178L6fcE"
# You may choose different and also many more languages as you wish to do.
#
# Just get the code of the language from above and change the 'languages_code' variable in the cell below.
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="9TbRmGIOnMtb" outputId="ef7a3146-a72e-4924-f927-9608127c1315" languages_code = ['it', 'fr', 'en', 'es'] generate_data(languages_code, 1000) # + [markdown] colab_type="text" id="gXAqRGOaYwwS" # # Dataset Pre-processing # + [markdown] colab_type="text" id="f0gHtxEXY8Vw" # ## Loading the dataset # + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="cSAvMePpkqpx" outputId="281d4866-f28e-48a1-af98-4ab6eb5f2943" # Load the created dataset df = pd.read_csv('all_data.csv', encoding='latin-1') # Print last 5 lines of dataset df.tail() # + [markdown] colab_type="text" id="iqXe2q5VizLs" # ## Sentence Pre-processing # + colab={"base_uri": "https://localhost:8080/", "height": 187} colab_type="code" id="pQ_oU6O9EBNC" outputId="49df8124-9e6a-461f-f0bf-16d6bf1a350e" # Converting all letters to lowercase and then removing unwanted strings df['Processed_Sentence'] = df['Sentence'].str.lower().replace('[^\w\s]','') df['Processed_Sentence'] = df['Processed_Sentence'].fillna('fillna') # Print few of the processed sentences print('Sample processed sentences: \n', df['Processed_Sentence'][::500]) # + [markdown] colab_type="text" id="85XJRQsQaPTe" # Now we will tokenize the words using 'Tokenizer' from Tensorflow module. # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="3sC44TxrYHYV" outputId="9f6f3f98-4fa1-4e6f-d352-ffc9375f826a" # Choose the top 5000 words from the vocabulary top_k = 5000 # Create a 'Toknizer' object tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words = top_k, oov_token = "<unk>") # Fit the tokenizer on processed sentences tokenizer.fit_on_texts(list(df['Processed_Sentence'])) vocab_size = len(tokenizer.word_index) + 1 print('Total vocab size: ', vocab_size) # + [markdown] colab_type="text" id="73pbpIjFa0f0" # Now, we will pad (post-padding) all the sentences till the given maximum length. 
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="ZbAGTdZEedpE" outputId="929e9dd7-f800-4fd3-8d7e-8d1026039203" # Choose the maximum length of sentence to keep max_length = 500 # Get all tokenized sentences all_sentences = tokenizer.texts_to_sequences(df['Processed_Sentence']) # Pad the tokenized sentences all_sentences = tf.keras.preprocessing.sequence.pad_sequences(all_sentences, padding='post', maxlen = max_length) print('Shape of sentences data: ', all_sentences.shape) # + [markdown] colab_type="text" id="KCPUZqN3i5UJ" # ## Labels Pre-processing # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="MBmFbfxDvHRI" outputId="782ff8c0-ac20-4434-8baf-f4911f4bb8be" # Get the labels from the dataset Y = df['Language'] print('Total dataset: ', len(Y)) # + [markdown] colab_type="text" id="EyE092_8b9u7" # Now we will encode our labels (English, Italian, ...) into numeric data (0, 1, ...) using 'LabelEncoder' from Scikit-learn module. # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="hFwfOt43zgDY" outputId="6fe3748f-8b5b-4537-e021-7d47e5b80a68" # Create encoder object encoder = preprocessing.LabelEncoder() # Fit the encoder on our labels encoder.fit(Y) # Get the encoded labels Y = encoder.transform(Y) print('Sample labels: \n', Y[::500]) # + [markdown] colab_type="text" id="5ZxP8B4Fdiv6" # Now, we will one-hot encode our labels using Tensorflow module. 
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="POvWoUEv8WbG" outputId="61020266-2791-4bef-beb5-1420098d6a1b" # Get the number of unique labels num_classes = len(np.unique(Y)) # Convert the labels into one-hot vector Y = tf.keras.utils.to_categorical(Y, num_classes = num_classes) print('Sample labels: \n', Y[::500]) print('\nShape of labels data: ', Y.shape) # + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="lHm38RnUr7w9" outputId="b6f64a69-90a0-4ef5-bcde-67630fa41e2a" # Check the labels mapping print('Labels mapping is as follows: \n') for i in range(num_classes): print(encoder.classes_[i], ' >> ', i) # + [markdown] colab_type="text" id="SCrVtRqVeA9c" # ## Splitting the dataset # + [markdown] colab_type="text" id="zP1Vt1hzeH5v" # We will split the dataset into training and testing set using Scikit-learn. # # Validation set will be created while training the model. # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="HzKnSYWvjHNU" outputId="262b1b7e-c698-48f8-f421-c497deaca67d" X_train, X_test, Y_train, Y_test = model_selection.train_test_split(all_sentences, Y, test_size = 0.1) print('X_train shape: ', X_train.shape) print('Y_train shape: ', Y_train.shape) print('X_test shape: ', X_test.shape) print('Y_test shape: ', Y_test.shape) # + [markdown] colab_type="text" id="jNQ1yIN6klda" # # Model # + [markdown] colab_type="text" id="HCeoFq-1fVdn" # ## Model Configurations # + [markdown] colab_type="text" id="EyTHVg8PfzxR" # Setting the hyper-parameters # + colab={} colab_type="code" id="Igy5fntCkj1X" # Feel free to change these parameters according to your system's configuration batch_size = 128 embedding_dim = 128 # Setting this to a higher value (considering the data) as we will be using Early-Stopping method max_epochs = 20 # The following parameters are same as used before vocab_size = vocab_size num_classes = num_classes # + colab={} colab_type="code" 
id="psPdG8LNn4GD"
# This is used to plot performance graphs of our models
def performance(histories):
    """Plot training vs. validation loss and accuracy curves.

    Parameters
    ----------
    histories : tf.keras.callbacks.History
        The object returned by model.fit(); its .history dict must
        contain 'loss', 'val_loss', 'accuracy' and 'val_accuracy'.
    """
    plt.figure(figsize = (15, 10))
    # Plot Loss
    plt.subplot(2,2,1)
    plt.title('Cross Entropy Loss')
    plt.plot(histories.history['loss'], color = 'blue', label = 'train')
    plt.plot(histories.history['val_loss'], color = 'red', label = 'test')
    # BUG FIX: labels were set on the curves but never displayed
    plt.legend()
    # Plot Accuracy
    plt.subplot(2,2,2)
    plt.title('Classification Accuracy')
    plt.plot(histories.history['accuracy'], color = 'blue', label = 'train')
    plt.plot(histories.history['val_accuracy'], color = 'red', label = 'test')
    plt.legend()
    # Show the plot
    plt.show()


# + [markdown] colab_type="text" id="xzyiVYBzgL8n"
# ## Create model

# + colab={"base_uri": "https://localhost:8080/", "height": 391} colab_type="code" id="48Bo0H3Yl3Os" outputId="c5962765-23b5-49b9-c7c4-a22078a72b05"
# Creating a model
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Embedding(input_dim = vocab_size, output_dim = embedding_dim, input_length = max_length))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(256, activation = 'relu'))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(256, activation = 'relu'))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(num_classes, activation = 'softmax'))

# Compiling the model
model.compile(optimizer = 'Adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])

# Summarize the model
model.summary()

# + [markdown] colab_type="text" id="Vf6WpY4hgTgh"
# ## Training of model

# + [markdown] colab_type="text" id="w2azB6ErgXXj"
# We will create 3 callbacks:
#
#
# 1. **Model Checkpoint**
#  - To save the best model.
# 2. **Early Stopping**
#  - To stop the training when validation accuracy stops improving.
# 3. **Reduce Learning rate on Plateau**
#  - To decrease the learning rate when validation loss stops decreasing.
# # # + colab={} colab_type="code" id="tkBwfs_VoURU" callbacks = [tf.keras.callbacks.ModelCheckpoint('model.h5', save_best_only = True), tf.keras.callbacks.EarlyStopping(monitor = 'val_accuracy', min_delta = 0.00001, patience = 5, verbose = 1, mode = 'auto'), tf.keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss', factor = 0.1, patience = 2, verbose = 1, mode = 'auto', min_lr = 0.000001)] # + [markdown] colab_type="text" id="DwsGg3GIghTr" # Start the training. # + colab={"base_uri": "https://localhost:8080/", "height": 620} colab_type="code" id="Z4DzpKEqnHd5" outputId="23664564-51ac-4b06-efe6-21497b903b2c" history = model.fit(np.array(X_train), np.array(Y_train), batch_size = batch_size, validation_split = 0.1, callbacks = callbacks, epochs = max_epochs) performance(history) # + [markdown] colab_type="text" id="QVck3U6okiNJ" # We have got very high training and validation accuracies. That's great. # # We also don't have any overfitting. The high validation accuracy compared to training accuracy in the starting epochs is due to Dropout layers. # # + [markdown] colab_type="text" id="lpr2fHA5grjL" # ## Testing of model # + [markdown] colab_type="text" id="ZNs0JrWUl1uv" # ### Evaluation # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="6fMZIorFoWK3" outputId="c27a0a66-fca9-4aae-da07-48462b0c7625" # Let us evaluate the model on test set model.evaluate(np.array(X_test), np.array(Y_test)) # + [markdown] colab_type="text" id="tv8S0yEpg8To" # Seems like we have a perfect model. # # Let us evaluate using other parameters too. 
# + [markdown] colab_type="text" id="L6C2Uyzel6Jj" # ### Classification report # + colab={"base_uri": "https://localhost:8080/", "height": 272} colab_type="code" id="QoPYVjeioWkq" outputId="bdc0fcc9-1ed5-46f8-f5f0-0fe0e60aa637" # Predict and Evaluate Y_predict = tf.keras.utils.to_categorical(np.argmax(model.predict(X_test), axis = 1), num_classes = num_classes).astype('int32') accuracy = np.sum(Y_predict == Y_test) / np.size(Y_predict) print("Test accuracy = {}".format(accuracy), '\n\n') # Classification report print(metrics.classification_report(Y_test, Y_predict, target_names = encoder.classes_, digits = 4)) # + [markdown] colab_type="text" id="2V_vUL7DhLLk" # Plotting a confusion matrix of evaluation. # + [markdown] colab_type="text" id="cCyfHlPYmAEN" # ### Confusion matrix # + colab={"base_uri": "https://localhost:8080/", "height": 465} colab_type="code" id="2dLK5fnioWqG" outputId="34224eee-44cf-47a8-d6a6-1f01a08374d9" # Plot confusion matrix cm = metrics.confusion_matrix(Y_predict.argmax(axis = 1), Y_test.argmax(axis = 1)) plt.figure(figsize = (6, 6)) plt.imshow(cm, cmap=plt.cm.Blues) plt.xlabel("Predicted labels") plt.ylabel("True labels") plt.xticks([], []) plt.yticks([], []) plt.title('Confusion matrix ') plt.colorbar() plt.show() print('Confusion matrix values: \n') cm # + [markdown] colab_type="text" id="89feRYDS0B-p" # # Saving the model # + colab={} colab_type="code" id="oGJbLZ4Q0b2V" # We will save the tokenizer so that we do not have to generate it again with open('tokenizer.pkl', 'wb') as handle: pickle.dump(tokenizer, handle, protocol = pickle.HIGHEST_PROTOCOL) # We will also save other necessary parameters all_param = [max_length, encoder.classes_] with open('all_param.pkl', 'wb') as p: pickle.dump(all_param, p, protocol = pickle.HIGHEST_PROTOCOL) # + colab={} colab_type="code" id="rmxQL2vY0Bc-" # We have already saved our model during the callbacks # Otherwise, we could also save it using this code model.save('model.h5') # + [markdown] 
colab_type="text" id="KWFXRecqzKcd" # # Predict new sentences # + [markdown] colab_type="text" id="nwmsRIRGhYuK" # Let us see how our model works on real worls new random examples. # + colab={} colab_type="code" id="K1P8LyGoaPUv" # Run this cell to avoid printing Tensorflow warning of tr-tracing same graph multiple times tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # + colab={} colab_type="code" id="pb7nnXuczHKn" # This function is used to predict our model on given sentences def predict_new(sentences): # Loading the text Tokenizer with open('tokenizer.pkl', 'rb') as handle: my_tokenizer = pickle.load(handle) # Loading other necessary parameters with open('all_param.pkl', 'rb') as p: my_all_param = pickle.load(p) my_max_length, my_classes = my_all_param # Loading the model my_model = tf.keras.models.load_model('model.h5') # Pre-processing the sentences my_df = pd.DataFrame(sentences, columns = ['Sentence']) my_df['Processed_Sentence'] = my_df['Sentence'].str.lower().replace('[^\w\s]','') my_df['Processed_Sentence'] = my_df['Processed_Sentence'].fillna('fillna') # Tokenizing and passing the sentences my_sentences = my_tokenizer.texts_to_sequences(my_df['Processed_Sentence']) my_sentences = tf.keras.preprocessing.sequence.pad_sequences(my_sentences, padding='post', maxlen = my_max_length) # Getting the predictions my_prediction = my_model.predict(my_sentences) # Getting the classes of predictions my_pred_class = my_classes[np.argmax(my_prediction, axis = 1)] return my_pred_class, my_prediction # + colab={"base_uri": "https://localhost:8080/", "height": 241} colab_type="code" id="YhFeoJlSzHV4" outputId="31c8a0a1-2c4e-4717-c557-8dcd13f616b4" # I have taken a text in Englis and converted it into other languages using Google Translator s = [['TensorFlow is a free and open-source software library for dataflow and differentiable programming across a range of tasks.'], # English ['TensorFlow è una libreria software 
gratuita e open source per il flusso di dati e la programmazione differenziabili in una vasta gamma di attività.'], # Italian ['TensorFlow es una biblioteca de software gratuita y de código abierto para el flujo de datos y la programación diferenciable en una variedad de tareas.'], # Spanish ['TensorFlow est une bibliothèque logicielle gratuite et open-source pour le flux de données et la programmation différenciable sur une gamme de tâches.'] # French ] # Get the predictions and classes s_class, s_pred = predict_new(s) # Print the predictions and classes for i in range(len(s)): print('The sentence \'', s[i], '\'\nis in \'', s_class[i], '\' language with ', np.max(s_pred[i])*100, '% confidence.\n') # + [markdown] colab_type="text" id="tySmfHkzjeyK" # Our model is working great and giving correct prediction with a very high confidence. # + [markdown] colab_type="text" id="mBEFLbYbjySx" # # Thank You
Language_Classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] deletable=true editable=true # # TSFRESH Robot Failure Example # This example show shows how to use [tsfresh](https://tsfresh.readthedocs.io/) to exctract useful features from multiple timeseries and use them to improve classification performance. # + deletable=true editable=true # %matplotlib inline import matplotlib.pylab as plt import seaborn as sns from tsfresh.examples.robot_execution_failures import download_robot_execution_failures, load_robot_execution_failures from tsfresh import extract_features, extract_relevant_features, select_features from tsfresh.utilities.dataframe_functions import impute from tsfresh.feature_extraction import ComprehensiveFCParameters from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report # - # We set the logger to Error level # This is not recommend for normal use as you can oversee important Warning messages import logging logging.basicConfig(level=logging.ERROR) # + [markdown] deletable=true editable=true # ## Load and visualize data # The data set documents 88 robot executions (`id` 1 - 88), which are a subset of the [Robot Execution Failures Data Set](https://archive.ics.uci.edu/ml/datasets/Robot+Execution+Failures). For the purpose of simplicity we are only differentiating between successfull and failed executions (`y`). # For each execution 15 force (F) and torque (T) samples are given, which were measured at regular time intervals for the spatial dimensions x, y, and z. Therefore each row of the data frame references a specific execution (`id`), a time index (`index`) and documents the respective measurements of 6 sensors (`F_x`, `F_y`, `F_z`, `T_x`, `T_y`, `T_z`). 
# + deletable=true editable=true download_robot_execution_failures() df, y = load_robot_execution_failures() df.head() # + deletable=true editable=true df[df.id == 3][['time', 'F_x', 'F_y', 'F_z', 'T_x', 'T_y', 'T_z']].plot(x='time', title='Success example (id 3)', figsize=(12, 6)); df[df.id == 20][['time', 'F_x', 'F_y', 'F_z', 'T_x', 'T_y', 'T_z']].plot(x='time', title='Failure example (id 20)', figsize=(12, 6)); # + [markdown] deletable=true editable=true # ## Extract Features # + deletable=true editable=true extraction_settings = ComprehensiveFCParameters() # + deletable=true editable=true X = extract_features(df, column_id='id', column_sort='time', default_fc_parameters=extraction_settings, impute_function= impute) # + deletable=true editable=true X.head() # + deletable=true editable=true X.info() # + deletable=true editable=true X_filtered = extract_relevant_features(df, y, column_id='id', column_sort='time', default_fc_parameters=extraction_settings) # + deletable=true editable=true X_filtered.head() # + deletable=true editable=true X_filtered.info() # + [markdown] deletable=true editable=true # ## Train and evaluate classifier # + deletable=true editable=true X_train, X_test, X_filtered_train, X_filtered_test, y_train, y_test = train_test_split(X, X_filtered, y, test_size=.4) # + deletable=true editable=true cl = DecisionTreeClassifier() cl.fit(X_train, y_train) print(classification_report(y_test, cl.predict(X_test))) # + deletable=true editable=true cl.n_features_ # + deletable=true editable=true cl2 = DecisionTreeClassifier() cl2.fit(X_filtered_train, y_train) print(classification_report(y_test, cl2.predict(X_filtered_test))) # + deletable=true editable=true cl2.n_features_ # + [markdown] deletable=true editable=true # Compared to using all features (`cl.n_features_`), using only the relevant features (`cl2.n_features_`) achieves better classification performance with less data. 
# + [markdown] deletable=true editable=true # # Extraction + filtering is the same as filtered extraction # + [markdown] deletable=true editable=true # Above, we performed two feature extractions runs. A filtered one and a non filtered one. However, the results of the filtered is equal to just extracting all features and then filtering them. # + deletable=true editable=true X_filtered_2 = select_features(X, y) # + deletable=true editable=true (X_filtered.columns == X_filtered_2.columns).all()
notebooks/robot_failure_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # こちらは[Making-it-rain](https://github.com/pablo-arantes/Making-it-rain)のノートブックを日本語化したものです。オリジナルのノートブックは以下のボタンから起動できます。 # # <a href="https://colab.research.google.com/github/pablo-arantes/making-it-rain/blob/main/Gromacs_inputs.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # この日本語ノートブックをColabで使うには以下のボタンを利用ください。 # # <a href="https://colab.research.google.com/github/magattaca/making-it-rain-jp/blob/main/Gromacs_inputs_JP.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="pj2BWZxUDbEE" # # **ようこそ!** # # OpenMMとGROMACS生体分子シミュレーションパッケージのインプットを用いて、分子動力学(MD)シミュレーションを行うためのJupyterノートブックです。このノートブックは論文" # ***Making it rain: Cloud-based molecular simulations for everyone***" ([リンク](https://doi.org/10.1021/acs.jcim.1c00998))のsupplementary materialです。このパイプラインを利用する前に論文を参照することをお勧めします。 # # このノートブックの主な目的は、クラウドコンピューティングの力を借りて、マイクロ秒単位のMDシミュレーションを安価に、かつ実現可能な方法で実行する方法をデモンストレーションすることです。 # # --- # # **このノートブックはMDシミュレーションの標準プロトコルではありません。** 単にシミュレーションプロトコルの各ステップを示しただけのシンプルなMDパイプラインです。 # # --- # **バグ** # - バグを見つけたらイシューを報告してください https://github.com/pablo-arantes/making-it-rain/issues # # **謝辞** # - 優れたオープンソースエンジンを開発されたOpenMMチームに感謝いたします。 # # - Making-it-rainは**<NAME>** ([@pablitoarantes](https://twitter.com/pablitoarantes))と**<NAME>** ([@mdpoleto](https://twitter.com/mdpoleto))、 **<NAME>** ([@ConradoPedebos](https://twitter.com/ConradoPedebos))、**<NAME>** ([@ligabue_braun](https://twitter.com/ligabue_braun))が開発しました。 # # - また、素晴らしいプラグイン[py3Dmol](https://3dmol.csb.pitt.edu/)は[David Koes](https://github.com/dkoes)による功績です。 # # - 関連するノートブックは右を参照してください: [Making-it-rain](https://github.com/pablo-arantes/making-it-rain) 
# + [markdown] id="hoyY6XonD1UX" # # **イントロダクション** # # 一般に、MDシミュレーションは、1)シミュレーションボックス上の全原子の原子座標セット、2)原子間の相互作用エネルギーを記述する力場パラメータセットに依存しています。 # # GROMACSの入力としては、以下が必要です。 # # * 原子座標のセットを含む .groファイル # * 系のトポロジーを含むそれぞれの .topファイル # # このノートブックでは、PDB 1AKI(ニワトリ卵白リゾチーム)のシミュレーションを行います。シミュレーションボックスを構築するために、GROMACSパッケージ(https://www.gromacs.org/)を使います。インプットファイルの例は[ここ](https://github.com/pablo-arantes/making-it-rain/tree/main/GROMACS_INPUTS)からダウンロードできます; # # ## --- # # # # # # # + [markdown] id="Lh96y6mGFY1D" # --- # --- # # **MD計算環境のセッティング** # # まず最初に、シミュレーションに必要なライブラリとパッケージをインストールする必要があります。インストールする主なパッケージは以下です。: # # 1. Anaconda (https://docs.conda.io/en/latest/miniconda.html) # 2. OpenMM (https://openmm.org/) # 3. PyTraj (https://amber-md.github.io/pytraj/latest/index.html) # 4. py3Dmol (https://pypi.org/project/py3Dmol/) # 5. Numpy (https://numpy.org/) # 6. Matplotlib (https://matplotlib.org/) # 7. AmberTools (https://ambermd.org/AmberTools.php) # + cellView="form" id="wH1oMiVUlxO_" #@title **依存関係のインストール** #@markdown しばらく時間がかかります。コーヒーでも飲んで一服してください ;-) # install dependencies # !pip -q install py3Dmol 2>&1 1>/dev/null # !pip install --upgrade MDAnalysis 2>&1 1>/dev/null # !pip install biopandas 2>&1 1>/dev/null # install conda # !wget -qnc https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh # !bash Miniconda3-latest-Linux-x86_64.sh -bfp /usr/local 2>&1 1>/dev/null # !rm Miniconda3-latest-Linux-x86_64.sh # !conda install -y -q -c conda-forge openmm=7.6 python=3.7 pdbfixer 2>&1 1>/dev/null # #!conda install -c conda-forge ambertools --yes 2>&1 1>/dev/null # !conda install -c ambermd pytraj --yes 2>&1 1>/dev/null #load dependencies import sys sys.path.append('/usr/local/lib/python3.7/site-packages/') from biopandas.pdb import PandasPdb import openmm as mm from openmm import * from openmm.app import * from openmm.unit import * import os import urllib.request import numpy as np import MDAnalysis as mda import py3Dmol from __future__ import print_function import 
pytraj as pt import platform import scipy.cluster.hierarchy from scipy.spatial.distance import squareform import scipy.stats as stats import matplotlib.pyplot as plt import pandas as pd from scipy.interpolate import griddata import seaborn as sb from statistics import mean, stdev from pytraj import matrix from matplotlib import colors from IPython.display import set_matplotlib_formats # #%matplotlib inline #set_matplotlib_formats('png') #plt.figure(figsize=(5,7)) # + [markdown] id="fDQnAKJLFxtt" # ## Google Driveを利用したシミュレーションデータの保存 # # Google Colabでは、ユーザーが計算ノードにデータを保持することはできません。しかし、Google Driveを利用して、シミュレーションファイルの読み書きや保存を行うことは可能です。そのため,以下のことをお勧めします: # # 1. 自分のGoogle Driveにフォルダを作成し、そこに必要な入力ファイルをコピーします。 # 2. 作成したディレクトリのパスをコピーします。以下のセルでパスを利用します。 # + cellView="form" id="Lm7Akepv_vl-" #@title ### **Google Driveのインポート** #@markdown "Run"ボタンを押してGoogle Driveをアクセス可能にしてください。 from google.colab import drive drive.flush_and_unmount() drive.mount('/content/drive', force_remount=True) # + cellView="form" id="lOKg9eH_ueRn" #@title **GPUノードが正しく割り当てられているかどうかチェックします** # gpu_info = !nvidia-smi gpu_info = '\n'.join(gpu_info) if gpu_info.find('failed') >= 0: print('Select the Runtime > "Change runtime type" menu to enable a GPU accelerator, ') print('and then re-execute this cell.') else: print(gpu_info) # + [markdown] id="BZwl66HTGI7v" # --- # # **必要なインプットファイルの読み込み** # # この時点で、すべてのライブラリと依存関係がインストールされ、必要なインプットファイルがすでにGoogle Driveのフォルダにあるはずです。 # # # 以下に、全てのインプットファイルの名前(**gro and top**)と、それらを含むGoogle Driveフォルダのパスを記入してください。 # # **注意:** OpenMMはGROMACSフォーマットのうち、**AMBER、CHARMM、とOPLS**力場が利用可能です。GROMACS力場は利用できません。 # + cellView="form" id="0sfrVUYeXhtp" import warnings warnings.filterwarnings('ignore') #@title **下に必要なインプットファイルを記入してください:* TOP_filename = 'topol.top' #@param {type:"string"} GRO_filename = '1AKI_solv_ions.gro' #@param {type:"string"} Google_Drive_Path = '/content/drive/MyDrive/GROMACS' #@param {type:"string"} workDir = Google_Drive_Path #@markdown Google 
Driveのパスに独自の力場フォルダがある場合は、「yes」を選択して、フォルダ名を入力してください(**このステップは必須ではありません**)。 Force_field_folder = 'No' #@param ["Yes", "No"] Foldername = '' #@param {type:"string"} if Force_field_folder == 'No': foldername = 'top' top_folder = os.path.join(workDir, str(foldername)) top_folder_check = os.path.exists(top_folder) if top_folder_check == False: # !npx degit https://github.com/pablo-arantes/Making-it-rain/ temp 2> /dev/null cp_command = "cp -r temp/top " + workDir original_stdout = sys.stdout # Save a reference to the original standard output with open('cp.sh', 'w') as f: sys.stdout = f # Change the standard output to the file we created. print(cp_command) sys.stdout = original_stdout # Reset the standard output to its original value # !chmod 700 cp.sh 2>&1 1>/dev/null # !bash cp.sh 2>&1 1>/dev/null # !rm -r temp cp.sh else: foldername = Foldername top_folder = os.path.join(workDir, str(foldername)) top = os.path.join(workDir, str(TOP_filename)) gro = os.path.join(workDir, str(GRO_filename)) universe = mda.Universe(gro) with mda.Writer(os.path.join(workDir, "SYS.pdb")) as pdb: pdb.write(universe) PDB_filename = 'SYS.pdb' pdb = os.path.join(workDir, str(PDB_filename)) pdb_gromacs = os.path.exists(pdb) top_gromacs = os.path.exists(top) gro_gromacs = os.path.exists(gro) top_folder_check = os.path.exists(top_folder) if pdb_gromacs == True and top_gromacs == True and gro_gromacs == True and top_folder_check == True: print("Files loaded succesfully! ;-)") else: print("ERROR! 
Check your input names and Google Drive path") # + [markdown] id="C8kKR7bpI86W" # ## シミュレーションボックスを眺めてみましょう: # + cellView="form" id="vmQ27nZLssjv" #@title **3D構造の表示** import ipywidgets from ipywidgets import interact, fixed import warnings warnings.filterwarnings('ignore') def show_pdb(show_box=True, show_sidechains=False, show_mainchain=False, color="None"): def mainchain(p, color="white", model=0): BB = ['C','O','N','CA'] p.addStyle({"model":model,'atom':BB}, {'stick':{'colorscheme':f"{color}Carbon",'radius':0.4}}) def box(p, model=0): p.addModelsAsFrames(pdb) p.addSurface(py3Dmol.SAS, {'opacity': 0.6, 'color':'white'}) #comment this line if you dont want to see the water box def sidechain(p, model=0): HP = ["ALA","GLY","VAL","ILE","LEU","PHE","MET","PRO","TRP","CYS","TYR"] BB = ['C','O','N'] p.addStyle({"model":model,'and':[{'resn':HP},{'atom':BB,'invert':True}]}, {'stick':{'colorscheme':"whiteCarbon",'radius':0.4}}) p.addStyle({"model":model,'and':[{'resn':"GLY"},{'atom':'CA'}]}, {'sphere':{'colorscheme':"whiteCarbon",'radius':0.4}}) p.addStyle({"model":model,'and':[{'resn':"PRO"},{'atom':['C','O'],'invert':True}]}, {'stick':{'colorscheme':"whiteCarbon",'radius':0.4}}) p.addStyle({"model":model,'and':[{'resn':HP,'invert':True},{'atom':BB,'invert':True}]}, {'stick':{'colorscheme':"whiteCarbon",'radius':0.4}}) p = py3Dmol.view(js='https://3dmol.org/build/3Dmol.js') p.addModel(open(pdb,'r').read(),'pdb') if color == "rainbow": p.setStyle({'cartoon': {'color':'spectrum'}}) else: p.setStyle({'cartoon':{}}) if show_sidechains: sidechain(p) if show_mainchain: mainchain(p) if show_box: box(p) p.zoomTo() return p.show() interact(show_pdb, show_box=ipywidgets.Checkbox(value=True), show_sidechains=ipywidgets.Checkbox(value=False), show_mainchain=ipywidgets.Checkbox(value=False), color=ipywidgets.Dropdown(options=['None', 'rainbow'], value='rainbow')) # + [markdown] id="n85MrAO7M7uQ" # --- # --- # # **シミュレーションボックスの平衡化** # # 
# 適切なMD平衡化プロトコルは、タンパク質の実験的なコンフォメーションを維持しながら、シミュレーションボックス全体で温度と圧力の両方を平衡化するように設計されています。さらに、溶媒がタンパク質の周りに馴染むようにし、適切な溶媒和層を形成します。
#
# 以下では、温度、圧力、シミュレーション時間などのMD平衡化パラメータを設定します。また、タンパク質の重原子をその場に拘束しておくための力定数(force constant)や、原子座標をトラジェクトリファイル(.dcd)に保存する頻度も定義します。
#
# 設定が終わったら、次の2つのセルを実行して系を平衡化することができます。

# + cellView="form" id="8x9Qp_dbr9HP"
#@title ### **MD平衡化プロトコルのパラメータ:**
# NOTE: the #@title / #@param / #@markdown comments in this cell are parsed by
# Google Colab to render the interactive form; do not change their format.

# remove whitespaces
# Base name for all equilibration output files (.dcd/.log/.rst/.pdb).
Jobname = '1aki_equil' #@param {type:"string"}

# Maximum number of energy-minimization iterations before the NPT run starts.
Minimization_steps = "1000" #@param ["1000", "5000", "10000", "20000", "50000", "100000"]
#@markdown シミュレーション時間(ナノ秒)と積分時間(フェムト秒):
# Equilibration length in nanoseconds; converted to ps and step counts in the
# next cell.
Time = "2" #@param {type:"string"}
stride_time_eq = Time
# Integrator timestep in femtoseconds.
Integration_timestep = "2" #@param ["0.5", "1", "2", "3", "4"]
dt_eq = Integration_timestep
#@markdown 温度(ケルビン)と圧力(バール)
Temperature = 298 #@param {type:"string"}
temperature_eq = Temperature
Pressure = 1 #@param {type:"string"}
pressure_eq = Pressure
#@markdown 位置拘束の力定数(kJ/mol):
# Harmonic positional-restraint force constant applied to non-solvent atoms
# during equilibration (kJ/mol); 0 disables the restraints.
Force_constant = 800 #@param {type:"slider", min:0, max:2000, step:100}
#@markdown トラジェクトリファイルを書き出す頻度(ピコ秒):
Write_the_trajectory = "10" #@param ["10", "100", "200", "500", "1000"]
write_the_trajectory_eq = Write_the_trajectory
#@markdown ログファイルを書き出す頻度(ピコ秒):
Write_the_log = "10" #@param ["10", "100", "200", "500", "1000"]
write_the_log_eq = Write_the_log
#@markdown ---

# + cellView="form" id="zoamR9iynphz"
#@title **平衡化MDシミュレーション(NPTアンサンブル)の実行**
#@markdown さあ、系を平衡化しましょう!
###########################################
import openmm as mm
from openmm import *
from openmm.app import *
from openmm.unit import *
import pytraj as pt

from sys import stdout, exit, stderr
import os, math, fnmatch

#############################################
# Defining MD simulation parameters
# Resolve all input/output paths inside the working directory. `workDir`,
# `GRO_filename`, `TOP_filename`, `PDB_filename` and the *_eq form variables
# are defined by earlier cells of this notebook.
jobname = os.path.join(workDir, Jobname)
coordinatefile = os.path.join(workDir, str(GRO_filename))
pdbfile = os.path.join(workDir, str(PDB_filename))
topologyfile = os.path.join(workDir, str(TOP_filename))

# Convert the user-facing form values (ns, fs, K, bar, ps) into openmm
# unit-bearing quantities.
time_ps = float(Time)*1000
simulation_time = float(time_ps)*picosecond  # in ps
dt = int(dt_eq)*femtosecond
temperature = float(temperature_eq)*kelvin
savcrd_freq = int(write_the_trajectory_eq)*picosecond
print_freq = int(write_the_log_eq)*picosecond
pressure = float(pressure_eq)*bar
restraint_fc = int(Force_constant) # kJ/mol

# Step counts: total steps, log-print interval and coordinate-save interval.
nsteps = int(simulation_time.value_in_unit(picosecond)/dt.value_in_unit(picosecond))
nprint = int(print_freq.value_in_unit(picosecond)/dt.value_in_unit(picosecond))
nsavcrd = int(savcrd_freq.value_in_unit(picosecond)/dt.value_in_unit(picosecond))

#############################################
# Defining functions to use below:
def backup_old_log(pattern, string):
    # Rename an existing file `string` to a GROMACS-style backup name
    # "\#<name>.<n>#", choosing n one above the highest backup number found
    # under the current directory.
    # NOTE(review): int(name[-2]) only reads one trailing digit, so backups
    # beyond #9 are miscounted; also this helper is never called in the
    # visible cells.
    result = []
    for root, dirs, files in os.walk("./"):
        for name in files:
            if fnmatch.fnmatch(name, pattern):
                try:
                    number = int(name[-2])
                    avail = isinstance(number, int)
                    #print(name,avail)
                    if avail == True:
                        result.append(number)
                except:
                    pass
    if len(result) > 0:
        maxnumber = max(result)
    else:
        maxnumber = 0
    backup_file = "\#" + string + "." + str(maxnumber + 1) + "#"
    os.system("mv " + string + " " + backup_file)
    return backup_file

def restraints(system, crd, fc, restraint_array):
    # Add harmonic positional restraints (k*periodicdistance(...)^2) on every
    # atom index in `restraint_array`, anchored at that atom's coordinates in
    # `crd`. Returns the modified System.
    # NOTE(review): boxlx/boxly/boxlz are computed but never used below.
    boxlx = system.getDefaultPeriodicBoxVectors()[0][0].value_in_unit(nanometers)
    boxly = system.getDefaultPeriodicBoxVectors()[1][1].value_in_unit(nanometers)
    boxlz = system.getDefaultPeriodicBoxVectors()[2][2].value_in_unit(nanometers)

    if fc > 0:
        # positional restraints for all heavy-atoms
        posresPROT = CustomExternalForce('k*periodicdistance(x, y, z, x0, y0, z0)^2;')
        posresPROT.addPerParticleParameter('k')
        posresPROT.addPerParticleParameter('x0')
        posresPROT.addPerParticleParameter('y0')
        posresPROT.addPerParticleParameter('z0')

        for atom1 in restraint_array:
            atom1 = int(atom1)
            xpos = crd.positions[atom1].value_in_unit(nanometers)[0]
            ypos = crd.positions[atom1].value_in_unit(nanometers)[1]
            zpos = crd.positions[atom1].value_in_unit(nanometers)[2]
            posresPROT.addParticle(atom1, [fc, xpos, ypos, zpos])
        system.addForce(posresPROT)
    return system

##############################################
#############################################
# Echo the run parameters before building the system.
print("\n> Simulation details:\n")
print("\tJob name = " + jobname)
print("\tCoordinate file = " + str(coordinatefile))
print("\tPDB file = " + str(pdbfile))
print("\tTopology file = " + str(topologyfile))

print("\n\tSimulation_time = " + str(simulation_time))
print("\tIntegration timestep = " + str(dt))
print("\tTotal number of steps = " + str(nsteps))

print("\n\tSave coordinates each " + str(savcrd_freq))
print("\tPrint in log file each " + str(print_freq))

print("\n\tTemperature = " + str(temperature))
print("\tPressure = " + str(pressure))

#############################################
print("\n> Setting the system:\n")
print("\t- Reading topology and structure file...")
# Load the GROMACS coordinate/topology pair; includeDir points at the force
# field folder prepared earlier so #include lines in the .top resolve.
gro = GromacsGroFile(coordinatefile)
top = GromacsTopFile(topologyfile, periodicBoxVectors=gro.getPeriodicBoxVectors(), includeDir= os.path.join(workDir, str(foldername)))

print("\t- Creating system and setting parameters...")
# PME electrostatics with a 1 nm cutoff; bonds to hydrogen are constrained so
# the chosen femtosecond timestep is stable; water is kept rigid.
nonbondedMethod = PME
nonbondedCutoff = 1.0*nanometers
ewaldErrorTolerance = 0.0005
constraints = HBonds
rigidWater = True
constraintTolerance = 0.000001
friction = 1.0
system = top.createSystem(nonbondedMethod=nonbondedMethod, nonbondedCutoff=nonbondedCutoff, constraints=constraints, rigidWater=rigidWater, ewaldErrorTolerance=ewaldErrorTolerance)

print("\t- Applying restraints. Force Constant = " + str(Force_constant) + "kJ/mol")
# Select everything that is not solvent or a common counter-ion and restrain
# it at its input position. `pdb` is the SYS.pdb path set in an earlier cell.
pt_system = pt.iterload(coordinatefile, pdb)
pt_topology = pt_system.top
restraint_array = pt.select_atoms('!(:SOL) & !(:NA) & !(:CL) & !(:MG) & !(:K)', pt_topology)
system = restraints(system, gro, restraint_fc, restraint_array)

print("\t- Setting barostat...")
system.addForce(MonteCarloBarostat(pressure, temperature))

print("\t- Setting integrator...")
integrator = LangevinIntegrator(temperature, friction, dt)
integrator.setConstraintTolerance(constraintTolerance)
simulation = Simulation(top.topology, system, integrator)
simulation.context.setPositions(gro.positions)
# if gro.boxVectors is not None:
#     simulation.context.setPeriodicBoxVectors(topologyfile, periodicBoxVectors=gro.getPeriodicBoxVectors())

print("\t- Energy minimization: " + str(Minimization_steps) + " steps")
simulation.minimizeEnergy(tolerance=10*kilojoule/mole, maxIterations=int(Minimization_steps))
print("\t-> Potential Energy = " + str(simulation.context.getState(getEnergy=True).getPotentialEnergy()))

# Draw initial velocities from a Maxwell-Boltzmann distribution at the target
# temperature.
print("\t- Setting initial velocities...")
simulation.context.setVelocitiesToTemperature(temperature)

#############################################
# Running Equilibration on NPT ensemble
dcd_file = jobname + ".dcd"
log_file = jobname + ".log"
rst_file = jobname + ".rst"
prv_rst_file = jobname + ".rst"
pdb_file = jobname + ".pdb"

# Creating a trajectory file and reporters
dcd = DCDReporter(dcd_file, nsavcrd)
firstdcdstep = (nsteps) + nsavcrd
# Re-open the DCD with an explicit first-step offset. NOTE(review): this
# relies on the private DCDReporter._dcd/_out attributes of openmm.
dcd._dcd = DCDFile(dcd._out, simulation.topology, simulation.integrator.getStepSize(), firstdcdstep, nsavcrd) # charmm doesn't like first step to be 0
simulation.reporters.append(dcd)
simulation.reporters.append(StateDataReporter(stdout, nprint, step=True, speed=True, progress=True, totalSteps=nsteps, remainingTime=True, separator='\t\t'))
simulation.reporters.append(StateDataReporter(log_file, nprint, step=True, kineticEnergy=True, potentialEnergy=True, totalEnergy=True, temperature=True, volume=True, speed=True))

print("\n> Simulating " + str(nsteps) + " steps...")
simulation.step(nsteps)

simulation.reporters.clear() # remove all reporters so the next iteration don't trigger them.

##################################
# Writing last frame information of stride
print("\n> Writing state file (" + str(rst_file) + ")...")
state = simulation.context.getState( getPositions=True, getVelocities=True )
with open(rst_file, 'w') as f:
    f.write(XmlSerializer.serialize(state))

last_frame = int(nsteps/nsavcrd)
print("> Writing coordinate file (" + str(pdb_file) + ", frame = " + str(last_frame) + ")...")
positions = simulation.context.getState(getPositions=True).getPositions()
PDBFile.writeFile(simulation.topology, positions, open(pdb_file, 'w'))

print("\n> Finished!\n")

# + [markdown] id="LXyL26HCO8Bu"
# ---
# ---
# # **MDシミュレーション本番の実行(Production)**
#
# 最後に、平衡化された系の座標を入力構造として、シミュレーション本番(Production simulation)そのものを進めます。
#
# ここでは、熱力学的に平衡化された系から本番のシミュレーションを開始することを保証するために、平衡化シミュレーションの最終フレームの原子の位置と速度を含む*.rst 状態ファイル*を使用することに注意してください。
#
# ここでもう一つの重要な情報は**Number_of_strides**と**Stride_Time**。このノートブックでは指定した*stride*数のシミュレーションを行うので、**simulation time = Number_of_strides*Stride_Time**となります。例えば、*Number_of_strides=10* と*Stride_Time=10 ns*と設定することで100nsシミュレーションできます。
#
# **重要:Productionシミュレーションの最後に、すべてのstrideを連結して完全なトラジェクトリファイルを作成し、可視化および分析することができます。**
#
# この方法の背景にあるアイデアは、Google ColabでGPUを使える断続的な時間(12h/24h)をうまく利用することです。

# + cellView="form" id="Z0JV6Zid50_o"
#@markdown ### **インプットファイルの名前を下に記入してください:**
# Outputs of the equilibration run that seed the production run below.
Equilibrated_PDB = '1aki_equil.pdb' #@param {type:"string"}
State_file = '1aki_equil.rst' #@param {type:"string"}

#@markdown ---
#@markdown ### **MD Prodcutionプロトコルのパラメータ:**
# NOTE: the #@param / #@markdown comments are Colab form markup; keep format.

# remove whitespaces
# Base name for the production output files (one set per stride).
Jobname = '1aki_prod' #@param {type:"string"}

#@markdown シミュレーション時間(ナノ秒)、stride数(整数)と積分時間(フェムト秒):
# Length of one stride in ns; total time = Stride_Time * Number_of_strides.
Stride_Time = "5" #@param {type:"string"}
stride_time_prod = Stride_Time
Number_of_strides = "1" #@param {type:"string"}
nstride = Number_of_strides
Integration_timestep = "2" #@param ["0.5", "1", "2", "3", "4"]
dt_prod = Integration_timestep
#@markdown 温度(ケルビン)と圧力(バール)
Temperature = 298 #@param {type:"string"}
temperature_prod = Temperature
Pressure = 1 #@param {type:"string"}
pressure_prod = Pressure
#@markdown トラジェクトリファイルを書き出す頻度(ピコ秒):
Write_the_trajectory = "10" #@param ["10", "100", "200", "500", "1000"]
write_the_trajectory_prod = Write_the_trajectory
#@markdown ログファイルを書き出す頻度(ピコ秒):
Write_the_log = "10" #@param ["10", "100", "200", "500", "1000"]
write_the_log_prod = Write_the_log
#@markdown ---

# + cellView="form" id="0QcjKSoqHHhi"
#@title **平衡化した後のMDシミュレーション本番(Production)(NPTアンサンブル)**
#
###########################################
import openmm as mm
from openmm import *
from openmm.app import *
from openmm.unit import *

from sys import stdout, exit, stderr
import os, math, fnmatch

#############################################
# Defining MD simulation parameters
# Production restarts from the equilibration state file: the topology and box
# come from the original .gro/.top, while positions and velocities are
# overwritten from the .rst inside the stride loop.
jobname = os.path.join(workDir, str(Jobname))
coordinatefile = os.path.join(workDir, str(GRO_filename))
pdbfile = os.path.join(workDir, Equilibrated_PDB)
topologyfile = os.path.join(workDir, str(TOP_filename))
equil_rst_file = os.path.join(workDir, State_file)

stride_time_ps = float(stride_time_prod)*1000
stride_time = float(stride_time_ps)*picosecond
nstride = int(Number_of_strides)
dt = int(dt_prod)*femtosecond
temperature = float(temperature_prod)*kelvin
savcrd_freq = int(write_the_trajectory_prod)*picosecond
print_freq = int(write_the_log_prod)*picosecond
pressure = float(pressure_prod)*bar

simulation_time = stride_time*nstride
# Steps per stride and the reporter intervals.
nsteps = int(stride_time.value_in_unit(picosecond)/dt.value_in_unit(picosecond))
nprint = int(print_freq.value_in_unit(picosecond)/dt.value_in_unit(picosecond))
nsavcrd = int(savcrd_freq.value_in_unit(picosecond)/dt.value_in_unit(picosecond))
firststride = 1 # must be integer

#############################################
# Defining functions to use below:
def backup_old_log(pattern, string):
    # Same GROMACS-style backup helper as in the equilibration cell; it is
    # duplicated so this cell can run standalone. Not called in the visible
    # code. NOTE(review): int(name[-2]) only handles single-digit backups.
    result = []
    for root, dirs, files in os.walk("./"):
        for name in files:
            if fnmatch.fnmatch(name, pattern):
                try:
                    number = int(name[-2])
                    avail = isinstance(number, int)
                    #print(name,avail)
                    if avail == True:
                        result.append(number)
                except:
                    pass
    if len(result) > 0:
        maxnumber = max(result)
    else:
        maxnumber = 0
    backup_file = "\#" + string + "." + str(maxnumber + 1) + "#"
    os.system("mv " + string + " " + backup_file)
    return backup_file

##############################################
#############################################
print("\n> Simulation details:\n")
print("\tJob name = " + jobname)
print("\tCoordinate file = " + str(coordinatefile))
print("\tPDB file = " + str(pdbfile))
print("\tTopology file = " + str(topologyfile))

print("\n\tSimulation_time = " + str(stride_time*nstride))
print("\tIntegration timestep = " + str(dt))
print("\tTotal number of steps = " + str(nsteps*nstride))
print("\tNumber of strides = " + str(nstride) + " (" + str(stride_time) + " in each stride)")

print("\n\tSave coordinates each " + str(savcrd_freq))
print("\tSave checkpoint each " + str(savcrd_freq))
print("\tPrint in log file each " + str(print_freq))

print("\n\tTemperature = " + str(temperature))
print("\tPressure = " + str(pressure))

#############################################
print("\n> Setting the system:\n")
print("\t- Reading topology and structure file...")
gro = GromacsGroFile(coordinatefile)
top = GromacsTopFile(topologyfile, periodicBoxVectors=gro.getPeriodicBoxVectors(), includeDir= os.path.join(workDir, str(foldername)))

print("\t- Creating system and setting parameters...")
# Same force-field options as the equilibration cell so the two ensembles are
# consistent.
nonbondedMethod = PME
nonbondedCutoff = 1.0*nanometers
ewaldErrorTolerance = 0.0005
constraints = HBonds
rigidWater = True
constraintTolerance = 0.000001
friction = 1.0
system = top.createSystem(nonbondedMethod=nonbondedMethod, nonbondedCutoff=nonbondedCutoff, constraints=constraints, rigidWater=rigidWater, ewaldErrorTolerance=ewaldErrorTolerance)

print("\t- Setting barostat...")
system.addForce(MonteCarloBarostat(pressure, temperature))

print("\t- Setting integrator...")
integrator = LangevinIntegrator(temperature, friction, dt)
integrator.setConstraintTolerance(constraintTolerance)
simulation = Simulation(top.topology, system, integrator)
simulation.context.setPositions(gro.positions)

#############################################
# Opening a loop of extension NSTRIDE to simulate the entire STRIDE_TIME*NSTRIDE
# Each stride writes its own .dcd/.log/.rst/.pdb set; a stride whose .rst
# already exists is treated as finished, which makes this loop restartable
# after a Colab disconnect.
for n in range(1, nstride + 1):
    print("\n\n>>> Simulating Stride #" + str(n) + " <<<")

    dcd_file = jobname + "_" + str(n) + ".dcd"
    log_file = jobname + "_" + str(n) + ".log"
    rst_file = jobname + "_" + str(n) + ".rst"
    prv_rst_file = jobname + "_" + str(n-1) + ".rst"
    pdb_file = jobname + "_" + str(n) + ".pdb"

    if os.path.exists(rst_file):
        print("> Stride #" + str(n) + " finished (" + rst_file + " present). Moving to next stride... <")
        continue

    if n == 1:
        # First stride: restore positions/velocities from the equilibration run.
        print("\n> Loading previous state from equilibration > " + equil_rst_file + " <")
        with open(equil_rst_file, 'r') as f:
            simulation.context.setState(XmlSerializer.deserialize(f.read()))
        currstep = int((n-1)*nsteps)
        currtime = currstep*dt.in_units_of(picosecond)
        simulation.currentStep = currstep
        simulation.context.setTime(currtime)
        print("> Current time: " + str(currtime) + " (Step = " + str(currstep) + ")")
    else:
        # Later strides: continue from the previous stride's restart file.
        print("> Loading previous state from > " + prv_rst_file + " <")
        with open(prv_rst_file, 'r') as f:
            simulation.context.setState(XmlSerializer.deserialize(f.read()))
        currstep = int((n-1)*nsteps)
        currtime = currstep*dt.in_units_of(picosecond)
        simulation.currentStep = currstep
        simulation.context.setTime(currtime)
        print("> Current time: " + str(currtime) + " (Step = " + str(currstep) + ")")

    dcd = DCDReporter(dcd_file, nsavcrd)
    firstdcdstep = (currstep) + nsavcrd
    # NOTE(review): uses the private DCDReporter._dcd/_out attributes to set
    # an explicit first-step offset.
    dcd._dcd = DCDFile(dcd._out, simulation.topology, simulation.integrator.getStepSize(), firstdcdstep, nsavcrd) # first step should not be 0
    simulation.reporters.append(dcd)
    simulation.reporters.append(StateDataReporter(stdout, nprint, step=True, speed=True, progress=True, totalSteps=(nsteps*nstride), remainingTime=True, separator='\t\t'))
    simulation.reporters.append(StateDataReporter(log_file, nprint, step=True, kineticEnergy=True, potentialEnergy=True, totalEnergy=True, temperature=True, volume=True, speed=True))

    print("\n> Simulating " + str(nsteps) + " steps... (Stride #" + str(n) + ")")
    simulation.step(nsteps)

    simulation.reporters.clear() # remove all reporters so the next iteration don't trigger them.

    ##################################
    # Writing last frame information of stride
    print("\n> Writing state file (" + str(rst_file) + ")...")
    state = simulation.context.getState( getPositions=True, getVelocities=True )
    with open(rst_file, 'w') as f:
        f.write(XmlSerializer.serialize(state))

    last_frame = int(nsteps/nsavcrd)
    print("> Writing coordinate file (" + str(pdb_file) + ", frame = " + str(last_frame) + ")...")
    positions = simulation.context.getState(getPositions=True).getPositions()
    PDBFile.writeFile(simulation.topology, positions, open(pdb_file, 'w'))

print("\n> Finished!\n")

# + cellView="form" id="DaLoQlJGf57o"
#@title **トラジェクトリを連結し整列する**
Skip = "1" #@param ["1", "2", "5", "10", "20", "50"]
stride_traj = Skip
Output_format = "xtc" #@param ["dcd", "pdb", "trr", "xtc"]
#@markdown **注意:** フレーム数が大きすぎるとColabのメモリ許容範囲を超えてしまいます。5000フレーム以下なら十分です。

# Bookkeeping reused by the analysis cells below (times in ps / ns).
simulation_time_analysis = stride_time_ps*nstride
simulation_ns = float(Stride_Time)*int(Number_of_strides)
number_frames = int(simulation_time_analysis)/int(Write_the_trajectory)
number_frames_analysis = number_frames/int(stride_traj)

traj_end = os.path.join(workDir, str(Jobname) + "_all.dcd")
traj_end2 = os.path.join(workDir, str(Jobname) + "_all." + str(Output_format))
template = os.path.join(workDir, str(Jobname) + '_%s.dcd')
flist = [template % str(i) for i in range(1, nstride + 1)]
#print(flist)

# Load every stride DCD, re-image molecules into the primary box, align on CA
# atoms, then write both the working .dcd and the requested output format.
trajlist = pt.load(flist, pdb, stride=stride_traj)
traj_image = trajlist.iterframe(autoimage=True, rmsfit=0)
traj_write = pt.write_traj(traj_end, traj_image, overwrite=True)
traj_load = pt.load(traj_end, pdb)
traj_align = pt.align(traj_load, mask="@CA", ref=0)
traj_write = pt.write_traj(traj_end, traj_align, overwrite=True, options='dcd')
traj_write = pt.write_traj(traj_end2, traj_align, overwrite=True, options=Output_format)
traj_load = pt.load(traj_end, pdb)
print(traj_load)

traj_end_check = os.path.exists(traj_end2)
if traj_end_check == True:
    print("Trajectory concatenated successfully! :-)")
else:
    print("ERROR: Check your inputs! ")

# + cellView="form" id="x_awbwdZnp6Q"
#@title **トラジェクトリの読み込み、可視化と確認**
#@markdown しばらく時間がかかります。コーヒーをもう一杯どうでしょう? :-)
#@markdown **注意:** もし系の原子数が100K以上なら、このステップをスキップしてください。原子数が多いとColabのメモリ制限を超えて止まる可能性があります。
import warnings
warnings.filterwarnings('ignore')
# !rm *.pdb 2> /dev/null

#py3dmol functions
class Atom(dict):
    # Minimal parser/formatter for a single PDB ATOM/HETATM record using the
    # fixed PDB column layout.
    def __init__(self, line):
        self["type"] = line[0:6].strip()
        self["idx"] = line[6:11].strip()
        self["name"] = line[12:16].strip()
        self["resname"] = line[17:20].strip()
        self["resid"] = int(int(line[22:26]))
        self["x"] = float(line[30:38])
        self["y"] = float(line[38:46])
        self["z"] = float(line[46:54])
        self["sym"] = line[76:78].strip()

    def __str__(self):
        # Re-emit the record in PDB fixed-column layout (80 chars + newline).
        line = list(" " * 80)
        line[0:6] = self["type"].ljust(6)
        line[6:11] = self["idx"].ljust(5)
        line[12:16] = self["name"].ljust(4)
        line[17:20] = self["resname"].ljust(3)
        line[22:26] = str(self["resid"]).ljust(4)
        line[30:38] = str(self["x"]).rjust(8)
        line[38:46] = str(self["y"]).rjust(8)
        line[46:54] = str(self["z"]).rjust(8)
        line[76:78] = self["sym"].rjust(2)
        return "".join(line) + "\n"

class Molecule(list):
    # A list of Atom records read from a PDB file-like object.
    def __init__(self, file):
        for line in file:
            if "ATOM" in line or "HETATM" in line:
                self.append(Atom(line))

    def __str__(self):
        outstr = ""
        for at in self:
            outstr += str(at)
        return outstr

# Subsample the trajectory to roughly 10 animation frames.
if number_frames_analysis > 10:
    stride_animation = number_frames_analysis/10
else:
    stride_animation = 1

u = mda.Universe(pdb, traj_end)

# Write out frames for animation
protein = u.select_atoms('not (resname SOL)')
i = 0
for ts in u.trajectory[0:len(u.trajectory):int(stride_animation)]:
    if i > -1:
        with mda.Writer('' + str(i) + '.pdb', protein.n_atoms) as W:
            W.write(protein)
    i = i + 1

# Load frames as molecules
molecules = []
for i in range(int(len(u.trajectory)/int(stride_animation))):
    with open('' + str(i) + '.pdb') as ifile:
        molecules.append(Molecule(ifile))

# Concatenate all frames into a single multi-MODEL PDB string for py3Dmol.
models = ""
for i in range(len(molecules)):
    models += "MODEL " + str(i) + "\n"
    for j,mol in enumerate(molecules[i]):
        models += str(mol)
    models += "ENDMDL\n"
#view.addModelsAsFrames(models)

# Animation
view = py3Dmol.view(width=800, height=600)
view.addModelsAsFrames(models)
for i, at in enumerate(molecules[0]):
    default = {"cartoon": {'color': 'spectrum'}}
    view.setStyle({'model': -1, 'serial': i+1}, at.get("pymol", default))
view.zoomTo()
view.animate({'loop': "forward"})
view.show()

# + [markdown] id="Emh0vU5UjgB6"
# ---
# ---
# # **解析**
#
# トラジェクトリを可視化することは非常に有効ですが、より定量的なデータも時には必要です。
#
# MDトラジェクトリの解析は多岐にわたるので、ここですべてを網羅するつもりはありません。しかし、MDanalysisやPyTraj を利用することで、簡単にシミュレーションを解析することができます。
#
# 以下では、シミュレーションの挙動を解明するのに光を当てるのに役立つコードスニペットの例をいくつか示します。

# + cellView="form" id="wBrBMF4Puyv6"
#@title **タンパク質CA原子のRMSDを計算**
#@markdown **出力ファイルの名前を下に記入してください:**
Output_name = 'rmsd_ca' #@param {type:"string"}

# RMSD of CA atoms against the first frame, plotted versus time in ns.
rmsd = pt.rmsd(traj_load, ref = 0, mask = "@CA")
time = len(rmsd)*int(Write_the_trajectory)/1000
time_array = np.arange(0,time,int(Write_the_trajectory)/1000)*int(stride_traj)

# Plotting:
ax = plt.plot(time_array, rmsd, alpha=0.6, color = 'blue', linewidth = 1.0)
plt.xlim(0, simulation_ns)
#plt.ylim(2, 6)

plt.xlabel("Time (ns)", fontsize = 14, fontweight = 'bold')
plt.ylabel("RMSD [$\AA$]", fontsize = 14, fontweight = 'bold')
plt.xticks(fontsize = 12)
plt.yticks(fontsize = 12)
plt.savefig(os.path.join(workDir, Output_name + ".png"), dpi=600, bbox_inches='tight')

raw_data=pd.DataFrame(rmsd)
raw_data.to_csv(os.path.join(workDir, Output_name + ".csv"))

# + cellView="form" id="ZHyMpikjuaLT"
#@title **RMSDを分布としてプロット**
#@markdown **出力ファイルの名前を下に記入してください:**
Output_name = 'rmsd_dist' #@param {type:"string"}

# Kernel-density estimate of the RMSD values computed in the previous cell.
ax = sb.kdeplot(rmsd, color="blue", shade=True, alpha=0.2, linewidth=0.5)
plt.xlabel('RMSD [$\AA$]', fontsize = 14, fontweight = 'bold')
plt.xticks(fontsize = 12)
plt.yticks([])
plt.ylabel('')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(False)
plt.savefig(os.path.join(workDir, Output_name + ".png"), dpi=600, bbox_inches='tight')

# + cellView="form" id="CvOFrXGXwXrV"
#@title **タンパク質CA原子の慣性半径(radius of gyration )を計算**
#@markdown **出力ファイルの名前を下に記入してください:**
Output_name = 'radius_gyration' #@param {type:"string"}

# Radius of gyration of CA atoms over time.
radgyr = pt.radgyr(traj_load, mask = "@CA")
time = len(rmsd)*int(Write_the_trajectory)/1000
time_array = np.arange(0,time,int(Write_the_trajectory)/1000)*int(stride_traj)

# Plotting:
plt.plot(time_array, radgyr, alpha=0.6, color = 'green', linewidth = 1.0)
plt.xlim(0, simulation_ns)
#plt.ylim(2, 6)

plt.xlabel("Time (ns)", fontsize = 14, fontweight = 'bold')
plt.ylabel("Radius of gyration ($\AA$)", fontsize = 14, fontweight = 'bold')
plt.xticks(fontsize = 12)
plt.yticks(fontsize = 12)
plt.savefig(os.path.join(workDir, Output_name + ".png"), dpi=600, bbox_inches='tight')

raw_data=pd.DataFrame(radgyr)
raw_data.to_csv(os.path.join(workDir, Output_name + ".csv"))

# + cellView="form" id="2Q7FKg8Fuxr9"
#@title **慣性半径を分布としてプロット**
#@markdown **出力ファイルの名前を下に記入してください:**
Output_name = 'radius_gyration_dist' #@param {type:"string"}

ax = sb.kdeplot(radgyr, color="green", shade=True, alpha=0.2, linewidth=0.5)
plt.xlabel('Radius of gyration ($\AA$)', fontsize = 14, fontweight = 'bold')
plt.xticks(fontsize = 12)
plt.yticks([])
plt.ylabel('')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(False)
plt.savefig(os.path.join(workDir, Output_name + ".png"), dpi=600, bbox_inches='tight')

# + cellView="form" id="p2Y0DgwTxLWc"
#@title **タンパク質CA原子のRMSFを計算**
#@markdown **出力ファイルの名前を下に記入してください:**
Output_name = 'rmsf_ca' #@param {type:"string"}

# Per-atom RMSF of CA atoms; `bfactor` is computed but only rmsf is plotted.
rmsf = pt.rmsf(traj_load, "@CA")
bfactor = pt.bfactors(traj_load, byres=True)

# Plotting:
plt.plot(rmsf[:,1], alpha=1.0, color = 'red', linewidth = 1.0)

plt.xlabel("Residue", fontsize = 14, fontweight = 'bold')
plt.ylabel("RMSF ($\AA$)", fontsize = 14, fontweight = 'bold')
plt.xticks(fontsize = 12)
plt.xlim(0, len(rmsf[:-1]))
#plt.xticks(np.arange(min(rmsf[:1]), max(rmsf[:1])))
plt.yticks(fontsize = 12)
plt.savefig(os.path.join(workDir, Output_name + ".png"), dpi=600, bbox_inches='tight')

raw_data=pd.DataFrame(rmsf)
raw_data.to_csv(os.path.join(workDir, Output_name + ".csv"))

# + cellView="form" id="JalicqqrTodW"
#@title **2D RMSD**
#@markdown **出力ファイルの名前を下に記入してください:**
Output_name = '2D_rmsd' #@param {type:"string"}

# Matching frame-index and time tick positions for the heatmap axes.
last_frame = len(time_array)
stride_ticks_f = (last_frame)/5
ticks_frame = np.arange(0,(len(time_array) + float(stride_ticks_f)), float(stride_ticks_f))
a = ticks_frame.astype(float)
stride_ticks_t = (simulation_ns)/5
tick_time = np.arange(0,(float(simulation_ns) + float(stride_ticks_t)), float(stride_ticks_t))
b = tick_time.astype(float)

# Pairwise CA RMSD between every pair of analysed frames.
mat1 = pt.pairwise_rmsd(traj_load, mask="@CA", frame_indices=range(int(number_frames_analysis)))

ax = plt.imshow(mat1, cmap = 'PRGn', origin='lower', interpolation = 'bicubic')
plt.title('2D RMSD')
plt.xlabel('Time (ns)', fontsize = 14, fontweight = 'bold')
plt.ylabel('Time (ns)', fontsize = 14, fontweight = 'bold')
# plt.xticks(fontsize = 12)
# plt.yticks(fontsize = 12)
plt.xticks(a, b.round(decimals=3), fontsize = 12)
plt.yticks(a, b.round(decimals=3), fontsize = 12)
# plt.xlim(0, a[-1])
# plt.ylim(0, a[-1])
cbar1 = plt.colorbar()
cbar1.set_label("RMSD ($\AA$)", fontsize = 14, fontweight = 'bold')
plt.savefig(os.path.join(workDir, Output_name + ".png"), dpi=600, bbox_inches='tight')

raw_data=pd.DataFrame(mat1)
raw_data.to_csv(os.path.join(workDir, Output_name + ".csv"))

# + cellView="form" id="_mgVSbBshWFV"
#@title **主成分分析(PCA)の固有ベクトルを計算**

# Project the CA trajectory onto its first two principal components.
data = pt.pca(traj_load, fit=True, ref=0, mask='@CA', n_vecs=2)
#print('projection values of each frame to first mode = {} \n'.format(data[0][0]))
#print('projection values of each frame to second mode = {} \n'.format(data[0][1]))
#print('eigvenvalues of first two modes', data[1][0])
#print("")
#print('eigvenvectors of first two modes: \n', data[1][1])

last_frame = len(time_array)

stride_ticks_f = (last_frame)/5
ticks_frame = np.arange(0,(len(time_array) + float(stride_ticks_f)), float(stride_ticks_f))
a = ticks_frame.astype(float)
a2 = a.tolist()
stride_ticks_t = (simulation_ns)/5
tick_time = np.arange(0,(float(simulation_ns) + float(stride_ticks_t)), float(stride_ticks_t))
b = tick_time.astype(float)

#@markdown **出力ファイルの名前を下に記入してください:**
Output_name = 'PCA' #@param {type:"string"}
Output_PC1 = 'PC1' #@param {type:"string"}
Output_PC2 = 'PC2' #@param {type:"string"}

# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'  # high resolution
projection_data = data[0]
plt.title(r'PCA of C-$\alpha$')
PC1 = data[0][0]
PC2 = data[0][1]

# Colour the scatter by frame index so time progression is visible.
a = plt.scatter(PC1,PC2, c=range(int(number_frames_analysis)), cmap='Greens', marker='o',s=8, alpha=1)
plt.clim(0, last_frame)
plt.xlabel('PC1', fontsize = 14, fontweight = 'bold')
plt.ylabel('PC2', fontsize = 14, fontweight = 'bold')
plt.xticks(fontsize = 12)
plt.yticks(fontsize = 12)
# N = len(number_frames)
# x2 = np.arange(N)
cbar1 = plt.colorbar(a, orientation="vertical")
cbar1.set_label('Time(ns)', fontsize = 14, fontweight = 'bold')
cbar1.set_ticks(a2)
cbar1.set_ticklabels(b.round(decimals=3))
plt.savefig(os.path.join(workDir, Output_name + ".png"), dpi=600, bbox_inches='tight')

pc1=pd.DataFrame(PC1)
pc1.to_csv(os.path.join(workDir, Output_PC1 + ".csv"))
pc2=pd.DataFrame(PC2)
pc2.to_csv(os.path.join(workDir, Output_PC2 + ".csv"))

# + cellView="form" id="yce9RfNtpl-J"
#@title **主成分1(PC1)と主成分2(PC2)を分布としてプロット**
Output_name = 'PCA_dist' #@param {type:"string"}

fig = plt.figure(figsize=(9,5))

plt.subplot(1, 2, 1)
ax = sb.kdeplot(PC1, color="green", shade=True, alpha=0.2, linewidth=0.5)
plt.xlabel('PC1', fontsize = 14, fontweight = 'bold')
plt.xticks(fontsize = 12)
plt.yticks([])
plt.ylabel('')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(False)

plt.subplot(1, 2, 2)
ax2 = sb.kdeplot(PC2, color="purple", shade=True, alpha=0.2, linewidth=0.5)
plt.xlabel('PC2', fontsize = 14, fontweight = 'bold')
plt.xticks(fontsize = 12)
plt.yticks([])
plt.ylabel('')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['bottom'].set_visible(True)
ax2.spines['left'].set_visible(False)
plt.savefig(os.path.join(workDir, Output_name + ".png"), dpi=600, bbox_inches='tight')

# + cellView="form" id="pTDb7CEfkLq1"
#@title **Pearson's Cross Correlation (CC)**
#@markdown **出力ファイルの名前を下に記入してください:**
Output_name = 'cross_correlation' #@param {type:"string"}

# Dynamic cross-correlation of CA motions after aligning the trajectory on CA
# atoms.
# NOTE(review): `matrix` looks like pytraj's `matrix` submodule, but it is not
# imported in any visible cell -- confirm an earlier cell provides it (e.g.
# `from pytraj import matrix`) before running.
traj_align = pt.align(traj_load, mask='@CA', ref=0)

mat_cc = matrix.correl(traj_align, '@CA')

ax = plt.imshow(mat_cc, cmap = 'PiYG_r', interpolation = 'bicubic', vmin = -1, vmax = 1, origin='lower')

plt.xlabel('Residues', fontsize = 14, fontweight = 'bold')
plt.ylabel('Residues', fontsize = 14, fontweight = 'bold')
plt.xticks(fontsize = 12)
plt.yticks(fontsize = 12)
cbar1 = plt.colorbar()
cbar1.set_label('$CC_ij$', fontsize = 14, fontweight = 'bold')
plt.savefig(os.path.join(workDir, Output_name + ".png"), dpi=600, bbox_inches='tight')

raw_data=pd.DataFrame(mat_cc)
raw_data.to_csv(os.path.join(workDir, Output_name + ".csv"))
Gromacs_inputs_JP.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: cafo
#     language: python
#     name: conda-env-cafo-py
# ---

# This notebook creates two subsets of NAIP tiles from the list of _all_ NAIP tiles:
# - A list of the most recent 100cm resolution imagery per state
# - A list of the most recent imagery per state
#
# It also creates per-state splits for each of the above subsets.

# Copyright (c) Microsoft Corporation. All rights reserved
# Licensed under the MIT License.

from collections import defaultdict

# Read the flat list of NAIP blob paths, keep only GeoTIFF tiles and rebuild
# the full download URL for each.
with open("../data/naip_blob_list.txt") as f:
    lines = f.read().strip().split()

urls = [
    "https://naipblobs.blob.core.windows.net/naip/" + line
    for line in lines
    if line.endswith(".tif")
]

# Index every tile in a single pass. URL layout after split("/"):
# [5] = state code, [6] = year, [7] = "<state>_<resolution>_..." folder name.
# (Previously url.split("/") was recomputed up to three times per URL, and the
# subset/per-state loops below rescanned the whole URL list once per pair.)
state_year_sets = defaultdict(set)        # state -> {years with imagery}
state_resolution_sets = defaultdict(set)  # state -> {(year, resolution)}
state_year_resolutions = {}               # (state, year) -> resolution
state_year_urls = defaultdict(list)       # (state, year) -> [urls], input order
url_pairs = []                            # (state, year) per url, aligned with `urls`
unique_resolutions = set()
for url in urls:
    parts = url.split("/")
    state_code = parts[5]
    year = int(parts[6])
    resolution = parts[7].split("_")[1].strip("0")
    unique_resolutions.add(resolution)
    state_year_sets[state_code].add(year)
    state_resolution_sets[state_code].add((year, resolution))
    state_year_resolutions[(state_code, year)] = resolution
    state_year_urls[(state_code, year)].append(url)
    url_pairs.append((state_code, year))
unique_resolutions

# ## Most recent 100cm resolution imagery

# For each state, walk backwards from the newest year until we find one whose
# imagery is not 50cm/60cm.
# NOTE(review): if *every* year for a state is 50/60cm, the loop falls through
# with i == 0 and the oldest (still sub-100cm) year is recorded -- this quirk
# of the original logic is preserved here.
most_recent_100cm_state_year_pairs = set()
for state, years in state_year_sets.items():
    years = sorted(years)
    for i in range(len(years) - 1, -1, -1):
        if state_year_resolutions[(state, years[i])] not in ("60cm", "50cm"):
            break
    most_recent_100cm_state_year_pairs.add((state, years[i]))

# Combined list, preserving the original URL order.
filtered_urls = [
    url
    for url, pair in zip(urls, url_pairs)
    if pair in most_recent_100cm_state_year_pairs
]

len(filtered_urls)

with open("../data/naip_most_recent_100cm.csv", "w") as f:
    f.write("image_fn\n")
    f.write("\n".join(filtered_urls))

# Per-state splits, written from the prebuilt (state, year) -> urls index.
for state, year in most_recent_100cm_state_year_pairs:
    with open("../data/naip_most_recent_100cm_by_state/%s_%d.csv" % (state, year), "w") as f:
        f.write("image_fn\n")
        f.write("\n".join(state_year_urls[(state, year)]))

# ## Most recent imagery

# For each state, simply take the newest year regardless of resolution.
most_recent_state_year_pairs = {
    (state, max(years)) for state, years in state_year_sets.items()
}

filtered_urls = [
    url
    for url, pair in zip(urls, url_pairs)
    if pair in most_recent_state_year_pairs
]

len(filtered_urls)

with open("../data/naip_most_recent.csv", "w") as f:
    f.write("image_fn\n")
    f.write("\n".join(filtered_urls))

for state, year in most_recent_state_year_pairs:
    with open("../data/naip_most_recent_by_state/%s_%d.csv" % (state, year), "w") as f:
        f.write("image_fn\n")
        f.write("\n".join(state_year_urls[(state, year)]))
notebooks/Data preparation - Parse NAIP file list.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:ada] *
#     language: python
#     name: conda-env-ada-py
# ---

import pandas as pd

# Both dumps share the schema (email, movie, date, rating); in the course
# dump the email and movie columns are hashed, in the IMDB dump they are not.
RATING_COLUMNS = ['email', 'movie', 'date', 'rating']
hashed_ratings = pd.read_csv('anon_data/com402-1.csv', header=None, names=RATING_COLUMNS)
clear_ratings = pd.read_csv('anon_data/imdb-1.csv', header=None, names=RATING_COLUMNS)

# +
# Join on the quasi-identifiers (date, rating). Suffix "_x" = clear IMDB
# columns, suffix "_y" = hashed course columns.
merged = clear_ratings.merge(hashed_ratings, on=['date', 'rating'], how='inner')
merged.head()

# +
# The hashed email that co-occurs with the known clear-text address.
candidate_hashes = merged.loc[merged['email_x'] == '<EMAIL>', 'email_y']
hashed_trump_email = candidate_hashes.iloc[0]
hashed_trump_email
# -

# For every hashed movie id, keep the clear-text title it is paired with
# most often across the join.
pair_counts = merged.groupby(['movie_y', 'movie_x']).count()
pair_counts = pair_counts.sort_values('email_x', ascending=False)
movies_hashes = pair_counts.groupby(level=0).head(1).reset_index()

hashed_movies_watched_by_donald = hashed_ratings[hashed_ratings['email'] == hashed_trump_email]

# +
# Translate his hashed movie ids back to clear-text titles.
resolved = hashed_movies_watched_by_donald.merge(
    movies_hashes, left_on=['movie'], right_on=['movie_y']
)
movies_watched_by_donald = resolved['movie_x'].drop_duplicates()
movies_watched_by_donald
# -
hw8/hw8ex1/ex1-1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import cv2
import numpy as np

# Haar cascades for frontal faces and eyes (the XML files must be available
# in the working directory).
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

# Live face + eye detection from the default webcam; press Esc to quit.
cap = cv2.VideoCapture(0)
while True:
    r, img = cap.read()
    if not r:
        # FIX: the original never checked the read flag, so a failed frame
        # grab (camera busy/disconnected) crashed in cvtColor on a None frame.
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0))
        # Search for eyes only inside the detected face region.
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]  # region from face
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (255, 0, 0))
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # Esc key
        break
cap.release()
cv2.destroyAllWindows()

# +
# Static-image version of the same face detection.
img = cv2.imread("1-face.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
face = face_cascade.detectMultiScale(gray)
for (x, y, w, h) in face:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow('imgh', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
eye and face recog Snehal.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Procesamiento de datos y visualización con Python
# ## Ejercicio Seaborn 2
# Obtén un histograma y desactiva la función de densidad.
# (Exercise: draw a histogram with the density/KDE curve disabled.)

# + tags=[]
# Seaborn
# Exercise 2
import pandas as pd
from matplotlib import pyplot as plt
# %matplotlib inline
import seaborn as sns

# Load the Apple quote file and peek at the first rows.
AAPL = pd.read_csv("AAPL.csv")
print(AAPL.head(5))
# -

sns.set(color_codes=True)
# kde=False disables the density estimate, leaving a plain 10-bin histogram.
# NOTE(review): distplot is deprecated in seaborn >= 0.11 (removed in 0.14);
# sns.histplot is the modern equivalent — confirm the installed version
# before migrating this call.
sns.distplot(AAPL['Adj Close'], kde=False, bins=10)
HCD/HCD-UNI4-TAREA1-Seaborn2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + #The filter function takes in two arguments: function and a sequence. #It offers a convenient way to filter out all the elements of an #iterable sequence (such as a list) for which the function doesnt #return True (it doesnt neccessarily have to return False to be #removed, just as long as it True is not returned) #The first argument, function, needs to return a boolean value #so that the filter() can work properly. This function will be #applied to every element of the iterable and if the function #returns True, then the element is included in the result. # - def even_check(num): if num%2==0: return True else: return False # + lst= range(0,20) filter (even_check, lst) # + #Lambda expressions are common, just like map() and reduce() use #lambda expressions. # - filter(lambda x: x%2==0, lst) filter(lambda num: num>3, lst) # + #Section 7.4: Zip() # + #zip() makes an iterator that aggregrates elements from two or more #iterables. #It returns an iterator of tuples, where #the i-th tuple contains the i-th element #from each of the argument sequences or iterables. #The iterator stops when the shortest input iterable #is exhausted. With a single iterable argument, it returns #an iterator of 1-tuples. With no arguments, it returns an #empty iterator. #Basically taking two iterables and zipping them up into tuple pairs. #And if one iterable is shorter than the other, zip only goes as far #as the shortest iterable. #It's easier to understand with example: # - x=[1,2,3] y=[4,5,6] zip(x,y) a=[1,2,3,4,5] b=[2,2,10,1,1] for pair in zip(a,b): print max(pair) #^if that said return there, we'd have to put this for loop into #a function map(lambda pair: max(pair), zip(a,b)) q=[1,2,3] r=[4,5,6,7,8] #you're defined by the shortest iterable... 
zip(x,y) # + #Let's try zipping together dictionaries: # - d1={'a':1,'b':2} d2={'c':4,'d':5} zip(d1,d2) #Iterating through dictionaries results in just the keys. for i in d1: print i zip(d2, d1.itervalues()) #We can call methods in to iterate values of a dictionary instead #Create a function to switch the keys and values of two dictionaries #using zip. Want our output to be a dictionary not a list of tuples. def switcheroo(d1,d2): dout ={} for d1key, d2val in zip(d1,d2.itervalues()): dout[d1key] = d2val #Indexed the dictionary at a key to define its value. return dout d1 d2 switcheroo(d1,d2) # + #Note: the zip function isn't that long to make by hand: def zip(*iterables): # zip('ABCD', 'xy') --> Ax By sentinel = object() iterators = [iter(it) for it in iterables] while iterators: result = [] for it in iterators: elem = next(it, sentinel) if elem is sentinel: return result.append(elem) yield tuple(result)
Section7.3,7.4 Filter(), zip() Nick.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Tweet emotion recognition: fine-tune a bidirectional-LSTM classifier on the
# `emotion` dataset (6 classes) and save the trained model to Google Drive.
# (Colab cell/widget metadata has been trimmed for readability.)

# +
# !pip install nlp

# +
# %matplotlib inline

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import nlp
import random


def show_history(h):
    """Plot training/validation accuracy and loss curves from a Keras History."""
    epochs_trained = len(h.history['loss'])
    plt.figure(figsize=(16, 6))

    plt.subplot(1, 2, 1)
    plt.plot(range(0, epochs_trained), h.history.get('accuracy'), label='Training')
    plt.plot(range(0, epochs_trained), h.history.get('val_accuracy'), label='Validation')
    plt.ylim([0., 1.])
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(range(0, epochs_trained), h.history.get('loss'), label='Training')
    plt.plot(range(0, epochs_trained), h.history.get('val_loss'), label='Validation')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()

    plt.show()


def show_confusion_matrix(y_true, y_pred, classes):
    """Show a row-normalized confusion matrix over the 6 emotion classes."""
    from sklearn.metrics import confusion_matrix

    cm = confusion_matrix(y_true, y_pred, normalize='true')
    plt.figure(figsize=(8, 8))
    sp = plt.subplot(1, 1, 1)
    ctx = sp.matshow(cm)
    plt.xticks(list(range(0, 6)), labels=classes)
    plt.yticks(list(range(0, 6)), labels=classes)
    plt.colorbar(ctx)
    plt.show()


print('Using TensorFlow version', tf.__version__)

# +
# NOTE: the `nlp` package has since been renamed to `datasets`; on a current
# environment use `datasets.load_dataset('emotion')` instead.
dataset = nlp.load_dataset('emotion')

# +
train = dataset['train']
val = dataset['validation']
test = dataset['test']


# +
def get_tweet(data):
    """Split a dataset split into parallel lists of texts and label names."""
    tweets = [x['text'] for x in data]
    labels = [x['label'] for x in data]
    return tweets, labels


# +
tweets, labels = get_tweet(train)

# +
tweets[0], labels[0]

# +
from tensorflow.keras.preprocessing.text import Tokenizer

# +
# Keep the 10k most frequent words; everything else maps to the <UNK> token.
tokenizer = Tokenizer(num_words=10000, oov_token='<UNK>')
tokenizer.fit_on_texts(tweets)

# +
tweets[0]

# +
tokenizer.texts_to_sequences([tweets[0]])

# +
# Tweet length distribution, used to pick the padding length below.
lengths = [len(t.split(' ')) for t in tweets]
plt.hist(lengths, bins=len(set(lengths)))
plt.show()  # BUG FIX: was `plt.show` (bare attribute access), which never drew the figure

# +
maxlen = 50
from tensorflow.keras.preprocessing.sequence import pad_sequences


# +
def get_sequences(tokenizer, tweets):
    """Tokenize `tweets` and pad/truncate every sequence to `maxlen` tokens."""
    sequences = tokenizer.texts_to_sequences(tweets)
    padded = pad_sequences(sequences, truncating='post', padding='post', maxlen=maxlen)
    return padded


# +
padded_train_seq = get_sequences(tokenizer, tweets)

# +
padded_train_seq[0]

# +
classes = set(labels)
print(classes)

# +
# Bidirectional mapping between label names and integer class ids.
class_to_index = dict((c, i) for i, c in enumerate(classes))
index_to_class = dict((v, k) for k, v in class_to_index.items())

# +
class_to_index

# +
index_to_class

# +
names_to_ids = lambda labels: np.array([class_to_index.get(x) for x in labels])

# +
train_labels = names_to_ids(labels)
train_labels[0]

# +
model = tf.keras.models.Sequential([
    tf.keras.layers.Embedding(10000, 16, input_length=maxlen),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(20, return_sequences=True)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(20)),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)

# +
model.summary()

# +
val_tweets, val_labels = get_tweet(val)
val_seq = get_sequences(tokenizer, val_tweets)
val_labels = names_to_ids(val_labels)

# +
val_tweets[0], val_labels[0]

# +
h = model.fit(
    padded_train_seq, train_labels,
    validation_data=(val_seq, val_labels),
    epochs=20,
    callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=2)]
)

# +
test_tweets, test_labels = get_tweet(test)
test_seq = get_sequences(tokenizer, test_tweets)
test_labels = names_to_ids(test_labels)

# +
model.evaluate(test_seq, test_labels)

# +
# FIX: Sequential.predict_classes() was deprecated and removed in TF >= 2.6;
# taking the argmax of the softmax output is the documented replacement and
# gives the identical class ids for this model.
preds = np.argmax(model.predict(test_seq), axis=-1)
show_confusion_matrix(test_labels, preds, list(classes))

# +
from google.colab import drive
drive.mount('/content/drive')

# +
model.save("/content/drive/My Drive/TweetEmotionRecognition/h5/tweet_model.h5")

# +
# Alternative SavedModel export, kept disabled in the original notebook.
# model.save("/content/drive/My Drive/TweetEmotionRecognition/model/")

# +
# Spot-check one random test tweet.
i = random.randint(0, len(test_labels) - 1)
print('Sentence:', test_tweets[i])
print('Emotion:', index_to_class[test_labels[i]])
p = model.predict(np.expand_dims(test_seq[i], axis=0))[0]
print(test_seq[i])
pred_class = index_to_class[int(np.argmax(p))]
print('Predicted Emotion: ', pred_class)

# +
# Classify a hand-written sentence with the in-memory model.
sequence = tokenizer.texts_to_sequences(["i am angry"])
paddedSequence = pad_sequences(sequence, truncating='post', padding='post', maxlen=maxlen)
p = model.predict(np.expand_dims(paddedSequence[0], axis=0))[0]
print(paddedSequence[0])
pred_class = index_to_class[int(np.argmax(p))]
print('Predicted Emotion: ', pred_class)
print(p)
print(index_to_class)

# +
# Reload the saved model and verify it round-trips.
load_model = tf.keras.models.load_model("/content/drive/My Drive/TweetEmotionRecognition/h5/tweet_model.h5")
print(load_model.summary())

# +
sentence = 'i am not sure what to do'
sequence = tokenizer.texts_to_sequences([sentence])
paddedSequence = pad_sequences(sequence, truncating='post', padding='post', maxlen=maxlen)
p = load_model.predict(np.expand_dims(paddedSequence[0], axis=0))[0]
pred_class = index_to_class[int(np.argmax(p))]
print('Sentence:', sentence)
print('Predicted Emotion: ', pred_class)
TweetEmotionRecognition.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Experiments on Cross Lingual Transfer for Intent Detection
# ## <NAME>
# ## 10.2.2021

# This notebook is used to run the experiments for Intent Only cross-lingual
# experiments using XLM-R. The blocks for training and testing are meant to be
# run individually and correspond to the experiments listed in the paper

# +
from preprocessing.util import *
import json  # FIX: used below but previously only available via the wildcard import
import pickle
import sklearn
import torch
import numpy as np
from simpletransformers.classification import ClassificationModel

# Fix the seed so repeated runs are comparable.
torch.manual_seed(136)
#from model import *
# -

# We begin by preprocessing the data

# +
# Label-id -> intent-name mapping shared by every dataset split.
mapping = {}
with open('preprocessing/label_map.json', 'r') as f:
    mapping = json.load(f)
mapping = {int(k): v for k, v in mapping.items()}

# preprocess training and test files to pandas df
# eng train
en_df, en_mapping = df_format(("data/en/train-en.tsv"), mapping)
# eng eval
en_df_eval, en_mapping = df_format("data/en/eval-en.tsv", mapping)
# eng test
en_df_test, en_mapping = df_format("data/en/test-en.tsv", mapping)
# es train
es_df, es_mapping = df_format("data/es/train-es.tsv", mapping)
# es eval
es_df_eval, es_mapping = df_format("data/es/eval-es.tsv", mapping)
# es test
es_df_test, es_mapping = df_format("data/es/test-es.tsv", mapping)
# th train
th_df, th_mapping = df_format("data/th/train-th_TH.tsv", mapping)
# th eval
th_df_eval, th_mapping = df_format("data/th/eval-th_TH.tsv", mapping)
# th test
th_df_test, th_mapping = df_format("data/th/test-th_TH.tsv", mapping)

mapping_list = list(mapping.values())

# +
# drop some duplicate values
# This is perhaps unnecessary.
en_train = en_df.drop_duplicates("text")
en_eval = en_df_eval.drop_duplicates("text")
en_test = en_df_test.drop_duplicates("text")
es_train = es_df.drop_duplicates("text")
es_eval = es_df_eval.drop_duplicates("text")
es_test = es_df_test.drop_duplicates("text")
th_train = th_df.drop_duplicates("text")
th_eval = th_df_eval.drop_duplicates("text")
th_test = th_df_test.drop_duplicates("text")
# -

# Since we do not know if the other experiments were trained on a combination
# of train and eval, we err on the safe side and ignore the eval file

# +
# en_full_train = pd.concat([en_train,en_eval])
# es_full_train = pd.concat([es_train,es_eval])
# th_full_train = pd.concat([th_train, th_eval])

# quick hack to by-pass combining en eval
en_full_train = en_train
es_full_train = es_train
th_full_train = th_train
# -

# (The unused `path2model*` variables, which the original marked
# "should be removed", have been removed — nothing referenced them.)

# mix create data for cross-lingual training
en_th_full_train = pd.concat([en_full_train, th_full_train])
# BUG FIX: this previously concatenated en + th a *second* time, so every
# "en+es" experiment was actually trained on en+th data.
en_es_full_train = pd.concat([en_full_train, es_full_train])


# +
def avg_sent_l(df):
    """Average whitespace-tokenized sentence length of the `text` column."""
    return sum([len(l.split()) for l in df["text"]]) / len(df)


def lexical_diversity(df):
    """Return (count, set) of unique whitespace tokens in the `text` column."""
    lexes = set()
    for l in df["text"]:
        for w in l.split():
            lexes.add(w)
    return len(lexes), lexes


def analyze_wrong(wrong_predictions, model):
    """Re-predict the misclassified examples and measure domain-level accuracy.

    Returns (results, domain_accuracy) where `results` is a list of
    (text, predicted_label, true_label) triples and `domain_accuracy` is the
    fraction of wrong intent predictions that still hit the correct domain
    (the first "/"-separated component of the label).
    """
    wrongs = [(inp.text_a, inp.label) for inp in wrong_predictions]
    wrong_preds, vecs = model.predict([t for t, l in wrongs])
    dom_corr = 0
    weak_dom = 0  # reminder/alarm treated as one "weak" domain; currently unused
    rem_alarms = ["reminder", "alarm"]
    results = []
    for (text, lab_true), lab_pred in zip(wrongs, wrong_preds):
        lab_pred = mapping[lab_pred]
        lab_true = mapping[lab_true]
        dom_pred = lab_pred.split("/")[0]
        dom_true = lab_true.split("/")[0]
        if dom_pred == dom_true:
            dom_corr += 1
        if (dom_pred in rem_alarms) and (dom_true in rem_alarms):
            weak_dom += 1
        results.append((text, lab_pred, lab_true))
        #print(text,"\t" ,lab_pred,"\t", lab_true,"\t", dom_pred,"\t", dom_true)
    return results, dom_corr / len(wrongs)
# -

# metrics used for evaluation
macro = lambda x, y: sklearn.metrics.f1_score(x, y, average='macro')
micro = lambda x, y: sklearn.metrics.f1_score(x, y, average='micro')
report = lambda x, y: sklearn.metrics.classification_report(
    x, y, digits=5, labels=list(range(0, 12)), target_names=mapping_list)
report_dict = lambda x, y: sklearn.metrics.classification_report(
    x, y, digits=5, output_dict=True, labels=list(range(0, 12)), target_names=mapping_list)
accuracy = lambda x, y: sklearn.metrics.accuracy_score(x, y)


def custom_eval(df, model, ex_name="experiment 1", verbose=True):
    """Evaluate `model` on `df` and enrich the results with domain-level stats."""
    results, predictions_vs, wrongs = model.eval_model(
        df, macro=macro, micro=micro, accuracy=accuracy,
        report=report, report_dict=report_dict)
    results["name"] = ex_name
    false_preds, dom_acc = analyze_wrong(wrongs, model)
    results["wrong_predictions"] = false_preds
    results["domain_of_wrongs"] = dom_acc
    # Overall domain accuracy = exact hits + wrong intents in the right domain.
    results["domain_accuracy"] = results["accuracy"] + (1 - results["accuracy"]) * dom_acc
    if verbose:
        print("results for experiment: ", ex_name)
        print(results["report"])
        print("domain accuracy: ", results["domain_accuracy"])
    return results
# -

# Here we gather some statistics about the datasets

# +
print("average sentence length")
print("en", avg_sent_l(en_train))
print("es", avg_sent_l(es_train))
print("th", avg_sent_l(th_train))
print("unique tokens")
print("en", lexical_diversity(en_train)[0])
print("es", lexical_diversity(es_train)[0])
print("th", lexical_diversity(th_train)[0])
# -

# We load up a pretrained XLM-R model with a Max Ent layer for classification.
# Arguments are left pretty vanilla except fp16 which is not relevant for the results.

# +
#change the hyper-parameters here.
args = {"fp16": True,
        'learning_rate': 1e-5,
        'num_train_epochs': 5,
        'reprocess_input_data': True,
        'overwrite_output_dir': True,
        'save_steps': -1,
        "save_model_every_epoch": False,
        }
# -

# Results of every experiment, keyed by a unique experiment name.
experiment_results = {}

# +
# train english model
# full train = train + eval
args["output_dir"] = "models/intent_en_train"
model = ClassificationModel('xlmroberta', 'xlm-roberta-base', num_labels=12, args=args)
model.train_model(en_full_train)

# test eng
results = custom_eval(en_test, model, "train_en_test_en")
experiment_results[results["name"]] = results
# test es
results = custom_eval(es_test, model, "train_en_test_es")
experiment_results[results["name"]] = results
# test th
results = custom_eval(th_test, model, "train_en_test_th")
experiment_results[results["name"]] = results

# +
# reset model
# train on full spanish
args["output_dir"] = "models/intent_es_train"
model = ClassificationModel('xlmroberta', 'xlm-roberta-base', num_labels=12, args=args)
model.train_model(es_full_train)

# test eng
results = custom_eval(en_test, model, "train_es_test_en")
experiment_results[results["name"]] = results
# test es
results = custom_eval(es_test, model, "train_es_test_es")
experiment_results[results["name"]] = results
# test th
results = custom_eval(th_test, model, "train_es_test_th")
experiment_results[results["name"]] = results
# -

# +
# reset model
args["output_dir"] = "models/intent_th_train"
model = ClassificationModel('xlmroberta', 'xlm-roberta-base', num_labels=12, args=args)
# train on full thai
model.train_model(th_full_train)

# test eng
results = custom_eval(en_test, model, "train_th_test_en")
experiment_results[results["name"]] = results
# test es
results = custom_eval(es_test, model, "train_th_test_es")
experiment_results[results["name"]] = results
# test th
results = custom_eval(th_test, model, "train_th_test_th")
experiment_results[results["name"]] = results

# +
args["output_dir"] = "models/intent_en_th_train"
model = ClassificationModel('xlmroberta', 'xlm-roberta-base', num_labels=12, args=args)
# train on full thai and eng mixed
model.train_model(en_th_full_train)

# BUG FIX: these evaluations previously reused the names "train_th_test_*",
# silently overwriting the thai-only results in experiment_results.
# test eng
results = custom_eval(en_test, model, "train_en_th_test_en")
experiment_results[results["name"]] = results
# test es
results = custom_eval(es_test, model, "train_en_th_test_es")
experiment_results[results["name"]] = results
# test th
results = custom_eval(th_test, model, "train_en_th_test_th")
experiment_results[results["name"]] = results

# +
args["output_dir"] = "models/intent_en_es_train"
model = ClassificationModel('xlmroberta', 'xlm-roberta-base', num_labels=12, args=args)
# train on full english and spanish mixed (comment fixed; it wrongly said thai)
model.train_model(en_es_full_train)

# BUG FIX: same naming collision as above — these now get unique names.
# test eng
results = custom_eval(en_test, model, "train_en_es_test_en")
experiment_results[results["name"]] = results
# test es
results = custom_eval(es_test, model, "train_en_es_test_es")
experiment_results[results["name"]] = results
# test th
results = custom_eval(th_test, model, "train_en_es_test_th")
experiment_results[results["name"]] = results
# -

# Here are some sanity checks

for text, predicted, real in experiment_results["train_en_test_th"]["wrong_predictions"]:
    print(text, "\t", predicted, "\t", real)


##### SANITY CHECK #####
def unique_sents(test_df, train_df):
    """Print the share of test utterances that never occur in the train split."""
    print("unique utterances in test data out of :", len(test_df))
    unseen = []  # renamed: previously shadowed this function's own name
    train_set = set(train_df["text"])
    for sent in test_df["text"]:
        if sent not in train_set:
            unseen.append(sent)
    print(len(unseen) / len(test_df) * 100, "% of the sentences are unique")


unique_sents(en_test, en_full_train)

unique_sents(es_test, es_eval)

unique_sents(es_test, es_full_train)

unique_sents(th_test, th_eval)

unique_sents(th_test, th_full_train)

# Map a raw sentence to its predicted intent name (uses the *last* trained model).
predict_sent = lambda sent: mapping[model.predict([sent])[0][0]]

predict_sent("what's the weather in Potsdam")

predict_sent("don't wake me up tomorrow")

# +
predict_sent("ตั้ง เวลา พรุ่ง บ่าย พรุ่งนี้")
# -

predict_sent("que temperatura hay aqui")

predict_sent("no necesito que levantarme el sabado")

predict_sent("sabado no necesito que levantarme")

predict_sent("ไม่ ต้อง ปลุก ฉัน วัน เสาร์ นะ")

predict_sent("วัน เสาร์ ไม่ ต้อง ปลุก ฉัน นะ")

predict_sent("you don't have to wake me up on saturday")

predict_sent("saturday you don't have to wake me up")

# NOTE: the unescaped group in this pattern triggers pandas' "match groups"
# warning; it is harmless here since only the boolean mask is used.
en_full_train[en_full_train["text"].str.contains("^on (saturday|sunday|monday|tuesday)", case=False, regex=True)]

mapping[10]

predict_sent("I don't have to wake up early on saturday")

# this is a weird sentence
predict_sent("saturday you don't have to wake me up")

predict_sent("am Samstag musst du mich nicht aufwecken")

predict_sent("ich nicht muss aufstehen am Samstag")

"el sabado no necesito el despertador"

# doesn't work
# implicit
"cuanto falta hasta el alarma"

"cuanto tiempo queda hasta que me levanto"

"que temperatura hay aqui"
Intent_detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Project description "Continuous control" # # The "Continuous Control" project is one of three problems that are part of [Udacity](https://www.udacity.com/)'s Deep Reinforcement Learning Nanodegree. The task is to apply torque to the two joints of a robotic arm, in order to move a hand at the end of the arm into a dynamically moving target zone. # # A description of the environment can be found in [0]; the agent has a 33 dimensional observation space that describes both the position and movement of the arm as well as the location of the target zone. The action space consists of four continuous torque values, two values per joint of the arm. A reward is given when the hand is in the target zone. # # My first attempts to solve the task using the Deep Deterministic Policy Gradient algorithm (DDPG, Lillicrap et al., 2015, [1]) were unsuccesful. [Spinning Up in Deep RL](https://spinningup.openai.com/en/latest/) [2] has a short description of various DRL algorithms, which is where I found an improvement of DDPG: the Twin Delayed DDPG algorithm (TD3, Fujimoto et al., 2018, [3]) has some improvements over DDPG that helped me to solve the task. # # ## Actor Critic Methods, DDPG & T3D algorithm # Techniques like DQN learning work well with discrete and relatively small action spaces, however in continuous action spaces, finding the optimal value of the Q-value function is an optimization problem on its own [1]. Lillicrap et al. (2015, [1]) solved this problem by using an actor-critic method. In the actor-critic method a policy (actor) is directly learned with help of the Q-value function (critic). 
#
# While the original manuscripts are definitely worth reading, I will briefly mention the main ideas following the summaries given by "Spinning Up Deep RL" (see [2] for their DDPG description and [3] for TD3):
#
# Deep Deterministic Policy Gradient learns a representation $\phi_{local}$ of the action-value function Q very similar to the DQN algorithm by using the Bellman equation. In order to improve stability another representation of the action value function $Q_{target}$ is used, and this neural network is slowly updated to match $Q_{local}$. The target action-value function $Q_{target}$ can be updated with help of the Bellman equation:
#
# $$Q_{target}(s_{0}, a_{0})=r + \gamma * Q_{target}(s_{1}, \mu_{\phi_{target}}(s_{1}))$$
# where $s_{0}$ is the current state, $a_{0}$ the current action, r the reward after taking the action and $s_{1}$ is the next state.
#
# The local network is then updated by minimizing the mean squared error between $Q_{target}(s_{0}, a_{0})$ and $Q_{local}(s_{0}, a_{0})$.
#
# The representation of the policy $\mu_{\theta}$ is learned by taking gradient ascent steps in the direction of the gradient of the expected action-value function [2]:
# $$ \nabla_{\theta}Q_{\phi}(s_{0}, \mu_{\theta}(s_{0}))$$
# As explained in [3] and [4] DDPG can be unstable due to an overestimation of the learned Q-values.
# The TD3 algorithm by Fujimoto et al. (2018, [3]) adds three improvements to the DDPG method that help stabilize the algorithm's performance:
# - The algorithm uses two Q-functions $Q_{A}$ and $Q_{B}$. The minimum of the two Q-value predictions, $\min(Q_{A}(s_{1}, \mu_{\phi_{target}}(s_{1})), Q_{B}(s_{1}, \mu_{\phi_{target}}(s_{1})))$, is used during the Bellman update of the critic neural network. This helps prevent overestimation of the Q-value function during training.
# - The actor (or policy) network is updated less frequently than the critic networks
# - Noise is added to the target action in the critic update step, which helps, for example, prevent problems due to spikes in the learned Q-value function.
#
# ## Implementation
#
# I started implementing DDPG by loosely following the [DDPG pendulum implementation by Udacity](https://github.com/udacity/deep-reinforcement-learning/tree/master/ddpg-pendulum) [7].
# Similarly to the Udacity benchmark implementation, the neural networks were not updated in each step in order to increase stability. 10 update steps with a batch size of 64 were performed after 20 steps in the environment.
# Rather than performing the step and experience handling in the main loop, I implemented an "episode generator", following an idea from [8]. The advantage of the episode generator is that it can later easily be extended to return n-step updates rather than one step updates.
#
# Since the DDPG implementation did not lead to a learning agent I started implementing the improvements of the TD3 algorithm ([3], [4]): double Q-learning, delayed policy updates and target policy smoothing. Similar to the [original author's implementation](https://github.com/sfujim/TD3) ([5]), I implemented the two critic networks in a single class that returns Q values for both in one step.
# ## Results
#
# A short animation of the trained agent is shown below:
# ![Reacher Gif](results/reacher.gif)
#
#
# The TD3 algorithm was able to learn. Fig. 1 shows the scores of each individual episode (blue curve) as well as a running mean over 100 episodes. The average score was above 30 after roughly 500 episodes. Results for other runs with slightly different parameters can be found in the results folder. The settings are listed below and are pretty standard settings from the DDPG manuscript [1] and the TD3 manuscript [4].
# One noteworthy difference was the relatively low learning rates of 5e-4 for both the actor and the critic network.![Learning curve](results/run003/learning_curve.png)
#
#
# Figure 2 and 3 show the actor-loss and critic-loss. The values presented are an average loss of the 10 update steps at the end of an episode. The actor-loss steadily increases during the training. Since it represents the expected Q-value (or the expected reward from a given state) it is expected to be increasing.
# ![Actor Loss](results/run003/a_loss.png)
#
# A little more surprising, the critic-loss, which is the mean squared error between the Bellman target Q-value and the expected Q-value, shows large values with increasing training time, although it should decrease with training time. In Figure 3 the individual critic losses for the critic networks $Q_{A}$ (blue) and $Q_{B}$ (orange) are shown, however the values of $Q_{A}$ are hidden behind $Q_{B}$. The peaks of higher loss function values are not equidistant (in particular they did not always appear at the end of an episode). ![Critic Loss](results/run003/c_loss.png)
# The x-label on the critic-loss Figure above is mislabeled, it should be step number instead of episode number.
#
# ## Future improvements
#
# One obvious improvement would be to further investigate the cause of the increasing critic-loss. Since the implementation uses two Q-target networks one approach would be to look at the loss of each network individually.
#
# Besides this (necessary) bug-fix there are many other interesting ways to improve the agent. Distributed Distributional Deterministic Policy Gradients (D4PG, [6]) makes use of a complete distribution of reward values instead of only a mean value of the reward. It would also be interesting to combine D4PG with the improvements from TD3.
# Additionally using n-step updates rather than just the next step would be an interesting direction that could improve the agent.
# # ## Settings for the presented run: # The settings for the run shown above are printed below: settings = { 'batch_size': 64, # Number of experience samples per training step 'buffer_size': int(3e6), # Max number of samples in the replay memory 'gamma': 0.99, # Reward decay factor 'tau': 1e-3, # Update rate for the slow update of the target networks 'lr_actor': 5e-4, # Actor learning rate 'lr_critic': 5e-4, # Critic learning rate 'action_noise': 0.4, # Noise added during episodes played 'action_clip': 1.0, # Actions are clipped to +/- action_clip 'target_action_noise': 0.4, # Noise added during the critic update step 'target_noise_clip': 0.2, # Noise clip for the critic update step 'number_steps': 1, # Number of steps for roll-out, currently not used 'optimize_critic_every': 2, # Update the critic only every X update steps 'pretrain_steps': int(10000),# Number of random actions played before training starts 'actor_settings': actor_settings, 'critic_settings': critic_settings} # ## Further reading and references # # Since my original DDPG implementation did not converge, I started reading various different sources, which are listed below: # - As suggested by Udacity, I started by studying the DDPG pendulum implementation [7] # - The descriptions of DDPG and TD3 from OpenAi ([3], [4]) were great to understand both algorithms in more detail # - The TD3 implementation of the original authors can be found under [5] # - The Deep Reinforcement Learning Hands-On book by <NAME> [8] was a valuable source that greatly helped me understand the topic better and the hands-on code examples were really helpful to see, how things can be implemented in Python and PyTorch. # - One Udacity student helped many people in the internal discussion boards and shared his github profile (which you can find at [9]). The github profile features a very thorough implementation of the DP4G algorithm. 
# The idea of using a pre-train phase with random actions before training is from this implementation.
# - TowardsDataScience had a series of blog posts on the same task [10]. The posts feature implementations of three different algorithms: DDPG, D4PG and A2C.
# - The PyTorch tutorials and documentation were again very helpful [11]
#
#
# # References
# [0] https://github.com/udacity/deep-reinforcement-learning/tree/master/p2_continuous-control
# [1] Lillicrap, <NAME>., et al. "Continuous control with deep reinforcement learning." arXiv preprint arXiv:1509.02971 (2015).
# [2] https://spinningup.openai.com/en/latest/algorithms/ddpg.html
# [3] https://spinningup.openai.com/en/latest/algorithms/td3.html
# [4] Fujimoto, Scott, <NAME>, and <NAME>. "Addressing function approximation error in actor-critic methods." arXiv preprint arXiv:1802.09477 (2018).
# [5] https://github.com/sfujim/TD3, last accessed: 2019-04-14
# [6] Barth-Maron, Gabriel, et al. "Distributed distributional deterministic policy gradients." arXiv preprint arXiv:1804.08617 (2018).
# [7] https://github.com/udacity/deep-reinforcement-learning/tree/master/ddpg-pendulum, last accessed: 2019-04-14
# [8] Lapan, Maxim. Deep Reinforcement Learning Hands-On: Apply modern RL methods, with deep Q-networks, value iteration, policy gradients, TRPO, AlphaGo Zero and more. Packt Publishing Ltd, 2018.
# [9] https://github.com/whiterabbitobj/Continuous_Control/, last accessed: 2019-04-14
# [10] https://towardsdatascience.com/rl-train-the-robotic-arm-to-reach-a-ball-part-01-1cecd2e1cfb8 and
# https://towardsdatascience.com/rl-train-the-robotic-arm-to-reach-a-ball-part-02-fc8822ace1d8
# [11] https://pytorch.org/
Report.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:Anaconda3] # language: python # name: conda-env-Anaconda3-py # --- # ## Lists a_list = [1, 2.3, 'a', True] an_empty_list = list() a_list[1] # prints 2.3 a_list[1] = 2.5 # a_list is now [1, 2.5, 'a', True] a_list[1:3] # prints [2.3, 'a'] a_list[::2] # returns only odd elements: [1, 'a'] a_list[::-1] # returns the reverse of the list: [True, 'a', 2.3, 1] a_list.append(5) # a_list is now [1, 2.5, 'a', True, 5] a_list len(a_list) # prints 5 del a_list[0] # a_list is now [2.5, 'a', True, 5] a_list a_list += [1, 'b'] # a_list is now [2.5, 'a', True, 5, 1, 'b'] a_list a, b, c, d, e, f = [2.5, 'a', True, 5, 1, 'b'] # a now is 2.5, b is 'a' and so on a tuple(a_list) # prints (2.5, 'a', True, 5, 1, 'b') # ## Dictionaries b_dict = {1: 1, '2': '2', 3.0: 3.0} b_dict['2'] # prints '2' b_dict['2'] = '2.0' # b_dict is now {1: 1, '2': '2.0', 3.0: 3.0} b_dict b_dict['a'] = 'a' # b_dict is now {3.0: 3.0, 1: 1, '2': '2.0', 'a': 'a'} b_dict len(b_dict) # prints 4 del b_dict[3.0] # b_dict is now {1: 1, '2': '2.0', 'a': 'a'} b_dict b_dict['a_key'] if 'a_key' in b_dict: b_dict['a_key'] else: print("'a_key' is not present in the dictionary") b_dict.get('a_key') from collections import defaultdict c_dict = defaultdict(lambda: 'empty') c_dict['a_key'] # requiring a nonexistent key will always return the string 'empty' # ## Defining functions def half(x): return x/2.0 import math def sigmoid(x): try: return 1.0 / (1 + math.exp(-x)) except: if x < 0: return 0.0 else: return 1.0 # + def sum_a_const(c): return lambda x: x+c sum_2 = sum_a_const(2) sum_3 = sum_a_const(3) print(sum_2(2)) print(sum_3(2)) # prints 4 and 5 # - half(10) # prints 5.0 sigmoid(0) # prints 0.5 # + a_list = [1,2,3,4,5] def modifier(L): L[0] = 0 def unmodifier(L): M = L[:] # Here we are copying the list M[0] = 0 unmodifier(a_list) 
print(a_list) # you still have the original list, [1, 2, 3, 4, 5] modifier(a_list) print(a_list) # your list have been modified: [0, 2, 3, 4, 5] # - # ## Classes, objects and OOP class Incrementer(object): def __init__(self): print ("Hello world, I'm the constructor") self._i = 0 i = Incrementer() # prints "Hello world, I'm the constructor" class Incrementer(object): def __init__(self): print ("Hello world, I'm the constructor") self._i = 0 def increment(self): self._i += 1 return self._i i = Incrementer() print (i.increment()) print (i.increment()) print (i.increment()) class Incrementer(object): def __init__(self): print ("Hello world, I'm the constructor") self._i = 0 def increment(self): self._i += 1 return self._i def set_counter(self, counter): self._i = counter i = Incrementer() i.set_counter(10) print (i.increment()) print (i._i) # ## Exception 0/0 len(1, 2) pi * 2 try: a = 10/0 except ZeroDivisionError: a = 0 a # + for entry in ['alpha', 'bravo', 'charlie', 'delta']: print (entry) # prints the content of the list, one entry for line # + a_dict = {1: 'alpha', 2: 'bravo', 3: 'charlie', 4: 'delta'} for key in a_dict: print (key, a_dict[key]) # Prints: # 1 alpha # 2 bravo # 3 charlie # 4 delta # + def incrementer(): i = 0 while i<5: yield(i) i +=1 for i in incrementer(): print (i) # Prints: # 0 # 1 # 2 # 3 # 4 # - # ## Conditionals # + def is_positive(val): if val< 0: print ("It is negative") elif val> 0: print ("It is positive") else: print ("It is exactly zero!") is_positive(-1) is_positive(1.5) is_positive(0) # Prints: # It is negative # It is positive # It is exactly zero! 
# -

# ## List and dict comprehensions

a_list = [1, 2, 3, 4, 5]

# Build a new list by squaring every element in a single expression.
a_power_list = [item ** 2 for item in a_list]
# the resulting list is [1, 4, 9, 16, 25]
a_power_list

# Add a filter clause: keep (and square) only the even values.
filter_even_numbers = [item ** 2 for item in a_list if item % 2 == 0]
# the resulting list is [4, 16]
filter_even_numbers

another_list = ['a', 'b', 'c', 'd', 'e']

# A dict comprehension over zipped pairs maps each letter to its number;
# equivalent to dict(zip(another_list, a_list)).
a_dictionary = {letter: number for number, letter in zip(a_list, another_list)}
# the resulting dictionary is {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
a_dictionary
Appendix/Strengthen Your Python Foundations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

# # TF CNN ground rule: keep every matrix shape and input/output format straight

# +
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)


def weight(shape):
    """Weight Variable initialised from a truncated normal (std 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name ='W')


def bias(shape):
    """Bias Variable initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape), name ='b')


def conv2d(x, W):
    """2-D convolution; strides[1:3] set how far the filter moves in x and y."""
    return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')


def max_pool_2x2(x):
    """2x2 max pooling: keep the largest value of every 2x2 patch."""
    return tf.nn.max_pool(x, ksize=[1,2,2,1],   # pooling window size
                          strides=[1,2,2,1],    # take the max of each 2x2 block
                          padding='SAME')


# Input layer
with tf.name_scope('Input_Layer'):
    x = tf.placeholder("float", shape=[None, 784], name="X")
    # Reshape to NHWC for the first conv layer; the last dim is the number
    # of channels (3 for RGB, 1 for grayscale).
    x_image = tf.reshape(x, [-1, 28, 28, 1])

# Dropout keep probability, fed at run time: 0.8 during training, 1.0 during
# evaluation. (BUGFIX: the original hard-coded 0.8, so dropout was also
# active at validation/test time.)
keep_prob = tf.placeholder("float", name="keep_prob")

# Convolution block 1
with tf.name_scope('C1_Conv'):
    W1 = weight([5,5,1,16])
    b1 = bias([16])
    C1_Conv = tf.nn.relu( conv2d(x_image, W1) + b1 )
with tf.name_scope('C1_Pool'):
    C1_Pool = max_pool_2x2(C1_Conv)

# Convolution block 2
with tf.name_scope('C2_Conv'):
    W2 = weight([5,5,16,36])
    b2 = bias([36])
    C2_Conv = tf.nn.relu( conv2d(C1_Pool, W2) + b2 )
with tf.name_scope('C2_Pool'):
    C2_Pool = max_pool_2x2(C2_Conv)

# Fully connected layers
with tf.name_scope('D_Flat'):
    # Flatten: 7 * 7 * 36 = 1764 features per image after two 2x2 poolings.
    D_Flat = tf.reshape(C2_Pool, [-1, 1764])
with tf.name_scope('D_Hidden_Layer'):
    W3 = weight([1764, 32])
    b3 = bias([32])
    D_Hidden = tf.nn.relu( tf.matmul(D_Flat, W3) + b3 )
    D_Hidden_Dropout = tf.nn.dropout(D_Hidden, keep_prob=keep_prob)
with tf.name_scope('Output_Layer'):
    W4 = weight([32,10])
    b4 = bias([10])
    # BUGFIX: keep the raw logits separate. softmax_cross_entropy_with_logits
    # expects *unscaled* logits; the original fed the softmax output into the
    # loss, i.e. applied softmax twice.
    logits = tf.matmul(D_Hidden_Dropout, W4) + b4
    y_predict = tf.nn.softmax(logits)

# Optimizer
with tf.name_scope("optimizer"):
    # The ground-truth labels are only introduced here.
    y_label = tf.placeholder("float", shape=[None, 10], name="y_label")
    loss_function = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_label))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss_function)

# Model evaluation: fraction of samples whose argmax prediction matches.
with tf.name_scope("evaluate_model"):
    correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# Training
trainEpochs = 10
batchSize = 100
totalBatchs = int(mnist.train.num_examples/batchSize)
epoch_list = []
accuracy_list = []
loss_list = []
from time import time
startTime = time()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(trainEpochs):
    for i in range(totalBatchs):
        batch_x, batch_y = mnist.train.next_batch(batchSize)
        sess.run(optimizer, feed_dict={x: batch_x, y_label: batch_y,
                                       keep_prob: 0.8})
    # Track validation loss/accuracy once per epoch (dropout disabled).
    loss, acc = sess.run([loss_function, accuracy],
                         feed_dict={x: mnist.validation.images,
                                    y_label: mnist.validation.labels,
                                    keep_prob: 1.0})
    epoch_list.append(epoch)
    loss_list.append(loss)
    accuracy_list.append(acc)
    print("Train Epoch:", '%02d' % (epoch+1), \
          "Loss=","{:.9f}".format(loss)," Accuracy=",acc, 'Epoch time=',time()-startTime)
duration = time()-startTime
print("Train Finished takes:",duration)
# -

# ## Visualise loss and accuracy

import matplotlib.pyplot as plt
fig = plt.gcf()
fig.set_size_inches(4,2)
plt.plot(epoch_list, loss_list, label = 'loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['loss'], loc='upper left')
plt.show()

import matplotlib.pyplot as plt
fig = plt.gcf()
fig.set_size_inches(4,2)
plt.plot(epoch_list, accuracy_list, label = 'accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['accuracy'], loc='upper left')  # BUGFIX: legend said 'loss'
plt.show()

print("Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images,
                                                 y_label: mnist.test.labels,
                                                 keep_prob: 1.0}))

# # Actual predictions

# Class probabilities for a test subset. BUGFIX: stored under a new name so
# the `y_predict` graph tensor is not shadowed by a numpy array — the
# original overwrote it, so the argmax below silently covered only the
# first 5000 test samples.
y_predict_probs = sess.run(y_predict, feed_dict={x: mnist.test.images[:5000],
                                                 keep_prob: 1.0})
prediction_result = sess.run(tf.argmax(y_predict, 1),
                             feed_dict={x: mnist.test.images,
                                        y_label: mnist.test.labels,
                                        keep_prob: 1.0})

# +
import numpy as np

def show_images_labels_predict(images, labels, prediction_result):
    """Plot the first 10 test images with their true label and prediction."""
    fig = plt.gcf()
    fig.set_size_inches(8, 10)
    for i in range(0, 10):
        ax = plt.subplot(5,5, 1+i)
        ax.imshow(np.reshape(images[i],(28, 28)), cmap='binary')
        ax.set_title("label=" +str(np.argmax(labels[i]))+
                     ",predict="+str(prediction_result[i])
                     ,fontsize=9)
    plt.show()

show_images_labels_predict(mnist.test.images, mnist.test.labels, prediction_result)
# -

# List the misclassified samples among the first 500
for i in range(500):
    if prediction_result[i] != np.argmax(mnist.test.labels[i]):
        print("i="+str(i)+ " label=",np.argmax(mnist.test.labels[i]),
              "predict=",prediction_result[i])

# # That took a while — save the model

saver = tf.train.Saver()
save_path = saver.save(sess, "saveModel/CNN_model1")
print("Model saved in file: %s" % save_path)

merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('log/CNN', sess.graph)
# # Draw the TensorBoard graph
Tensorflow_MNIST_CNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: py365
#     language: python
#     name: py365
# ---

# Regress birth weight from 7 maternal features with a small fully
# connected network (TF2, manual GradientTape training loop), then reuse
# the regression output to classify low birth weight (< 2500 g).

import tensorflow as tf
import matplotlib.pyplot as plt
import csv
import random
import numpy as np
import requests
import os
import sys
csv.field_size_limit(sys.maxsize)

# +
# name of data file
birth_weight_file = 'birthweight.dat'

# download data and create data file if file does not exist in current directory
if not os.path.exists(birth_weight_file):
    birthdata_url = 'https://github.com/nfmcclure/tensorflow_cookbook/raw/master/01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat'
    birth_file = requests.get(birthdata_url)
    birth_data = birth_file.text.split('\r\n')
    birth_header = birth_data[0].split('\t')
    # keep only non-empty fields of non-empty rows, converted to float
    birth_data = [[float(x) for x in y.split('\t') if len(x)>=1] for y in birth_data[1:] if len(y)>=1]
    with open(birth_weight_file, "w") as f:
        writer = csv.writer(f)
        writer.writerows([birth_header])
        writer.writerows(birth_data)
        f.close() # redundant: the with-statement already closes the file

# read birth weight data into memory
birth_data = []
with open(birth_weight_file, newline='') as csvfile:
    csv_reader = csv.reader(csvfile)
    birth_header = next(csv_reader)
    for row in csv_reader:
        birth_data.append(row)
birth_data = [[float(x) for x in row] for row in birth_data]

# Extract y-target (birth weight; column 8 — presumably BWT, confirm against birth_header)
y_vals = np.array([x[8] for x in birth_data])

# Filter for features of interest (column order follows birth_header)
cols_of_interest = ['AGE', 'LWT', 'RACE', 'SMOKE', 'PTL', 'HT', 'UI']
x_vals = np.array([[x[ix] for ix, feature in enumerate(birth_header) if feature in cols_of_interest] for x in birth_data])
# +
# set batch size for training
batch_size = 150

# make results reproducible
seed = 3
np.random.seed(seed)
tf.random.set_seed(seed)

# Split data into train/test = 80%/20%
train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
# +
# Record training column max and min for scaling of non-training data
train_max = np.max(x_vals_train, axis=0)
train_min = np.min(x_vals_train, axis=0)

def normalize_cols(mat, max_vals, min_vals):
    """Min-max scale each column of `mat` to [0, 1] using the supplied
    per-column extrema (taken from the training split only, so train and
    test are scaled consistently)."""
    return (mat - min_vals) / (max_vals - min_vals)

# nan_to_num guards against 0/0 NaNs produced by constant columns
x_vals_train = np.nan_to_num(normalize_cols(x_vals_train, train_max, train_min))
x_vals_test = np.nan_to_num(normalize_cols(x_vals_test, train_max, train_min))
# +
# Define Variable Functions (weights and bias)
def init_weight(shape, st_dev):
    """Weight Variable drawn from a normal distribution with std `st_dev`."""
    weight = tf.Variable(tf.random.normal(shape, stddev=st_dev))
    return weight

def init_bias(shape, st_dev):
    """Bias Variable drawn from a normal distribution with std `st_dev`."""
    bias = tf.Variable(tf.random.normal(shape, stddev=st_dev))
    return bias
# -

# Symbolic model input: 7 features per sample
x_data = tf.keras.Input(dtype=tf.float32, shape=(7,))

# +
# Create fully connected layer:
def fully_connected(input_layer, weights, biases):
    """Dense layer with ReLU activation wired from externally owned
    `weights`/`biases` Variables via a Lambda layer, so the same Variables
    can be updated by hand in the training loop below."""
    return tf.keras.layers.Lambda(lambda x: tf.nn.relu(tf.add(tf.matmul(x, weights), biases)))(input_layer)
# -

# Architecture: hidden layers of 25 - 10 - 3 units
# +
#--------Create the first layer (25 hidden nodes)--------
weight_1 = init_weight(shape=[7, 25], st_dev=5.0)
bias_1 = init_bias(shape=[25], st_dev=10.0)
layer_1 = fully_connected(x_data, weight_1, bias_1)

#--------Create the second layer (10 hidden nodes)--------
weight_2 = init_weight(shape=[25, 10], st_dev=5.0)
bias_2 = init_bias(shape=[10], st_dev=10.0)
layer_2 = fully_connected(layer_1, weight_2, bias_2)

#--------Create the third layer (3 hidden nodes)--------
weight_3 = init_weight(shape=[10, 3], st_dev=5.0)
bias_3 = init_bias(shape=[3], st_dev=10.0)
layer_3 = fully_connected(layer_2, weight_3, bias_3)

#--------Create our output layer (1 output value)--------
weight_4 = init_weight(shape=[3, 1], st_dev=5.0)
bias_4 = init_bias(shape=[1], st_dev=10.0)
final_output = fully_connected(layer_3, weight_4, bias_4)

model = tf.keras.Model(inputs=x_data, outputs=final_output, name='multiple_layers_neural_network')
# +
# Declare Adam Optimizer
optimizer = tf.keras.optimizers.Adam(0.025)

# Training Loop
loss_vec = []
test_loss = []
for i in range(200):
    # sample a random training batch (with replacement)
    rand_index = np.random.choice(len(x_vals_train), size=batch_size)
    rand_x = x_vals_train[rand_index]
    rand_y = np.transpose([y_vals_train[rand_index]]) # column vector (batch, 1)
    # Open a GradientTape
    with tf.GradientTape(persistent=True) as tape:
        # Forward pass.
        output = model(rand_x)
        # Loss: mean absolute error (L1) — not MSE as the original comment claimed.
        loss = tf.reduce_mean(tf.abs(rand_y - output))
        loss_vec.append(loss)
    # Get gradients of loss with reference to the weights and bias variables
    # to adjust (persistent=True lets the tape be queried once per variable).
    gradients_w1 = tape.gradient(loss, weight_1)
    gradients_b1 = tape.gradient(loss, bias_1)
    gradients_w2 = tape.gradient(loss, weight_2)
    gradients_b2 = tape.gradient(loss, bias_2)
    gradients_w3 = tape.gradient(loss, weight_3)
    gradients_b3 = tape.gradient(loss, bias_3)
    gradients_w4 = tape.gradient(loss, weight_4)
    gradients_b4 = tape.gradient(loss, bias_4)
    # Update the weights and bias variables of the model.
    optimizer.apply_gradients(zip([gradients_w1, gradients_b1, gradients_w2, gradients_b2,
                                   gradients_w3, gradients_b3, gradients_w4, gradients_b4],
                                  [weight_1, bias_1, weight_2, bias_2,
                                   weight_3, bias_3, weight_4, bias_4]))
    # Forward pass on the held-out split.
    output_test = model(x_vals_test)
    # Test loss: mean absolute error (L1), same metric as training.
    temp_loss = tf.reduce_mean(tf.abs(np.transpose([y_vals_test]) - output_test))
    test_loss.append(temp_loss)
    if (i+1) % 25 ==0:
        print(f'Generation: {i+1}. Loss = {loss.numpy()}')
# -

plt.plot(loss_vec, 'k--', label='Train Loss')
plt.plot(test_loss, 'r--', label='Test Loss')
plt.title('Loss per Generation')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.show()

# +
# Model Accuracy
# Column 0 is presumably the LOW indicator (1 if birth weight < 2500 g) —
# TODO(review): confirm against birth_header.
actuals = np.array([x[0] for x in birth_data])
test_actuals = actuals[test_indices]
train_actuals = actuals[train_indices]
test_preds = model(x_vals_test)
train_preds = model(x_vals_train)
# Threshold the regression output at 2500 g to obtain a binary prediction.
test_preds = np.array([1.0 if x < 2500.0 else 0.0 for x in test_preds])
train_preds = np.array([1.0 if x < 2500.0 else 0.0 for x in train_preds])

# Print out accuracies
test_acc = np.mean([x == y for x, y in zip(test_preds, test_actuals)])
train_acc = np.mean([x == y for x, y in zip(train_preds, train_actuals)])
print('On predicting the category of low birthweight from regression output (<2500g):')
print(f'Test Accuracy: {test_acc}')
print(f'Train Accuracy: {train_acc}')
Chapter 6 - Neural Networks/Using a multilayer neural network.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:pytorch_p36] # language: python # name: conda-env-pytorch_p36-py # --- # Chenling: you have to run this in R to install the R packages # # install.packages("idr") # # source("https://bioconductor.org/biocLite.R") # # biocLite("edgeR") # # biocLite("DESeq2") # + # %load_ext autoreload # %autoreload 2 import os os.chdir("/home/ec2-user/scVI/") import sys os.environ["PATH"] += os.pathsep + '/home/ec2-user/anaconda3/envs/pytorch_p36/bin/R' os.environ["R_HOME"] = '/home/ec2-user/anaconda3/envs/pytorch_p36/lib/R' os.getcwd() # - import matplotlib.pyplot as plt import matplotlib matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 # %matplotlib inline # %matplotlib inline # + from scvi.dataset.dataset10X import Dataset10X from scvi.dataset.pbmc import PbmcDataset import pandas as pd from scvi.models.vae import VAE from scvi.models.scanvi import SCANVI from scvi.inference import UnsupervisedTrainer, SemiSupervisedTrainer, AlternateSemiSupervisedTrainer from sklearn.metrics import roc_auc_score from scvi.inference.posterior import get_bayes_factors from scvi.inference.posterior import * import numpy as np from scvi.dataset.dataset import GeneExpressionDataset import os from scvi.metrics.clustering import select_indices_evenly # + def auc_score_threshold(gene_set, bayes_factor, gene_symbols): # put ones on the genes from the gene_set true_labels = np.array([g in gene_set for g in gene_symbols]) estimated_score = np.abs(bayes_factor) indices = np.isfinite(estimated_score) return roc_auc_score(true_labels[indices], estimated_score[indices]) def WeightedAccuracy(y,y_pred,cell_types): res = dict() for i in np.unique(y): res[cell_types[i]] = (np.mean(y_pred[y == i] == i), sum(y==i)) return(res) pbmc = PbmcDataset() de_data = pbmc.de_metadata 
pbmc.update_cells(pbmc.batch_indices.ravel()==0) # pbmc.labels = pbmc.labels.reshape(len(pbmc),1) donor = Dataset10X('fresh_68k_pbmc_donor_a') donor.gene_names = donor.gene_symbols donor.labels = np.repeat(0,len(donor)).reshape(len(donor),1) donor.cell_types = ['unlabelled'] all_dataset = GeneExpressionDataset.concat_datasets(pbmc, donor) # Now resolve the Gene symbols to properly work with the DE all_gene_symbols = donor.gene_symbols[ np.array( [np.where(donor.gene_names == x)[0][0] for x in list(all_dataset.gene_names)] )] ##################################################################### # Gene sets 1 ############################################################################ path_geneset = "Additional_Scripts/genesets.txt" geneset_matrix = np.loadtxt(path_geneset, dtype=np.str)[:, 2:] CD4_TCELL_VS_BCELL_NAIVE, CD8_TCELL_VS_BCELL_NAIVE, CD8_VS_CD4_NAIVE_TCELL, NAIVE_CD8_TCELL_VS_NKCELL \ = [set(geneset_matrix[i:i + 2, :].flatten()) & set(all_gene_symbols) for i in [0, 2, 4, 6]] # these are the length of the positive gene sets for the DE print((len(CD4_TCELL_VS_BCELL_NAIVE), len(CD8_TCELL_VS_BCELL_NAIVE), len(CD8_VS_CD4_NAIVE_TCELL), len(NAIVE_CD8_TCELL_VS_NKCELL))) print(all_dataset.cell_types) comparisons = [ ['CD4 T cells', 'B cells'], ['CD8 T cells', 'B cells'], ['CD8 T cells', 'CD4 T cells'], ['CD8 T cells', 'NK cells'] ] gene_sets = [CD4_TCELL_VS_BCELL_NAIVE, CD8_TCELL_VS_BCELL_NAIVE, CD8_VS_CD4_NAIVE_TCELL, NAIVE_CD8_TCELL_VS_NKCELL] ##################################################################### # Gene sets 2 ############################################################################ print(de_data.columns.values) CD = de_data['CD_adj.P.Val'] BDC = de_data['BDC_adj.P.Val'] BDC2 = de_data['BDC2_adj.P.Val'] CD = np.asarray(de_data['GS'][CD<0.05]) BDC = np.asarray(de_data['GS'][BDC<0.05]) BDC2 = np.asarray(de_data['GS'][BDC2<0.05]) gene_sets = [set(CD) & set(all_gene_symbols), set(BDC)& set(all_gene_symbols), set(BDC2) & set(all_gene_symbols)] 
comparisons = [ ['CD8 T cells', 'CD4 T cells'], ['B cells', 'Dendritic Cells'], ['B cells', 'Dendritic Cells'] ] # + vae = VAE(all_dataset.nb_genes, n_batch=all_dataset.n_batches, n_labels=all_dataset.n_labels, n_hidden=128, n_latent=10, n_layers=2, dispersion='gene') import torch trainer = UnsupervisedTrainer(vae, all_dataset, train_size=1.0) # trainer.train(n_epochs=200) # torch.save(trainer.model,'../DE/vae.model.pkl') trainer.model = torch.load('DE/vae.model.pkl') trainer.train_set.entropy_batch_mixing() full = trainer.create_posterior(trainer.model, all_dataset, indices=np.arange(len(all_dataset))) latent, batch_indices, labels = full.sequential().get_latent() keys = all_dataset.cell_types from scvi.inference.posterior import entropy_batch_mixing sample = select_indices_evenly(2000, batch_indices) batch_entropy = entropy_batch_mixing(latent[sample, :], batch_indices[sample]) # - latent_labelled = latent[batch_indices.ravel()==0, :] latent_unlabelled = latent[batch_indices.ravel()==1, :] labels_labelled = labels[batch_indices.ravel()==0] labels_unlabelled = labels[batch_indices.ravel()==1] n_labels = np.sum(batch_indices.ravel()==1) from sklearn.neighbors import KNeighborsClassifier neigh = KNeighborsClassifier(n_neighbors=10) neigh = neigh.fit(latent_labelled, labels_labelled) vae_pred = neigh.predict(latent) np.mean(vae_pred[batch_indices.ravel()==0]==labels[batch_indices.ravel()==0]) # ## Code for running DE from copy import deepcopy batch2 = deepcopy(all_dataset) batch2.update_cells(batch_indices.ravel()==1) cell_type_label = \ [[np.where(all_dataset.cell_types == x[i])[0].astype('int')[0] for i in [0, 1]] for x in comparisons] # + from scipy.stats import kendalltau import rpy2 from rpy2.robjects import r import rpy2.robjects as robj import rpy2.robjects.numpy2ri from rpy2.robjects.packages import importr rpy2.robjects.numpy2ri.activate() from rpy2.robjects.lib import grid from rpy2.robjects import r, Formula py2ri_orig = rpy2.robjects.conversion.py2ri from 
rpy2.rinterface import RRuntimeWarning r["library"]("idr") def idr(bayes, p_value, p_prior=0.1): p_val_1r = r.matrix(bayes[:, np.newaxis], nrow=3343, ncol=1) r.assign("p_val_1", p_val_1r) p_val_2r = r.matrix(p_value[:, np.newaxis], nrow=3343, ncol=1) r.assign("p_val_2", p_val_2r) r("x <- cbind(p_val_1[, 1], p_val_2[, 1])") r("mu = 1") r("sigma = 0.5") r("rho = 0.5") r.assign("p", 0.25) return kendalltau(bayes, p_value)[0] r("idr.out <- est.IDR(x, mu, sigma, rho, p, eps=0.001, max.ite=20)") return r("idr.out$para$p")[0] r["library"]("edgeR") import pandas def conversion_pydataframe(obj): """ Convert pandas DataFrame or python object to an R dataframe/object. """ if isinstance(obj, pandas.core.frame.DataFrame): od = OrderedDict() for name, values in obj.iteritems(): if values.dtype.kind == 'O': od[name] = rpy2.robjects.vectors.StrVector(values) else: od[name] = rpy2.robjects.conversion.py2ri(values) return rpy2.robjects.vectors.DataFrame(od) else: return py2ri_orig(obj) # - def run_edgeR(gene_expression, bio_assignment, gene_names, batch_info=None, batch=True): if batch_info is None: batch = False r_counts = conversion_pydataframe(gene_expression) r_bio_group = conversion_pydataframe(bio_assignment) r_dge = r.DGEList(counts=r.t(r_counts), genes=gene_names) r.assign("dge", r_dge) r.assign("bio_group", r.factor(r_bio_group)) r("dge$samples$bio_group <- bio_group") if batch: r_batch_group = conversion_pydataframe(batch_info) r.assign("batch_group", r.factor(r_batch_group)) r("dge$samples$batch_group <- batch_group") r("""dge <- suppressWarnings(edgeR::calcNormFactors(dge))""") if not batch: r("""design <- model.matrix(~bio_group, data = dge$samples)""") r("""colnames(design) <- c("Intercept", "bio")""") if batch: r("""design <- model.matrix(~bio_group+batch_group, data = dge$samples)""") r("""colnames(design) <- c("Intercept", "bio", "batch")""") r("""dge <- estimateDisp(dge, design)""") r("""fit <- glmFit(dge, design)""") if not batch: r("""lrt <- glmLRT(fit)""") if 
batch: r("""lrt <- glmLRT(fit, coef="bio")""") return r("lrt$table$PValue") # sanity check, this should work and return some numbers run_edgeR(all_dataset.X[:100].A, np.hstack((np.zeros(50), np.ones(50))), all_dataset.gene_names) run_edgeR(all_dataset.X[:100].A, np.hstack((np.zeros(50), np.ones(50))), all_dataset.gene_names, batch_info=np.hstack((np.zeros(20), np.ones(80)))) for t, comparison in enumerate(comparisons): print(t, comparison) interest_list = ["CD", "BDC"] p_prior_list = [0.03, 0.25] de_data.index = de_data["GS"] # ## Running SCANVI scanvi = SCANVI(all_dataset.nb_genes, all_dataset.n_batches, all_dataset.n_labels, n_layers=2) #,classifier_parameters = {'dropout_rate':0.5, 'n_hidden':256, 'n_layers':2}) scanvi.load_state_dict(trainer.model.state_dict(), strict=False) trainer_scanvi = SemiSupervisedTrainer(scanvi, all_dataset, classification_ratio=50, n_epochs_classifier=1, lr_classification=5 * 1e-3) #trainer_scanvi = AlternateSemiSupervisedTrainer(scanvi, all_dataset, n_epochs_classifier=10, lr_classification=5 * 1e-3) trainer_scanvi.labelled_set = trainer_scanvi.create_posterior(indices=(all_dataset.batch_indices == 0)) trainer_scanvi.unlabelled_set = trainer_scanvi.create_posterior(indices=(all_dataset.batch_indices == 1)) trainer_scanvi.train(n_epochs=5) # check for conformity between scVI and SCANVI + kNN np.mean(trainer_scanvi.unlabelled_set.sequential().compute_predictions()[1] == neigh.predict(latent_unlabelled)) trainer_scanvi.labelled_set.accuracy() de_posterior = trainer_scanvi.create_posterior(trainer_scanvi.model, all_dataset, indices=np.arange(0,len(all_dataset))) de_posterior.get_regenerate_scale(1,5) def reimpute(t, c, s, gamma): cell_idx = np.where(np.logical_and(vae_pred == cell_type_label[t][c], batch_indices.ravel() == s))[0] # REIMPUTE de_posterior = trainer_scanvi.create_posterior(trainer_scanvi.model, all_dataset, indices=cell_idx) # counterfactuals ? 
scale_info = de_posterior.get_regenerate_scale(gamma, int(cell_type_label[t][c])) return scale_info def bayes_SCANVI(t, comparaison, T=1, n_perm = 10000): gene_set = gene_sets[t] res_scanvi = np.zeros((3, 2, T)) # 3 datasets, 2 metrics, T indep runs p_value = de_data[interest_list[t] + "_adj.P.Val"][all_gene_symbols].values p_prior = p_prior_list[t] for rep in range(T): #PBMC8K only type_A = reimpute(t, 0, 0, 0) type_B = reimpute(t, 1, 0, 0) scale_pbmc = np.concatenate((type_A, type_B), axis=0) local_labels = np.concatenate((np.ones(type_A.shape[0]), np.zeros(type_B.shape[0]))) bayes_pbmc = get_bayes_factors(scale_pbmc, local_labels, 0, 1, m_permutation=n_perm) res_scanvi[0, 0, rep] = auc_score_threshold(gene_set, bayes_pbmc, all_gene_symbols) res_scanvi[0, 1, rep] = idr(np.abs(bayes_pbmc), -np.log(p_value), p_prior=p_prior) # PBMC68K only type_A = reimpute(t, 0, 1, 1) type_B = reimpute(t, 1, 1, 1) questionable_scale_68k = np.concatenate((type_A, type_B), axis=0) local_labels = np.concatenate((np.ones(type_A.shape[0]), np.zeros(type_B.shape[0]))) bayes_questionable = get_bayes_factors(questionable_scale_68k, local_labels, 0, 1, m_permutation=n_perm) res_scanvi[1, 0, rep] = auc_score_threshold(gene_set, bayes_questionable, all_gene_symbols) res_scanvi[1, 1, rep] = idr(np.abs(bayes_questionable), -np.log(p_value), p_prior=p_prior) #WHOLE PBMC #first all imputed pbmc type_A = np.concatenate((reimpute(t, 0, 0, 0), reimpute(t, 0, 0, 1)), axis=0) type_B = np.concatenate((reimpute(t, 1, 0, 1), reimpute(t, 1, 0, 1)), axis=0) scale_pbmc = np.concatenate((type_A, type_B), axis=0) local_labels = np.concatenate((np.ones(type_A.shape[0]), np.zeros(type_B.shape[0]))) probs_all_imputed_pbmc = get_bayes_factors(scale_pbmc, local_labels, 0, 1,logit=False, m_permutation=n_perm) #second all imputed 68K type_A = np.concatenate((reimpute(t, 0, 1, 0), reimpute(t, 0, 1, 1)), axis=0) type_B = np.concatenate((reimpute(t, 1, 1, 1), reimpute(t, 1, 1, 1)), axis=0) scale_68k = 
np.concatenate((type_A, type_B), axis=0) local_labels = np.concatenate((np.ones(type_A.shape[0]), np.zeros(type_B.shape[0]))) probs_all_imputed_68k = get_bayes_factors(scale_68k, local_labels, 0, 1,logit=False, m_permutation=n_perm) p_s = 0.5 bayes_all_imputed = p_s * probs_all_imputed_pbmc + (1 - p_s) * probs_all_imputed_68k bayes_all_imputed = np.log(bayes_all_imputed + 1e-8) - np.log(1 - bayes_all_imputed + 1e-8) res_scanvi[2, 0, rep] = auc_score_threshold(gene_set, bayes_all_imputed, all_gene_symbols) res_scanvi[2, 1, rep] = idr(np.abs(bayes_all_imputed), -np.log(p_value), p_prior=p_prior) return res_scanvi res_scanvi_CD = bayes_SCANVI(0, ['CD8 T cells', 'CD4 T cells'], T=10) res_scanviBD = bayes_SCANVI(1, ['B cells', 'Dendritic Cells'], T=10) res_scanvi_CD res_scanviBD # # Analysis for DE in PBMCs # + # trainer = UnsupervisedTrainer(vae, all_dataset, train_size=1.0) # trainer.train(n_epochs=200) # - def bayes(t, comparaison, T=1, n_perm = 10000): gene_set = gene_sets[t] cell_idx_8k = np.where(np.logical_or( all_dataset.labels.ravel() == cell_type_label[t][0], all_dataset.labels.ravel() == cell_type_label[t][1]))[0] cell_idx_68k = np.where(np.logical_or( vae_pred[batch_indices.ravel()==1] == cell_type_label[t][0], vae_pred[batch_indices.ravel()==1] == cell_type_label[t][1]))[0] cell_indices = np.where(np.logical_or( vae_pred == cell_type_label[t][0], vae_pred == cell_type_label[t][1]))[0] joint_de_posterior = trainer.create_posterior(trainer.model, all_dataset, indices=cell_indices) scale_pbmc = joint_de_posterior.sequential().get_harmonized_scale(0) scale_68k = joint_de_posterior.sequential().get_harmonized_scale(1) questionable_de_posterior = trainer.create_posterior(trainer.model, batch2, indices=cell_idx_68k) questionable_scale_68k = questionable_de_posterior.sequential().get_harmonized_scale(1) res_vi = np.zeros((3, 2, T)) # 3 datasets, 2 metrics, T indep runs res_eR = np.zeros((3, 2, T)) p_value = de_data[interest_list[t] + 
"_adj.P.Val"][all_gene_symbols].values p_prior = p_prior_list[t] for rep in range(T): #PBMC8K only bayes_pbmc = get_bayes_factors(scale_pbmc, all_dataset.labels.ravel()[cell_indices], cell_type_label[t][0], cell_type_label[t][1], m_permutation=n_perm) res_vi[0, 0, rep] = auc_score_threshold(gene_set, bayes_pbmc, all_gene_symbols) res_vi[0, 1, rep] = idr(np.abs(bayes_pbmc), -np.log(p_value), p_prior=p_prior) ind_0 = np.random.choice(np.where(all_dataset.labels.ravel() == cell_type_label[t][0])[0], 100) ind_1 = np.random.choice(np.where(all_dataset.labels.ravel() == cell_type_label[t][1])[0], 100) expression_data = np.vstack((all_dataset.X[ind_0].A, all_dataset.X[ind_1].A)) bio_data = np.hstack((all_dataset.labels.ravel()[ind_0], all_dataset.labels.ravel()[ind_1])) edgeR_pbmc = run_edgeR(expression_data, bio_data, all_dataset.gene_names) res_eR[0, 0, rep] = auc_score_threshold(gene_set, -np.log(edgeR_pbmc), all_gene_symbols) res_eR[0, 1, rep] = idr(-np.log(edgeR_pbmc), -np.log(p_value), p_prior=p_prior) # PBMC68K only bayes_questionable = get_bayes_factors(questionable_scale_68k, vae_pred[batch_indices.ravel()==1][cell_idx_68k], cell_type_label[t][0], cell_type_label[t][1], logit=True, m_permutation=n_perm) res_vi[1, 0, rep] = auc_score_threshold(gene_set, bayes_questionable, all_gene_symbols) res_vi[1, 1, rep] = idr(np.abs(bayes_questionable), -np.log(p_value), p_prior=p_prior) ind_0 = np.random.choice(np.where(vae_pred[batch_indices.ravel()==1] == cell_type_label[t][0])[0], 100) ind_1 = np.random.choice(np.where(vae_pred[batch_indices.ravel()==1] == cell_type_label[t][1])[0], 100) expression_data = np.vstack((all_dataset.X[batch_indices.ravel()==1][ind_0].A, all_dataset.X[batch_indices.ravel()==1][ind_1].A)) bio_data = np.hstack((vae_pred[batch_indices.ravel()==1][ind_0], vae_pred[batch_indices.ravel()==1][ind_1])) edgeR_68k = run_edgeR(expression_data, bio_data, all_dataset.gene_names) res_eR[1, 0, rep] = auc_score_threshold(gene_set, -np.log(edgeR_68k), 
all_gene_symbols) res_eR[1, 1, rep] = idr(-np.log(edgeR_68k), -np.log(p_value), p_prior=p_prior) #WHOLE PBMC probs_all_imputed_pbmc = get_bayes_factors(scale_pbmc, vae_pred[cell_indices], cell_type_label[t][0], cell_type_label[t][1], logit=False, m_permutation=n_perm) probs_all_imputed_68k = get_bayes_factors(scale_68k, vae_pred[cell_indices], cell_type_label[t][0], cell_type_label[t][1], logit=False, m_permutation=n_perm) p_s = 0.5 bayes_all_imputed = p_s * probs_all_imputed_pbmc + (1 - p_s) * probs_all_imputed_68k bayes_all_imputed = np.log(bayes_all_imputed + 1e-8) - np.log(1 - bayes_all_imputed + 1e-8) res_vi[2, 0, rep] = auc_score_threshold(gene_set, bayes_all_imputed, all_gene_symbols) res_vi[2, 1, rep] = idr(np.abs(bayes_all_imputed), -np.log(p_value), p_prior=p_prior) ind_0 = np.random.choice(np.where(vae_pred == cell_type_label[t][0])[0], 100) ind_1 = np.random.choice(np.where(vae_pred == cell_type_label[t][1])[0], 100) expression_data = np.vstack((all_dataset.X[ind_0].A, all_dataset.X[ind_1].A)) bio_data = np.hstack((vae_pred[ind_0], vae_pred[ind_1])) batch_data = np.hstack((batch_indices.ravel()[ind_0], batch_indices.ravel()[ind_1])) edgeR_all = run_edgeR(expression_data, bio_data, all_dataset.gene_names, batch_info=batch_data) res_eR[2, 0, rep] = auc_score_threshold(gene_set, -np.log(edgeR_all), all_gene_symbols) res_eR[2, 1, rep] = idr(-np.log(edgeR_all), -np.log(p_value), p_prior=p_prior) return res_vi, res_eR # + # res_vi_CD, res_eR_CD = bayes(0, ['CD8 T cells', 'CD4 T cells'], T=10) # + res_vi_CD = np.zeros(shape=(3, 2, 20)) res_vi_CD[:, :, 0:10] = np.array([[[0.74179377, 0.74341328, 0.74160423, 0.74092827, 0.74140632, 0.74038472, 0.74296032, 0.74308575, 0.74411432, 0.74314708], [0.08750865, 0.08894623, 0.08789463, 0.08762385, 0.08780319, 0.08762654, 0.08826396, 0.08900238, 0.08873346, 0.08811804]], [[0.74675263, 0.74551083, 0.74674288, 0.74913868, 0.74716935, 0.74822022, 0.74852684, 0.74792196, 0.74713312, 0.74715402], [0.08048591, 0.08033702, 
0.08046566, 0.08046593, 0.08016567, 0.08051044, 0.08016824, 0.07980553, 0.08121617, 0.08056803]], [[0.76197345, 0.76349121, 0.76386194, 0.76282362, 0.76226056, 0.76212258, 0.76225777, 0.76212816, 0.7634271 , 0.76225917], [0.09586819, 0.09568671, 0.09580298, 0.09603906, 0.09650841, 0.09511547, 0.09542531, 0.09559493, 0.09614803, 0.09609681]]]) res_vi_CD[:, :, 10:20] = np.array([[[0.75417419, 0.75457558, 0.75468848, 0.75278605, 0.75332542, 0.75416165, 0.75286967, 0.75245155, 0.75249058, 0.75410033], [0.09158216, 0.09102047, 0.09207042, 0.09157896, 0.09150232, 0.09132688, 0.09088955, 0.0913412 , 0.09111706, 0.09173033]], [[0.79908962, 0.79865199, 0.79874398, 0.79935025, 0.80011819, 0.79805966, 0.79902551, 0.79841924, 0.79771541, 0.79759416], [0.09859252, 0.09773535, 0.0975626 , 0.0980051 , 0.09800814, 0.0964899 , 0.09836047, 0.09763968, 0.09819487, 0.09751467]], [[0.79269941, 0.79350777, 0.79318582, 0.79333077, 0.79405968, 0.79299488, 0.79301439, 0.79338931, 0.7933015 , 0.79138235], [0.10209858, 0.10273243, 0.10230053, 0.10252938, 0.10292382, 0.10213497, 0.10260402, 0.10224322, 0.10237377, 0.10181907]]]) res_scanvi_CD = np.zeros(shape=(3, 2, 20)) res_scanvi_CD[:, :, :10] = np.array([[[0.73591227, 0.74181607, 0.74102305, 0.73939657, 0.7415429 , 0.74234429, 0.74275126, 0.74165301, 0.74082653, 0.73958055], [0.08576389, 0.08914505, 0.08794416, 0.0883914 , 0.08934365, 0.08863148, 0.08805632, 0.08908045, 0.08863834, 0.08748389]], [[0.73813247, 0.73829135, 0.73999866, 0.7371262 , 0.73926138, 0.73710669, 0.73958194, 0.73939379, 0.73613806, 0.73713596], [0.08108458, 0.08152833, 0.08175576, 0.08071192, 0.0820627 , 0.08104455, 0.08215112, 0.08162892, 0.08114707, 0.0809839 ]], [[0.73731714, 0.73618823, 0.7361283 , 0.73251717, 0.73472064, 0.7323987 , 0.73572412, 0.7329994 , 0.73235132, 0.73477918], [0.05325123, 0.05274954, 0.0528162 , 0.05147266, 0.05318291, 0.0514511 , 0.05263902, 0.05149933, 0.0517519 , 0.05266503]]]) res_scanvi_CD[:, :, 10:20] = np.array([[[0.72642243, 
0.72194719, 0.72290886, 0.72434997, 0.72413952, 0.72289911, 0.72474718, 0.72087124, 0.72483638, 0.72704403], [0.08455153, 0.08383432, 0.08644679, 0.08488945, 0.085161 , 0.08583689, 0.08629802, 0.0857071 , 0.086799 , 0.08682616]], [[0.71818554, 0.72028588, 0.7213256 , 0.71729217, 0.71859808, 0.71968379, 0.71805175, 0.71740088, 0.7206371 , 0.72063292], [0.0764584 , 0.0765634 , 0.07685212, 0.07651119, 0.07691423, 0.07666002, 0.07657037, 0.07576228, 0.07721007, 0.07710098]], [[0.73986765, 0.73857289, 0.73862724, 0.74205997, 0.74187879, 0.73936034, 0.74235544, 0.73938264, 0.7399903 , 0.73793456], [0.05429242, 0.05423416, 0.05436585, 0.05527159, 0.05515136, 0.05405697, 0.05595232, 0.05381156, 0.05469455, 0.05400319]]]) res_eR_CD = np.array([[[ 0.76960686, 0.71892282, 0.72494648, 0.75238605, 0.76890024, 0.70751661, 0.75340625, 0.74500072, 0.73596245, 0.70655913], [ 0.04291846, 0.00267855, 0.03567862, 0.0423587 , 0.03815828, 0.02030369, 0.06091144, 0.03342154, 0.03137338, 0.03229609]], [[ 0.64782078, 0.63272121, 0.61034782, 0.59525801, 0.62588501, 0.61103631, 0.63377905, 0.60779591, 0.60141128, 0.62892611], [-0.00643908, -0.02246596, -0.00749381, 0.00080053, -0.00311443, -0.0044296 , 0.01644145, 0.00226646, -0.02082449, 0.00244452]], [[ 0.65886323, 0.67516 , 0.67036699, 0.66198934, 0.63316999, 0.64692601, 0.65000753, 0.63855951, 0.63738739, 0.66806596], [ 0.01081833, 0.00459823, -0.00159639, 0.01723901, 0.0048482 , 0.00922013, 0.00262937, -0.00441655, 0.01479358, 0.01156533]]]) # + # res_viBD, res_eRBD = bayes(1, ['B cells', 'Dendritic Cells'], T=10) # + res_viBD = np.zeros(shape=(3, 2, 20)) res_viBD[:, :, 0:10] = np.array([[[0.76216623, 0.7621787 , 0.76190101, 0.76309841, 0.76293924, 0.7629658 , 0.76238431, 0.76258438, 0.76264306, 0.7626761 ], [0.27079867, 0.27089967, 0.27083262, 0.27195724, 0.27135307, 0.27161809, 0.27123275, 0.27124604, 0.27110905, 0.27136024]], [[0.6647708 , 0.66397908, 0.66641847, 0.66500968, 0.66508222, 0.66401142, 0.66490156, 0.6627357 , 0.66476641, 
0.66393403], [0.17887529, 0.17828975, 0.18058998, 0.17899893, 0.17895525, 0.17814105, 0.17894282, 0.17672005, 0.17928901, 0.17828845]], [[0.73260152, 0.73267499, 0.73358615, 0.73319918, 0.73316546, 0.73398051, 0.73346186, 0.73343437, 0.7333542 , 0.73397912], [0.2442582 , 0.24450203, 0.24486564, 0.24488628, 0.24457159, 0.24527592, 0.24504063, 0.2447643 , 0.24495298, 0.24537476]]]) res_viBD[:, :, 10:20] = np.array([[[0.75488502, 0.75374653, 0.75411455, 0.75401729, 0.75406719, 0.75420996, 0.75399627, 0.75460894, 0.75443475, 0.75364973], [0.2638777 , 0.26282807, 0.26325461, 0.26283149, 0.2631392 , 0.26339292, 0.26314752, 0.26362337, 0.26330234, 0.2628288 ]], [[0.65061453, 0.650344 , 0.65092572, 0.65210856, 0.65120918, 0.6511579 , 0.65051103, 0.6497193 , 0.64993115, 0.65079588], [0.15986153, 0.15937252, 0.16013605, 0.16154904, 0.160391 , 0.16011636, 0.15967454, 0.15909964, 0.15904088, 0.15983152]], [[0.7162087 , 0.71617104, 0.71592916, 0.71521645, 0.71573926, 0.71538278, 0.71668923, 0.71594856, 0.71633877, 0.71671949], [0.23016267, 0.23022916, 0.229731 , 0.2291882 , 0.2297717 , 0.22934815, 0.23083626, 0.23023846, 0.23039364, 0.23059034]]]) res_scanviBD = np.zeros(shape=(3, 2, 20)) res_scanviBD[:, :, 0:10] = np.array([[[0.75741636, 0.75759194, 0.75530756, 0.75769798, 0.75687253, 0.75750345, 0.75652114, 0.75726688, 0.75536116, 0.75609097], [0.26981428, 0.26968712, 0.26795401, 0.26931847, 0.26893219, 0.26969672, 0.26857317, 0.26922094, 0.26761619, 0.26827264]], [[0.61778868, 0.61487407, 0.61268003, 0.61774986, 0.61310765, 0.61494823, 0.61750544, 0.61525688, 0.61407726, 0.61757244], [0.12643376, 0.12426852, 0.12215373, 0.12657811, 0.12234908, 0.12382592, 0.12636433, 0.12479465, 0.12327429, 0.12611727]], [[0.70791976, 0.70693767, 0.70634787, 0.70565133, 0.70654147, 0.70728075, 0.70637213, 0.70708992, 0.7073988 , 0.70590476], [0.21001276, 0.20899559, 0.2084635 , 0.20813155, 0.20902723, 0.20955855, 0.20845892, 0.20910832, 0.20938078, 0.20817102]]]) res_scanviBD[:, :, 10:20] = 
np.array([[[0.76208953, 0.76043308, 0.76239402, 0.76090783, 0.76078608, 0.76331119, 0.76113239, 0.76129134, 0.76086995, 0.76251323], [0.27278669, 0.27165384, 0.27267647, 0.27240443, 0.27141383, 0.27418495, 0.27250955, 0.27185519, 0.27196155, 0.27315013]], [[0.62128293, 0.62249188, 0.62497008, 0.62396281, 0.62468315, 0.62209244, 0.6229671 , 0.62243921, 0.62077675, 0.61347822], [0.1311073 , 0.13233849, 0.13501059, 0.13417064, 0.13440472, 0.13212217, 0.13216301, 0.13185219, 0.13153761, 0.12328159]], [[0.71552325, 0.71486182, 0.71476133, 0.71533704, 0.71545486, 0.71520282, 0.71598923, 0.71492975, 0.71560595, 0.7149205 ], [0.21705476, 0.21682827, 0.21658671, 0.2171116 , 0.21707235, 0.21699594, 0.21809479, 0.21661098, 0.21735403, 0.21699208]]]) res_eRBD = np.array([[[0.7416328 , 0.74108303, 0.7507067 , 0.75045367, 0.7474157 , 0.73776104, 0.7328043 , 0.7499224 , 0.72650216, 0.73513615], [0.24713109, 0.25346313, 0.25564169, 0.26018451, 0.25280303, 0.24126276, 0.24769716, 0.25299663, 0.23575936, 0.24152132]], [[0.70013099, 0.68185462, 0.70254589, 0.69692252, 0.68672046, 0.69266935, 0.68556026, 0.69962944, 0.67819704, 0.68147574], [0.20929109, 0.18102042, 0.20679624, 0.19731936, 0.20039674, 0.19525754, 0.1890307 , 0.21515119, 0.19126713, 0.17762601]], [[0.71723514, 0.70821316, 0.70168463, 0.72272244, 0.72508906, 0.73544405, 0.70920518, 0.71120539, 0.72057794, 0.72148184], [0.22764441, 0.21764863, 0.21054128, 0.2284016 , 0.21516779, 0.23869439, 0.2199275 , 0.22334562, 0.23646588, 0.23779653]]]) # - res_vi_CD.shape # + plt.figure(figsize=(10, 5)) props = dict(widths=0.2, patch_artist=True, medianprops=dict(color="black"), whis=[5, 95], sym="") #AUC CD$CD* ax = plt.subplot(1, 2, 1) ax.set_ylabel("AUC") ax.set_xlabel("CD4 / CD8 Cell-sets") bp1 = ax.boxplot([np.array(res_vi_CD)[i, 0] for i in range(3)], positions=[0, 1, 2], **props) bp2 = ax.boxplot([np.array(res_scanvi_CD)[i, 0] for i in [2]], positions=[2.2], **props) bp3 = ax.boxplot([np.array(res_eR_CD)[i, 0] for i in 
range(3)], positions=[0.2, 1.2, 2.4], **props) for patch in bp1['boxes']: patch.set_facecolor('red') for patch in bp2['boxes']: patch.set_facecolor('green') ax.set_xticklabels(['A','B', 'A+B']) ax.legend([bp1["boxes"][0], bp2["boxes"][0], bp3["boxes"][0]], ['scVI', 'SCANVI', 'edgeR']) # AUC BDC ax = plt.subplot(1, 2, 2) ax.set_ylabel("AUC") ax.set_xlabel("B / DC Cell-sets") bp1 = ax.boxplot([np.array(res_viBD)[i, 0] for i in range(3)], positions=[0, 1, 2], **props) bp2 = ax.boxplot([np.array(res_scanviBD)[i, 0] for i in [2]], positions=[2.2], **props) bp3 = ax.boxplot([np.array(res_eRBD)[i, 0] for i in range(3)], positions=[0.2, 1.2, 2.4], **props) for patch in bp1['boxes']: patch.set_facecolor('red') for patch in bp2['boxes']: patch.set_facecolor('green') ax.set_xticklabels(['A','B', 'A+B']) ax.legend([bp1["boxes"][0], bp2["boxes"][0], bp3["boxes"][0]], ['scVI', 'SCANVI', 'edgeR']) plt.tight_layout() plt.savefig("figures/DE_panel_1.pdf", transparency=True) # + plt.figure(figsize=(10, 5)) props = dict(widths=0.2, patch_artist=True, medianprops=dict(color="black"), whis=[5, 95], sym="") #AUC CD$CD* ax = plt.subplot(1, 2, 1) ax.set_ylabel("Kendall-Tau") ax.set_xlabel("CD4 / CD8 Cell-sets") bp1 = ax.boxplot([np.array(res_vi_CD)[i, 1] for i in range(3)], positions=[0, 1, 2], **props) bp2 = ax.boxplot([np.array(res_scanvi_CD)[i, 1] for i in [2]], positions=[2.2], **props) bp3 = ax.boxplot([np.array(res_eR_CD)[i, 1] for i in range(3)], positions=[0.2, 1.2, 2.4], **props) for patch in bp1['boxes']: patch.set_facecolor('red') for patch in bp2['boxes']: patch.set_facecolor('green') ax.set_xticklabels(['A','B', 'A+B']) ax.legend([bp1["boxes"][0], bp2["boxes"][0], bp3["boxes"][0]], ['scVI', 'SCANVI', 'edgeR']) # AUC BDC ax = plt.subplot(1, 2, 2) ax.set_ylabel("Kendall-Tau") ax.set_xlabel("B / DC Cell-sets") bp1 = ax.boxplot([np.array(res_viBD)[i, 1] for i in range(3)], positions=[0, 1, 2], **props) bp2 = ax.boxplot([np.array(res_scanviBD)[i, 1] for i in [2]], positions=[2.2], 
**props) bp3 = ax.boxplot([np.array(res_eRBD)[i, 1] for i in range(3)], positions=[0.2, 1.2, 2.4], **props) for patch in bp1['boxes']: patch.set_facecolor('red') for patch in bp2['boxes']: patch.set_facecolor('green') ax.set_xticklabels(['A','B', 'A+B']) ax.legend([bp1["boxes"][0], bp2["boxes"][0], bp3["boxes"][0]], ['scVI', 'SCANVI', 'edgeR']) plt.tight_layout() plt.savefig("figures/DE_panel_2.pdf", transparency=True) # -
notebooks/.ipynb_checkpoints/DE-final-Copy1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="p8c-jsAEizFL" # Data source: https://www.kaggle.com/mirichoi0218/insurance/downloads/insurance.zip/1 # + [markdown] colab_type="text" id="wzatJhR2izFP" # # Introduction # # Health insurance in India is a growing segment of India's economy. The Indian health system is one of the largest in the world, with the number of people it concerns: nearly 1.3 billion potential beneficiaries. The health industry in India has rapidly become one of the most important sectors in the country in terms of income and job creation. In 2018, one hundred million Indian households (500 million people) do not benefit from health coverage. In 2011, 3.9%[1] of India's gross domestic product was spent in the health sector. # # According to the World Health Organization (WHO), this is among the lowest of the BRICS (Brazil, Russia, India, China, South Africa) economies. Policies are available that offer both individual and family cover. Out of this 3.9%, health insurance accounts for 5-10% of expenditure, employers account for around 9% while personal expenditure amounts to an astounding 82%. # # In the year 2016, the NSSO released the report “Key Indicators of Social Consumption in India: Health” based on its 71st round of surveys. The survey carried out in the year 2014 found out that, more than 80% of Indians are not covered under any health insurance plan, and only 18% (government funded 12%) of the urban population and 14% (government funded 13%) of the rural population was covered under any form of health insurance. # # India's public health expenditures are lower than those of other middle-income countries. In 2012, they accounted for 4% of GDP, which is half as much as in China with 5.1%. 
In terms of public health spending per capita, India ranks 184th out of 191 countries in 2012. Patients' remaining costs represent about 58% of the total.[4] The remaining costs borne by the patient represent an increasing share of the household budget, from 5% of this budget in 2000 to over 11% in 2004-2005.[5] On average, the remaining costs of poor households as a result of hospitalization accounted for 140% of their annual income in rural areas and 90% in urban areas. # # This financial burden has been one of the main reasons for the introduction of health insurance covering the hospital costs of the poorest. # + [markdown] colab_type="text" id="oesJF4SpizFR" # # Data Description: # # The data at hand contains medical costs of people characterized by certain attributes. # # # Domain: # Healthcare # # # Context: # Leveraging customer information is paramount for most businesses. In the case of an insurance company, attributes of customers like the ones mentioned below can be crucial in making business decisions. Hence, knowing to explore and # generate value out of such data can be an invaluable skill to have. # # # Attribute Information: # # - age : age of primary beneficiary # - sex : insurance contractor gender, female, male # - bmi : Body mass index, providing an understanding of body, # - weights that are relatively high or low relative to height, # - objective index of body weight (kg / m ^ 2) using the ratio of # - height to weight, ideally 18.5 to 24.9 # - children : Number of children covered by health insurance / # - Number of dependents # - smoker : Smoking # - region : the beneficiary's residential area in the US, northeast,southeast, southwest, northwest. # - charges : Individual medical costs billed by health insurance. 
# + [markdown] colab_type="text" id="hd_Df5tpizFS" # ## Import all the necessary libraries # + colab={} colab_type="code" id="1j77pOKCizFU" outputId="cbf6873b-556b-43ba-f982-a992a0dc05d5" import numpy as np import pandas as pd from matplotlib import pyplot as plt # %matplotlib inline import seaborn as sns import statsmodels.api as sm import scipy.stats as stats import copy import os # + colab={} colab_type="code" id="NhNrS58tizFa" sns.set() #setting the default seaborn style for our plots # - # Checking the working directory # + [markdown] colab_type="text" id="sNsrtbLpizFh" # ## Read the data into the notebook # + colab={} colab_type="code" id="m4CBNkarizFk" df = pd.read_csv('insurance.csv') # read the data as a data frame # + colab={} colab_type="code" id="hUl05_yTizFo" outputId="4a22bba1-8cdd-4857-a6ae-3868e3c58702" #checking the head of the data frame # + [markdown] colab_type="text" id="xgO5moKvizFt" # ## Basic EDA # * Find the shape of the data, data type of individual columns # * Check the presence of missing values # * Descriptive stats of numerical columns # * Find the distribution of numerical columns and the associated skewness and presence of outliers # * Distribution of categorical columns # + colab={} colab_type="code" id="WK8KGKH6izFv" outputId="37f4bdbb-1189-4a89-a65b-404296b6e2a1" #info about the data # + [markdown] colab_type="text" id="qHvBRKK1izFy" # ### - The data has 1338 instances with 7 attributes: 2 integer type, 2 float type and 3 object type (strings in the column) # + colab={} colab_type="code" id="miMj56kOizFz" outputId="9b152ead-8349-48b5-9e27-3e4c8e3e54a1" #check the shape of the data # + [markdown] colab_type="text" id="FA8oL5eiizF5" # The data set contains 1338 observations of data and 7 variables. 
# + [markdown] colab_type="text" id="SXZ4XYNOizF8" # # Check for missing values # + colab={} colab_type="code" id="XUo9a96pizF-" outputId="c7986281-2576-40bc-d2d6-b5320d6e8358" # Check for missing values in any column # + [markdown] colab_type="text" id="2hBJr4cQizGD" # ### - There are no null values in any of the columns # + [markdown] colab_type="text" id="zjKrB2P5izGE" # # Checking the summary of the dataframe # + colab={} colab_type="code" id="wFFuAyBIizGF" outputId="191f3c34-fb90-4539-92d9-fedc65686115" # five point summary of the continuous attributes # + [markdown] colab_type="text" id="DXoP-AtzizGJ" # - Data looks legit as all the statistics seem reasonable # - Looking at the age column, data looks representative of the true age distribution of the adult population # - Very few people have more than 2 children. 75% of the people have 2 or fewer children # - The claimed amount is highly skewed as most people would require basic medi-care and only a few suffer from diseases which cost more to get rid of # + [markdown] colab_type="text" id="xS5PeTpFizGK" # # Plot the Histograms # + colab={} colab_type="code" id="QGuwudKLizGL" outputId="ae6d2820-be90-45e6-e85c-ba721af02495" #Plots to see the distribution of the continuous features individually # + [markdown] colab_type="text" id="BoKJIGQBizGQ" # - bmi looks quite normally distributed # - Age seems to be distributed quite uniformly # - As seen in the previous step, charges are highly skewed # + colab={} colab_type="code" id="vmBsL3adizGR" outputId="49ae8584-0cf9-4ee8-bafe-2602dae3c056" Skewness = pd.DataFrame({'Skewness' : [stats.skew(df.bmi),stats.skew(df.age),stats.skew(df.charges)]}, index=['bmi','age','charges']) # Measure the skewness of the required columns Skewness # + [markdown] colab_type="text" id="K8F4zP3RizGV" # - Skew of bmi is very low as seen in the previous step # - age is uniformly distributed and there's hardly any skew # - charges are highly skewed # + [markdown] colab_type="text" id="RD_GIB5bizGW" # # 
Check Outliers # + [markdown] colab_type="text" id="O-I9IYXFizGX" # Box plot will be plotted to check for outliers. # + colab={} colab_type="code" id="_4h1KJh6izGY" outputId="5572200a-3ebf-4949-9bda-52c76de162f4" # + [markdown] colab_type="text" id="O8jp4sscizGc" # - There are no outliers present in the age variable. # - bmi variable shows presence of few extreme values # - charges as it is highly skewed, there are quiet a lot of extreme values. # + [markdown] colab_type="text" id="T5HJ6ID4izGd" # # Plot Count Plot # # We will plot various count plot to see how the variable has been distributed. # + colab={} colab_type="code" id="_XmUGXlqizGg" outputId="7c4aa5ce-daf9-4272-cf60-c83d8296147a" # + colab={} colab_type="code" id="O9vsFL6vizGp" outputId="a9f401e7-0f23-4ded-aaa2-eeb6937c1a93" # + colab={} colab_type="code" id="-tpLOM-7izGz" outputId="4e5a1ba9-48aa-4612-c09d-55b3c33132bd" # + colab={} colab_type="code" id="uB0AVLXtizG7" outputId="ebf686c7-ed21-4bdc-d551-d1ceba12992e" # + [markdown] colab_type="text" id="beGGBl7pizHE" # - There are a lot more non-smokers than there are smokers in the data # - Instances are distributed evenly accross all regions # - Gender is also distributed evenly # - Most instances have less than 2 children and very few have 4 or 5 children # + [markdown] colab_type="text" id="gBloBdCXizHF" # ### Bi-variate distribution of every possible attribute pair # + colab={} colab_type="code" id="Wmjnv5tcizHG" outputId="3aa9a123-58b2-4014-a2fa-9261e4f0fbfc" # + [markdown] colab_type="text" id="bblNWOU6izHK" # - The only obvious correlation of 'charges' is with 'smoker' # - Looks like smokers claimed more money than non-smokers # - There's an interesting pattern between 'age' and 'charges. 
Could be because for the same ailment, older people are charged more than the younger ones # + [markdown] colab_type="text" id="bXzaceYkizHL" # # Check Correlation # # To find out the correlation we will use the corr function and also we will plot a heatmap to visualise this correlation. # + colab={} colab_type="code" id="LWMMV37xizHM" outputId="ccd3b252-f6e1-410d-8fef-49612ee6a726" # + colab={} colab_type="code" id="pIR4LPbvizHR" outputId="50d0eaa8-a77a-4457-ed06-5990a762b1a1" # + [markdown] colab_type="text" id="e_HY0nbIizHZ" # ## Do charges of people who smoke differ significantly from the people who don't? # + colab={} colab_type="code" id="nXyYjidEizHa" outputId="24cc408c-6bb5-4463-d823-20dd960a7510" # + colab={} colab_type="code" id="fPLmyjLrizHd" outputId="db9e1dfb-25d2-4c88-e4df-4b6a1e8c9cde" #Scatter plot to look for visual evidence of dependency between attributes smoker and charges accross different ages # + [markdown] colab_type="text" id="zYUva10AizHi" # - Visually the difference between charges of smokers and charges of non-smokers is apparent # + [markdown] colab_type="text" id="HxFso4wnizHo" # ## Does bmi of males differ significantly from that of females? # + colab={} colab_type="code" id="NAO0uNiIizHp" outputId="34f5c58a-6fa3-4dca-e0bc-9b4bae75eb8a" #Checking the distribution of males and females # + colab={} colab_type="code" id="GaH_sr-5izHu" outputId="d6368ebe-b4a4-419e-9bc7-79ef58a8a3d7" # + [markdown] colab_type="text" id="dJjJ_8CRizH9" # ##### * bmi of both the genders are identical
M2 Statistical Methods for Decision Making/Week_1_SMDM_Descriptive_Statistics/Insurance Case Study_Student_file.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Should I stay in the U.S.? Let's analyse the unemployment rate

# +
# %matplotlib inline
import matplotlib.pyplot as plt

# Smoke test: render an empty figure to confirm matplotlib works in this kernel.
plt.plot()
plt.show()

# +
# %matplotlib inline
import datetime

import pandas

# Monthly U.S. civilian unemployment rate.
# Expected columns: DATE (date string) and UNRATE (numeric rate).
unrate = pandas.read_csv("UNRATE.csv")

# Plot only the first twelve rows (one year of monthly observations).
first_twelve = unrate[0:12]
xaxis = first_twelve['DATE']
data = first_twelve['UNRATE']
print(xaxis)
print(data)

# Parse the date strings into datetime objects so matplotlib spaces the
# x-axis chronologically instead of treating the labels as opaque strings.
# NOTE(review): assumes dates look like '2017/1/1'. FRED's UNRATE.csv often
# uses ISO 'YYYY-MM-DD' dates -- confirm the format against the actual file.
date_time = []
for xax in xaxis:
    date_time.append(datetime.datetime.strptime(xax, '%Y/%m/%d'))
print(date_time)

plt.plot(date_time, first_twelve['UNRATE'])
plt.xticks(rotation=90)  # vertical tick labels so the dates do not overlap
plt.show()
# -
ipnb/unemployreport.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import socket
import sys

# +
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# Connect the socket to the port where the server is listening
server_address = ('localhost', 9999)
# BUG FIX: the original used the Python 2 idiom `print (sys.stderr, ...)`,
# which in Python 3 prints the stderr stream object itself to stdout.
# Route the message to stderr via the `file=` keyword instead.
print('connecting to %s port %s' % server_address, file=sys.stderr)
sock.connect(server_address)
# -

msg = 'hello there'
sock.sendall(msg.encode('utf-8'))

import plotly
plotly.__version__

import numpy as np
import plotly.plotly as py
import plotly.tools as tls
import plotly.graph_objs as go
import plotly.plotly as py
from plotly.graph_objs import *

# auto sign-in with credentials or use py.sign_in()
trace1 = Scatter(
    x=[1, 2, 3],
    y=[5, 6, 7],
    mode='markers',
    marker=Marker(
        color='red',
        symbol='square'
    )
)
data = Data([trace1])
py.plot(data)

# #%matplotlib inline
import time
import pylab as pl
from IPython import display

# Live-plot object centres read from coordinate.txt; each line is assumed to
# hold a bounding box as "x1 y1 x2 y2" -- TODO confirm against the producer.
file = open('coordinate.txt', 'r')
i = 0
while True:
    point = file.readline()
    # BUG FIX: readline() returns '' at EOF, so the original loop either spun
    # forever or crashed on int('') once the file was exhausted.
    if not point:
        break
    points = point.split()
    x = (int(points[0]) + int(points[2])) / 2
    y = (int(points[1]) + int(points[3])) / 2
    pl.plot(x, y, 'ro')
    if i == 1:
        # Refresh the inline figure every other point.
        display.clear_output(wait=True)
        display.display(pl.gcf())
        i = 0
    pl.axis([0, 1920, 1080, 0])  # inverted y-axis to match image coordinates
    i = i + 1
file.close()  # BUG FIX: the handle was never closed

import numpy as np

points = list()
# Read object coordinates from file, one line per detection, terminated by an
# "eoframe" sentinel line.
file = open('/home/nvidia/darknet/test.txt', 'r')
while True:
    point = file.readline()
    # BUG FIX: also stop at EOF in case the "eoframe" sentinel is missing,
    # otherwise this loop never terminates.
    if not point:
        break
    if point == "eoframe\n":
        break
    points.append(point)
points
for i, coord in enumerate(points):
    obj = coord.split()
obj
file.close()

# Class names indexed by the detector's class id.
label = ["Mock", "Car", "SUV", "SmallTruck", "MediumTruck", "LargeTruck",
         "Pedestrian", "Bus", "Van", "GroupOfPeople", "Bicycle", "Motorcycle",
         "TrafficSignal-Green", "TrafficSignal-Yellow", "TrafficSignal-Red"]
label[0]

import cv2
import numpy

cam = cv2.VideoCapture("/home/nvidia/darknet/vid.mp4")
.ipynb_checkpoints/client_test-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # SpaceX: Launch Operations, Inventory & Business # # # ### Primer # SpaceX - A company credited with launching the first privately developed vehicle into orbit; is largely responsible for kickstarting the space race yet again. Over the years it has achieved a lot of incredible feats, and in this analysis, we'll look to explore SpaceX's operations; garnering insights that give us information on the subtle details behind those achievements and a general idea of the SpaceX programs. # # # The goal of this analysis is to discover facts that would have been missed by mainstream media coverage and to develop an understanding of the company by way of numbers. We would look towards tech blogs, engineering, and space community posts for our analysis. We might also encounter well-known facts in our analysis which we would look at from a different perspective. 
# # # The analysis uses data from the following source: https://www.kaggle.com/rohanrao/rspacex-data # The notebook consists of the following subsections: # # > [Setup & Loading](#Setup-&-Loading) # # > [Data Preparation](#Data-Preparation) # # > [Exploration & Analysis](#Exploration-&-Analysis) # # > [Final Insights](#Final-Insights) # # # ** ** # # # ## Setup & Loading # Section Index: # # > [Setup](#Setup) # # > [Data Loading](#Data-Loading) # # > [Data Preview Options](#Data-Preview-Options) # #### Setup # + # Importing Dependancies import pandas as pd import numpy as np import seaborn as sns import matplotlib as plt # %matplotlib inline # - # #### Data Loading # + # Loading in each table as a dataframe launches = pd.read_csv("Datasets/launches.csv") rockets = pd.read_csv("Datasets/rockets.csv") launchpads = pd.read_csv("Datasets/launchpads.csv") payloads = pd.read_csv("Datasets/payloads.csv") ships = pd.read_csv("Datasets/ships.csv") cores = pd.read_csv("Datasets/cores.csv") capsules = pd.read_csv("Datasets/capsules.csv") # - # #### Data Preview Options # + pd.options.display.max_rows = 30 pd.options.display.max_columns = 30 launches.head(10) # Main DataFrame ; contains unique_ids of other df's # rockets.head(10) # launchpads.head(10) # Key DataFrames ; contains additional info of cols in main df # payloads.head(10) # ships.head(20) # cores.head(10) # capsules.head(10) # - # # # *Key Observations from Data Preview:* # * There a lot of categorical features and a DateTime feature stored as a string. It would be useful to convert them to an appropriate type for better analysis. # # # * The 'id columns' in the df's which are in the form of an MD5 hash, act as liaison to other df's. Converting them to name columns would improve comprehensibility. # # # * A lot of interrelated data are stored in different tables. For example, the launches df contains the payloads launched, but the payloads df would contain the payloads' manufacturers. 
In this scenario, it would be useful to have a function extract information from a related table. # # # * Certain features hold list/array type values in the form of strings. Having them transformed to list type would make them more efficient to work with. # # # # # ## Data Preparation # In this stage, we'll conduct some basic data cleaning procedures and work on some of the observations from the data preview stage. After this, we'll define a bunch of new features essential to our analysis. # # * Handling missing data by imputing appropriate values. # * Converting datatypes to types that would be more suitable for analysis. # * Defining functions that would ease data transformation and analysis. # * Transforming data to appropriate formats for ease of reading and analysis. # * Feature Engineering - adding and defining features essential to solving problems. # * Variable definition for visualizations. # Section Index: # # > [Collecting df Essentials](#Collecting-df-Essentials) # # > [Visualizing Missing Data](#Visualizing-Missing-Data) # # > [Dealing with Null Values](#Dealing-with-Null-Values) # # > [Datatype Conversions](#Datatype-Conversions) # # > [Defining Essential Functions](#Defining-Essential-Functions) # # > [Data Transformations](#Data-Transformations) # # > [Defining New Features](#Defining-New-Features) # # > [Data: Post Wrangling](#Data:-Post-Wrangling) # #### Collecting df Essentials # + missing_data_pct = [] DTypes = [] data = [launches, rockets, launchpads, payloads, ships, cores, capsules] # Calculate percentage of missing data for each dataframe; handling missing values for df in data: # Appending percentage of missing data of each df for ease of access missing_data_pct.append(df.isnull().sum().apply(lambda x : 100*(x/len(df.index)))) # Appending column datatypes of each df for ease of access DTypes.append(df.dtypes) #DTypes #missing_data_pct[0].drop_duplicates(keep = False) # Not ideal for related variables in df # - # #### Visualizing Missing 
Data

# +
# Drawing a list of heatmaps, one per dataframe, to visualise missing values.
def draw_null_maps():
    """Render one null-value heatmap per dataframe in `data`, stacked vertically.

    Uses the module-level `data` list (7 dataframes). Replaces the original
    redundant `while`/nested-`for` counter with a single enumerate loop —
    behaviour (one heatmap per dataframe, one axis per row) is unchanged.
    """
    fig, axes = plt.pyplot.subplots(7, 1, figsize = (25, 30))
    for df_num, df in enumerate(data):
        sns.heatmap(df.isnull().T, ax = axes[df_num])


draw_null_maps()
# -

# #### Dealing with Null Values

# +
# Total rows in main df is approx 100, thus retaining all data

# Handling missing values for launch data
launches.fairings_reused.fillna('Unknown', inplace = True)  # Might change imputation value basis future analysis
launches.fairings_recovered.fillna('Unknown', inplace = True)
launches.fairings_recovery_attempts.fillna('Unknown', inplace = True)

# Handling missing values for payload data
payloads.mass_kg.fillna(payloads.mass_kg.median(), inplace = True)  # Imputing median to mitigate effect of outliers
payloads.mass_lb.fillna(payloads.mass_lb.median(), inplace = True)
payloads.regime.fillna('Unknown', inplace = True)
payloads.orbit.fillna('Unknown', inplace = True)
payloads.reference_system.fillna('Unknown', inplace = True)

# Don't fancy using ships' data for our analysis, hence skipping imputation of missing values

# Handling missing values for cores data
cores.block.fillna(0, inplace = True)  # Since all cores have atleast a single block; 0 implies unknown status

# Visualizing the changes
draw_null_maps()
# -

# #### Datatype Conversions

# +
# Converting features to an appropriate type for ease of analysis.
# Launches
launches.date = pd.to_datetime(launches.date)
# NOTE(review): these columns were just filled with the string 'Unknown';
# astype('bool') maps every non-empty string (including 'Unknown') to True —
# confirm this is the intended encoding before relying on these flags.
launches[['fairings_reused','fairings_recovery_attempts', 'fairings_recovered']] = launches[['fairings_reused', 'fairings_recovery_attempts', 'fairings_recovered']].astype('bool')

# Rockets
rockets.boosters = pd.Series(pd.Categorical(rockets.boosters, ordered = False))

# launchpads
for col in ['status','timezone','region','locality']:
    launchpads[col] = pd.Series(pd.Categorical(launchpads[col], ordered = False))

# payloads
for col in ['orbit', 'reference_system', 'regime']:
    payloads[col] = pd.Series(pd.Categorical(payloads[col], ordered = False))

# cores
cores.status = pd.Series(pd.Categorical(cores.status, ordered = False))

# capsules
capsules.status = pd.Series(pd.Categorical(capsules.status, ordered = False))

# Re-run code cell 4 to witness changes
# -

# #### Defining Essential Functions

# +
# The following functions would aid in data transformation and in preparing new datasets from the existing df's

# Function to get desired values from key df's onto the main df
def get_value(client_id_col, key_df, key_id_col_name, key_val_col_name):
    """Look up `key_val_col_name` in `key_df` for every id in `client_id_col`.

    Builds a Series indexed by the key column once and does O(1) lookups,
    replacing the original O(n*m) double loop. Ids absent from the key
    dataframe are skipped, matching the original behaviour. Assumes key ids
    are unique (they are MD5 hashes in this dataset).
    """
    mapping = key_df.set_index(key_id_col_name)[key_val_col_name]
    return [mapping.loc[cid] for cid in client_id_col if cid in mapping.index]


# Replacing entity Id's with names
def clean_seq(x):
    """Turn a stringified list like "['a','b']" into a list of strings."""
    for char in ['[', ']', "'"]:
        x = x.replace(char, '')
    return x.split(',')


def id_to_name(x, key_df, id_col_name, name_col_name, is_list = False):
    """Convert an id (or, with is_list=True, a list of ids) to its display name.

    Returns None when the id is not found, matching the original fall-through.
    Apply clean_seq() first when passing a list column.
    """
    if is_list:
        return [id_to_name(val.strip(), key_df, id_col_name, name_col_name) for val in x]
    # Indexed lookup replaces the original linear scan over the key df.
    mapping = key_df.set_index(id_col_name)[name_col_name]
    if x in mapping.index:
        return mapping.loc[x]
    return None
# -

# #### Data Transformations

# +
# Applying converter function to pure Id columns
launches.rocket_id = launches.rocket_id.apply(id_to_name, args = [rockets,'rocket_id','name'])
launches.launchpad_id = launches.launchpad_id.apply(id_to_name, args = [launchpads,'launchpad_id','full_name'])
launches.rename(columns = { 'rocket_id' : 'rocket', 'launchpad_id': 'launchpad' }, inplace = True)

# Applying converter to columns containing Id list
for col in ['payloads','ships','capsules']:
    launches[col] = launches[col].apply(clean_seq)

launches.payloads = launches.payloads.apply(id_to_name, args = [payloads,'payload_id','name'], is_list = True)
launches.ships = launches.ships.apply(id_to_name, args = [ships,'ship_id','name'], is_list = True)
launches.capsules = launches.capsules.apply(id_to_name, args = [capsules,'capsule_id','serial'], is_list = True)

# Converted DataFrame
launches.head(10)
# -

# #### Defining New Features

# *launches*

# +
# Adding features to launches df

# Adding Launch Year
launches['launch_year'] = launches.date.dt.year

# Adding Launch Month
launches['launch_month'] = launches.date.dt.month
import calendar
launches.launch_month = launches.launch_month.apply(lambda x: calendar.month_abbr[x])

# Adding Launch Quarter
q_map = { 1 : 'Q1', 2 : 'Q2', 3 : 'Q3', 4 : 'Q4' }
launches['launch_quarter'] = launches.date.dt.quarter.map(q_map)
launches['quarter'] = launches.launch_year.astype('str') + launches.launch_quarter.astype('str')

# Adding cost of launch basis rocket
launch_costs = pd.Series(get_value(launches.rocket, rockets,'name','cost_per_launch'), name = 'launch_cost')
launches['launch_cost'] = launch_costs

#launches.head(20)
# -

# *rockets*

# +
# Adding Features to rockets df

# Adding Payload capacity
payload_capacity_LEO = {  # Data Source: SpaceX.com/vehicles
    'Falcon 1' : None,
    'Falcon 9' : 22800,
    'Falcon Heavy' : 63800,
    'Starship': 90718  # Assumption: Starship has same payload capacity in LEO & GTO
}
payload_capacity_GTO = {  # Data Source: spaceX.com/vehicles
    'Falcon 1' : None,
    'Falcon 9' : 8300,
    'Falcon Heavy': 26700,
    'Starship' : 90718
}
rockets['LEO_capacity_kg'] = rockets.name.map(payload_capacity_LEO)
rockets['GTO_capacity_kg'] = rockets.name.map(payload_capacity_GTO)

# Adding cost efficiency metrics
rockets['cost_per_kilo_LEO'] = rockets.cost_per_launch/rockets.LEO_capacity_kg
rockets['payload_per_dollar_LEO'] = rockets.LEO_capacity_kg/rockets.cost_per_launch
rockets['cost_per_kilo_GTO'] = rockets.cost_per_launch/rockets.GTO_capacity_kg
rockets['payload_per_dollar_GTO'] = rockets.GTO_capacity_kg/rockets.cost_per_launch

# Adding payloads to mass percent
rockets['payload_pct_mass_kg_LEO'] = (rockets.LEO_capacity_kg/rockets.mass_kg)*100
# -

# *payloads*

# +
# Adding Features to payloads df

# Adding Payload Manufacturer & Customer Classes
US_gov = ['US Naval Academy','National Space Organization','Naval Postgrad School','Naval Research Lab', 'Applied Defense Systems','USAF Academy']
foreign_gov = ['Israel Aerospace Industries','NSPO','South Korea']

def class1(x):
    """Classify a manufacturer string into a customer class.

    Returns one of: 'US Government', 'Foreign Government', 'SpaceX',
    'Unknown' (empty string), or 'Private Company' (everything else).
    """
    if x in US_gov:
        return 'US Government'
    elif x in foreign_gov:
        return 'Foreign Government'
    elif x == 'SpaceX':
        return 'SpaceX'
    elif x == '':
        return 'Unknown'
    else:
        return 'Private Company'

# launches.payloads = [x[0] for x in launches.payloads]  # Data was present as a single-valued list
payloads.manufacturers = [x[0] for x in payloads.manufacturers.apply(clean_seq)]
payloads['customer_class'] = payloads.manufacturers.apply(class1)

# +
# Re run the data preview cells to witness changes.
# + # Visualisation Aids font = { 'family': 'Barlow', 'color': 'black', 'weight': 'normal', 'size': 12, } # - # #### Data: Post Wrangling # + # Preview the data sets post wrangling launches.head(10) # rockets.head(10) # launchpads.head(10) # payloads.head(10) # ships.head(20) # cores.head(10) # capsules.head(10) # - # # # # # # # ## Exploration & Analysis # # # In this section, we'll look to study SpaceX's **launch frequencies** over the years to discern patterns in their launch operations. We'll also be exploring the **rocket inventory**, and analyzing their usage and efficacy. # # Secondly, we will study SpaceX's **customer base** and understand their general requirements which hopefully should give us an understanding of the SpaceX business. # ### Inferences # To guide our exploration and satiate our curiosity, we'll be looking to answer the following questions: # # - [In what period did SpaceX amp up its operations?](#Analysing-Launch-Frequencies) # - [How are launches spaced out in a year? Is there a season with most launches? 
If yes, explore.](#Analysing-Launch-Frequencies) # - [Which rockets have the potential to be SpaceX's workhorse in the coming future?](#Rocket-Inventory-Analysis) # - [What are the general payload requirements of SpaceX's major customers ( including itself) in terms of mass, reference system, and orbit?](#Customer-Demographics-&-Payloads) # - [Repeat customers vs Diversification: Which would be better in the long run?](#Customer-Demographics-&-Payloads) # ### Analysing Launch Frequencies # Section Index: # # # > [Launch Data Grouped by Rocket Type](#Launch-Data-Grouped-by-Rocket-Type) # # > [Mean Success Rate of Launches](#Mean-Success-Rate-of-Launches) # # > [Quarterly Aggregate Launches](#Quarterly-Aggregate-Launches) # # > [Quarterly Launch Data From 2017 to 2020](#Quarterly-Launch-Data-From-2017-to-2020) # # #### Launch Data Grouped by Rocket Type # To understand launch frequencies we'll be visualising yearly launches grouped by the type of rocket used. # # + # Style Setting sns.set_style('white') # DF to visualise launch timeline LD = pd.DataFrame(launches[['rocket','launch_year']].groupby(['launch_year','rocket']).size()) LD.reset_index(inplace = True) LD.columns = ['launch_year','rocket','Launches/Year'] #lD # Visualising launch timeline f = sns.catplot(x = 'launch_year', y = 'Launches/Year',col = 'rocket',kind = 'point', data = LD) f.set(yticks = np.arange(0,22,2), xlabel = 'Launch Year') f.set_xticklabels(rotation = 30) # - # Before analyzing the above data, it would also be useful to also understand the success rate of these launches. # #### Mean Success Rate of Launches # + # Visualising Launch Perfomance SR = sns.catplot(y ='launch_year', hue = 'success', col = 'rocket', kind = 'count', data = launches, orient = "h") SR.set( xticks = np.arange(0,24,2), xlabel = 'No. 
Launches', ylabel = 'Launch Year') # - # Observations: We observe that falcon 1 was used as a learning tool and after getting 2 consecutive successes in 2008 and 2009 respectively, SpaceX moved onto falcon 9 - which has pretty much been the workhorse rocket ship. The Falcon 9 has succeeded in all but 2 of its launches and has proved itself to be the go-to rocket. The falcon heavy, which is essentially 3 Falcon 9's packed together, was first launched in 2018 in a test flight and has had 4 launches until 2020. # # Key points: # # * We see two major upticks in operations: once in 2014 - when the launches doubled to 6, and the other in 2017 - which is when SpaceX upscaled its operations and did 18 launches. # # # * A significant dip in operations is observed in 2019 wherein the launch operations were nearly halved, # accompanied by an equally sharp rise in operations the following year. # # # * From the launch success visualization, it's evident that SpaceX has enjoyed a very high success rate. # # # # Yet another question that comes to mind is how are SpaceX launches distributed over a year. # And if there has been a particular quarter in which SpaceX carried out most of its launches? # # # To answer this, we'll first visualize the total quarterly launches and analyze the launch pattern # in aggregate. 
#
# #### Quarterly Aggregate Launches

# +
# DF for visualising Quarterly launch data
dfQ = launches.groupby(['launch_year','launch_quarter',]).size().unstack(0)
dfQ['quarter_total'] = dfQ.sum(axis = 1)   # launches per quarter, summed over all years
dfQ.loc['year_total'] = dfQ.sum(axis = 0)  # launches per year, summed over all quarters
dfQ

# Total Quarterly launches
# sns.set returns None, so the original `QT = sns.set(...)` binding was dropped.
sns.set(rc = {'figure.figsize' : (20,3)}, style = 'whitegrid')
QT = sns.pointplot( x = dfQ.index.drop('year_total'), y = dfQ['quarter_total'].drop('year_total'))
sns.despine()
QT.set(yticks = np.arange(0,50,5))
QT.set_ylabel('Total Launches', loc = 'top', rotation = 0, fontdict = font)
QT.set_xlabel('Launch Quarter', rotation = 0, fontdict = font)
QT.set_title('Quarterly Aggregate Launches', fontdict = {'size' : 15, 'family' : 'Barlow', 'weight' : 'medium'} , pad = 20)
# -

# From the above graph, it seems that SpaceX carries out most of its launches in the first half of the year with the maximum launches happening in Q2 which is then accompanied by a dip in launches in Q3 and Q4.
#
# It is to be noted that this is aggregate data; what we need to find is whether the pattern holds every year. Since most of the data in the above graph comes from the years 2017 to 2020, we'll plot the quarterly launch frequencies for these years to further understand this.

# #### Quarterly Launch Data From 2017 to 2020

# +
launcht = launches.groupby(launches.quarter).size()
launcht.index = pd.PeriodIndex(launcht.index, freq = 'Q-JAN')
sns.set_style('whitegrid')

# Quarterly launch data from 2017 to 2020
idx = pd.period_range('2017Q1', '2020Q4', freq = 'Q-JAN')
fig, axes = plt.pyplot.subplots(figsize = (15,3))
T = sns.pointplot(x = idx , y = launcht[idx])
sns.despine()
T.set(yticks = np.arange(0,10,3))
T.set_xlabel('Launch Timeline', fontdict = font)
T.set_ylabel('Num. Launches', fontdict = font)
T.set_title('Quarterly launch data from 2017 to 2020 ', fontdict = font, pad = 20)
# -

# Alright, barring a few exceptions we observe an alternating pattern, where the launches would increase from Q1 to Q2, followed by a dip in Q3 and a rise in Q4. Overall, we observe the number of launches in the second quarter is more than the number of launches in Q4. What remains interesting is despite there being fewer launches in Q3 in 2017 to 2020, the number of launches in Q3 remains more than in Q4 in the aggregate data, implying little to no launches were carried out in the fourth quarter during the initial years of SpaceX.
#
# **Key Observation**: Q2 remains the period with most launches in a year.
#
# **Anomalies**: We see that the alternating pattern is offset at 2 places:
#
# * In 2018, when the launches in Q2 stayed the same as those in Q1. This, however, seems to be a minor anomaly and it would be best left out of further investigation for now.
#
#
# * In 2020, when the pattern was completely offset, Q2 launches dipped and stayed the same in Q3 to later decrease in Q4. The most probable candidate for this offset is manned spaceflight in 2020.
#
#
#
# ### Rocket Inventory Analysis
#
#
# In the rocket inventory analysis, we'll explore the payload to launch-cost ratio as a metric for cost efficiency, and evaluate potential candidates for a work-horse rocket. Alternatively, we could also find dimensions for a hypothetical rocket ideal for repeated launches.
#
# Before we begin, let's first define the parameters which would make a good work-horse:
#
# 1. Launch cost-efficiency - An ideal workhorse would give the maximum payload capacity per unit cost.
# 2. Reusability - Here, two things matter - the number of times a rocket can be used before being discarded, and the second - recovery rate of a particular rocket.
#
# Since we have limited data on reusability performance, we'll be using launch cost efficiency as our primary indicator.
# # # We'll be analyzing rocket dimensions and trying to investigate any underlying relationships that they may have with the payload to cost ratio. Of course, there won't be any causal relationships. What we aim to do here is understand the cost efficiency of SpaceX's rocket design. And for this purpose we'll be looking at the following: # # * How has cost efficiency changed with more powerful rockets? # # # * A look at the change in the rocket dimensions. # # # * Which dimensional metric is associated with the most change in payload capacity and cost efficiency? # # # # *It is to be noted here that Falcon 9 and Falcon heavy are closely related.* # # Section Index: # # > [Powerful Rockets vs Cost Efficiency](#Powerful-Rockets-vs-Cost-Efficiency) # # > [Changes in Rocket Dimensions](#Changes-in-Rocket-Dimensions) # # > [Dimensions vs Cost Efficienc](#Dimensions-vs-Cost-Efficiency) # # > [Launch Cost](#Launch-Cost) # # # #### Powerful Rockets vs Cost Efficiency # To measure changes in cost efficiency with the introduction of newer rockets, we'll be plotting the percentage change in payload capacity per unit dollar for each orbit system. # + # Percent changes in Cost Efficiency sup = plt.pyplot.figure() plt.pyplot.figure(figsize = (15,3)) plt.pyplot.plot(rockets.name,rockets.payload_per_dollar_LEO.pct_change(), color = 'b', label = 'LEO', marker = 'o') plt.pyplot.plot(rockets.name,rockets.payload_per_dollar_GTO.pct_change(), color = 'r', label = 'GTO', marker = 'o') sns.despine() plt.pyplot.yticks(np.arange(-1,55,5)) plt.pyplot.xlabel('Rocket Name', fontdict = font) plt.pyplot.ylabel('Percentage Points', fontdict = font) plt.pyplot.title('Improvement in Cost Efficiency', fontdict = font, pad = 20) plt.pyplot.legend(loc = 'best') plt.pyplot.show() # - # We observe a 44% and a 14% improvement in cost efficiency for GTO and LEO respectively on the starship rocket as compared to falcon heavy, which, had shown negligible improvements in the launch cost over falcon 9. 
An important thing to note here is that cost efficiency improved with the introduction of more powerful rockets. This hints at major design and engineering changes over the simple addition of costlier parts. # Our visualization of the changes in rocket dimension and their relationship with cost efficiency would help us further explore this insight. # # # #### Changes in Rocket Dimensions # Based on the data, we can work with mass, height, and diameter as our primary dimensions. The idea is to plot the changes in overall dimensions with the introduction of a newer rocket model giving us a mental map to compare rocket sizes. # + fig = plt.pyplot.figure() plt.pyplot.figure(figsize = (17,4)) plt.pyplot.plot(rockets.name,rockets.mass_kg.pct_change() ,color='g', label = 'Mass', marker = 'o') plt.pyplot.plot(rockets.name,rockets.height_mt.pct_change() ,color='b', label = 'Height', marker = 'o') plt.pyplot.plot(rockets.name,rockets.diameter_mt.pct_change(),color='r', label = 'Diameter', marker = 'o') sns.despine() plt.pyplot.xlabel('Rocket Name', fontdict = font) plt.pyplot.ylabel('Percentage Points', loc = 'center', fontdict = font) plt.pyplot.yticks(np.arange(-1,20,1)) plt.pyplot.legend(loc = 'upper center') plt.pyplot.title('Percent Changes in Rocket Dimensions', fontdict = font, pad = 20) plt.pyplot.show() # - # Right, so we observe that Falcon 9 had a mammoth 17% increase in mass compared to its predecessor Falcon 1, which then increased by 1.5% with Falcon heavy and decreased by less than a percent in Starship. The other 2 dimensions, underwent moderate changes with height increasing by 2% in Falcon 9, staying the same in Falcon heavy, and then a 1% percent increase in starship; and diameter, increasing by 1%, then 2%, and then decreasing by half a percent in Starship. # # Falcon heavy is simply a modified version of Falcon 9 containing the main body and 2 Falcon 9 boosters. This explains the moderate increases in mass and diameter with the same height. 
Starship, on the other hand, is a newly engineered rocket slightly taller than Falcons and mass and diameter between Falcon 9 and Falcon Heavy. # # It would be worth noting the payload capacity of each of these rockets and see how the changes in dimensions hold up with them. # + # Style Setting sns.set_style('white') # Payload Capacities fig = plt.pyplot.figure(figsize = (13,5)) # LEO LEO = fig.add_subplot(1,2,1) L = sns.barplot(x = rockets.LEO_capacity_kg, y = rockets.name, palette = 'ocean') sns.despine() L.set_xlabel('Payload Capacity in LEO (Kg)',fontdict = font) L.set_ylabel('Rocket', fontdict = font) L.set_title('Low Earth Orbit', fontdict = font) # GTO GTO = fig.add_subplot(1,2,2) G = sns.barplot(x = rockets.GTO_capacity_kg, y = rockets.name, palette = 'flare') sns.despine() G.set_xlabel('Payload Capacity in GTO (Kg)',fontdict = font) G.set_ylabel('Rocket', fontdict = font) G.set_title('Geostationary Transfer Orbit', fontdict = font) plt.pyplot.subplots_adjust(wspace = .6) # - # From the above graph, it is pretty clear that the payload capacity has increased with newer models and minor tweaks in dimensions. We also observe that Falcons' payload cap is much less in GTO. # # An interesting bit would be to understand how these dimensions would have moved with the cost efficiency metrics. # which should hopefully provide a basis for uncovering some insights on SpaceX's rocket design and engineering principles. # # # The following plot does exactly that. We are plotting cost_per_kg on the y axis to examine its relationship with each of the dimensions. The cost_per_kg measures in dollars the amount of money spent on carrying a kilogram of payload to each of the orbit systems. # # # #### Dimensions vs Cost Efficiency # As discussed above we'll be plotting our dimensions against cost_per_kg in GTO. # # Additionally, we are going to focus on yet another metric viz payload fraction. It represents the payload capacity as a fraction of the total mass of the rocket. 
The metric is indicative of engine performance and engineering design since a high payload fraction means greater payload capacity per unit mass of the rocket, which, is a function of engine performance. # + # Style Setting sns.set_style('whitegrid') # Plots fig = plt.pyplot.figure(figsize = (15,10)) fig.suptitle('Dimensions vs Cost Efficiency') # Mass vs Cost Efficiency ax1 = fig.add_subplot(2,2,1) plt.pyplot.plot(rockets.mass_kg, rockets.cost_per_kilo_GTO, color='r', label = 'Mass', marker = 'o') plt.pyplot.xlabel('Mass (Kg)', fontdict = font) plt.pyplot.ylabel('Inv. Cost Efficiency ($)', fontdict = font) plt.pyplot.title('Mass vs Cost Efficiency', fontdict = font, pad = 20) # Diameter vs Cost Efficiency ax2 = fig.add_subplot(2,2,2) plt.pyplot.plot(rockets.diameter_mt,rockets.cost_per_kilo_GTO ,color='b', label = 'Diameter', marker = 'o') plt.pyplot.xlabel('Diameter (mtr)', fontdict = font) plt.pyplot.ylabel('Inv. Cost Efficiency ($)', fontdict = font) plt.pyplot.title('Diameter vs Cost Efficiency', fontdict = font, pad = 20) # Height vs Cost Efficiency ax3 = fig.add_subplot(2,2,3) plt.pyplot.plot(rockets.height_mt,rockets.cost_per_kilo_GTO,color='g', label = 'Height', marker = 'o') plt.pyplot.xlabel('Height (mtr)', fontdict = font) plt.pyplot.ylabel('Inv. Cost Efficiency ($)', fontdict = font) plt.pyplot.title('Height vs Cost Efficiency', fontdict = font, pad = 20) # Payload Fraction vs Cost Efficiency ax4 = fig.add_subplot(2,2,4) plt.pyplot.plot(rockets.payload_pct_mass_kg_LEO, rockets.cost_per_kilo_GTO ,color='k',label = 'Payload Fraction', marker = 'o') plt.pyplot.xlabel('Payload Fraction (%)', fontdict = font) plt.pyplot.ylabel('Inv. Cost Efficiency ($)', fontdict = font) plt.pyplot.title('Payload Fraction vs Cost Efficiency', fontdict = font, pad = 20) plt.pyplot.subplots_adjust(wspace = 0.3, hspace = 0.4) # - # # # In all the above graphs. 
the first point represents Falcon 9, the second - Falcon Heavy, and third - Starship # # We observe that cost efficiency has improved as new models were introduced. Starship has shown the greatest improvement in cost efficiency with a unit cost of less than $1000 - An improvement of nearly 3000 dollars. # # While we have already discussed the movement of the first 3 dimensions, it is remarkable that starship has managed to significantly reduce the unit cost of payload, and increase the payload fraction to 7.2%. # # Finally, let's look at the overall cost of launching a type of rocket alongside their payload capacities to give ourselves a snapshot of the analysis and also check for any numerical triviality. # #### Launch Cost # + # Rocket Launch Cost Rockets = rockets.copy() Rockets.cost_per_launch = Rockets.cost_per_launch.div(10**6).astype('str') + ' million' Rockets.LEO_capacity_kg = Rockets.LEO_capacity_kg.dropna().astype('str') + ' Kg' Rockets.GTO_capacity_kg = Rockets.GTO_capacity_kg.dropna().astype('str') + ' Kg' Rockets.rename(columns = {'name': 'Rocket', 'cost_per_launch' : 'Launch Cost', 'LEO_capacity_kg' : 'LEO Capacity', 'GTO_capacity_kg' : 'GTO Capacity'}, inplace = True ) Rockets.index = Rockets.Rocket Rockets[['Launch Cost','LEO Capacity','GTO Capacity']] # - # From the above table - starship stands out. Its cost is comparable to that of Falcon 1's and has the highest payload capacity of all the rockets. # We'll be using these insights in the final section. Next, we move onto analyzing SpaceX's customer demographic and payload data. # # # ### Customer Demographics & Payloads # In this section, we'll look at the major clientele of SpaceX in terms of individual customers and customer classes. # # Firstly, we'll look at the top clients of SpaceX and look out for any major customers. We hope to get a fair understanding of the distribution of SpaceX's clientele. 
After that, we'll study the average payload requirements of SpaceX's top customers both individually, and in terms of classes. # Subsections: # # > [SpaceX - Major Clientele](#SpaceX---Major-Clientele) # # > [Customer Diversity of SpaceX](#Customer-Diversity-of-SpaceX) # # > [Major Requirements](#Major-Requirements) # #### SpaceX - Major Clientele # This section deals with extracting the top customers of SpaceX in terms of the number of orders and look out for any major clients. # + # Style Setting sns.set(rc = {'figure.figsize': (20,5)}, style = 'white' ) # Creating Manufacturer list Mft = [] payloads.manufacturers.apply(clean_seq) for val in payloads.manufacturers.apply(clean_seq): Mft.append(val[0]) MFT = pd.DataFrame(Mft, columns = ['Manufacturers']).replace('','Unknown') #MFT # Finding Total Number of Orders num_mfts = len(MFT.where(MFT.Manufacturers != 'SpaceX').dropna()) # SpaceX - Major Clientele c = sns.barplot( y = MFT.Manufacturers.value_counts().drop(['SpaceX','Unknown'])[:7].index, x = np.multiply(np.divide(MFT.Manufacturers.value_counts().drop(['SpaceX','Unknown'])[:7].values,num_mfts),100), palette = 'mako' ) sns.despine() c.set_xlabel('Percentage of Orders (%)', fontdict = font) c.set_ylabel('Client',rotation = 0, loc = 'top', fontdict = font) c.set_title('SpaceX - Major Clientele', fontdict = {'size' : 15}, pad = 20) # - # As we observe, Thales Alenia Space & SSL are customers with the most orders having 13% each of the total number of orders placed by SpaceX's clients. These include orders by the US and other foreign governments but not payloads from SpaceX itself. The next major client is Boeing with 11% orders followed by Airbus Defence & Space, Lockheed Martin, and Orbital ATK with 7%, 5%, and 5% of orders respectively, with other clients individually contributing less than 5% of orders. # # From this, we can say that SpaceX's customer base is fairly diversified. 
The top three clients which have a significantly higher percentage of orders deserve some further study in assessing them for their potential to provide long term business which we'll explore in a later section. # # # Here, we have just worked with individual customers, but government contracts happen to be frequently reported as a major revenue stream for SpaceX, and thus, it would be a good idea to study the customer base in terms of customer class viz, Private, US government, Foreign government. That should tell us the role of the government enterprises in the SpaceX business. # # The next section delves into customer diversity in terms of customer classes. # #### Customer Diversity of SpaceX # + # Style Setting sns.set_style('white') # Diversity of SpaceX orders = np.sum(MFT.Manufacturers.dropna().apply(class1).value_counts().values) C = sns.barplot(x = np.multiply(np.divide(MFT.Manufacturers.dropna().apply(class1).value_counts().values,orders),100), y = MFT.Manufacturers.dropna().apply(class1).value_counts().index, palette = 'magma' ) sns.despine() C.set(xticks = np.arange(0,110,10)) C.set_xticklabels(np.arange(0,110,10)) C.set_ylabel('Client Type',fontdict = font) C.set_xlabel('Percentage of Orders (%)', fontdict = font) C.set_title('Customer Diversity of SpaceX', fontdict = font, pad = 20) # - # We see that the US government constitutes just 5% of the satellite launches, with private companies constituting 60% followed by SpaceX at 35%. It should be noted here that the US government category exclusively contains defense and research satellites, the space operations funded by NASA are manufactured by SpaceX and thus would appear in SpaceX's numbers. # # In general, it seems that private companies are the major clients in terms of income from satellite launches. 
NASA and other government organizations do have a significant part in SpaceX's operating finances and, as hinted by the above graph, those contributions would probably come as long-term contracts that enable SpaceX to carry out its operations. # # Before moving on to the conclusions, let us also try and understand the general requirements of these customers in terms of mass, reference system, and orbit. The approach would be the same as this section wherein we'll first look at customers individually, and then as classes. # # #### Major Requirements # Here, we'll look at client-wise general satellite configurations in two ways. # # **Individual Customers** # + # Indivisual Customers customer_requirement = pd.DataFrame({ 'mass_kg' : None, 'reference_system' : None, 'orbit' : None, 'regime' : None }, index = payloads.manufacturers.value_counts().drop(['SpaceX', ''])[:5].index ) for client in customer_requirement.index: for requirement in ['mass_kg','reference_system','orbit','regime']: if requirement == 'mass_kg': customer_requirement.loc[client,requirement] = payloads[requirement].where(payloads['manufacturers'] == client).dropna().mean() else: customer_requirement.loc[client,requirement] = payloads[requirement].where(payloads['manufacturers'] == client).dropna().value_counts()[:1].index[0] customer_requirement.rename( columns = { 'mass_kg' : 'Average Mass', 'reference_system': 'Main Reference Sytem', 'orbit' : 'Main Orbit', 'regime' : 'Main Regime',}, inplace = True) customer_requirement # - # The table above gives us a very good summary of the requirements of SpaceX's major clients. Among the top 5 clients, we observe geocentric as the main reference system, GTO as the main orbit, and geostationary as the main regime. # # It is to be noted that Thales Alenia Space has Polar Orbit as its most frequent orbit with a low_earth regime; it also has the highest mass viz. 7017.5 Kg amongst the top clients. 
Given that it covers 13% of the market share, it is worth looking into the type of satellites that this company sends up to better understand it as a customer. # # The next code cell looks at the satellites launched by Thales Alenia Space. # + thales_sat = pd.DataFrame( { 'Major Payloads of Thales Alenia Space' : payloads.name.where(payloads.manufacturers == 'Thales Alenia Space').dropna() }) thales_sat.index = range(len(thales_sat)) thales_sat # - # As seen above, Iridium Next happens to be the major satellite class of Thales Alenia Space. Iridium Next is a constellation of communication satellites built for the European space agency. # # Reference: https://earth.esa.int/web/eoportal/satellite-missions/i/iridium-next # # This means that Thales Alenia Space is a strong candidate for a repeat customer. # # Another major client is SSL. The next code cell lists out the satellites it sends to space. # + SSL_sat = pd.DataFrame({ 'Major Payloads of SSL' : payloads.name.where(payloads.manufacturers == 'SSL').dropna() }) SSL_sat.index = range(len(SSL_sat)) SSL_sat # - # As for SSL, it seems as if they have a wide range of satellites sent into orbit which makes it a less desirable prospect as a repeat customer. The others seem to have fairly similar payload requirements. Let us now dive into the requirements by customer classes. 
#
#
#
# **Customer Classes**

# +
# Customer Classes
class_requirement = pd.DataFrame({ 'mass_kg' : None, 'reference_system' : None, 'orbit' : None, 'regime' : None },
                                 index = payloads.customer_class.unique() )

# For each customer class: mean payload mass, plus the most frequent
# category for each of the remaining requirement columns.
for client_class in class_requirement.index:
    for requirement in ['mass_kg','reference_system','orbit','regime']:
        if requirement == 'mass_kg':
            class_requirement.loc[client_class,requirement] = payloads[requirement].where(payloads['customer_class'] == client_class).dropna().mean()
        else:
            class_requirement.loc[client_class,requirement] = payloads[requirement].where(payloads['customer_class'] == client_class).dropna().value_counts()[:1].index[0]

# 'Main Reference System' fixes the misspelled display header ('Sytem').
class_requirement.rename( columns = { 'mass_kg' : 'Average Mass', 'reference_system': 'Main Reference System', 'orbit' : 'Main Orbit', 'regime' : 'Main Regime',}, inplace = True)
class_requirement.drop('Unknown',axis = 0)
# -

# Here we observe that each customer class has a different main orbit and regime requirement. By 'main' we refer to the most frequently occurring values in that particular category.
#
# Private companies have the GTO as their main orbit, while SpaceX has the ISS as its main orbit - this could be mainly because of the Dragon program as part of its work as a crew resupply vehicle to the ISS.
#
# Since foreign governments constitute just 2% of the orders, we can safely overlook them for now.
#
# The US government on average sends the lightest payloads to orbit. LEO with the low-earth regime with a mass of 260 kg is their main requirement
#
# In this upcoming section, we'll be digging deeper into the satellites carried up by SpaceX and begin to answer the questions we posed to ourselves at the beginning of the exploration stage.

#
# ## Final Insights
#
#
# In this section, we return to the questions we posed ourselves during the exploration stage and answer them with additional insights wherever possible.
Using our findings as a base, we'll refer to related blog posts and articles and solidify our understanding of that section. # # To maintain readability, we'll be answering these questions in order. # * [In what year did SpaceX amp up its operations?](#In-what-year-did-SpaceX-amp-up-its-operations?) # # * [How are launches spaced out in a year? Is there a season with most launches? If yes, explore.](#How-are-launches-spaced-out-in-a-year?-Is-there-a-season-with-most-launches?-If-yes,-explore.) # # * [Which rockets have the potential to be SpaceX's workhorse in the coming future?](#Which-rockets-have-the-potential-to-be-SpaceX's-workhorse-in-the-coming-future?) # # * [What are the general payload requirements?](#What-are-the-general-payload-requirements?) # # * [Repeat customers vs Diversification: What would be better in the long run?](#Repeat-Customers-vs-Diversification:-What-would-be-better-in-the-long-run?) # # ** ** # #### In what year did SpaceX amp up its operations? # **Year of Scaling Operations** # # Basis our analysis, it is evident that SpaceX amped up its operations in the year **2017** wherein it has doubled its launch operations to 18 launches in that year. Let's explore the possible reasons behind this. # # **Further Study & Explanation** # # After referring to the following article by the Atlantic: https://www.theatlantic.com/science/archive/2017/12/spacex-launch-falcon-heavy. We note the following: # # SpaceX was responsible for: carrying commercial satellites, supplies to the international space station, and some secret government missions. It was a year with one successful launch after another and zero mishaps. # # In March, SpaceX reused a Falcon 9 first stage in one of its launches of the commercial satellites. And in November, NASA permitted them to reuse rockets parts in their resupply missions. 
# # In my opinion, the reusing of Falcon 9's first stage enabled SpaceX to reduce the time taken between launches as refurbishing a used part would take considerably less time than building a new one. This coupled with the streak of successful launches meant that SpaceX was able to double its launches in 2017. # # ** ** # #### How are launches spaced out in a year? Is there a season with most launches? If yes, explore. # **General Pattern** # # Since 2017, the launches have been shown to follow an oscillating pattern, with the second quarter having the most number of launches in aggregate. The second quarter corresponds to the summer season. # # **Insights From References** # # The following document by NASA enlists the 14 weather criteria that determine the fate of a planned launch. This was specifically designed for the crew dragon launch but would give us a general idea of the weather monitoring that goes behind a successful launch. The document: https://www.nasa.gov/sites/default/files/atoms/files/falcon9_crewdragon_launch_weather_criteria_fact_sheet.pdf # # Basis the article: It seems that the ideal condition for a rocket launch involves breezy, consistent wind conditions with no wind shear. Thunderstorms and cloudy weather seem to be the main causes for scrubbing a launch. # # **Ideal Season** # # The months in the second quarter lie in the spring season and the onset of summer, the whole period is characterized by pleasant weather conditions with moderate winds and clear skies and while rain is common during this period, the general conditions would probably make this quarter most suitable for rocket launches. # # ** ** # #### Which rockets have the potential to be SpaceX's workhorse in the coming future? # **Findings** # # In our analysis, it was pretty clear that SpaceX managed to **improve** the **cost-efficiency** of its rockets with **newer models**.
# # * **Starship** clearly **stands out** in terms of payload capacity, building cost, cost efficiency, and engine performance. # # * Falcon heavy - also has shown significant improvement as compared to Falcon 9 relying solely on good design. # # Referring to the following: https://www.spacex.com/vehicles, we find: # # * Starship is built out of completely different materials and has a different engine altogether. <br> # * The core concept behind Falcon heavy is attaching two Falcon 9 boosters to the main body giving it the power of 27 Merlin engines with a negligible increase in mass thus significantly improving its payload capacity and launch cost efficiency. # # **Reasons Behind Starship's Excellent performance**: # # * Starship uses a version of stainless steel which is known for its low cost as compared to the carbon fiber used in Falcons, apart from this, it has better heat bearing capacity than carbon fiber which is essential in good reusability performance. # <br> # * The Raptor is a more powerful engine, as compared to the Merlin, and while it costs more than Merlin, it makes up for it with high cost-efficiency. The major improvements in cost efficiency and payload fraction are due to these changes. # # It is to be noted that Starship is the first of its class with plans to develop more powerful versions of the same. # # **Prospective Future Workhorse**: # # **Starship** looks like an ideal candidate and was also designed to be so. # # However, given the **significant improvements** in the cost efficiency & payload fraction due to the use of the Raptor engine & stainless steel, there is room for a **lighter version of Starship** # # The lighter version would trade-off **payload capacity and cost-effectiveness** for much less **cost per launch**. # This version of the Starship could be useful as a **workhorse** for **delivering payloads** to **low earth orbit**. # # ** ** # #### What are the general payload requirements?
# The general requirements of SpaceX's major customers were covered exhaustively in the [exploration section](#Major-Requirements) itself and thus we would be moving on to the next section. # # ** ** # #### Repeat Customers vs Diversification: What would be better in the long run? # **Primer** # # * SpaceX currently relies heavily on NASA contracts with the crew resupply missions and would continue to do so for quite some time. <br> # * From analyzing SpaceX's customer base for satellites, we found that Thales Alenia Space could be a good candidate for repeat business as it would be setting up the Iridium Next constellation. <br> # * SpaceX is already working on setting up the Starlink constellation which, once active, will be a solid source of revenue. # # **Long Term Strategy** # # SpaceX could explore building **strong relations** with companies like **Thales and SSL**, and other companies that deal in **research and communication satellites** to build more business from them and reduce its reliance on the government. These companies can continue to be a source of revenue after the Starlink has been deployed, and also might be able to help SpaceX in **providing relevant data** for its more ambitious projects. # # Thus, **repeat customers** would be better for SpaceX's business in the long run. # # ** **
SpaceX EDA/SpaceX Data - EDA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # # <img style="float: left; padding-right: 10px; width: 45px" src="https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png"> CS109A Introduction to Data Science # # ## Homework 4: Logistic Regression # # **Harvard University**<br/> # **Fall 2019**<br/> # **Instructors**: <NAME>, <NAME>, and <NAME> # # <hr style="height:2pt"> # # #RUN THIS CELL import requests from IPython.core.display import HTML styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text HTML(styles) # ### INSTRUCTIONS # # - **This is an individual homework. No group collaboration.** # - To submit your assignment follow the instructions given in Canvas. # - Restart the kernel and run the whole notebook again before you submit. # - As much as possible, try and stick to the hints and functions we import at the top of the homework, as those are the ideas and tools the class supports and is aiming to teach. And if a problem specifies a particular library you're required to use that library, and possibly others from the import list. # - Please use .head() when viewing data. Do not submit a notebook that is excessively long because output was not suppressed or otherwise limited. 
# + import numpy as np import pandas as pd from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LogisticRegressionCV from sklearn.linear_model import LassoCV from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import cross_val_score from sklearn.metrics import accuracy_score from sklearn.model_selection import KFold from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split import matplotlib import matplotlib.pyplot as plt # %matplotlib inline import zipfile import seaborn as sns sns.set() # - # <div class='theme'> Cancer Classification from Gene Expressions </div> # # In this problem, we will build a classification model to distinguish between two related classes of cancer, acute lymphoblastic leukemia (ALL) and acute myeloid leukemia (AML), using gene expression measurements. The data set is provided in the file `data/dataset_hw4.csv`. Each row in this file corresponds to a tumor tissue sample from a patient with one of the two forms of Leukemia. The first column contains the cancer type, with **0 indicating the ALL** class and **1 indicating the AML** class. Columns 2-7130 contain expression levels of 7129 genes recorded from each tissue sample. # # In the following questions, we will use linear and logistic regression to build classification models for this data set. # # <div class='exercise'><b> Question 1 [20 pts]: Data Exploration </b></div> # # First step is to split the observations into an approximate 80-20 train-test split. Below is some code to do this for you (we want to make sure everyone has the same splits). Print dataset shape before splitting and after splitting. `Cancer_type` is our target column. 
# # # **1.1** Take a peek at your training set (show a glimpse of what you did): you should notice the severe differences in the measurements from one gene to the next (some are negative, some hover around zero, and some are well into the thousands). To account for these differences in scale and variability, normalize each predictor to vary between 0 and 1. # # # **1.2** The training set contains more predictors than observations. What problem(s) can this lead to in fitting a classification model to such a data set? Explain in 3 or fewer sentences. # # # **1.3** Identify and report which 10 genes individually discriminate between the two cancer classes the best (consider every gene in the data set). Note: it will be useful to save this list for future parts. # # Plot two histograms of your best predictor - one using training and another for the testing dataset. Each histogram should clearly distinguish two different `Cancer_type` classes. # # Hint: You may use t-testing to make this determination: #https://en.wikipedia.org/wiki/Welch%27s_t-test. # # # **1.4** Using your top gene from the previous part (call it `best_predictor`), create a classification model by manually eye-balling a value for this gene that would discriminate the two classes the best. Justify your choice in 1-2 sentences. Report the accuracy of this hand-chosen model on the test set. # # <hr> <hr> # <hr> # ### Solutions # **First step is to split the observations into an approximate 80-20 train-test split. Below is some code to do this for you (we want to make sure everyone has the same splits). Print dataset shape before splitting and after splitting.
`Cancer_type` is our target column.** # + np.random.seed(10) df = pd.read_csv('data/hw4_enhance.csv', index_col=0) X_train, X_test, y_train, y_test =train_test_split(df.loc[:, df.columns != 'Cancer_type'], df.Cancer_type, test_size=0.2, random_state = 109, stratify = df.Cancer_type) # + print(df.shape) print(X_train.shape, X_test.shape, y_train.shape, y_test.shape) print(df.Cancer_type.value_counts(normalize=True)) # - # **1.1 Take a peek at your training set (show a glimpse of what you did): you should notice the severe differences in the measurements from one gene to the next (some are negative, some hover around zero, and some are well into the thousands). To account for these differences in scale and variability, normalize each predictor to vary between 0 and 1.** # #your code here display(X_train.head()) # + #your code here scaler=MinMaxScaler().fit(X_train) ##https://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.html X_train_scaled=pd.DataFrame(scaler.transform(X_train),index=X_train.index,columns=X_train.columns) X_test_scaled=pd.DataFrame(scaler.transform(X_test),index=X_test.index,columns=X_test.columns) X_train_scaled.head() # - # **1.2 The training set contains more predictors than observations. What problem(s) can this lead to in fitting a classification model to such a data set? Explain in 3 or fewer sentences.** # - When a training set contains more predictors than observations we cannot use the Least Square Regression. Therefore, in such cases we need to use regularization methods and introduce penalty terms # # **1.3** **Identify and report which 10 genes individually discriminate between the two cancer classes the best (consider every gene in the data set). Note: it wil lbe useful to save this list for future parts.** # # **Plot two histograms of your best predictor - one using training and another for the testing dataset. 
Each histogram should clearly distinguish two different `Cancer_type` classes.** # # **Hint: You may use t-testing to make this determination: #https://en.wikipedia.org/wiki/Welch%27s_t-test.** # + Cancer0 = df[df['Cancer_type']==0] Cancer1 = df[df['Cancer_type']==1] All_Genes = df.columns[1:] ttest_List = [] for i in range(len(All_Genes)-1): ind_gene = All_Genes[i] mean_diff = Cancer0[ind_gene].mean() - Cancer1[ind_gene].mean() C0_var = Cancer0[ind_gene].std()**2/Cancer0[ind_gene].count() C1_var = Cancer1[ind_gene].std()**2/Cancer1[ind_gene].count() denominator = np.sqrt(C0_var + C1_var) ttest = mean_diff/denominator ttest_List.append(ttest) #ttest_List # + top10_gene_index = np.argsort(ttest_List)[-10:] top10_gene = [X_train.columns[i] for i in top10_gene_index] top_gene_index = np.argsort(ttest_List)[-1:] top_gene = X_train.columns[top_gene_index][0] # + #your code here #logist_train_model1 = LogisticRegression(C=100000).fit(X_train_scaled,y_train) #model1_abs_coef = np.abs(logist_train_model1.coef_) #top10_gene_index = np.argsort(model1_abs_coef)[0][-10:] #top10_gene = [X_train.columns[i] for i in top10_gene_index] #top_gene_index = np.argsort(model1_abs_coef)[0][-1:] #top_gene = X_train.columns[top_gene_index][0] # - #your code here print("Top Gene Predictor: \t{}\n".format(top_gene)) print("Top 10 Gene Predictors: \n{}".format(top10_gene)); # + #0 indicating the ALL class and 1 indicating the AML X_train_df = pd.DataFrame(list(zip(X_train[top_gene], y_train)),columns =[top_gene, 'Cancer_type']) TopGene_Cancer0_train = X_train_df[(X_train_df.Cancer_type == 0)][top_gene] TopGene_Cancer1_train = X_train_df[(X_train_df.Cancer_type == 1)][top_gene] X_test_df = pd.DataFrame(list(zip(X_test[top_gene], y_test)),columns =[top_gene, 'Cancer_type']) TopGene_Cancer0_test = X_test_df[(X_test_df.Cancer_type == 0)][top_gene] TopGene_Cancer1_test = X_test_df[(X_test_df.Cancer_type == 1)][top_gene] f,ax = plt.subplots(1,2, figsize=(14,6)) ax[0].hist(TopGene_Cancer0_train, 
alpha=0.5, label = 'Cancer0_ALL'); ax[0].hist(TopGene_Cancer1_train, alpha=0.5, label = 'Cancer1_AML'); ax[0].set_title('Train Data') ax[0].set_xlabel(top_gene) ax[0].set_ylabel('Frequency') ax[0].legend() ax[1].hist(TopGene_Cancer0_test, alpha=0.5, label = 'Cancer0_ALL'); ax[1].hist(TopGene_Cancer1_test, alpha=0.5, label = 'Cancer1_AML'); ax[1].set_title('Test Data') ax[1].set_xlabel(top_gene) ax[1].set_ylabel('Frequency') ax[1].legend() # + # Using Scaled Data X_train_df = pd.DataFrame(list(zip(X_train_scaled[top_gene], y_train)),columns =[top_gene, 'Cancer_type']) TopGene_Cancer0_train = X_train_df[(X_train_df.Cancer_type == 0)][top_gene] TopGene_Cancer1_train = X_train_df[(X_train_df.Cancer_type == 1)][top_gene] X_test_df = pd.DataFrame(list(zip(X_test_scaled[top_gene], y_test)),columns =[top_gene, 'Cancer_type']) TopGene_Cancer0_test = X_test_df[(X_test_df.Cancer_type == 0)][top_gene] TopGene_Cancer1_test = X_test_df[(X_test_df.Cancer_type == 1)][top_gene] f,ax = plt.subplots(1,2, figsize=(14,6)) ax[0].hist(TopGene_Cancer0_train, alpha=0.5, label = 'Cancer0_ALL'); ax[0].hist(TopGene_Cancer1_train, alpha=0.5, label = 'Cancer1_AML'); ax[0].set_title('Train Data') ax[0].set_xlabel(top_gene) ax[0].set_ylabel('Frequency') ax[0].legend() ax[1].hist(TopGene_Cancer0_test, alpha=0.5, label = 'Cancer0_ALL'); ax[1].hist(TopGene_Cancer1_test, alpha=0.5, label = 'Cancer1_AML'); ax[1].set_title('Test Data') ax[1].set_xlabel(top_gene) ax[1].set_ylabel('Frequency') ax[1].legend() # - # **1.4 Using your top gene from the previous part (call it `best_predictor`), create a classification model by eye-balling a value for this gene that would discriminate the two classes the best. Justify your choice in 1-2 sentences. 
Report the accuracy of this hand-chosen model on the test set.** # # + #your code here best_predictor = top_gene X_test_df['y_pred_test'] = ((X_test_df[top_gene] >= 0) * (X_test_df[top_gene] <= 0.5)).astype(float) test_score = accuracy_score(y_test, X_test_df['y_pred_test'])*100 print("Training Set Accuracy: {}% \n".format(test_score)) print("It appears that scaled values between 0 and 0.5 of the {} gene predicts the existace of Cancer1 (AML)." "Though it may not be wise to make this predictions based on one gene, if we have are given only this piece" "of information it's reasonable make these assumptions based on the probability at each gene level".format(top_gene)) # - # <div class='exercise'><b> Question 2 [25 pts]: Linear and Logistic Regression </b></div> # # # In class we discussed how to use both linear regression and logistic regression for classification. For this question, you will work with a single gene predictor that you identify as the best predictor above to explore these two methods. # # **2.1** Fit a simple linear regression model to the training set using the single gene predictor `best_predictor` to predict cancer type. The scores predicted by the regression model for a patient could be interpreted as an estimate of the probability that the patient has Cancer_type=1 (AML). Is there a problem with this interpretation? # # # Create a figure with following items displayed on the same plot (use training data): # - the predicted quantitative response from the linear regression model as a function of the best gene predictor # - the true binary response. # # # **2.2** Use your estimated linear regression model to classify observations into 0 and 1 using the standard Bayes' classifier. Evaluate the classification accuracy of this classification model on both the training and test sets. # # # **2.3** Next, fit a simple logistic regression model to the training set. 
How do the training and test classification accuracies of this model compare with the linear regression model? # # Remember, you need to set the regularization parameter for sklearn's logistic regression function to be a very large value in order to **not** regularize (use 'C=100000'). # # # **2.4** # Print and interpret the logistic regression coefficients: the 'slope' and intercept. # # # Create 2 plots (one each for training and testing data) with 4 items displayed on each plot. # - the predicted quantitative response from the linear regression model as a function of the best gene predictor. # - the predicted probabilities of the logistic regression model as a function of the best gene predictor. # - the true binary response. # - a horizontal line at $y=0.5$. # # Based on these plots, does one of the models appear better suited for binary classification than the other? Explain in 3 sentences or fewer. # # # <hr> # ### Solutions # **2.1** **Fit a simple linear regression model to the training set using the single gene predictor `best_predictor` to predict cancer type. The scores predicted by the regression model for a patient could be interpreted as an estimate of the probability that the patient has Cancer_type=1 (AML). 
Is there a problem with this interpretation?** # # import statsmodels.api as sm # + # your code here X_train_cst = sm.add_constant(X_train_scaled[best_predictor]) X_test_cst = sm.add_constant(X_test_scaled[best_predictor]) model_linear_sklearn = LinearRegression() model_linear_sklearn.fit(X_train_cst, y_train) y_pred_train = model_linear_sklearn.predict(X_train_cst) y_pred_test = model_linear_sklearn.predict(X_test_cst) # + # your code here #the predicted quantitative response from the linear regression model as a function of the best gene predictor #the true binary response plt.scatter(X_train_scaled[best_predictor], y_pred_train, label = 'Predicted Y value') plt.scatter(X_train_scaled[best_predictor], y_train, label = 'True Response') plt.xlabel(best_predictor) plt.ylabel("Cancer Type") plt.title("Training Data") plt.legend(); # - plt.hist(y_pred_train, alpha=0.6, label = 'Predicted Y value') plt.xlabel(best_predictor) plt.ylabel("Frequency") plt.title("Training Data") plt.legend(); # - The limitation of using the predicted y values as the pprobability that patient has cancer is that there's no clear cut off point. Furthermore, since the existance of the cancer doesn't solely depend on this gene. Therefore, there's a chance that even though the predicted y value indicates that the probability of this patient having cancer as 0.3, However in reality the cancer can be present in that patient # # **2.2** **Use your estimated linear regression model to classify observations into 0 and 1 using the standard Bayes' classifier. 
Evaluate the classification accuracy of this classification model on both the training and test sets.** # + # your code here classfy_train = (y_pred_train > 0.5).astype(int) classfy_test = (y_pred_test > 0.5).astype(int) print("Accuracy for Training Data\t: {}".format(accuracy_score(y_train, classfy_train) * 100)) print("Accuracy for Testing Data\t: {}".format(accuracy_score(y_test, classfy_test) * 100)) # - # # **2.3** **Next, fit a simple logistic regression model to the training set. How do the training and test classification accuracies of this model compare with the linear regression model? Are the classifications substantially different? Explain why this is the case.** # # **Remember, you need to set the regularization parameter for sklearn's logistic regression function to be a very large value in order to **not** regularize (use 'C=100000'). # + # your code here Log_model1 = LogisticRegression(C=100000).fit(X_train_cst,y_train) Log_model1_pred_train = Log_model1.predict(X_train_cst) Log_model1_pred_test = Log_model1.predict(X_test_cst) print("Logistic Regression") print("Accuracy for Training Data\t: {}".format(accuracy_score(y_train, Log_model1_pred_train) * 100)) print("Accuracy for Testing Data\t: {}".format(accuracy_score(y_test, Log_model1_pred_test) * 100)) print("\nLinear Regression") print("Accuracy for Training Data\t: {}".format(accuracy_score(y_train, classfy_train) * 100)) print("Accuracy for Testing Data\t: {}".format(accuracy_score(y_test, classfy_test) * 100)) # - # - The accuracy scores from both models are extremely similar. This is because we are using only one predictor to perform the model. 
Therefore, our linear estimates are pretty much the same # # **2.4 Print and interpret the logistic regression coefficients: the 'slope' and the intercept.** # # **Create 2 plots (with training and test data) with 4 items displayed on each plot.** # - the predicted quantitative response from the linear regression model as a function of the best gene predictor. # - the predicted probabilities of the logistic regression model as a function of the best gene predictor. # - the true binary response. # - a horizontal line at $y=0.5$. # # **Based on these plots, does one of the models appear better suited for binary classification than the other? Explain in 3 sentences or fewer.** # print("Logistic Regression") print("Intercept\t: {}".format(Log_model1.coef_[0][0])) print("Slope\t\t: {}".format(Log_model1.coef_[0][1])) # + # your code here f,ax = plt.subplots(1,2, figsize=(14,6)) #the predicted quantitative response from the linear regression model as a function of the best gene predictor. ax[0].plot(X_train_scaled[best_predictor], y_pred_train, c="b", label = 'Linear regression Prediction') ax[1].plot(X_test_scaled[best_predictor], y_pred_test, c="b", label = 'Linear regression Prediction') #the predicted probabilities of the logistic regression model as a function of the best gene predictor. Log_model1_predproba_train = Log_model1.predict_proba(X_train_cst)[:,1] Log_model1_predproba_test = Log_model1.predict_proba(X_test_cst)[:,1] ax[0].scatter(X_train_scaled[best_predictor], Log_model1_predproba_train, c="g", alpha = 0.5, label = 'Logistic regression Prediction') ax[1].scatter(X_test_scaled[best_predictor], Log_model1_predproba_test, c="g", alpha = 0.5, label = 'Logistic regression Prediction') #the true binary response. ax[0].scatter(X_train_scaled[best_predictor], y_train, c="orange") ax[1].scatter(X_test_scaled[best_predictor], y_test, c="orange") #a horizontal line at 𝑦=0.5 . 
ax[0].axhline(y=.5, c="r") ax[1].axhline(y=.5, c="r") # labels ax[0].set_title("Train Set using " + best_predictor + " gene") ax[0].set_xlabel(best_predictor + "expression level") ax[0].set_ylabel("Cancer type") ax[0].legend(); ax[1].set_title("Test Set using " + best_predictor + " gene") ax[1].set_xlabel(best_predictor + "expression level") ax[1].set_ylabel("Cancer type") ax[1].legend(); # - # - Though Both the regressions appear to be extremely similar Logistic regression might be the best option because it has a boundary of 0-1. # <div class='exercise'> <b> Question 3 [20pts]: Multiple Logistic Regression </b> </div> # # # **3.1** Next, fit a multiple logistic regression model with all the gene predictors from the data set. How does the classification accuracy of this model compare with the models fitted in question 2 with a single gene (on both the training and test sets)? # # # **3.2** How many of the coefficients estimated by this multiple logistic regression in the previous part are significantly different from zero at a *significance level of 5%*? Use the same value of C=100000 as before. # # **Hint:** To answer this question, use *bootstrapping* with 100 bootstrap samples/iterations. # # # **3.3** Comment on the classification accuracy of training and test set? Given the results above how would you assess the generalization capacity of your trained model? What other tests would you suggest to better guard against false sense of security on the accuracy of the model as a whole? # # **3.4** Now use regularization to improve predictions from the multiple logistic regression model. Use LASSO-like regularization and cross-validation within the training set to tune the model. Report the classification accuracy on both the training and test set. # # **3.5** Do the 10 best predictors from Q1 hold up as important features in this regularized model? If not, explain why this is the case (feel free to use the data to support your explanation). 
# <hr> # ### Solutions # **3.1** **Next, fit a multiple logistic regression model with all the gene predictors from the data set. How does the classification accuracy of this model compare with the models fitted in question 2 with a single gene (on both the training and test sets)?** # # + # your code here X_train_cst = sm.add_constant(X_train) X_test_cst = sm.add_constant(X_test) Multi_Logistic_model = LogisticRegression(C=100000).fit(X_train_cst, y_train) Multi_Logistic_pred_train = Multi_Logistic_model.predict(X_train_cst) Multi_Logistic_pred_test = Multi_Logistic_model.predict(X_test_cst) print("Multiple Logistic Regression") print("Accuracy for Training Data\t: {}".format(accuracy_score(y_train, Multi_Logistic_pred_train) * 100)) print("Accuracy for Testing Data\t: {}".format(accuracy_score(y_test, Multi_Logistic_pred_test) * 100)) print("\nLogistic Regression") print("Accuracy for Training Data\t: {}".format(accuracy_score(y_train, Log_model1_pred_train) * 100)) print("Accuracy for Testing Data\t: {}".format(accuracy_score(y_test, Log_model1_pred_test) * 100)) # - # - Multiple logistic regression shows a significantly higher Accuracy score than the logistic regression performed with the single gene. However, this could be due to over fitting # # **3.2** **How many of the coefficients estimated by this multiple logistic regression in the previous part are significantly different from zero at a *significance level of 5%*? 
Use the same value of C=100000 as before.** # # **Hint:** **To answer this question, use *bootstrapping* with 1000 bootstrap samples/iterations.** # train_data = pd.concat([X_train,y_train], axis=1) # + # your code here model = LogisticRegression(C=100000).fit(X_train, y_train) bootstrapping = 100 coef = np.zeros((bootstrapping, train_data.shape[1]-1)) for i in range(bootstrapping): coef[i,:] = model.coef_ coef_count = 0 for i in range(coef.shape[1]): coeff_samples = coef[:,i] lower_bound = np.percentile(coeff_samples, 2.5) upper_bound = np.percentile(coeff_samples, 97.5) if lower_bound>0 or upper_bound<0: coef_count += 1 print('Number coefficients different from zero at a significance level of 5%\t: {}'.format(coeffs_count)) # - # **3.3 Open question: Comment on the classification accuracy of training and test set? Given the results above how would you assest the generalization capacity of your trained model? What other tests would you suggest to better guard against false sense of security on the accuracy of the model as a whole.** # - When performing Multiple logicstic Regression, we reached 100% accuracy with the training set. This suggest that we may have overfit the model. If we have more independent dataset we can run this model against that data to guage the accuracy. However, in order to better guard against the false sense of security on accuracy, we can plot the ROC curve # **3.4 Now use regularization to improve predictions from the multiple logistic regression model. Use LASSO-like regularization and cross-validation within the training set to tune the model. 
# Report the classification accuracy on both the training and test set.**

# your code here
# LASSO-like (L1) regularised logistic regression; the regularisation
# strength is tuned by cross-validation inside the training set.
Log_RegCV = LogisticRegressionCV(multi_class='auto', penalty='l1', solver='liblinear').fit(X_train_cst, y_train)
Log_RegCV_predproba_train = Log_RegCV.predict_proba(X_train_cst)[:, 1]

# +
Log_RegCV_pred_train = Log_RegCV.predict(X_train_cst)
Log_RegCV_pred_test = Log_RegCV.predict(X_test_cst)

print("Logistic Regression CV")
print("Accuracy for Training Data\t: {}".format(accuracy_score(y_train, Log_RegCV_pred_train) * 100))
print("Accuracy for Testing Data\t: {}".format(accuracy_score(y_test, Log_RegCV_pred_test) * 100))
# -

# **3.5: Do the 10 best predictors from Q1 hold up as important features in this regularized model? If not, explain why this is the case (feel free to use the data to support your explanation).**

# your code here
print("Top 10 Gene Predictors: \n{}".format(top10_gene));

# your code here
# Rank genes by the absolute value of their regularised coefficients and
# take the ten largest.
reg_top10_gene_index = np.argsort(np.abs(Log_RegCV.coef_[0]))[-10:]
reg_top10_gene_list = [X_train.columns[i] for i in reg_top10_gene_index]
print("Top 10 Gene Predictors using Regularized model: \n{}".format(reg_top10_gene_list));

# +
# your code here
# For each of the original top-10 genes, compute its importance rank
# (1 = most important) under the regularised model.
rank_order = list(np.argsort(np.abs(Log_RegCV.coef_[0])))
priority_index_top10_gene = [len(Log_RegCV.coef_[0]) - rank_order.index(i)
                             for i in top10_gene_index]
print("Importance rating of the previously predicted Top10 genes with respect to the Regularized model : \n{}".format(priority_index_top10_gene));
# -

# The single most important gene under the regularised model.
reg_top_gene_index = np.argsort(np.abs(Log_RegCV.coef_[0]))[-1:]
reg_top_gene_list = [X_train.columns[i] for i in reg_top_gene_index][0]
print("Top Gene Predictor using Regularized model: \t{}".format(reg_top_gene_list));

# +
# Histogram the most important gene for each cancer class, train and test
# sets side by side.
X_train_df = pd.DataFrame(list(zip(X_train_scaled[reg_top_gene_list], y_train)), columns=[reg_top_gene_list, 'Cancer_type'])
TopGene_Cancer0_train = X_train_df[(X_train_df.Cancer_type == 0)][reg_top_gene_list]
TopGene_Cancer1_train = X_train_df[(X_train_df.Cancer_type == 1)][reg_top_gene_list]

X_test_df = pd.DataFrame(list(zip(X_test_scaled[reg_top_gene_list], y_test)), columns=[reg_top_gene_list, 'Cancer_type'])
TopGene_Cancer0_test = X_test_df[(X_test_df.Cancer_type == 0)][reg_top_gene_list]
TopGene_Cancer1_test = X_test_df[(X_test_df.Cancer_type == 1)][reg_top_gene_list]

f, ax = plt.subplots(1, 2, figsize=(14, 6))

ax[0].hist(TopGene_Cancer0_train, alpha=0.5, label='Cancer0_ALL');
ax[0].hist(TopGene_Cancer1_train, alpha=0.5, label='Cancer1_AML');
ax[0].set_title('Train Data')
ax[0].set_xlabel(reg_top_gene_list)
ax[0].set_ylabel('Frequency')
ax[0].legend()

ax[1].hist(TopGene_Cancer0_test, alpha=0.5, label='Cancer0_ALL');
ax[1].hist(TopGene_Cancer1_test, alpha=0.5, label='Cancer1_AML');
ax[1].set_title('Test Data')
ax[1].set_xlabel(reg_top_gene_list)
ax[1].set_ylabel('Frequency')
ax[1].legend();
# -

# - Previously predicted top10 Gene predictors are not equivalent to the Top10 predictors obtained from the regularized model. In fact, the importance rating of the previously predicted Top10 genes with respect to the regularized model indicated that none of the previous predictors falls within the top 100 predictors.
# - Based on the accuracy scores of the initial model, I believe the regularized model predictors might actually be better at Cancer type prediction
# - This might be due to making predictions using the linear regression model.

# # <div class='exercise'> <b> Question 4 [25pts]: Multi Class Log Regression </b> </div>
# **4.1** Load the data from `hw4_mc_enhance.csv.zip` and examine its structure. How many instances of each class are there in our dataset?
#
# **4.2** Split the dataset into train and test, 80-20 split, random_state = 8.
#
# We are going to utilize these two features - 'M31523_at', 'X95735_at'. Create a scatter plot of these two features using the training dataset. It should be easily discernable via labeling/marking in the plot which observations belong to which `cancer_type`.
# # **4.3** Fit the following two models using crossvalidation:
# - Logistic Regression Multiclass model with linear features.
# - Logistic Regression Multiclass model with Polynomial features, degree = 2.
#
# **4.4** Plot the decision boundaries for each model and interpret the results. Hint: You may utilize the function `overlay_decision_boundary`.
#
# **4.5** Report and plot the CV scores for the two models and interpret.
#
# <hr>
# ### Solutions

# **4.1 Load the data from `hw4_mc_enhance.csv.zip` and examine its structure. How many instances of each class are there in our dataset?**

#your code here
mc_data = pd.read_csv('data/hw4_mc_enhance.csv', index_col=0)
mc_data.head()

# +
#your code here
mc_data.describe()
# Count missing values per column and report only columns that have any.
mc_data_isna_count = pd.DataFrame(mc_data.isna().sum())
print("Count of Null entries in the dataset:")
mc_data_isna_count[mc_data_isna_count[0] > 0].count()
# -

display(mc_data.shape)
mc_data.groupby('cancer_type').count()

# **4.2 Split the dataset into train and test, 80-20 split, random_state = 8.**
#
# **We are going to utilize these two features - 'M31523_at', 'X95735_at'. Create a scatter plot of these two features using training dataset. We should be able to discern from the plot which sample belongs to which `cancer_type`.**

# +
# your code here
#train_data, test_data = train_test_split(mc_data, test_size=.2, random_state=8)
# Keep the two genes of interest plus the label, then do a stratified 80/20
# split so the class proportions are preserved in both halves.
subset_data = pd.concat([mc_data['M31523_at'], mc_data['X95735_at'], mc_data['cancer_type']], axis=1)
X_train, X_test, y_train, y_test = train_test_split(
    subset_data.loc[:, subset_data.columns != 'cancer_type'],
    subset_data.cancer_type,
    test_size=0.2, random_state=8,
    stratify=subset_data.cancer_type)
# -

def create_scatterplot(dataset):
    """Scatter 'M31523_at' vs 'X95735_at' on the current axes, one colour per cancer_type.

    NOTE(review): the original looped over a hard-coded range(0, 4) while the
    legend hard-coded three labels, so an empty fourth scatter silently
    consumed a colour-cycle slot and the legend could mislabel if the class
    set ever changed. Iterate the classes actually present instead.
    """
    ax = plt.gca()
    ax.set_xlabel("M31523_at")
    ax.set_ylabel("X95735_at")
    classes = sorted(dataset['cancer_type'].unique())
    for cls in classes:
        class_rows = dataset[dataset['cancer_type'] == cls]
        ax.scatter(class_rows["M31523_at"], class_rows["X95735_at"])
    ax.legend([str(c) for c in classes], loc="right")
    return ax

# +
# your code here
f, ax = plt.subplots(1, 2, figsize=(14, 6))

# Left panel: each gene's level plotted against the class label.
ax[0].scatter(X_train['M31523_at'], y_train, alpha=0.5, label='M31523_at')
ax[0].scatter(X_train['X95735_at'], y_train, alpha=0.5, label='X95735_at')
ax[0].set_title('Train Data')
ax[0].set_xlabel('Gene Level')
ax[0].set_ylabel('Cancer Type')
ax[0].legend()

# Right panel: gene-vs-gene scatter coloured by class.
xy_train = pd.concat([X_train['M31523_at'], X_train['X95735_at'], y_train], axis=1)
#ax[1] = sns.scatterplot(x="M31523_at", y="X95735_at", hue=xy_train['cancer_type'], data=xy_train)
ax[1] = create_scatterplot(xy_train)
ax[1].set_title('Train Data');
#legend = ax[1].legend_
#labels=['0', '1', '2','2']
#for i, label in enumerate(labels):
#    legend.get_texts()[i+1].set_text(label)
# -

#sns.pairplot(xy_train, hue = 'cancer_type')
g = sns.pairplot(xy_train, x_vars=["M31523_at"], y_vars=["X95735_at"], hue='cancer_type')
g.fig.set_size_inches(8, 6)

# **4.3 Fit the following two models using crossvalidation:**
#
# **Logistic Regression Multiclass model with linear features.**
#
# **Logistic Regression Multiclass model with Polynomial features, degree = 2.**
#

# +
# your code here
# Scale both splits with a MinMax scaler fit on the training data only.
scaler = MinMaxScaler().fit(X_train)
X_train_scaled = pd.DataFrame(scaler.transform(X_train), index=X_train.index, columns=X_train.columns)
X_test_scaled = pd.DataFrame(scaler.transform(X_test), index=X_test.index, columns=X_test.columns)
X_train_cst = sm.add_constant(X_train_scaled)
X_test_cst = sm.add_constant(X_test_scaled)

# Multiclass logistic regression with linear features, tuned by CV.
Multi_Log_RegCV = LogisticRegressionCV().fit(X_train_cst, y_train)
Multi_Log_RegCV_predproba_train = Multi_Log_RegCV.predict_proba(X_train_cst)[:, 1]
Multi_Log_RegCV_pred_train = Multi_Log_RegCV.predict(X_train_cst)
Multi_Log_RegCV_pred_test = Multi_Log_RegCV.predict(X_test_cst)

print("Multiclass Logistic Regression CV")
print("Accuracy for Training Data\t: {}".format(accuracy_score(y_train, Multi_Log_RegCV_pred_train) * 100))
print("Accuracy for Testing Data\t: {}".format(accuracy_score(y_test, Multi_Log_RegCV_pred_test) * 100))

# +
# your code here
# Degree-2 polynomial features: add the squared terms by hand.
X_train_poly_cst = X_train_cst.copy()
X_train_poly_cst['M31523_at^2'] = X_train_cst['M31523_at']**2
X_train_poly_cst['X95735_at^2'] = X_train_cst['X95735_at']**2
X_test_poly_cst = X_test_cst.copy()
X_test_poly_cst['M31523_at^2'] = X_test_poly_cst['M31523_at']**2
X_test_poly_cst['X95735_at^2'] = X_test_poly_cst['X95735_at']**2

Multi_Log_RegCV_poly = LogisticRegressionCV().fit(X_train_poly_cst, y_train)
Multi_Log_RegCV_poly_predproba_train = Multi_Log_RegCV_poly.predict_proba(X_train_poly_cst)[:, 1]
Multi_Log_RegCV_poly_pred_train = Multi_Log_RegCV_poly.predict(X_train_poly_cst)
Multi_Log_RegCV_poly_pred_test = Multi_Log_RegCV_poly.predict(X_test_poly_cst)

print("Multiclass Logistic Regression CV with Polynomial features, degree = 2")
print("Accuracy for Training Data\t: {}".format(accuracy_score(y_train, Multi_Log_RegCV_poly_pred_train) * 100))
print("Accuracy for Testing Data\t: {}".format(accuracy_score(y_test, Multi_Log_RegCV_poly_pred_test) * 100))

# +
# your code here
# Same degree-2 model built via a pipeline instead of manual feature columns.
# NOTE(review): the pipeline is applied to data that already carries the
# hand-added squared columns, so PolynomialFeatures expands those again —
# confirm whether that double expansion is intended.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

polynomial_logreg_estimator = make_pipeline(
    PolynomialFeatures(degree=2, include_bias=False), LogisticRegression())

X_train_poly_cst = X_train_cst.copy()
X_train_poly_cst['M31523_at^2'] = X_train_cst['M31523_at']**2
X_train_poly_cst['X95735_at^2'] = X_train_cst['X95735_at']**2
X_test_poly_cst = X_test_cst.copy()
X_test_poly_cst['M31523_at^2'] = X_test_poly_cst['M31523_at']**2
X_test_poly_cst['X95735_at^2'] = X_test_poly_cst['X95735_at']**2

#Multi_Log_RegCV_poly.fit(X_train_poly_cst, y_train)
Multi_Log_RegCV_poly = polynomial_logreg_estimator.fit(X_train_poly_cst, y_train)
Multi_Log_RegCV_poly_pred_train = Multi_Log_RegCV_poly.predict(X_train_poly_cst)
Multi_Log_RegCV_poly_pred_test = Multi_Log_RegCV_poly.predict(X_test_poly_cst)

print("Multiclass Logistic Regression CV with Polynomial features, degree = 2 using make_pipeline")
print("Accuracy for Training Data\t: {}".format(accuracy_score(y_train, Multi_Log_RegCV_poly_pred_train) * 100))
print("Accuracy for Testing Data\t: {}".format(accuracy_score(y_test, Multi_Log_RegCV_poly_pred_test) * 100))
# -

# **4.4 Plot the decision boundary and interpret results. Hint: You may utilize the function `overlay_decision_boundary`**
#

# +
def overlay_decision_boundary(ax, model, colors=None, nx=200, ny=200, desaturate=.5, xlim=None, ylim=None):
    """
    A function that visualizes the decision boundaries of a classifier.

    ax: Matplotlib Axes to plot on
    model: Classifier to use.
     - if `model` has a `.predict` method, like an sklearn classifier, we call `model.predict(X)`
     - otherwise, we simply call `model(X)`
    colors: list or dict of colors to use. Use color `colors[i]` for class i.
     - If colors is not provided, uses the current color cycle
    nx, ny: number of mesh points to evaluated the classifier on
    desaturate: how much to desaturate each of the colors (for better contrast with the sample points)
    xlim, ylim: range to plot on. (If the default, None, is passed, the limits will be taken from `ax`.)
    """
    # Create mesh.
    xmin, xmax = ax.get_xlim() if xlim is None else xlim
    ymin, ymax = ax.get_ylim() if ylim is None else ylim
    xx, yy = np.meshgrid(
        np.linspace(xmin, xmax, nx),
        np.linspace(ymin, ymax, ny))
    X = np.c_[xx.flatten(), yy.flatten()]

    # Predict on mesh of points.
    model = getattr(model, 'predict', model)
    y = model(X)
    #print("Do I predict" , y)
    # y[np.where(y=='aml')]=3
    # y[np.where(y=='allT')]=2
    # y[np.where(y=='allB')]=1
    y = y.astype(int)  # This may be necessary for 32-bit Python.
    y = y.reshape((nx, ny))

    # Generate colormap.
    if colors is None:
        # If colors not provided, use the current color cycle.
        # Shift the indices so that the lowest class actually predicted gets the first color.
        # ^ This is a bit magic, consider removing for next year.
        colors = (['white'] * np.min(y)) + sns.utils.get_color_cycle()

    if isinstance(colors, dict):
        missing_colors = [idx for idx in np.unique(y) if idx not in colors]
        #assert len(missing_colors) == 0, f"Color not specified for predictions {missing_colors}"

        # Make a list of colors, filling in items from the dict.
        color_list = ['white'] * (np.max(y) + 1)
        for idx, val in colors.items():
            color_list[idx] = val
    else:
        assert len(colors) >= np.max(y) + 1, "Insufficient colors passed for all predictions."
        color_list = colors
    color_list = [sns.utils.desaturate(color, desaturate) for color in color_list]
    cmap = matplotlib.colors.ListedColormap(color_list)

    # Plot decision surface
    ax.pcolormesh(xx, yy, y, zorder=-2, cmap=cmap, norm=matplotlib.colors.NoNorm(), vmin=0, vmax=y.max() + 1)
    xx = xx.reshape(nx, ny)
    yy = yy.reshape(nx, ny)
    if len(np.unique(y)) > 1:
        ax.contour(xx, yy, y, colors="black", linewidths=1, zorder=-1)
    else:
        print("Warning: only one class predicted, so not plotting contour lines.")

# +
# your code here
# NOTE(review): the original called .fit(x_train, y_train) with a lowercase
# x_train that is never defined (NameError). The boundary is overlaid on the
# raw-feature scatter plot, so the model must be fit on the raw two-feature
# X_train for the mesh predictions to line up with the axes.
Multi_Log_RegCV = LogisticRegressionCV().fit(X_train, y_train)
ax = create_scatterplot(xy_train)
ax.set_title("Logistic Linear Regression")
overlay_decision_boundary(ax, Multi_Log_RegCV)

# +
# your code here
polynomial_logreg_estimator = make_pipeline(
    PolynomialFeatures(degree=2, include_bias=False), LogisticRegressionCV())
# Same NameError fix as above: fit on X_train, not the undefined x_train.
Multi_Log_RegCV_poly = polynomial_logreg_estimator.fit(X_train, y_train)
ax = create_scatterplot(xy_train)
ax.set_title("Logistic Regression with Polynomial Features")
overlay_decision_boundary(ax, Multi_Log_RegCV_poly)
# -

# **4.5 Report and plot the CV scores for the two models and interpret.**

print("Cross Validation Score of Linear Logistic Regression: \n {}"
      .format(cross_val_score(Multi_Log_RegCV, X_train_cst, y_train)))
print("\nCross Validation Score of Polynomial Logistic Regression: \n {}"
      .format(cross_val_score(Multi_Log_RegCV_poly, X_train_poly_cst, y_train)))

# +
# your code here
# (removed: `x_2_range = np.arange(x_2_min, x_2_max, 0.003)` — x_2_min and
# x_2_max were never defined, so the line raised a NameError, and the result
# was never used.)
f, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.hist(cross_val_score(Multi_Log_RegCV, X_train, y_train), label='Logistic Linear Regression - CV Score')
ax.hist(cross_val_score(Multi_Log_RegCV_poly, X_train_poly_cst, y_train), label='Logistic Polynomial Regression - CV Score')
ax.set_title('Train Set')
ax.legend();
# -

# - Cross Validation scores of both models appear to be very similar.
However, I failed to see the value in plotting the Cross Validation Scores # <div class='exercise'><b> Question 5: [10 pts] Including an 'abstain' option </b></div> # # One of the reasons a hospital might be hesitant to use your cancer classification model is that a misdiagnosis by the model on a patient can sometimes prove to be very costly (e.g. if the patient were to file a law suit seeking a compensation for damages). One way to mitigate this concern is to allow the model to 'abstain' from making a prediction: whenever it is uncertain about the diagnosis for a patient. However, when the model abstains from making a prediction, the hospital will have to forward the patient to a specialist, which would incur additional cost. How could one design a cancer classification model with an abstain option, such that the cost to the hospital is minimized? # # *Hint:* Think of ways to build on top of the logistic regression model and have it abstain on patients who are difficult to classify. # **5.1** More specifically, suppose the cost incurred by a hospital when a model mis-predicts on a patient is $\$5000$ , and the cost incurred when the model abstains from making a prediction is \$1000. What is the average cost per patient for the OvR logistic regression model (without quadratic or interaction terms) from **Question 4**. Note that this needs to be evaluated on the patients in the test set. # **5.2** Design a classification strategy (into the 3 groups plus the *abstain* group) that has as low cost as possible per patient (certainly lower cost per patient than the logistic regression model). Give a justification for your approach. # <hr> # ### Solutions # **5.1 More specifically, suppose the cost incurred by a hospital when a model mis-predicts on a patient is $\$5000$ , and the cost incurred when the model abstains from making a prediction is \$1000. 
# What is the average cost per patient for the OvR logistic regression model (without quadratic or interaction terms) from Question 4. Note that this needs to be evaluated on the patients in the test set.**

# **
# ...
# **

# *your answer here*
#
# from sklearn.metrics import confusion_matrix

# +
# your code here
# One-vs-rest multiclass logistic regression, evaluated on the held-out
# test patients. Every mis-prediction costs $5000; this model never abstains.
Multi_Log_RegCV = LogisticRegressionCV(multi_class="ovr").fit(X_train_cst, y_train)
Multi_Log_RegCV_predproba_test = Multi_Log_RegCV.predict_proba(X_test_cst)[:, 1]
Multi_Log_RegCV_pred_train = Multi_Log_RegCV.predict(X_train_cst)
Multi_Log_RegCV_pred_test = Multi_Log_RegCV.predict(X_test_cst)

confusion_matrix = pd.crosstab(y_test.values.flatten(),
                               Multi_Log_RegCV_pred_test.flatten(),
                               rownames=['Actual Class'],
                               colnames=['Predicted Class'])
display(confusion_matrix)

# Correct predictions sit on the diagonal of the crosstab.
correct_pred = confusion_matrix[0][0] + confusion_matrix[1][1] + confusion_matrix[2][2]
incorrectPred = len(y_test) - correct_pred

print("Using Test Data:")
print("Avg Cost per Patient using Accuracy Score\t: {}".format((1-(accuracy_score(y_test, Multi_Log_RegCV_pred_test))) * 5000))
print("Avg Cost per Patient using Confusion Matrix\t: {}".format(incorrectPred*5000/len(y_test)))
# -

# **5.2 Design a classification strategy (into the 3 groups plus the *abstain* group) that has as low cost as possible per patient (certainly lower cost per patient than the logistic regression model).
# Give a justification for your approach.**

# Per-patient dollar costs for the two failure modes.
cost_abstain = 1000
cost_missPred = 5000

# Sentinel for "the model abstains" — distinct from every real class label.
ABSTAIN = -1

def cost_basedOn_prob(y_prob, y_values, p):
    """Average cost per patient when low-confidence predictions abstain.

    y_prob   : per-class predicted probabilities, one row per patient.
    y_values : true class labels (0, 1, 2).
    p        : probability threshold — predict the first class whose
               probability reaches p, otherwise abstain.

    NOTE(review): the original encoded predicted classes as 1/2/3 and abstain
    as 0, then compared them directly against the true labels 0/1/2: every
    comparison was off by one (a correct class-0 prediction was billed as a
    $5000 mis-prediction, and abstaining on a true class-0 patient was billed
    as free). Predictions now use the real class labels and a separate
    sentinel for abstention.
    """
    y_thresh = []
    for prob in y_prob:
        if prob[0] >= p:
            y_thresh.append(0)
        elif prob[1] >= p:
            y_thresh.append(1)
        elif prob[2] >= p:
            y_thresh.append(2)
        else:
            y_thresh.append(ABSTAIN)

    cost = 0
    for y_t, y in zip(y_thresh, y_values):
        if y_t == ABSTAIN:
            cost += cost_abstain
        elif y_t != y:
            cost += cost_missPred
    return cost / len(y_values)

def probability_threshold(model, x, y_true):
    """Grid-search the abstention threshold p over [0.5, 1.0] in steps of 0.1
    and return (best_p, min_avg_cost) evaluated on (x, y_true)."""
    min_avg_cost = np.infty
    y_prob = model.predict_proba(x)
    best_prob = 0.5
    p = 0.5
    while p <= 1:
        avg_cost = cost_basedOn_prob(y_prob, y_true, p)
        if avg_cost < min_avg_cost:
            min_avg_cost = avg_cost
            best_prob = p
        p += 0.1
    return best_prob, min_avg_cost

# +
# your code here
Multi_Log_RegCV_predproba_test = Multi_Log_RegCV.predict_proba(X_test_cst)
best_probability, min_avg_cost = probability_threshold(Multi_Log_RegCV, X_test_cst, y_test)
AvgCost_usingLinearLog_test = cost_basedOn_prob(Multi_Log_RegCV_predproba_test, y_test, best_probability)
print("Min avg. Cost per Patient using Linear Log. Reg\t: {}".format(AvgCost_usingLinearLog_test))

# +
Multi_Log_RegCV_poly = LogisticRegressionCV(multi_class="ovr").fit(X_train_poly_cst, y_train)
Multi_Log_RegCV_poly_predproba_test = Multi_Log_RegCV_poly.predict_proba(X_test_poly_cst)
best_probability, min_avg_cost = probability_threshold(Multi_Log_RegCV_poly, X_test_poly_cst, y_test)
AvgCost_usingPolyLog_test = cost_basedOn_prob(Multi_Log_RegCV_poly_predproba_test, y_test, best_probability)
print("Min avg. Cost per Patient using Poly. Log. Reg\t: {}".format(AvgCost_usingPolyLog_test))

# +
# NOTE(review): n_neighbors=144 requires at least 144 training samples —
# confirm the enhanced dataset is large enough, otherwise .predict_proba fails.
kNN = KNeighborsClassifier(n_neighbors=144).fit(X_train_cst, y_train)
kNN_predproba_test = kNN.predict_proba(X_test_cst)
best_probability, min_avg_cost = probability_threshold(kNN, X_test_cst, y_test)
AvgCost_usingkNN_test = cost_basedOn_prob(kNN_predproba_test, y_test, best_probability)
print("Min avg. Cost per Patient using kNN\t\t: {}".format(AvgCost_usingkNN_test))
# -

# - In this method, we compared 3 models: Linear Logistic Regression, Poly Logistic Regression and the kNN model. Based on our calculations all three models returned 666 as the minimum avg cost per patient. (NOTE(review): this figure was produced by the earlier, mislabeled cost computation — re-run the cells above to refresh it.)
#
# - These models were created by selecting the best probability threshold of each model as the abstention cut-off. I do suspect these can result in overfitting the models, since the threshold is tuned on the test set itself. However, from a theoretical standpoint these appear to be the min cost per patient.
GhostNorm_233077_8729764_cs109a_HW4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:southern_ocean] *
#     language: python
#     name: conda-env-southern_ocean-py
# ---

import pandas as pd
import numpy as np
import pymc3 as pm
import arviz as az
import matplotlib.pyplot as pl

# +
np.random.seed(25)

# Synthetic piecewise-linear trend with randomly placed changepoints
# (Prophet-style parameterisation).
n_changepoints = 10
t = np.arange(1000)
s = np.sort(np.random.choice(t, n_changepoints, replace=False))
A = (t[:, None] > s) * 1            # changepoint indicator matrix
delta = np.random.normal(size=n_changepoints)
k = 1
m = 5
growth = (k + A @ delta) * t
gamma = -s * delta                  # offsets that keep the trend continuous
offset = m + A @ gamma
trend = growth + offset

pl.figure(figsize=(16, 3 * 3))
n = 310
i = 0
# NOTE(review): the original reused `t` as the loop variable, clobbering the
# time array with a title string; renamed to `title`.
for title, f in zip(['Linear Trend with Changepoints', 'Growth rate', 'Growth offset'],
                    [trend, growth, offset]):
    i += 1
    pl.subplot(n + i)
    pl.title(title)
    pl.yticks([])
    pl.vlines(s, min(f), max(f), lw=0.8, linestyles='--')
    pl.plot(f)
# -

np.testing.assert_array_equal(np.dot(A, gamma), A@gamma)

df = pd.read_csv('/accounts/ekarakoy/Downloads/example_wp_log_peyton_manning.csv',
                 parse_dates=['ds'])
df.info()

df['y_scaled'] = df['y'] / df.y.max()
df['t'] = (df.ds - df.ds.min()) / (df.ds.max() - df.ds.min())
df.head()

df.plot(x='ds', y='y', figsize=(16, 6), title="Wikipedia pageviews for '<NAME>'");

# +
def trend_model(m, t, n_changepoints=25, changepoints_prior_scale=0.05,
                growth_prior_scale=5, changepoint_range=0.8):
    """ The piecewise linear trend with changepoint implementation in PyMC3.
    :param m: (pm.Model)
    :param t: (np.array) MinMax scaled time.
    :param n_changepoints: (int) The number of changepoints to model.
    :param changepoint_prior_scale: (flt/ None) The scale of the Laplace prior on the delta vector.
                                    If None, a hierarchical prior is set.
    :param growth_prior_scale: (flt) The standard deviation of the prior on the growth.
    :param changepoint_range: (flt) Proportion of history in which trend changepoints will be estimated.
    :return g, A, s: (tt.vector, np.array, tt.vector)
    """
    s = np.linspace(0, changepoint_range * np.max(t), n_changepoints + 1)[1:]

    # * 1 casts the boolean to integers
    A = (t[:, None] > s) * 1

    with m:
        # initial growth
        k = pm.Normal('k', 0, growth_prior_scale)

        if changepoints_prior_scale is None:
            changepoints_prior_scale = pm.Exponential('tau', 1.5)

        # rate of change
        delta = pm.Laplace('delta', 0, changepoints_prior_scale, shape=n_changepoints)
        # offset
        m = pm.Normal('m', 0, 5)
        gamma = -s * delta

        g = (k + pm.math.dot(A, delta)) * t + (m + pm.math.dot(A, gamma))
    return g, A, s

# Generate a PyMC3 Model context
m = pm.Model()

with m:
    y, A, s = trend_model(m, df['t'])
    sigma = pm.HalfCauchy('sigma', 0.5, testval=1)
    pm.Normal('obs', mu=y, sd=sigma, observed=df['y_scaled'])
# -

# +
def sanity_check(m, df):
    """
    :param m: (pm.Model)
    :param df: (pd.DataFrame)
    """
    # Sample from the prior and check of the model is well defined.
    y = pm.sample_prior_predictive(model=m, vars=['obs'])['obs']
    pl.figure(figsize=(16, 6))
    pl.plot(y.mean(0), label='mean prior')
    pl.fill_between(np.arange(y.shape[1]), -y.std(0), y.std(0),
                    alpha=0.25, label='standard deviation')
    pl.plot(df['y_scaled'], label='true value')
    pl.legend()

# And run the sanity check
sanity_check(m, df)
# -

# +
# Find a point estimate of the models parameters
with m:
    aprox = pm.find_MAP()

# Determine g, based on the parameters
def det_trend(k, m, delta, t, s, A):
    return (k + np.dot(A, delta)) * t + (m + np.dot(A, (-s * delta)))

# run function and rescale to original scale
g = det_trend(aprox['k'], aprox['m'], aprox['delta'], df['t'], s, A) * df['y'].max()

pl.figure(figsize=(16, 6))
pl.title('$g(t)$')
pl.plot(g)
pl.scatter(np.arange(df.shape[0]), df.y, s=0.5, color='black')
# -

# Testing Fourier formulation for seasonality

# +
np.random.seed(6)

def fourier_series(t, p=365.25, n=10):
    # 2 pi n / p
    x = 2 * np.pi * np.arange(1, n + 1) / p
    # 2 pi n / p * t
    x = x * t[:, None]
    x = np.concatenate((np.cos(x), np.sin(x)), axis=1)
    return x

def fourier_series2(t, p=365.25, n=10):
    # Amplitude/phase form A_n * cos(2*pi*n*t/p - phi_n); reads the
    # module-level phi_n computed below.
    x = 2 * np.pi * np.arange(1, n + 1) / p
    x = x * t[:, None]
    x = np.cos(x - phi_n)
    return x

# NOTE(review): the original derived phi_n and An from `beta` BEFORE beta
# (and n, t) were defined, which raises a NameError on a fresh top-to-bottom
# run. Define the coefficients first, then the amplitude/phase form.
n = 4
t = np.arange(1000)
beta = np.random.normal(size=2 * n)

phi_n = np.arctan2(beta[1::2], beta[::2])
An = np.sqrt(beta[1::2]**2 + beta[::2]**2)

pl.figure(figsize=(16, 6))
pl.plot(fourier_series(t, 365.25, n) @ beta)
pl.plot(fourier_series2(t, n=n) @ An, color='r')
# -

An.shape

phi = np.arctan2(beta[1::2], beta[::2])
An = np.sqrt(beta[1::2]**2 + beta[::2]**2)
An
Part-2-PyMC3-modeling/ipynb/GAM/Tests.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + from __future__ import division import numpy as np from PIL import Image from scipy import misc from skimage import data from skimage.color import rgb2gray import matplotlib.pyplot as plt # %matplotlib inline import requests from StringIO import StringIO import skimage.measure import math import pickle import csv def unpickle(file): import cPickle with open(file, 'rb') as fo: dict = cPickle.load(fo) return dict def squash_pixels(value): if value<0: return 0 elif value<255: return value else: return 255 def conv_2d_kernel(img_array, kernel, squash_pixel=True): padded_array=np.pad(img_array, (1,1), 'constant') kernel_width = kernel.shape[0] kernel_height = kernel.shape[1] transformed_array = np.zeros(img_array.shape) for i in xrange(padded_array.shape[0] - kernel_width +1): for j in xrange(padded_array.shape[1] - kernel_height + 1): temp_array = padded_array[i:i+kernel_width, j:j+kernel_height] if squash_pixel: transformed_array[i,j] = squash_pixels(np.sum(temp_array*kernel)) else: transformed_array[i,j] = np.sum(temp_array*kernel) return transformed_array def relu_layer(x): #turn all negative values in a matrix into zeros z = np.zeros_like(x) return np.where(x>z,x,z) def max_pooling(img_array, pool_size): img_width = img_array.shape[0] img_height = img_array.shape[1] res_array = skimage.measure.block_reduce(img_array, (pool_size,pool_size), np.max) return res_array #print img def sigmoid(x): return 1/(1+np.exp(-x)) def sigmoid_derivative(x): return x*(1-x) input_layer_neurons = 16 output_layer_neurons = 1 hidden_layer_neurons = 8 w1 = np.random.uniform(size=(16, 8)) b1 = np.random.uniform(size=(1, 8)) w2 = np.random.uniform(size=(8,1)) b2 = np.random.uniform(size=(1, 1)) def fcl_for_training(dataset, epoch,y,lr): global w1 global b1 global w2 
global b2 for i in range(epoch): #FEED FORWARD hidden_layer_input = np.dot(dataset,w1) + b1 hidden_layer_activations = sigmoid(hidden_layer_input) output_layer_input = np.dot(hidden_layer_activations, w2) + b2 output = sigmoid(output_layer_input) #BACKPROPAGATION E = y-output slope_output_layer = sigmoid_derivative(output) slope_hidden_layer = sigmoid_derivative(hidden_layer_activations) d_output = E * slope_output_layer error_at_hidden_layer = d_output.dot(w2.T) d_hidden_layer = error_at_hidden_layer*slope_hidden_layer w2 += hidden_layer_activations.T.dot(d_output)*lr b2 += np.sum(d_output, axis=0, keepdims=True) * lr w1 += dataset.T.dot(d_hidden_layer) * lr b1 += np.sum(d_hidden_layer, axis=0, keepdims=True) * lr def fcl_for_testing(dataset, epoch): global w1 global b1 global w2 global b2 for i in range(epoch): hidden_layer_input = np.dot(dataset,w1) + b1 hidden_layer_activations = sigmoid(hidden_layer_input) output_layer_input = np.dot(hidden_layer_activations, w2) + b2 output = sigmoid(output_layer_input) return output #img = unpickle('cifar-10-batches-py/data_batch_1')['data'] #labels = unpickle("cifar-10-batches-py/data_batch_1")['labels'] kernel1 = np.array([[0,-1,0], [-1,5,-1], [0,-1,0]]) X = [] for i in range(1,50000): img = np.array(Image.open("train/%d.png" %i)) img = img[:,:,0] X.append(img) epoch = 5000 lr = 0.1 #label_names = pickle.load(open("cifar-10-batches-py/batches.meta", 'rb'))['label_names'] #print label_names file_to_read_from = 'trainLabels.csv' #initializing as many lists as the columns you want (not all) col1, col2, col3 = [], [], [] with open(file_to_read_from, 'r') as file_in: reader = csv.reader(file_in, delimiter=',') #might as well be ',', '\t' etc for row in reader: col1.append(row[0]) col2.append(row[1]) map_dict = { "frog": np.array([[1],[0],[0], [0], [0],[0],[0], [0], [0],[0],[0], [0], [0],[0], [0], [0]]) , "airplane": np.array([[0],[1],[0], [0], [0],[0],[0], [0], [0],[0],[0], [0], [0],[0], [0], [0]]), "automobile": 
np.array([[0],[0],[1], [0], [0],[0],[0], [0], [0],[0],[0], [0], [0],[0], [0], [0]]), "truck": np.array([[0],[0],[0], [1], [0],[0],[0], [0], [0],[0],[0], [0], [0],[0], [0], [0]]), "deer": np.array([[0],[0],[0], [0], [1],[0],[0], [0], [0],[0],[0], [0], [0],[0], [0], [0]]), "bird": np.array([[0],[0],[0], [0], [0],[1],[0], [0], [0],[0],[0], [0], [0],[0], [0], [0]]), "cat": np.array([[0],[0],[0], [0], [0],[0],[1], [0], [0],[0],[0], [0], [0],[0], [0], [0]]), "dog": np.array([[0],[0],[0], [0], [0],[0],[0], [1], [0],[0],[0], [0], [0],[0], [0], [0]]), "horse": np.array([[0],[0],[0], [0], [0],[0],[0], [0], [1],[0],[0], [0], [0],[0], [0], [0]]), "ship": np.array([[0],[0],[0], [0], [0],[0],[0], [0], [0],[1],[0], [0], [0],[0], [0], [0]]) } i=0 for img_arr in X: im1 = conv_2d_kernel(img_arr, kernel1) im2 = relu_layer(im1) im3 = max_pooling(im2, 2) fin_op = fcl_for_training(im3, epoch, map_dict[col2[i]], lr) i=i+1 img_arra = np.array(Image.open("deer.png")) img_arra = img_array[:,:,0] ime1 = conv_2d_kernel(img_arra, kernel1) ime2 = relu_layer(ime1) ime3 = max_pooling(im2, 2) print fcl_for_testing(ime3, epoch) #y1=conv_2d_kernel(img, kernel1) #y1=relu_layer(y1) #y=max_pooling(y1,2) #fcl(X, epoch,ao,lr) ''' f,ax_array = plt.subplots(3) f.set_figheight(10) f.set_figwidth(15) ax_array[0].imshow(img, cmap = plt.get_cmap('gray')) ax_array[1].imshow(y1, cmap = plt.get_cmap('gray')) ax_array[2].imshow(y, cmap = plt.get_cmap('gray')) ''' # -
CIFAR-10 using CNN/final.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:TF-1_1]
#     language: python
#     name: conda-env-TF-1_1-py
# ---

# # TensorFlow-Keras Layers
# *by <NAME>*

# <img src="../../images/keras-tensorflow-logo.jpg" width="400">
#
# A Deep Learning model in TensorFlow is represented as layers composed into each other to form a trainable complex model. Each layer represents a high-level operation in the computational graph. These can be visualized as lego blocks that can be combined together and repeated across the architecture to form the neural network.
#
# Below is an example of Google's Inception model that can produce high performance image classification.
#
# <img src="../../images/googlenet.png" width="1500">
#
# Below are examples of common layers provided by the TF-Keras `layers` module:
#
# ** Convolutional Layers**
# ```
# tf_keras.layers.Conv1D
# tf_keras.layers.Conv2D
# tf_keras.layers.Conv3D
# ```
#
# ** Max-Pooling Layers**
# ```
# tf_keras.layers.MaxPool1D
# tf_keras.layers.MaxPool2D
# tf_keras.layers.MaxPool3D
# ```
#
# ** Average Pooling Layers**
# ```
# tf_keras.layers.AvgPool1D
# tf_keras.layers.AvgPool2D
# tf_keras.layers.AvgPool3D
# ```
#
# ** Fully-Connected layer**
# ```
# tf_keras.layers.Dense
# ```
#
# ** Other Layers**
# ```
# tf_keras.layers.Flatten
# tf_keras.layers.Dropout
# tf_keras.layers.BatchNormalization
# ```
#
# ** Activation Layers**
# ```
# tf_keras.activations.relu
# tf_keras.activations.sigmoid
# tf_keras.activations.softmax
# tf_keras.activations.tanh
# tf_keras.activations.elu
# tf_keras.activations.hard_sigmoid
# tf_keras.activations.softplus
# tf_keras.activations.softsign
# tf_keras.activations.linear
# ```

# +
import tensorflow as tf
tf_keras = tf.contrib.keras
# -

# ## Convolutional Layer
#
# 2D convolution layer - This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs.
#
# ```
# tf_keras.layers.Conv2D
#
# Arguments:
#   filters: Integer, the dimensionality of the output space
#     (i.e. the number output of filters in the convolution).
#   kernel_size: An integer or tuple/list of 2 integers, specifying the
#     width and height of the 2D convolution window.
#     Can be a single integer to specify the same value for
#     all spatial dimensions.
#   strides: An integer or tuple/list of 2 integers,
#     specifying the strides of the convolution along the width and height.
#     Can be a single integer to specify the same value for
#     all spatial dimensions.
#     Specifying any stride value != 1 is incompatible with specifying
#     any `dilation_rate` value != 1.
#   padding: one of `"valid"` or `"same"`.
#   activation: Activation function to use.
#     If you don't specify anything, no activation is applied
#     (ie. "linear" activation: `a(x) = x`).
#   use_bias: Boolean, whether the layer uses a bias vector.
# ```

# +
# output filter size
filters = 10
# feature map size
kernel_size = (3, 3)

# conv2D - spatial convolution over images
tf_keras.layers.Conv2D(filters, kernel_size, strides=(1, 1), padding='valid',
                       activation=tf.nn.relu, use_bias=True,
                       kernel_initializer='glorot_uniform', bias_initializer='zeros')

# conv1D - temporal convolution
# NOTE(review): Conv1D takes one spatial dimension; the original passed the
# 2-D tuples (3, 3)/(1, 1), which raises a ValueError when the layer is built.
tf_keras.layers.Conv1D(filters, 3, strides=1, padding='valid',
                       activation=tf.nn.relu, use_bias=True,
                       kernel_initializer='glorot_uniform', bias_initializer='zeros')

# conv3D - spatial convolution over volumes (window and strides are 3-D)
tf_keras.layers.Conv3D(filters, (3, 3, 3), strides=(1, 1, 1), padding='valid',
                       activation=tf.nn.relu, use_bias=True,
                       kernel_initializer='glorot_uniform', bias_initializer='zeros')
# -

# ## Max-Pooling Layer
#
# This layer creates a max-pooling operation that reduces the number of parameters by downsampling the input and can also fight over-fitting.
# # ``` # tf_keras.layers.MaxPool2D # # Arguments: # pool_size: integer or tuple of 2 integers, # factors by which to downscale (vertical, horizontal). # (2, 2) will halve the input in both spatial dimension. # If only one integer is specified, the same window length # will be used for both dimensions. # strides: Integer, tuple of 2 integers, or None. # Strides values. # If None, it will default to `pool_size`. # padding: One of `"valid"` or `"same"` (case-insensitive). # data_format: A string, # one of `channels_last` (default) or `channels_first`. # The ordering of the dimensions in the inputs. # `channels_last` corresponds to inputs with shape # `(batch, width, height, channels)` while `channels_first` # corresponds to inputs with shape # `(batch, channels, width, height)`. # ``` # + # max-pooling 2D - spatial data tf_keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2,2), padding='valid', data_format="channels_last") # max-pooling 1D - temporal data tf_keras.layers.MaxPool1D(pool_size=(2, 2), strides=(2,2), padding='valid', data_format="channels_last") # max-pooling 3D - spatial or spatio-temporal tf_keras.layers.MaxPool3D(pool_size=(2, 2), strides=(2,2), padding='valid', data_format="channels_last") # - # ## Average Pooling Layer tf_keras.layers.AvgPool1D tf_keras.layers.AvgPool2D tf_keras.layers.AvgPool3D # ## Dropout # Dropout consists in randomly setting # a fraction `p` of input units to 0 at each update during training time, # which helps prevent overfitting. # # ``` # tf_keras.layers.Dropout # # Arguments: # rate: float between 0 and 1. Fraction of the input units to drop. # noise_shape: 1D integer tensor representing the shape of the # binary dropout mask that will be multiplied with the input. # For instance, if your inputs have shape # `(batch_size, timesteps, features)` and # you want the dropout mask to be the same for all timesteps, # you can use `noise_shape=(batch_size, 1, features)`. 
# ```

# dropout layer: drops half of the input units during training
tf_keras.layers.Dropout(0.5)

# ## Batch normalization layer
#
# Normalize the activations of the previous layer at each batch,
# i.e. applies a transformation that maintains the mean activation
# close to 0 and the activation standard deviation close to 1.
#
# ```
# tf_keras.layers.BatchNormalization
#
# Arguments:
#     axis: Integer, the axis that should be normalized
#         (typically the features axis).
#         For instance, after a `Conv2D` layer with
#         `data_format="channels_first"`,
#         set `axis=1` in `BatchNormalization`.
#     momentum: Momentum for the moving average.
#     epsilon: Small float added to variance to avoid dividing by zero.
#     center: If True, add offset of `beta` to normalized tensor.
#         If False, `beta` is ignored.
#     scale: If True, multiply by `gamma`.
#         If False, `gamma` is not used.
#         When the next layer is linear (also e.g. `nn.relu`),
#         this can be disabled since the scaling
#         will be done by the next layer.
#     beta_initializer: Initializer for the beta weight.
#     gamma_initializer: Initializer for the gamma weight.
#     moving_mean_initializer: Initializer for the moving mean.
#     moving_variance_initializer: Initializer for the moving variance.
# ```

# batch-normalization layer, spelled out with the library's default
# settings (normalize over the last axis, standard initializers)
tf_keras.layers.BatchNormalization(
    axis=-1,
    momentum=0.99,
    epsilon=0.001,
    center=True,
    scale=True,
    beta_initializer='zeros',
    gamma_initializer='ones',
    moving_mean_initializer='zeros',
    moving_variance_initializer='ones',
)

# ## Fully Connected (Dense) Layer
#
# Fully-connected layer computes:
#
# `output = activation(dot(input, kernel) + bias)`
# where:
# - `activation` is the element-wise activation function
# - `kernel` is a weights matrix created by the layer
# - `bias` is a bias vector created by the layer
#
# if the input to the layer has a rank greater than 2, then
# it is flattened prior to the initial dot product with `kernel`.
# +
# number of output neurons of the fully connected layer; the original
# cell referenced an undefined name `units`, raising a NameError
units = 64

# fully connected layer
tf_keras.layers.Dense(units, activation=None, use_bias=True,
                      kernel_initializer='glorot_uniform',
                      bias_initializer='zeros')

# flatten to vector
tf_keras.layers.Flatten()
# -

# ## Activation Layer

# This is a layer of neurons that applies the activation function.
# It increases the nonlinear properties of the decision function.

# sample tensor to feed through the activation functions; the original
# cell referenced an undefined name `inputs`, raising a NameError
inputs = tf.constant([[-2.0, -1.0, 0.0, 1.0, 2.0]])

tf_keras.activations.relu(inputs)
tf_keras.activations.sigmoid(inputs)
tf_keras.activations.softmax(inputs)
tf_keras.activations.tanh(inputs)
tf_keras.activations.elu(inputs)
tf_keras.activations.hard_sigmoid(inputs)
tf_keras.activations.softplus(inputs)
tf_keras.activations.softsign(inputs)
tf_keras.activations.linear(inputs)

# ## Next Lesson
# ### CNN layers in TF-Keras
# - You will learn about the different layers in TF-Keras
#
# <img src="../../images/divider.png" width="100">
Jupyter Notebook files/Section 2/Layers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Wal Street Letters Visualization of Data Clusters # In this notebook we visualize the data clusters for the Wall Street # + import numpy as np import h5py import matplotlib.pyplot as plt # %matplotlib inline # Now nexa modules| import sys sys.path.append("../") from visualization.data_clustering import visualize_data_cluster_text_to_image # - # #### Load the files # + # First we load the file file_location = '../results_database/text_wall_street_big.hdf5' run_name = '/low-resolution' f = h5py.File(file_location, 'r') # Now we need to get the letters and align them text_directory = '../data/wall_street_letters.npy' letters_sequence = np.load(text_directory) Nletters = len(letters_sequence) symbols = set(letters_sequence) # Load the particular example Nspatial_clusters = 3 Ntime_clusters = 15 Nembedding = 3 parameters_string = '/' + str(Nspatial_clusters) parameters_string += '-' + str(Ntime_clusters) parameters_string += '-' + str(Nembedding) nexa = f[run_name +parameters_string] # - # #### First visualize the cluster cluster = 2 data_centers = np.arange(1, 15, 1) for data_center in data_centers: fig = visualize_data_cluster_text_to_image(nexa, f, run_name, cluster, data_center)
presentations/2016-02-04(Wall-Street-Letters-Data-Clusters).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# ***<NAME>, group 271***

# + [markdown] pycharm={"name": "#%% md\n"}
# # Task:
# $y''+ 2xy'-y = 2(x^2+1)\cos(x)$
#
# $0 \le x \le 0.5$
#
# $y(0) = 0$, $y(0.5) = 0.5\sin(0.5)$

# + [markdown] pycharm={"name": "#%% md\n"}
# # Solution code (Python)

# + pycharm={"name": "#%%\n"}
from math import sin, cos

import matplotlib.pyplot as plt


# + pycharm={"name": "#%%\n"}
def bvp(N, a, b, y0, yN, p, q, f):
    """Solve the linear BVP y'' + p(x) y' + q(x) y = f(x) on [a, b].

    Second-order central differences on a uniform grid of N
    subintervals, solved with the sweep (Thomas) algorithm, for
    Dirichlet boundary values y(a) = y0 and y(b) = yN.

    Returns a tuple (x, y): grid nodes and the approximate solution.
    """
    h = (b - a) / N
    x = [a + k * h for k in range(0, N + 1)]
    L = [-1, 0]   # sweep coefficients; L[0] is a placeholder, never used
    K = [-1, y0]  # K[0] is a placeholder, never used
    # forward sweep: L[k] and K[k] evaluation
    for j in range(2, N + 1):
        ap = 1 - p(x[j - 1]) * h / 2  # coefficient of y[j-2]
        bp = h * h * q(x[j - 1]) - 2  # coefficient of y[j-1]
        cp = 1 + p(x[j - 1]) * h / 2  # coefficient of y[j]
        fp = h * h * f(x[j - 1])      # scaled right-hand side
        lc = - cp / (ap * L[j - 1] + bp)
        kc = (-ap * K[j - 1] + fp) / (ap * L[j - 1] + bp)
        L.append(lc)
        K.append(kc)
    # backward substitution: y[k] evaluation
    y = [yN]
    for j in range(N - 1, 0, -1):
        y.insert(0, L[j + 1] * y[0] + K[j + 1])
    y.insert(0, y0)
    return (x, y)


# + pycharm={"name": "#%%\n"}
def graph_plot(x, y):
    """Plot the solution passed as arguments.

    Fixed: the original body plotted the globals x1, y1 instead of the
    parameters, so the second call re-drew the first solution.
    """
    plt.plot(x, y)
    plt.xlabel("x")
    plt.ylabel("y")
    plt.show()


# + pycharm={"name": "#%%\n"}
# problem data: y'' + 2xy' - y = 2(x^2+1)cos(x), y(0)=0, y(0.5)=0.5 sin(0.5)
N1 = 10
N2 = 20
a = 0
b = 0.5
y0 = 0
yN = 0.5 * sin(0.5)
p = lambda x: 2 * x
q = lambda _: -1
f = lambda x: 2 * (x * x + 1) * cos(x)

# + pycharm={"name": "#%%\n"}
# coarse grid (N1 subintervals); leftover debug prints with garbage
# labels that dumped the same arrays three times were removed
x1, y1 = bvp(N1, a, b, y0, yN, p, q, f)
print("x1:", x1)
print("y1:", y1)
graph_plot(x1, y1)

# + pycharm={"name": "#%%\n"}
# fine grid (N2 = 2*N1 subintervals)
x2, y2 = bvp(N2, a, b, y0, yN, p, q, f)
graph_plot(x2, y2)

# + [markdown] pycharm={"name": "#%% md\n"}
# # Error estimate

# + pycharm={"name": "#%%\n"}
# max |y1 - y2| over the nodes shared by the coarse and fine grids;
# renamed from `f`, which shadowed the right-hand-side lambda above
err = 0
for i in range(0, len(y2), 2):
    if abs(y1[i // 2] - y2[i]) > err:
        err = abs(y1[i // 2] - y2[i])
print(err)
notebooks/Boundary Value Problem8.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# NOTE(review): several imports below are duplicated or unused
# (train_test_split twice, StratifiedKFold, time, neighbors, ListedColormap,
# BernoulliNB, MultinomialNB) - kept as-is.
import pandas as pd
import numpy as np
import itertools
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import time
from sklearn.linear_model import LogisticRegression
from matplotlib.colors import ListedColormap
from sklearn import neighbors
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB


def plot_confusion_matrix(cnf_matrix, classesNames, normalize=False,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`
    (each row is divided by its total, i.e. per-true-class rates).
    Draws on the current matplotlib figure as a side effect.
    """
    np.set_printoptions(precision=2)
    if normalize:
        # row sums, kept as a column vector so broadcasting divides each row
        soma = cnf_matrix.sum(axis=1)[:, np.newaxis]
        cm = cnf_matrix.astype('float') / soma
        title = "Normalized confusion matrix"
    else:
        cm = cnf_matrix
        title = 'Confusion matrix, without normalization'

    print(cm)
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classesNames))
    plt.xticks(tick_marks, classesNames, rotation=45)
    plt.yticks(tick_marks, classesNames)

    # annotate every cell; white text on dark (above-threshold) cells
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


# load pre-cleaned data; assumes column 0 is the 'class' label and the
# remaining columns are features - TODO confirm against the cleaning step
train = pd.read_csv("./clean_data/clean_train.csv")
test = pd.read_csv("./clean_data/clean_test.csv")

test_class = test.iloc[:,:1]
test = test.iloc[:,1:]

X = train.iloc[:,1:]
y = train['class']
labels = pd.unique(y)

print(train.columns.values)
train.sample(5)

print(test.columns.values)
test.sample(5)

# ## Logistic Regression

# training-set accuracy only (fit and score on the same data)
logreg = LogisticRegression()
logreg.fit(X,y)
logreg.predict(X)
y_pred=logreg.predict(X)
len(y_pred)
print(metrics.accuracy_score(y,y_pred))

# ## KNN (K=2)
# NOTE(review): the heading originally said K=5 but the code uses
# n_neighbors=2; heading corrected to match the code.

knn = KNeighborsClassifier(n_neighbors=2)
model = knn.fit(X, y)
y_pred = model.predict(X)
print(metrics.accuracy_score(y,y_pred))

# ## Evaluation procedure 2 - Train/test split

# here "train/test" means: fit on the training CSV, score on the test CSV
trX, trY, tsX, tsY = X, y, test, test_class

print(X.shape)
print(y.shape)

print(X.shape)
print(test.shape)

# +
#knn = KNeighborsClassifier(n_neighbors=3)
#model = knn.fit(trX, trY)
#predY = model.predict(tsX)

#cnf_matrix = confusion_matrix(tsY, y_pred, labels)
#plot_confusion_matrix(cnf_matrix, labels)
# -

logreg=LogisticRegression()
logreg.fit(trX, trY)
y_pred = logreg.predict(tsX)
print(metrics.accuracy_score(tsY, y_pred))

knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(trX, trY)
y_pred = knn.predict(tsX)
print(metrics.accuracy_score(tsY, y_pred))

knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(trX, trY)
y_pred = knn.predict(tsX)
print(metrics.accuracy_score(tsY, y_pred))

# +
# try K=1 through K=25 and record testing accuracy
k_range = range(1, 26)

# We can create Python dictionary using [] or dict()
scores = []

# We use a loop through the range 1 to 26
# We append the scores in the list (one accuracy per K)
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(trX, trY)
    y_pred = knn.predict(tsX)
    scores.append(metrics.accuracy_score(tsY, y_pred))
print(scores)
# -

# accuracy as a function of K for the loop above
plt.plot(k_range, scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Testing Accuracy')

# ## Naive Bayes Classifier

data = pd.read_csv("./clean_data/clean_train.csv")
data1 = pd.read_csv("./clean_data/clean_test.csv")

# +
# fit GaussianNB on the full training CSV, evaluate on the test CSV
X_train, X_test = data, data1

used_features = X_train.iloc[:,1:]
abc= X_test.iloc[:,1:]

gnb = GaussianNB()

print( used_features.values)
gnb.fit(
    used_features.values,
    X_train["class"]
)
y_pred = gnb.predict(abc)

# Print results
print("Number of mislabeled points out of a total {} points : {}, performance {:05.2f}%"
      .format(
          X_test.shape[0],
          (X_test["class"] != y_pred).sum(),
          100*(1-(X_test["class"] != y_pred).sum()/X_test.shape[0])
))

# +
# Compare seven classifiers on an 80/20 split of the training CSV;
# each accuracy is appended to `scores` in declaration order.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn import metrics

data = pd.read_csv("./clean_data/clean_train.csv")
X = data.iloc[:, 1:]
Y = data['class']
scores = []
#green.head()
print("Train set dimensions : {}".format(data.shape))
data.groupby('class').size()
#Visualization of data
#green.groupby('consensus').hist(figsize=(14, 14))
data.isnull().sum()
data.isna().sum()
dataframe = pd.DataFrame(Y)
#Encoding categorical data values
from sklearn.preprocessing import LabelEncoder
labelencoder_Y = LabelEncoder()
Y = labelencoder_Y.fit_transform(Y)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 44)
#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#Fitting the Logistic Regression Algorithm to the Training Set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#95.8 Accuracy
#Fitting K-NN Algorithm
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 1, metric = 'minkowski', p = 2)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#95.1 Accuracy
#Fitting SVM
from sklearn.svm import SVC
classifier = SVC(kernel = 'linear', random_state = 0)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#97.2 Accuracy
#Fitting K-SVM
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state = 0)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#96.5 Accuracy
#Fitting Naive_Bayes
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#91.6 Accuracy
#Fitting Decision Tree Algorithm
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#95.8 Accuracy
#Fitting Random Forest Classification Algorithm
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#98.6 Accuracy
#predicting the Test set results
Y_pred = classifier.predict(X_test)
#Creating the confusion Matrix
from sklearn.metrics import confusion_matrix
#cm = confusion_matrix(Y_test, Y_pred)
#c = print(cm[0, 0] + cm[1, 1])
print(scores)

# +
# bar chart of the seven accuracies collected above
bars = ('Logistic Regression', 'K-NN Algorithm', 'SVM', 'K-SVM', 'Naive_Bayes','Decision Tree','Random Forest')
y_pos = np.arange(len(bars))

# Create bars
plt.bar(y_pos, scores)

# Create names on the x-axis
#plt.xticks(y_pos, bars, color='black')
plt.yticks(color='black')

# Rotation of the bars names
plt.xticks(y_pos, bars, rotation=90)

# Custom the subplot layout
plt.subplots_adjust(bottom=0.4, top=0.99)
plt.style.use('bmh')

# Show graphic
plt.show()

# +
# oversample the (already scaled) training split with SMOTE
# NOTE(review): kind=/fit_sample are the old imblearn (<0.4) API
from imblearn.over_sampling import SMOTE
smote = SMOTE(kind = "regular")
X_sm, y_sm = smote.fit_sample(X_train, Y_train)

# +
# Repeat the seven-classifier comparison on the SMOTE-balanced data.
# NOTE(review): this re-splits and re-scales data that was already
# scaled/encoded above - kept as in the original.
X = X_sm
Y = y_sm
scores = []
#green.head()
print("Train set dimensions : {}".format(data.shape))
data.groupby('class').size()
#Visualization of data
#green.groupby('consensus').hist(figsize=(14, 14))
data.isnull().sum()
data.isna().sum()
dataframe = pd.DataFrame(Y)
#Encoding categorical data values
from sklearn.preprocessing import LabelEncoder
labelencoder_Y = LabelEncoder()
Y = labelencoder_Y.fit_transform(Y)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 44)
#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#Fitting the Logistic Regression Algorithm to the Training Set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#95.8 Accuracy
#Fitting K-NN Algorithm
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 1, metric = 'minkowski', p = 2)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#95.1 Accuracy
#Fitting SVM
from sklearn.svm import SVC
classifier = SVC(kernel = 'linear', random_state = 0)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#97.2 Accuracy
#Fitting K-SVM
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state = 0)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#96.5 Accuracy
#Fitting Naive_Bayes
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#91.6 Accuracy
#Fitting Decision Tree Algorithm
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#95.8 Accuracy
#Fitting Random Forest Classification Algorithm
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores.append(metrics.accuracy_score(Y_test, y_pred))
#98.6 Accuracy
#predicting the Test set results
Y_pred = classifier.predict(X_test)
#Creating the confusion Matrix
from sklearn.metrics import confusion_matrix
#cm = confusion_matrix(Y_test, Y_pred)
#c = print(cm[0, 0] + cm[1, 1])
print(scores)

# +
# bar chart of the seven accuracies on the SMOTE-balanced data
bars = ('Logistic Regression', 'K-NN Algorithm', 'SVM', 'K-SVM', 'Naive_Bayes','Decision Tree','Random Forest')
y_pos = np.arange(len(bars))

# Create bars
plt.bar(y_pos, scores)

# Create names on the x-axis
#plt.xticks(y_pos, bars, color='black')
plt.yticks(color='black')

# Rotation of the bars names
plt.xticks(y_pos, bars, rotation=90)

# Custom the subplot layout
plt.subplots_adjust(bottom=0.4, top=0.99)
plt.style.use('bmh')

# Show graphic
plt.show()
Classification1st_Copy2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Copyright Netherlands eScience Center <br>
# ** Function     : Analyze the trend of net surface flux from coordinated experiments**** <br>
# ** Author       : <NAME> ** <br>
# ** First Built  : 2019.08.09 ** <br>
# ** Last Update  : 2019.08.29 ** <br>
# Description     : This notebook aims to analyze the trend of net surface flux from multiple coordinated experiments in Blue Action WP3. It contributes to the Deliverable 3.1. <br>
# Return Values   : netCDF4 <br>
# Caveat          : The fields used here are post-processed monthly mean fields. It includes AMET from:
# * EC Earth (DMI)
# * CMCC-CM (CMCC)
# * WACCM6 (WHOI)
# * NorESM (NERSC)
# * HadGEM (UoS)
# * EC Earth (NLeSC)

# %matplotlib inline

import numpy as np
import sys
#sys.path.append("/home/ESLT0068/NLeSC/Computation_Modeling/Bjerknes/Scripts/META")
sys.path.append("../")
import analyzer
import visualizer
import scipy as sp
import time as tttt
from netCDF4 import Dataset,num2date
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm

# physical constants (only a subset is used in this notebook)
constant = {'g' : 9.80616,      # gravitational acceleration [m / s2]
            'R' : 6371009,      # radius of the earth [m]
            'cp': 1004.64,      # heat capacity of air [J/(Kg*K)]
            'Lv': 2264670,      # Latent heat of vaporization [J/Kg]
            'R_dry' : 286.9,    # gas constant of dry air [J/(kg*K)]
            'R_vap' : 461.5,    # gas constant for water vapour [J/(kg*K)]
            }

# |Model and Organization | Exp 1 | Exp 2 | Exp 3 | Exp 4 | time |
# |-----------------------|-------|-------|-------|-------|-----------|
# | EC Earth (NLeSC)      | 20    | 20    | 20    | 20    | 1979-2015 (exp1&2) 1979-2013 (exp3&4) |
# | EC Earth (DMI)        | 20    | 20    | 20    | 20    | 1979-2015 (exp1&2) 1979-2013 (exp3&4) |
# | CMCC-CM (CMCC)        | 10    | 10    | 0     | 0     | 1979-2014 |
# | WACCM6 (WHOI)         | 30    | 30    | 0     | 0     | 1979-2014 |
# | NorESM (NERSC)        | 20    | 20    | 20    | 20    | 1979-2014 (exp1&2) 1979-2013 (exp3&4) |
# | HadGEM (UoS)          | 10    | 10    | 5     | 5     | 1979-2014 (exp1&2) 1979-2013 (exp3&4) |
# | IAP-AGCM (IAP-NZC)    | 15    | 15    | 15    | 15    | 1979-2015 |
# | IPSL-CM (CNRS)        | 30    | 30    | 20    | 20    | 1979-2014 |
# | MPIESM (MPI)          | 10    | 10    | 5     | 5     | 1979-2015 |

################################   Input zone  ######################################
# specify starting and ending time
start_year = 1979
end_year = 2015
# specify data path
datapath = '/home/ESLT0068/WorkFlow/Core_Database_BlueAction_WP3/ECEarth_NLeSC'
# specify output path for figures
output_path = '/home/ESLT0068/NLeSC/Computation_Modeling/BlueAction/WP3/JointAnalysis_AMET/AMIP/spatial'
# ensemble number
ensemble_1 = 10 #20
ensemble_2 = 9 #20
ensemble_34 = 1
# experiment number
exp = 4
# ensemble-member run IDs per experiment (keys are 1-based exp numbers as strings)
name_list_exp = {}
name_list_exp['1'] = ['ITNV','O2Q7','W5S0','WCA4','Z5Z7','ph02','ph21','ph22','ph23','ph24']
name_list_exp['2'] = ['5DEH','ITNV','JI9Q','KB5R','O2Q7','W5S0','WCA4','YC1V','Z5Z7']
name_list_exp['3'] = ['JI9Q']
name_list_exp['4'] = ['KB5R']
# example file (used only to read the lat/lon grid)
datapath_example = os.path.join(datapath, 'exp1', 'slhf', 'ECE_ITNV_SLHF_monthly_1979_v2.nc')
####################################################################################

def var_key_retrieve(datapath, y, exp_num, ensemble_num):
    """Open the four surface-flux netCDF files for one member/year.

    y, exp_num and ensemble_num are 0-based offsets (year y maps to
    calendar year y+1979).  Returns the four open Dataset handles
    (slhf, sshf, ssr, str); the caller is responsible for closing them.
    """
    # get the path to each datasets
    print ("Start retrieving datasets of experiment {} ensemble number {} year {}".format(exp_num+1, ensemble_num, y+1979))
    datapath_slhf = os.path.join(datapath, 'exp{}'.format(exp_num+1), 'slhf',
                                 'ECE_{}_SLHF_monthly_{}_v2.nc'.format(name_list_exp['{}'.format(exp_num+1)][ensemble_num],y+1979))
    datapath_sshf = os.path.join(datapath, 'exp{}'.format(exp_num+1),'sshf',
                                 'ECE_{}_SSHF_monthly_{}_v2.nc'.format(name_list_exp['{}'.format(exp_num+1)][ensemble_num],y+1979))
    datapath_ssr = os.path.join(datapath, 'exp{}'.format(exp_num+1),'ssr',
                                'ECE_{}_SSR_monthly_{}_v2.nc'.format(name_list_exp['{}'.format(exp_num+1)][ensemble_num],y+1979))
    datapath_str = os.path.join(datapath,
                                'exp{}'.format(exp_num+1),'str',
                                'ECE_{}_STR_monthly_{}_v2.nc'.format(name_list_exp['{}'.format(exp_num+1)][ensemble_num],y+1979))
    # get the variable keys
    key_slhf = Dataset(datapath_slhf)
    key_sshf = Dataset(datapath_sshf)
    key_ssr = Dataset(datapath_ssr)
    key_str = Dataset(datapath_str)
    print ("Retrieving datasets successfully and return the variable key!")
    return key_slhf, key_sshf, key_ssr, key_str

def sflux(key_slhf, key_sshf, key_ssr, key_str, lat, lon):
    """Compute net surface flux and turbulent flux from the open files.

    Only the first 85 latitude rows are kept (the Arctic-side subset
    matching `lat`).  Returns (net_flux_surf, turb_flux_surf), each
    reshaped to (year, 12, lat, lon).
    """
    # get all the variables
    # make sure that all the input variables here are positive downward!!!
    var_slhf = key_slhf.variables['SLHF'][:,:85,:] # surface latent heat flux W/m2
    var_sshf = key_sshf.variables['SSHF'][:,:85,:] # surface sensible heat flux W/m2
    var_ssr = key_ssr.variables['SSR'][:,:85,:] # surface solar radiation W/m2
    var_str = key_str.variables['STR'][:,:85,:] # surface thermal radiation W/m2
    #size of the grid box
    #dx = 2 * np.pi * constant['R'] * np.cos(2 * np.pi * lat /
    #                                        360) / len(lon)
    #dy = np.pi * constant['R'] / len(lat)
    # calculate total net energy flux at TOA/surface
    turb_flux_surf = var_slhf + var_sshf
    turb_flux_surf = turb_flux_surf.reshape(-1,12,len(lat),len(lon))
    net_flux_surf = var_slhf + var_sshf + var_ssr + var_str
    net_flux_surf = net_flux_surf.reshape(-1,12,len(lat),len(lon))

    return net_flux_surf, turb_flux_surf

if __name__=="__main__":
    ####################################################################
    ######  Create time namelist matrix for variable extraction  #######
    ####################################################################
    lat_NLeSC_60N = 84
    # date and time arrangement
    # namelist of month and days for file manipulation
    namelist_month = ['01','02','03','04','05','06','07','08','09','10','11','12']
    # NOTE(review): ensemble_list is defined but not used below
    ensemble_list = ['01','02','03','04','05','06','07','08','09','10',
                     '11','12','13','14','15','16','17','18','19','20',
                     '21','22','23','24','25','26','27','28','29','30',]
    # index of months
    period_1979_2015 = np.arange(start_year,end_year+1,1)
    index_month = np.arange(1,13,1)
    ####################################################################
    ######       Extract invariant and calculate constants      #######
    ####################################################################
    # get basic dimensions from sample file
    key_example = Dataset(datapath_example)
    lat = key_example.variables['lat'][:85]
    lon = key_example.variables['lon'][:]
    # get invariant from benchmark file
    Dim_year_1979_2015 = len(period_1979_2015)
    Dim_month = len(index_month)
    Dim_latitude = len(lat)
    Dim_longitude = len(lon)
    #############################################
    #####   Create space for storing data   #####
    #############################################
    # loop over experiments; each experiment has its own ensemble size,
    # so the pools are re-allocated per experiment
    for i in range(exp):
        if i == 0:
            ensemble = ensemble_1
            pool_sflux_1979_2015 = np.zeros((ensemble,Dim_year_1979_2015,Dim_month,Dim_latitude,Dim_longitude),dtype = float)
            pool_turb_1979_2015 = np.zeros((ensemble,Dim_year_1979_2015,Dim_month,Dim_latitude,Dim_longitude),dtype = float)
        elif i == 1:
            ensemble = ensemble_2
            pool_sflux_1979_2015 = np.zeros((ensemble,Dim_year_1979_2015,Dim_month,Dim_latitude,Dim_longitude),dtype = float)
            pool_turb_1979_2015 = np.zeros((ensemble,Dim_year_1979_2015,Dim_month,Dim_latitude,Dim_longitude),dtype = float)
        else:
            ensemble = ensemble_34
            pool_sflux_1979_2015 = np.zeros((ensemble,Dim_year_1979_2015,Dim_month,Dim_latitude,Dim_longitude),dtype = float)
            pool_turb_1979_2015 = np.zeros((ensemble,Dim_year_1979_2015,Dim_month,Dim_latitude,Dim_longitude),dtype = float)
        for j in range(ensemble):
            for y in range(Dim_year_1979_2015):
                # get variable keys
                key_slhf, key_sshf, key_ssr, key_str = var_key_retrieve(datapath, y, i, j)
                # compute amet
                pool_sflux_1979_2015[j,y,:,:,:],\
                pool_turb_1979_2015[j,y,:,:,:] = sflux(key_slhf, key_sshf, key_ssr, key_str, lat, lon)
        ####################################################################
        ######          Calculating Trend (positive downward)        ######
        ####################################################################
        # calculate trend and take ensemble mean (turbulent fluxes);
        # *10 converts a per-year trend to per-decade - TODO confirm
        # analyzer.spatial.trend returns a per-year slope
        ens_avg_NLeSC_exp = analyzer.spatial(np.mean(pool_turb_1979_2015[:],0))
        ens_avg_NLeSC_exp.anomaly()
        ens_avg_NLeSC_exp.trend()
        #ticks = [-0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8]
        ticks = np.linspace(-0.5,0.5,11)
        label = 'Trend of surface turbulent fluxes W/(m2*decade)'
        visualizer.plots.geograph(lat, lon, ens_avg_NLeSC_exp._a*10, label, ticks,
                                  os.path.join(output_path, 'Turbulent_trend',
                                  'trend_spatial_ECEarth_NLeSC_turb_exp_{}.png'.format(i)), boundary = 'polar')
        # calculate trend and take ensemble mean (net surface fluxes)
        ens_avg_NLeSC_exp = analyzer.spatial(np.mean(pool_sflux_1979_2015[:],0))
        ens_avg_NLeSC_exp.anomaly()
        ens_avg_NLeSC_exp.trend()
        #ticks = [-0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8]
        ticks = np.linspace(-0.5,0.5,11)
        label = 'Trend of net surface fluxes W/(m2*decade)'
        visualizer.plots.geograph(lat, lon, ens_avg_NLeSC_exp._a*10, label, ticks,
                                  os.path.join(output_path, 'SFlux_trend',
                                  'trend_spatial_ECEarth_NLeSC_SFlux_exp_{}.png'.format(i)), boundary = 'polar')
Analysis/Trend_spatial/SFlux_ECEarth_NLeSC_analysis_spatial_trend.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] cell_id="bd95d068-8e9e-43f6-a533-5bd926ce2ead" deepnote_cell_type="markdown" tags=[]
# # K-Nearest Neighbors Regressor
#
# [YuJa recording](https://uci.yuja.com/V/Video?v=4348961&node=14654381&a=1301700135&autoplay=1)
#
# Before the recording, we introduced the K-Nearest Neighbors Classifier and the K-Nearest Neighbors Regressor.  We mentioned that larger K corresponds to smaller variance (so over-fitting is more likely to occur with smaller values of K).  We also discussed the training error curve and test error curve, like from the figures in Chapter 2 of *Introduction to Statistical Learning*.

# + cell_id="54dd3d8a-8ded-4b22-a611-e0511993607e" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=25547 execution_start=1644423889409 source_hash="970b70e1" tags=[]
import numpy as np
import pandas as pd
import seaborn as sns
import altair as alt
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error

# + cell_id="ec6f2c94-53bd-4d2f-a0e4-a9fac9dce1ee" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=10 execution_start=1644424021388 source_hash="fd5e24a9" tags=[]
# drop rows with missing measurements (KNN cannot handle NaNs)
df = sns.load_dataset("penguins")
#df = df.dropna()
df.dropna(inplace=True)

# + cell_id="e729e857-e300-4a1d-bd23-5289e46f27f1" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=19 execution_start=1644424063026 source_hash="de1e323c" tags=[]
df.info()
# -

# It would be better to rescale the data first (i.e., to normalize the data).  We'll talk about that soon but we're skipping it for now.

# + cell_id="611d6deb-9246-4d29-857a-7ff9a36dfd9f" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=662 execution_start=1644424368678 source_hash="5e951575" tags=[]
# predict body mass from three bill/flipper measurements; 50/50 split
X_train, X_test, y_train, y_test = train_test_split(
    df[["bill_length_mm","bill_depth_mm","flipper_length_mm"]],
    df["body_mass_g"],
    test_size = 0.5)

# + cell_id="073a7115-f335-4c18-99b9-79b5f6566ccd" deepnote_cell_type="code" deepnote_output_heights=[21] deepnote_to_be_reexecuted=false execution_millis=24 execution_start=1644424383662 source_hash="38f368cd" tags=[]
X_train.shape
# -

# The syntax for performing K-Nearest Neighbors regression using scikit-learn is essentially the same as the syntax for performing linear regression.

# + cell_id="b9c258a2-8b0b-4916-87cb-0e3a4f7e2eb3" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=1 execution_start=1644424128499 source_hash="ba1bc8eb" tags=[]
reg = KNeighborsRegressor(n_neighbors=10)

# + cell_id="082659a7-df06-44da-a424-5f92eb865076" deepnote_cell_type="code" deepnote_output_heights=[21] deepnote_to_be_reexecuted=false execution_millis=16 execution_start=1644424523994 source_hash="d18187b5" tags=[]
reg.fit(X_train, y_train)

# + cell_id="1b7292c2-7421-42ce-b97c-de9dbf38da86" deepnote_cell_type="code" deepnote_output_heights=[405] deepnote_to_be_reexecuted=false execution_millis=2 execution_start=1644424524017 source_hash="6a021837" tags=[]
reg.predict(X_test)

# + cell_id="2af0b63d-fe4f-45fb-8104-38cd0d961721" deepnote_cell_type="code" deepnote_output_heights=[21] deepnote_to_be_reexecuted=false execution_millis=16 execution_start=1644424648802 source_hash="251bcee5" tags=[]
X_test.shape

# + cell_id="7a5c2c55-846e-423a-a473-16c57106ddb1" deepnote_cell_type="code" deepnote_output_heights=[21] deepnote_to_be_reexecuted=false execution_millis=30 execution_start=1644424658097 source_hash="f3019309" tags=[]
# NOTE: argument order is (prediction, truth) here; MAE is symmetric,
# so the value is the same as (truth, prediction)
mean_absolute_error(reg.predict(X_test), y_test)

# + cell_id="c3451935-f192-4e4e-ba38-0b6edac8e390" deepnote_cell_type="code" deepnote_output_heights=[21] deepnote_to_be_reexecuted=false execution_millis=271 execution_start=1644424707335 source_hash="abbbb7ce" tags=[]
mean_absolute_error(reg.predict(X_train), y_train)
# -

# The above numbers are similar, with `reg` performing just slightly better on the training data.  That suggests that for this training set, we are not overfitting the data when using K=10.

# + cell_id="bb229005-cc38-471d-9bc0-97623b44cd40" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=1 execution_start=1644424905451 source_hash="1cf46215" tags=[]
def get_scores(k):
    """Fit KNN with n_neighbors=k on the global train split and return
    (train MAE, test MAE).  Relies on the globals X_train/X_test/y_train/y_test.
    """
    reg = KNeighborsRegressor(n_neighbors=k)
    reg.fit(X_train, y_train)
    train_error = mean_absolute_error(reg.predict(X_train), y_train)
    test_error = mean_absolute_error(reg.predict(X_test), y_test)
    return (train_error, test_error)

# + cell_id="1c52e947-f88f-4047-bf88-31774e92b34d" deepnote_cell_type="code" deepnote_output_heights=[21] deepnote_to_be_reexecuted=false execution_millis=26 execution_start=1644424906679 source_hash="99708d02" tags=[]
get_scores(10)

# + cell_id="859f4f49-bd56-4926-905b-b6967961f514" deepnote_cell_type="code" deepnote_output_heights=[21] deepnote_to_be_reexecuted=false execution_millis=21 execution_start=1644424907592 source_hash="698e82f2" tags=[]
get_scores(1)

# + cell_id="587af0d7-b8cd-4382-9705-bf6dc91d3ab0" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=5 execution_start=1644425035093 source_hash="95d406bb" tags=[]
# one row per K value; error columns start as NaN and get filled below
df_scores = pd.DataFrame({"k":range(1,150),"train_error":np.nan,"test_error":np.nan})

# + cell_id="1e5056f2-bc69-4b56-9ebd-6b929d0c6914" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=43 execution_start=1644425036512 source_hash="7ddb3e1e" tags=[]
df_scores

# + cell_id="09f31539-efb7-46f2-bc13-c00c6bfd8c6b" deepnote_cell_type="code" deepnote_output_heights=[59.59375] deepnote_to_be_reexecuted=false execution_millis=1 execution_start=1644425131876 source_hash="9bc8bb19" tags=[]
# example: fill the K=1 row by hand before looping over all rows
df_scores.loc[0,["train_error","test_error"]] = get_scores(1)

# + cell_id="3edfa3a2-f925-4703-b9f9-3a5cb6afaf95" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=11 execution_start=1644425139276 source_hash="f9244eee" tags=[]
df_scores.head()
# -

# We often avoid using `for` loops in Math 10, but I couldn't find a better way to fill in this data.  Let me know if you see a more Pythonic approach!

# + cell_id="37e4efbb-e3e5-45b1-991d-2e7895513e30" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=25986 execution_start=1644425221029 source_hash="1ea9f290" tags=[]
for i in df_scores.index:
    df_scores.loc[i,["train_error","test_error"]] = get_scores(df_scores.loc[i,"k"])

# + cell_id="de0c7969-550b-45f3-b20a-2c8300920046" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=51 execution_start=1644425256254 source_hash="7ddb3e1e" tags=[]
df_scores
# -

# Usually when we plot a test error curve, we want higher flexibility (= higher variance) on the right.  Since higher values of K correspond to lower flexibility, we are going to add a column to the DataFrame containing the reciprocals of the K values.

# + cell_id="9e01f214-81fd-47a9-a4ed-fd737b7ad566" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=10 execution_start=1644425294964 source_hash="6d7b0128" tags=[]
df_scores["kinv"] = 1/df_scores.k

# + cell_id="4afe0ca2-5dbf-4b87-a99e-0fa1730884b6" deepnote_cell_type="code" deepnote_output_heights=[356] deepnote_to_be_reexecuted=false execution_millis=13 execution_start=1644425338598 source_hash="f9178bd6" tags=[]
ctrain = alt.Chart(df_scores).mark_line().encode(
    x = "kinv",
    y = "train_error"
)

# + cell_id="214ee1b2-2bcc-40c7-8e53-0c66bdcb3ba8" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=6 execution_start=1644425359637 source_hash="ceab4c59" tags=[]
ctest = alt.Chart(df_scores).mark_line(color="orange").encode(
    x = "kinv",
    y = "test_error"
)
# -

# The blue curve is the training error, while the orange curve is the test error.  Notice how underfitting occurs for very high values of K and notice how overfitting occurs for smaller values of K.

# + cell_id="e35ccb04-769f-412e-aa37-324b4cfa5ecb" deepnote_cell_type="code" deepnote_output_heights=[356] deepnote_to_be_reexecuted=false execution_millis=63 execution_start=1644425372305 source_hash="1e9aad1a" tags=[]
ctrain+ctest
_build/jupyter_execute/Week6/Week6-Wednesday.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: venvpy37cu10
#     language: python
#     name: venvpy37cu10
# ---

# +
from mmdet3d.apis import init_detector, inference_detector, show_result_meshlab
import time

# Model definition and trained weights for the PointPillars car detector.
config_file = 'configs/pointpillars/myhv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py'
checkpoint_file = 'mypointpillar_waymocar/latest.pth'

# Build the detector on the first GPU from the config + checkpoint pair.
model = init_detector(config_file, checkpoint_file, device='cuda:0')

# Run detection on a single LiDAR point cloud and time the call.
point_cloud = '/data/cmpe249-fa21/4c_train5678/training/velodyne/000170.bin'
t_begin = time.time()
result, data = inference_detector(model, point_cloud)
t_end = time.time()
print("Elapsed time:{0}".format(t_end - t_begin))

# Write MeshLab visualization files into the 'results' folder.
show_result_meshlab(data, result, out_dir='results')
print("complete")
# -

# Benchmark the same config/checkpoint from the shell.
# %%bash
# . ~/.bashrc
# python tools/benchmark.py configs/pointpillars/myhv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py ./mypointpillar_waymocar/latest.pth
Inference.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Import libraries

# +
import sys

# Make the project root importable, then switch the working directory to it.
sys.path.append("/home/jovyan/work/sem-covid/")
# NOTE(review): de-duplicating through set() randomizes sys.path order;
# kept as-is to preserve the notebook's original behavior.
sys.path = list(set(sys.path))

import os

os.getcwd()
os.chdir('/home/jovyan/work/sem-covid/')

from legal_radar.services.faiss_indexing_pipeline import FaissIndexingPipeline
from legal_radar.services.store_registry import store_registry
from legal_radar.services.split_documents_pipeline import TEXT_PIECE_EMBEDDING
# -

# ## Define constants

FAISS_BUCKET_NAME = 'faiss-index'
FAISS_INDEX_FINREG_NAME = 'faiss_index_finreg'
FIN_REG_SPLITTED_ES_INDEX = 'ds_finreg_splitted'

# (split window size, split window step) pairs to index, one FAISS index each.
EXPERIMENT_CONFIGS = [
    (1, 1),
    (2, 1),
    (5, 2),
    (10, 5),
    (20, 10),
    (50, 25),
    (100, 50),
]

# Build and persist one FAISS index per experiment configuration. The index
# names embed the window parameters, e.g. 'ds_finreg_splitted_10_5' and
# 'faiss_index_finreg_10_5_.pkl' (trailing '_' before '.pkl' preserved).
for window_size, window_step in EXPERIMENT_CONFIGS:
    fin_reg_es_index_name = f"{FIN_REG_SPLITTED_ES_INDEX}_{window_size}_{window_step}"
    faiss_index_finreg_name = f"{FAISS_INDEX_FINREG_NAME}_{window_size}_{window_step}_.pkl"
    print(fin_reg_es_index_name, faiss_index_finreg_name)
    faiss_indexing_pipeline = FaissIndexingPipeline(es_index_name=fin_reg_es_index_name,
                                                    embedding_column_name=TEXT_PIECE_EMBEDDING,
                                                    result_bucket_name=FAISS_BUCKET_NAME,
                                                    result_faiss_index_name=FAISS_INDEX_FINREG_NAME if False else faiss_index_finreg_name,
                                                    store_registry=store_registry)
    faiss_indexing_pipeline.execute()

# ## Initialize faiss indexing pipeline
# One more run over the base (unsuffixed) index and result names.

faiss_indexing_pipeline = FaissIndexingPipeline(es_index_name=FIN_REG_SPLITTED_ES_INDEX,
                                                embedding_column_name=TEXT_PIECE_EMBEDDING,
                                                result_bucket_name=FAISS_BUCKET_NAME,
                                                result_faiss_index_name=FAISS_INDEX_FINREG_NAME,
                                                store_registry=store_registry)

# ## Execute faiss indexing pipeline

faiss_indexing_pipeline.execute()
notebooks/faiss_indexing_pipeline.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="f7FPLsj4nB-6"
# # Tutorial Part 14: Modeling Protein-Ligand Interactions with Atomic Convolutions
#
# Introduces the Atomic Convolutional Neural Network
# (https://arxiv.org/pdf/1703.10603.pdf). ACNNs exploit the local 3D
# structure of molecules, learning features from Cartesian coordinates
# end-to-end. The architecture stacks:
#
# - a distance matrix R built from the (N, 3) coordinate matrix and
#   "neighbor listed" into shape (N, M);
# - an atom type convolution combining R with the atomic number matrix Z via
#   (1x1) step-function filters, one per atom type;
# - a radial pooling layer that down-samples the convolutions into N_r
#   radial filters, reducing parameters and overfitting;
# - an atomistic fully connected network applied row-wise (per atom) with
#   shared weights.
#
# We train on the public PDBbind dataset, where each row is a protein-ligand
# complex and the target is the binding affinity (K_i). See also the
# [Colab version](https://colab.research.google.com/github/deepchem/deepchem/blob/master/examples/tutorials/14_Modeling_Protein_Ligand_Interactions_With_Atomic_Convolutions.ipynb).
# -

# ## Setup: install the environment on Colab (takes about 5 minutes).

# + id="Y2xCQyOInB_D"
# !curl -Lo conda_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py
import conda_installer
conda_installer.install()
# !/root/miniconda/bin/conda info -e

# + id="WKxOGlhhMrC7"
# mdtraj is needed for AtomicConvs.
# !/root/miniconda/bin/conda install -c conda-forge mdtraj -y -q

# + id="jFQmra_fFE8U"
# !pip install --pre deepchem
import deepchem
deepchem.__version__

# + id="W1cCOOYXnB_L"
import deepchem as dc
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from rdkit import Chem
from deepchem.molnet import load_pdbbind
from deepchem.models import AtomicConvModel
from deepchem.feat import AtomicConvFeaturizer
# -

# ### Getting protein-ligand data
# Featurizer limits, shared with the model construction below.

f1_num_atoms = 100  # maximum number of atoms to consider in the ligand
f2_num_atoms = 1000  # maximum number of atoms to consider in the protein
max_num_neighbors = 12  # maximum number of spatial neighbors for an atom

acf = AtomicConvFeaturizer(frag1_num_atoms=f1_num_atoms,
                           frag2_num_atoms=f2_num_atoms,
                           complex_num_atoms=f1_num_atoms + f2_num_atoms,
                           max_num_neighbors=max_num_neighbors,
                           neighbor_cutoff=4)

# pocket=True featurizes only the binding pocket (saves memory, runs faster);
# the ~200-complex "core" set is enough for rapid experimentation, versus the
# ~5000-complex "refined" set for more robust training/validation.
# %%time
tasks, datasets, transformers = load_pdbbind(featurizer=acf,
                                             save_dir='.',
                                             data_dir='.',
                                             pocket=True,
                                             reload=False,
                                             set_name='core')

datasets

train, val, test = datasets

# ### Training the model
# Input sizes must match those given to AtomicConvFeaturizer or errors
# follow; layer_sizes and learning rate follow the original paper.
acm = AtomicConvModel(n_tasks=1,
                      frag1_num_atoms=f1_num_atoms,
                      frag2_num_atoms=f2_num_atoms,
                      complex_num_atoms=f1_num_atoms + f2_num_atoms,
                      max_num_neighbors=max_num_neighbors,
                      batch_size=12,
                      layer_sizes=[32, 32, 16],
                      learning_rate=0.003,
                      )

losses, val_losses = [], []

# %%time
max_epochs = 50
for epoch in range(max_epochs):
    loss = acm.fit(train, nb_epoch=1, max_checkpoints_to_keep=1, all_losses=losses)
    metric = dc.metrics.Metric(dc.metrics.score_function.rms_score)
    val_losses.append(acm.evaluate(val, metrics=[metric])['rms_score']**2)  # L2 Loss

# The curves are noisy — only 154 training and 19 validation datapoints; a
# larger dataset would smooth them at greater computational cost.
fig, ax = plt.subplots()
ax.scatter(range(len(losses)), losses, label='train loss')
ax.scatter(range(len(val_losses)), val_losses, label='val loss')
plt.legend(loc='upper right');

# Pearson R^2 per split. The ACNN paper reports 0.912 train / 0.448 test on
# a random 80/20 core split; with an 80/10/10 split we see similarly strong
# training fit (~0.943) but limited generalization. Try other
# hyperparameters, harder splits, or the "refined" set to improve it.
score = dc.metrics.Metric(dc.metrics.score_function.pearson_r2_score)
for split_name, ds in zip(['train', 'val', 'test'], datasets):
    print(split_name, acm.evaluate(ds, metrics=[score]))

# ### Further reading
# Original ACNN paper: arXiv:1703.10603. Related work: affinity prediction
# from only ligands or proteins, GNINA molecular docking with deep learning,
# and AtomNet (arXiv:1510.02855).
#
# # Congratulations! Time to join the Community!
# Star DeepChem on GitHub (https://github.com/deepchem/deepchem) to build
# awareness of open-source drug discovery tooling.
examples/tutorials/14_Modeling_Protein_Ligand_Interactions_With_Atomic_Convolutions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Deepak262/18CSE172/blob/main/dmdw_lab_16_Nov_2020.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -

# DMDW lab: load the Toyota used-car CSV, clean nulls, encode categoricals,
# split train/test, and explore distributions with seaborn.

dataSetPath = "https://raw.githubusercontent.com/chirudukuru/DMDW/main/Toyota.csv"

import pandas as pd

# First load with the default integer index.
data = pd.read_csv(dataSetPath)
data

# Reload using the unnamed first CSV column as the index.
data = pd.read_csv("https://raw.githubusercontent.com/chirudukuru/DMDW/main/Toyota.csv",
                   index_col="Unnamed: 0")
data

# Basic structural inspection of the frame.
type(data)

data.shape

data.info()

data.index

data.columns

data.head()

data.tail()

data.head(9)

data[['Price', "Age"]].head(10)

# ### Data wrangling (working with null values)
data.isnull().sum()

# Method 1: drop the rows containing nulls (acceptable with plenty of data).
data.dropna(inplace=True)
data.isnull().sum()

data.shape

data.head(10)  # after removal of null-value rows

# Method 2: impute missing values with the column mean instead of dropping.
data['MetColor'].mean()

data['MetColor'].head()

import numpy as np

data['MetColor'].replace(np.NaN, data['MetColor'].mean()).head()

# !pip install sklearn

# Feature matrix: every column except the last.
x = data.iloc[:, :-1].values

x

from sklearn.preprocessing import LabelEncoder

label_encoder = LabelEncoder()

# NOTE(review): this encodes column 1 but stores the codes into column 0,
# overwriting the original column-0 values. That looks unintentional —
# confirm which column was actually meant to be label-encoded. Preserved
# as-is to keep the notebook's behavior unchanged.
x[:, 0] = label_encoder.fit_transform(x[:, 1])

x

from sklearn.preprocessing import OneHotEncoder

# One-hot encode FuelType and append the dummy columns to the frame.
dummy = pd.get_dummies(data['FuelType'])

dummy

data = pd.concat([data, dummy], axis=1)

data

# Target = kilometres driven; features = everything else.
y = data['KM']

x = data.drop(['KM'], axis=1)

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)

# Inspect both sides of the split.
x_train

x_train.describe()

x_test

x_test.describe()

y_train

y_train.describe()

y_test

y_test.describe()

x_train.head()

x_train.shape

x_test.head()

x_test.shape

# ### Analyse the data in graphs
import seaborn as sns
import matplotlib.pyplot as plt

sns.countplot(data['FuelType'])
plt.show()

sns.countplot(data['MetColor'])
plt.show()

sns.countplot(data['Doors'])
plt.show()

sns.countplot(data['FuelType'], hue=data['MetColor'])
plt.show()

sns.countplot(data['FuelType'], hue=data['Doors'])
plt.show()
dmdw_lab_16_Nov_2020.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # *Copyright (c) Microsoft Corporation. All rights reserved.* # *Licensed under the MIT License.* # # Named Entity Recognition Using BERT # # Before You Start # # The running time shown in this notebook is on a Standard_NC6 Azure Deep Learning Virtual Machine with 1 NVIDIA Tesla K80 GPU. # > **Tip**: If you want to run through the notebook quickly, you can set the **`QUICK_RUN`** flag in the cell below to **`True`** to run the notebook on a small subset of the data and a smaller number of epochs. # # The table below provides some reference running time on different machine configurations. # # |QUICK_RUN|Machine Configurations|Running time| # |:---------|:----------------------|:------------| # |True|4 **CPU**s, 14GB memory| ~ 2 minutes| # |False|4 **CPU**s, 14GB memory| ~1.5 hours| # |True|1 NVIDIA Tesla K80 GPUs, 12GB GPU memory| ~ 1 minute| # |False|1 NVIDIA Tesla K80 GPUs, 12GB GPU memory| ~ 7 minutes | # # If you run into CUDA out-of-memory error or the jupyter kernel dies constantly, try reducing the `BATCH_SIZE` and `MAX_SEQ_LENGTH`, but note that model performance will be compromised. ## Set QUICK_RUN = True to run the notebook on a small subset of data and a smaller number of epochs. QUICK_RUN = False # ## Summary # This notebook demonstrates how to fine tune [pretrained BERT model](https://github.com/huggingface/pytorch-pretrained-BERT) for named entity recognition (NER) task. Utility functions and classes in the NLP Best Practices repo are used to facilitate data preprocessing, model training, model scoring, and model evaluation. 
# # [BERT (Bidirectional Transformers for Language Understanding)](https://arxiv.org/pdf/1810.04805.pdf) is a powerful pre-trained lanaguage model that can be used for multiple NLP tasks, including text classification, question answering, named entity recognition, etc. It's able to achieve state of the art performance with only a few epochs of fine tuning on task specific datasets. # The figure below illustrates how BERT can be fine tuned for NER tasks. The input data is a list of tokens representing a sentence. In the training data, each token has an entity label. After fine tuning, the model predicts an entity label for each token in a given testing sentence. # # <img src="https://nlpbp.blob.core.windows.net/images/bert_architecture.png"> # + import sys import os import random import scrapbook as sb from seqeval.metrics import classification_report import torch nlp_path = os.path.abspath('../../') if nlp_path not in sys.path: sys.path.insert(0, nlp_path) from utils_nlp.models.bert.token_classification import BERTTokenClassifier, create_label_map, postprocess_token_labels from utils_nlp.models.bert.common import Language, Tokenizer from utils_nlp.dataset.wikigold import load_train_test_dfs, get_unique_labels from utils_nlp.common.timer import Timer # - # ## Configurations # + tags=["parameters"] TRAIN_DATA_FRACTION = 1 TEST_DATA_FRACTION = 1 NUM_TRAIN_EPOCHS = 5 if QUICK_RUN: TRAIN_DATA_FRACTION = 0.1 TEST_DATA_FRACTION = 0.1 NUM_TRAIN_EPOCHS = 1 if torch.cuda.is_available(): BATCH_SIZE = 16 else: BATCH_SIZE = 8 CACHE_DIR="./temp" # set random seeds RANDOM_SEED = 100 torch.manual_seed(RANDOM_SEED) # model configurations LANGUAGE = Language.ENGLISHCASED DO_LOWER_CASE = False MAX_SEQ_LENGTH = 200 # optimizer configuration LEARNING_RATE = 3e-5 # data configurations TEXT_COL = "sentence" LABELS_COL = "labels" # - # ## Preprocess Data # ### Get training and testing data # The dataset used in this notebook is the [wikigold 
dataset](https://www.aclweb.org/anthology/W09-3302). The wikigold dataset consists of 145 mannually labelled Wikipedia articles, including 1841 sentences and 40k tokens in total. The dataset can be directly downloaded from [here](https://github.com/juand-r/entity-recognition-datasets/tree/master/data/wikigold). # # The helper function `load_train_test_dfs` downloads the data file if it doesn't exist in `local_cache_path`. It splits the dataset into training and testing sets according to `test_fraction`. Because this is a relatively small dataset, we set `test_fraction` to 0.5 in order to have enough data for model evaluation. Running this notebook multiple times with different random seeds produces similar results. # # The helper function `get_unique_labels` returns the unique entity labels in the dataset. There are 5 unique labels in the original dataset: 'O' (non-entity), 'I-LOC' (location), 'I-MISC' (miscellaneous), 'I-PER' (person), and 'I-ORG' (organization). # # The maximum number of words in a sentence is 144, so we set MAX_SEQ_LENGTH to 200 above, because the number of tokens will grow after WordPiece tokenization. train_df, test_df = load_train_test_dfs(local_cache_path=CACHE_DIR, test_fraction=0.5,random_seed=RANDOM_SEED) label_list = get_unique_labels() print('\nUnique entity labels: \n{}\n'.format(label_list)) print('Sample sentence: \n{}\n'.format(train_df[TEXT_COL][0])) print('Sample sentence labels: \n{}\n'.format(train_df[LABELS_COL][0])) train_df = train_df.sample(frac=TRAIN_DATA_FRACTION).reset_index(drop=True) test_df = test_df.sample(frac=TEST_DATA_FRACTION).reset_index(drop=True) # **Note that the input text are lists of words instead of raw sentences. This format ensures matching between input words and token labels when the words are further tokenized by Tokenizer.tokenize_ner.** # ### Tokenization and Preprocessing # # **Create a dictionary that maps labels to numerical values** # Note there is an argument called `trailing_piece_tag`. 
BERT uses a WordPiece tokenizer which breaks down some words into multiple tokens, e.g. "criticize" is tokenized into "critic" and "##ize". Since the input data only come with one token label for "criticize", within Tokenizer.prerocess_ner_tokens, the original token label is assigned to the first token "critic" and the second token "##ize" is labeled as "X". By default, `trailing_piece_tag` is set to "X". If "X" already exists in your data, you can set `trailing_piece_tag` to another value that doesn't exist in your data. label_map = create_label_map(label_list, trailing_piece_tag="X") # **Create a tokenizer** tokenizer = Tokenizer(language=LANGUAGE, to_lower=DO_LOWER_CASE, cache_dir=CACHE_DIR) # **Tokenize and preprocess text** # The `tokenize_ner` method of the `Tokenizer` class converts text and labels in strings to numerical features, involving the following steps: # 1. WordPiece tokenization. # 2. Convert tokens and labels to numerical values, i.e. token ids and label ids. # 3. Sequence padding or truncation according to the `max_seq_length` configuration. train_token_ids, train_input_mask, train_trailing_token_mask, train_label_ids = \ tokenizer.tokenize_ner(text=train_df[TEXT_COL], label_map=label_map, max_len=MAX_SEQ_LENGTH, labels=train_df[LABELS_COL], trailing_piece_tag="X") test_token_ids, test_input_mask, test_trailing_token_mask, test_label_ids = \ tokenizer.tokenize_ner(text=test_df[TEXT_COL], label_map=label_map, max_len=MAX_SEQ_LENGTH, labels=test_df[LABELS_COL], trailing_piece_tag="X") # `Tokenizer.tokenize_ner` outputs three or four lists of numerical features lists, each sublist contains features of an input sentence: # 1. token ids: list of numerical values each corresponds to a token. # 2. attention mask: list of 1s and 0s, 1 for input tokens and 0 for padded tokens, so that padded tokens are not attended to. # 3. 
trailing word piece mask: boolean list, `True` for the first word piece of each original word, `False` for the trailing word pieces, e.g. ##ize. This mask is useful for removing predictions on trailing word pieces, so that each original word in the input text has a unique predicted label. # 4. label ids: list of numerical values each corresponds to an entity label, if `labels` is provided. print("Sample token ids:\n{}\n".format(train_token_ids[0])) print("Sample attention mask:\n{}\n".format(train_input_mask[0])) print("Sample trailing token mask:\n{}\n".format(train_trailing_token_mask[0])) print("Sample label ids:\n{}\n".format(train_label_ids[0])) # ## Create Token Classifier # The value of the `language` argument determines which BERT model is used: # * Language.ENGLISH: "bert-base-uncased" # * Language.ENGLISHCASED: "bert-base-cased" # * Language.ENGLISHLARGE: "bert-large-uncased" # * Language.ENGLISHLARGECASED: "bert-large-cased" # * Language.CHINESE: "bert-base-chinese" # * Language.MULTILINGUAL: "bert-base-multilingual-cased" # * Language.ENGLISHLARGEWWM: "bert-large-uncased-whole-word-masking" # * Language.ENGLISHLARGECASEDWWM: "bert-large-cased-whole-word-masking" # # Here we use the base, cased pretrained model. 
# Instantiate the token classifier with one output per entry in the label map
# (entity labels plus the trailing-piece tag "X").
token_classifier = BERTTokenClassifier(language=LANGUAGE,
                                       num_labels=len(label_map),
                                       cache_dir=CACHE_DIR)

# ## Train Model

# Fine-tune the pre-trained BERT model on the training features and time the run.
with Timer() as t:
    token_classifier.fit(token_ids=train_token_ids,
                         input_mask=train_input_mask,
                         labels=train_label_ids,
                         num_epochs=NUM_TRAIN_EPOCHS,
                         batch_size=BATCH_SIZE,
                         learning_rate=LEARNING_RATE)
print("Training time : {:.3f} hrs".format(t.interval / 3600))

# ## Predict on Test Data

# Predict label ids for every token of the test set (padded positions included).
with Timer() as t:
    pred_label_ids = token_classifier.predict(token_ids=test_token_ids,
                                              input_mask=test_input_mask,
                                              labels=test_label_ids,
                                              batch_size=BATCH_SIZE)
print("Prediction time : {:.3f} hrs".format(t.interval / 3600))

# ## Evaluate Model
# The `predict` method of the token classifier outputs label ids for all tokens,
# including the padded tokens. `postprocess_token_labels` is a helper function that
# removes the predictions on padded tokens. If a `label_map` is provided, it maps the
# numerical label ids back to the original token labels, which are usually strings.

pred_tags_no_padding = postprocess_token_labels(pred_label_ids, test_input_mask, label_map)
true_tags_no_padding = postprocess_token_labels(test_label_ids, test_input_mask, label_map)

report_no_padding = classification_report(true_tags_no_padding, pred_tags_no_padding, digits=2)
print(report_no_padding)

# `postprocess_token_labels` also provides an option to remove the predictions on
# trailing word pieces, e.g. ##ize, so that the final predicted labels correspond to the
# original words in the input text. The `trailing_token_mask` is obtained from
# `tokenizer.tokenize_ner`.

pred_tags_no_padding_no_trailing = postprocess_token_labels(pred_label_ids,
                                                            test_input_mask,
                                                            label_map,
                                                            remove_trailing_word_pieces=True,
                                                            trailing_token_mask=test_trailing_token_mask)
true_tags_no_padding_no_trailing = postprocess_token_labels(test_label_ids,
                                                            test_input_mask,
                                                            label_map,
                                                            remove_trailing_word_pieces=True,
                                                            trailing_token_mask=test_trailing_token_mask)

report_no_padding_no_trailing = classification_report(true_tags_no_padding_no_trailing,
                                                      pred_tags_no_padding_no_trailing,
                                                      digits=2)
print(report_no_padding_no_trailing)

# We can see that the metrics are worse after excluding trailing word pieces, because
# they are easy to predict.

# ## Conclusion
# By fine-tuning the pre-trained BERT model for token classification, we achieved
# significantly better results compared to the [original paper on the wikigold
# dataset](https://www.aclweb.org/anthology/W09-3302) with a much smaller training dataset.

# +
# for testing
# NOTE(review): this parses the second-to-last line of the classification_report string
# (the averages row) by whitespace splitting; it is fragile to formatting changes in the
# report — confirm against the installed metrics library version.
report_no_padding_split = report_no_padding.split('\n')[-2].split()
report_no_padding_no_trailing_split = report_no_padding_no_trailing.split('\n')[-2].split()

sb.glue("precision_1", float(report_no_padding_split[2]))
sb.glue("recall_1", float(report_no_padding_split[3]))
sb.glue("f1_1", float(report_no_padding_split[4]))
sb.glue("precision_2", float(report_no_padding_no_trailing_split[2]))
sb.glue("recall_2", float(report_no_padding_no_trailing_split[3]))
sb.glue("f1_2", float(report_no_padding_no_trailing_split[4]))
examples/named_entity_recognition/ner_wikigold_bert.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="o-1dJdiadbyY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="646cb806-c659-414f-eb80-d8cf77dff516" executionInfo={"status": "ok", "timestamp": 1537361616532, "user_tz": -480, "elapsed": 651, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-nVI9QJN9Uk4/AAAAAAAAAAI/AAAAAAAAACk/6lxg_oQk5wU/s50-c-k-no/photo.jpg", "userId": "106859476729549749549"}} import numpy as np print(np.__version__) # + id="PvM293zidgmN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="50f22781-887e-47b5-8a0e-9e324657aff2" executionInfo={"status": "ok", "timestamp": 1537361618858, "user_tz": -480, "elapsed": 629, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-nVI9QJN9Uk4/AAAAAAAAAAI/AAAAAAAAACk/6lxg_oQk5wU/s50-c-k-no/photo.jpg", "userId": "106859476729549749549"}} import tensorflow as tf print(tf.__version__) # + id="QMmscX4sdkQs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5ba27c33-a521-437e-f674-836f4fc04f59" executionInfo={"status": "ok", "timestamp": 1537361620307, "user_tz": -480, "elapsed": 1350, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-nVI9QJN9Uk4/AAAAAAAAAAI/AAAAAAAAACk/6lxg_oQk5wU/s50-c-k-no/photo.jpg", "userId": "106859476729549749549"}} import matplotlib print(matplotlib.__version__) # + id="g3ORI16g9ckg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="dd01ff56-8e24-4f40-caf0-a0c58103a3c7" executionInfo={"status": "ok", "timestamp": 1537361621653, "user_tz": -480, "elapsed": 1289, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-nVI9QJN9Uk4/AAAAAAAAAAI/AAAAAAAAACk/6lxg_oQk5wU/s50-c-k-no/photo.jpg", 
"userId": "106859476729549749549"}} # 下载数据集 from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) print("训练集图像大小:{}".format(mnist.train.images.shape)) print("训练集标签大小:{}".format(mnist.train.labels.shape)) print("验证集图像大小:{}".format(mnist.validation.images.shape)) print("验证集标签大小:{}".format(mnist.validation.labels.shape)) print("测试集图像大小:{}".format(mnist.test.images.shape)) print("测试集标签大小:{}".format(mnist.test.labels.shape)) # + id="IqwO70h9Cqk7" colab_type="code" colab={} # 为了便于读取,我们把数据集先各自使用一个变量指向它们 x_train, y_train = mnist.train.images, mnist.train.labels x_valid, y_valid = mnist.validation.images, mnist.validation.labels x_test, y_test = mnist.test.images, mnist.test.labels # + id="MVp6_A-BkjLH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 127} outputId="2f7ecae5-429a-40eb-8564-a3099126ad7c" executionInfo={"status": "ok", "timestamp": 1537361674373, "user_tz": -480, "elapsed": 859, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-nVI9QJN9Uk4/AAAAAAAAAAI/AAAAAAAAACk/6lxg_oQk5wU/s50-c-k-no/photo.jpg", "userId": "106859476729549749549"}} import matplotlib.pyplot as plt # %matplotlib inline import numpy as np # 绘制和显示前5个训练集的图像 fig = plt.figure(figsize=(10, 10)) for i in range(5): ax = fig.add_subplot(1, 5, i+1, xticks=[], yticks=[]) ax.imshow(np.reshape(x_train[i:i+1], (28, 28)), cmap='gray') # + id="QCySXT71yGgm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 127} outputId="36682b4a-62c3-4c7f-db70-806c3217d766" executionInfo={"status": "ok", "timestamp": 1537361676248, "user_tz": -480, "elapsed": 650, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-nVI9QJN9Uk4/AAAAAAAAAAI/AAAAAAAAACk/6lxg_oQk5wU/s50-c-k-no/photo.jpg", "userId": "106859476729549749549"}} # 绘制和显示前(2*12)之后的五个训练集的图像 fig = plt.figure(figsize=(10, 10)) for i in range(5): ax = fig.add_subplot(1, 5, i+1, xticks=[], yticks=[]) 
ax.imshow(np.reshape(x_train[i+2*12:i+1+2*12], (28, 28)), cmap='gray') # + id="lk2W8-5CkkvK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 592} outputId="64f8998d-4daf-4fcd-dd91-1e422f75f89c" executionInfo={"status": "ok", "timestamp": 1537361681893, "user_tz": -480, "elapsed": 3831, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-<KEY>", "userId": "106859476729549749549"}} # 定义可视化图像的函数,传入一个图像向量和figure对象 def visualize_input(img, ax): # 绘制并输出图像 ax.imshow(img, cmap='gray') # 对于该图像的宽和高,我们输出它们的具体的数值, # 以便于我们更清晰的知道计算机是如何看待一张图像的 width, height = img.shape # 将图像中的具体数值转换成0-1之间的值 thresh = img.max()/2.5 # 遍历行 for x in range(width): # 遍历列 for y in range(height): # 将图像的数值在它对应的位置上标出,且水平垂直居中 ax.annotate(str(round(img[x][y],2)), xy=(y,x), horizontalalignment='center', verticalalignment='center', color='white' if img[x][y]<thresh else 'black') fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(111) # 假设我们就取出下标为5的样本来作为例子 visualize_input(np.reshape(x_train[5:6], (28, 28)), ax) # + id="b8lIAXSekmMR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 43112} outputId="50d82b94-868f-4a2d-8d60-060f2d32c7e2" executionInfo={"status": "ok", "timestamp": 1537370142598, "user_tz": -480, "elapsed": 89021, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-nVI9QJN9Uk4/AAAAAAAAAAI/AAAAAAAAACk/6lxg_oQk5wU/s50-c-k-no/photo.jpg", "userId": "106859476729549749549"}} import math # 参数准备 img_size = 28 * 28 num_classes = 10 learning_rate = 0.1 epochs = 100 batch_size = 128 # 创建模型 # x表示输入,创建输入占位符,该占位符会在训练时,会对每次迭代的数据进行填充上 x = tf.placeholder(tf.float32, [None, img_size]) # W表示weight,创建权重,初始化时都是为0,它的大小是(图像的向量大小,图像的总类别) W = tf.Variable(tf.zeros([img_size, num_classes])) # b表示bias,创建偏移项 b = tf.Variable(tf.zeros([num_classes])) # y表示计算输出结果,softmax表示激活函数是多类别分类的输出 # 感知器的计算公式就是:(x * W) + b y = tf.nn.softmax(tf.matmul(x, W) + b) # 定义输出预测占位符y_ y_ = tf.placeholder(tf.float32, [None, 10]) valid_feed_dict = { x: 
x_valid, y_: y_valid } test_feed_dict = { x: x_test, y_: y_test } # 通过激活函数softmax的交叉熵来定义损失函数 cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)) # 定义梯度下降优化器 optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # 比较正确的预测结果 correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) # 计算预测准确率 accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) iteration = 0 # 定义训练时的检查点 saver = tf.train.Saver() # 创建一个TensorFlow的会话 with tf.Session() as sess: # 初始化全局变量 sess.run(tf.global_variables_initializer()) # 根据每批次训练128个样本,计算出一共需要迭代多少次 batch_count = int(math.ceil(mnist.train.labels.shape[0] / 128.0)) # 开始迭代训练样本 for e in range(epochs): # 每个样本都需要在TensorFlow的会话里进行运算,训练 for batch_i in range(batch_count): # 样本的索引,间隔是128个 batch_start = batch_i * batch_size # 取出图像样本 batch_x = mnist.train.images[batch_start:batch_start+batch_size] # 取出图像对应的标签 batch_y = mnist.train.labels[batch_start:batch_start+batch_size] # 训练模型 loss, _ = sess.run([cost, optimizer], feed_dict={x: batch_x, y_: batch_y}) # 每20个批次时输出一次训练损失等日志信息 if batch_i % 20 == 0: print("Epoch: {}/{}".format(e+1, epochs), "Iteration: {}".format(iteration), "Training loss: {:.5f}".format(loss)) iteration += 1 # 每128个样本时,验证一下训练的效果如何,并输出日志信息 if iteration % batch_size == 0: valid_acc = sess.run(accuracy, feed_dict=valid_feed_dict) print("Epoch: {}/{}".format(e, epochs), "Iteration: {}".format(iteration), "Validation Accuracy: {:.5f}".format(valid_acc)) # 保存训练模型的检查点 saver.save(sess, "checkpoints/mnist_mlp_tf.ckpt") # + id="GPT5CBs4ko-a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="4ebe197e-f6e1-413d-cc2d-0fb7288be10b" executionInfo={"status": "ok", "timestamp": 1537370149721, "user_tz": -480, "elapsed": 828, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-nVI9QJN9Uk4/AAAAAAAAAAI/AAAAAAAAACk/6lxg_oQk5wU/s50-c-k-no/photo.jpg", "userId": "106859476729549749549"}} # 预测测试数据集精确度 saver = tf.train.Saver() 
with tf.Session() as sess:
    # Restore the trained weights from the latest checkpoint in ./checkpoints
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    # Evaluate classification accuracy on the held-out test set
    test_acc = sess.run(accuracy, feed_dict=test_feed_dict)
    print("test accuracy: {:.5f}".format(test_acc))

# + id="eynty4q8Vxsi" colab_type="code" colab={}
mnist/mnist_mlp_tf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Meta-Learning with the Rank-Weighted GP Ensemble (RGPE) # # BoTorch is designed in to be model-agnostic and only requries that a model conform to a minimal interface. This tutorial walks through an example of implementing the rank-weighted Gaussian process ensemble (RGPE) [Feurer, Letham, Bakshy ICML 2018 AutoML Workshop] and using the RGPE in BoTorch to do meta-learning across related optimization tasks. # # * Original paper: https://arxiv.org/pdf/1802.02219.pdf # + import os import torch import math torch.manual_seed(29) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") dtype = torch.double SMOKE_TEST = os.environ.get("SMOKE_TEST") # - # ### Toy Problem # * We consider optimizing the following 1-D synthetic function # $$f(x, s_i) = \frac{1}{10}\bigg(x-1\bigg)\bigg(\sin(x+s_i)+\frac{1}{10}\bigg)$$ # where # $$s_i = \frac{(i+9)\pi}{8}$$ # is a task-dependent shift parameter and $i$ is the task index $i \in [1, t]$. # # * In this tutorial, we will consider the scenario where we have collected data from 5 prior tasks (referred to as base tasks), which with a different task dependent shift parameter $s_i$. # # * The goal now is use meta-learning to improve sample efficiency when optimizing a 6th task. # #### Toy Problem Setup # # First let's define a function for compute the shift parameter $s_i$ and set the shift amount for the target task. # + NUM_BASE_TASKS = 5 if not SMOKE_TEST else 2 def task_shift(task): """ Fetch shift amount for task. """ return math.pi * task /12.0 # set shift for target task TARGET_SHIFT = 0.0 # - # Then, let's define our function $f(x, s_i)$ and set bounds on $x$. 
# +
# search space bounds for x: [-10, 10]
BOUNDS = torch.tensor([[-10.0], [10.0]], dtype=dtype, device=device)


def f(X, shift=TARGET_SHIFT):
    """
    Torch-compatible objective for a task: f(X) = X * sin(X + pi + shift) + X / 10.

    Args:
        X: tensor of input locations (in the original, un-normalized domain).
        shift: task-dependent shift s_i; defaults to the target task's shift.

    Returns:
        Tensor of objective values with the same shape as `X`.
    """
    f_X = X * torch.sin(X + math.pi + shift) + X/10.0
    return f_X
# -

# #### Sample training data for prior base tasks
# We sample data from a Sobol sequence to help ensure numerical stability when using a
# small amount of 1-D data. Sobol sequences help prevent us from sampling a bunch of
# training points that are close together.

# +
from botorch.utils.sampling import draw_sobol_samples
from botorch.utils.transforms import normalize, unnormalize

# standard deviation of the i.i.d. Gaussian observation noise added to every evaluation
noise_std = 0.05

# Sample data for each base task
data_by_task = {}
for task in range(NUM_BASE_TASKS):
    num_training_points = 20
    # draw points from a sobol sequence (per-task seed offset keeps the tasks distinct)
    raw_x = draw_sobol_samples(
        bounds=BOUNDS,
        n=num_training_points,
        q=1,
        seed=task+5397923,
    ).squeeze(1)
    # get observed values
    f_x = f(raw_x, task_shift(task+1))
    train_y = f_x + noise_std*torch.randn_like(f_x)
    train_yvar = torch.full_like(train_y, noise_std**2)
    # store training data
    data_by_task[task] = {
        # scale x to [0, 1]
        'train_x': normalize(raw_x, bounds=BOUNDS),
        'train_y': train_y,
        'train_yvar': train_yvar,
    }
# -

# #### Let's plot the base tasks and the target task function along with the observed points

# +
from matplotlib import pyplot as plt
# %matplotlib inline

fig, ax = plt.subplots(1, 1, figsize=(12, 8))
x = torch.linspace(-10,10,51)
for task in data_by_task:
    # plot true function and observed values for base runs
    t = ax.plot(
        unnormalize(data_by_task[task]['train_x'], bounds=BOUNDS).cpu().numpy(),
        data_by_task[task]['train_y'].cpu().numpy(),
        '.',
        markersize=10,
        label=f"Observed task {task}",
    )
    ax.plot(
        x.detach().numpy(),
        f(x, task_shift(task+1)).cpu().numpy(),
        label=f"Base task {task}",
        color=t[0].get_color(),
    )
# plot true target function
ax.plot(
    x.detach().numpy(),
    f(x, TARGET_SHIFT).detach().numpy(),
    '--',
    label="Target task",
)
ax.legend(loc="lower right", fontsize=10)
plt.tight_layout()
# -

# ### Fit base task models
# First, let's define a helper function to fit a FixedNoiseGP with a fixed observed
# noise level.

# +
from gpytorch.mlls import ExactMarginalLogLikelihood
from botorch.models import FixedNoiseGP
from botorch.fit import fit_gpytorch_model


def get_fitted_model(train_X, train_Y, train_Yvar, state_dict=None):
    """
    Get a single task GP. The model will be fit unless a state_dict with model
    hyperparameters is provided.

    The outputs are standardized before fitting; the standardization statistics are
    stored on the model as `Y_mean` / `Y_std` so predictions can be un-standardized
    later (the RGPE posterior below relies on these attributes).
    """
    Y_mean = train_Y.mean(dim=-2, keepdim=True)
    Y_std = train_Y.std(dim=-2, keepdim=True)
    # NOTE(review): `train_Y` is standardized here but `train_Yvar` is passed through
    # unscaled — presumably the observed noise should be divided by `Y_std**2` to stay
    # consistent with the standardized outputs; confirm the intended noise scale.
    model = FixedNoiseGP(train_X, (train_Y - Y_mean)/Y_std, train_Yvar)
    model.Y_mean = Y_mean
    model.Y_std = Y_std
    if state_dict is None:
        # no hyperparameters provided: fit the GP by maximizing the marginal likelihood
        mll = ExactMarginalLogLikelihood(model.likelihood, model).to(train_X)
        fit_gpytorch_model(mll)
    else:
        model.load_state_dict(state_dict)
    return model
# -

# #### Now let's fit a FixedNoiseGP for each base task

# Fit base model
base_model_list = []
for task in range(NUM_BASE_TASKS):
    print(f"Fitting base model {task}")
    model = get_fitted_model(
        data_by_task[task]['train_x'],
        data_by_task[task]['train_y'],
        data_by_task[task]['train_yvar'],
    )
    base_model_list.append(model)

# ### Implement the RGPE
#
# The main idea of the RGPE is to estimate the target function as a weighted sum of the
# target model and the base models:
# $$\bar f(\mathbf x | \mathcal D) =
# \sum_{i=1}^{t} w_if^i(\mathbf x |\mathcal D_i)$$
# Importantly, the ensemble model is also a GP:
# $$\bar f(\mathbf x | \mathcal D) \sim \mathcal N\bigg(\sum_{i=1}^{t} w_i\mu_i(\mathbf x), \sum_{i=1}^{t}w_i^2\sigma_i^2\bigg)$$
#
# The weights $w_i$ for model $i$ are based on the ranking loss between a draw from the
# model's posterior and the targets. Specifically, the ranking loss for model $i$ is:
# $$\mathcal L(f^i, \mathcal D_t) = \sum_{j=1}^{n_t}\sum_{k=1}^{n_t}\mathbb 1\bigg[\bigg(f^i\big(\mathbf x^t_j\big) < f^i\big(\mathbf x_k^t\big)\bigg)\oplus \big(y_j^t < y_k^t\big)\bigg]$$
# where $\oplus$ is exclusive-or.
# # The loss for the target model is computed using leave-one-out cross-validation
# (LOOCV) and is given by:
# $$\mathcal L(f^t, \mathcal D_t) = \sum_{j=1}^{n_t}\sum_{k=1}^{n_t}\mathbb 1\bigg[\bigg(f^t_{-j}\big(\mathbf x^t_j\big) < f^t_{-j}\big(\mathbf x_k^t\big)\bigg)\oplus \big(y_j^t < y_k^t\big)\bigg]$$
# where $f^t_{-j}$ is the model fitted to all data from the target task except training
# example $j$.
#
# The weights are then computed as:
# $$w_i = \frac{1}{S}\sum_{s=1}^S\mathbb 1\big(i = \text{argmin}_{i'}l_{i', s}\big)$$


def roll_col(X, shift):
    """Rotate the columns (last dimension) of `X` to the right by `shift`."""
    return torch.roll(X, shifts=shift, dims=-1)


def compute_ranking_loss(f_samps, target_y):
    """
    Compute the ranking loss for each posterior sample over the target points.

    Args:
        f_samps: `n_samples x (n) x n`-dim tensor of samples
        target_y: `n x 1`-dim tensor of targets

    Returns:
        Tensor: `n_samples`-dim tensor containing the ranking loss of each sample
    """
    n = target_y.shape[0]
    if f_samps.ndim == 3:
        # Target-model (LOOCV) case: f_samps[s, j, k] is the j-th LOO model's
        # prediction at training point k.
        flat_y = target_y.squeeze(-1)
        # All ordered target pairs (y_j, y_k).
        y_pairs = torch.cartesian_prod(flat_y, flat_y).view(n, n, 2)
        # The diagonal holds each LOO model's out-of-sample prediction; compare it
        # against that model's in-sample predictions.
        oos_pred = f_samps.diagonal(dim1=1, dim2=2).unsqueeze(-1)
        misranked = (oos_pred < f_samps) ^ (y_pairs[..., 0] < y_pairs[..., 1])
        rank_loss = misranked.sum(dim=(-2, -1))
    else:
        # Base-model case: compare every pair of points via column rotations.
        y_stack = target_y.squeeze(-1).expand(f_samps.shape)
        rank_loss = torch.zeros(
            f_samps.shape[0], dtype=torch.long, device=target_y.device
        )
        for offset in range(1, n):
            pred_order = roll_col(f_samps, offset) < f_samps
            true_order = roll_col(y_stack, offset) < y_stack
            rank_loss = rank_loss + (pred_order ^ true_order).sum(dim=-1)
    return rank_loss


# Define a function to:
# 1. Create a batch mode-gp LOOCV GP using the hyperparameters from `target_model`
# 2. 
Draw a joint sample across all points from the target task (in-sample and out-of-sample) def get_target_model_loocv_sample_preds(train_x, train_y, train_yvar, target_model, num_samples): """ Create a batch-mode LOOCV GP and draw a joint sample across all points from the target task. Args: train_x: `n x d` tensor of training points train_y: `n x 1` tensor of training targets target_model: fitted target model num_samples: number of mc samples to draw Return: `num_samples x n x n`-dim tensor of samples, where dim=1 represents the `n` LOO models, and dim=2 represents the `n` training points. """ batch_size = len(train_x) masks = torch.eye(len(train_x), dtype=torch.uint8, device=device).bool() train_x_cv = torch.stack([train_x[~m] for m in masks]) train_y_cv = torch.stack([train_y[~m] for m in masks]) train_yvar_cv = torch.stack([train_yvar[~m] for m in masks]) state_dict = target_model.state_dict() # expand to batch size of batch_mode LOOCV model state_dict_expanded = { name: t.expand(batch_size, *[-1 for _ in range(t.ndim)]) for name, t in state_dict.items() } model = get_fitted_model(train_x_cv, train_y_cv, train_yvar_cv, state_dict=state_dict_expanded) with torch.no_grad(): posterior = model.posterior(train_x) # Since we have a batch mode gp and model.posterior always returns an output dimension, # the output from `posterior.sample()` here `num_samples x n x n x 1`, so let's squeeze # the last dimension. sampler = SobolQMCNormalSampler(num_samples=num_samples) return sampler(posterior).squeeze(-1) def compute_rank_weights(train_x,train_y, base_models, target_model, num_samples): """ Compute ranking weights for each base model and the target model (using LOOCV for the target model). Note: This implementation does not currently address weight dilution, since we only have a small number of base models. 
Args: train_x: `n x d` tensor of training points (for target task) train_y: `n` tensor of training targets (for target task) base_models: list of base models target_model: target model num_samples: number of mc samples Returns: Tensor: `n_t`-dim tensor with the ranking weight for each model """ ranking_losses = [] # compute ranking loss for each base model for task in range(len(base_models)): model = base_models[task] # compute posterior over training points for target task posterior = model.posterior(train_x) sampler = SobolQMCNormalSampler(num_samples=num_samples) base_f_samps = sampler(posterior).squeeze(-1).squeeze(-1) # compute and save ranking loss ranking_losses.append(compute_ranking_loss(base_f_samps, train_y)) # compute ranking loss for target model using LOOCV # f_samps target_f_samps = get_target_model_loocv_sample_preds( train_x, train_y, train_yvar, target_model, num_samples, ) ranking_losses.append(compute_ranking_loss(target_f_samps, train_y)) ranking_loss_tensor = torch.stack(ranking_losses) # compute best model (minimum ranking loss) for each sample best_models = torch.argmin(ranking_loss_tensor, dim=0) # compute proportion of samples for which each model is best rank_weights = best_models.bincount(minlength=len(ranking_losses)).type_as(train_x) / num_samples return rank_weights # + from botorch.models.gpytorch import GPyTorchModel from gpytorch.models import GP from gpytorch.distributions import MultivariateNormal from gpytorch.lazy import PsdSumLazyTensor from gpytorch.likelihoods import LikelihoodList from torch.nn import ModuleList class RGPE(GP, GPyTorchModel): """ Rank-weighted GP ensemble. Note: this class inherits from GPyTorchModel which provides an interface for GPyTorch models in botorch. 
""" _num_outputs = 1 # metadata for botorch def __init__(self, models, weights): super().__init__() self.models = ModuleList(models) for m in models: if not hasattr(m, "likelihood"): raise ValueError( "RGPE currently only supports models that have a likelihood (e.g. ExactGPs)" ) self.likelihood = LikelihoodList(*[m.likelihood for m in models]) self.weights = weights self.to(weights) def forward(self, x): weighted_means = [] weighted_covars = [] # filter model with zero weights # weights on covariance matrices are weight**2 non_zero_weight_indices = (self.weights**2 > 0).nonzero() non_zero_weights = self.weights[non_zero_weight_indices] # re-normalize non_zero_weights /= non_zero_weights.sum() for non_zero_weight_idx in range(non_zero_weight_indices.shape[0]): raw_idx = non_zero_weight_indices[non_zero_weight_idx].item() model = self.models[raw_idx] posterior = model.posterior(x) # unstandardize predictions posterior_mean = posterior.mean.squeeze(-1)*model.Y_std + model.Y_mean posterior_cov = posterior.mvn.lazy_covariance_matrix * model.Y_std.pow(2) # apply weight weight = non_zero_weights[non_zero_weight_idx] weighted_means.append(weight * posterior_mean) weighted_covars.append(posterior_cov * weight**2) # set mean and covariance to be the rank-weighted sum the means and covariances of the # base models and target model mean_x = torch.stack(weighted_means).sum(dim=0) covar_x = PsdSumLazyTensor(*weighted_covars) return MultivariateNormal(mean_x, covar_x) # - # ### Optimize target function using RGPE + qNEI # + from botorch.acquisition.monte_carlo import qNoisyExpectedImprovement from botorch.sampling.samplers import SobolQMCNormalSampler from botorch.optim.optimize import optimize_acqf # suppress GPyTorch warnings about adding jitter import warnings warnings.filterwarnings("ignore", "^.*jitter.*", category=RuntimeWarning) best_rgpe_all = [] best_random_all = [] best_vanilla_nei_all = [] N_BATCH = 10 if not SMOKE_TEST else 2 NUM_POSTERIOR_SAMPLES = 256 if not 
SMOKE_TEST else 16 RANDOM_INITIALIZATION_SIZE = 3 N_TRIALS = 10 if not SMOKE_TEST else 2 MC_SAMPLES = 512 if not SMOKE_TEST else 32 N_RESTART_CANDIDATES = 512 if not SMOKE_TEST else 8 N_RESTARTS = 10 if not SMOKE_TEST else 2 Q_BATCH_SIZE = 1 # Average over multiple trials for trial in range(N_TRIALS): print(f"Trial {trial + 1} of {N_TRIALS}") best_rgpe = [] best_random = [] best_vanilla_nei = [] # Initial random observations raw_x = draw_sobol_samples(bounds=BOUNDS, n=RANDOM_INITIALIZATION_SIZE, q=1, seed=trial).squeeze(1) train_x = normalize(raw_x, bounds=BOUNDS) train_y_noiseless = f(raw_x) train_y = train_y_noiseless + noise_std*torch.randn_like(train_y_noiseless) train_yvar = torch.full_like(train_y, noise_std**2) vanilla_nei_train_x = train_x.clone() vanilla_nei_train_y = train_y.clone() vanilla_nei_train_yvar = train_yvar.clone() # keep track of the best observed point at each iteration best_value = train_y.max().item() best_rgpe.append(best_value) best_random.append(best_value) vanilla_nei_best_value = best_value best_vanilla_nei.append(vanilla_nei_best_value) # Run N_BATCH rounds of BayesOpt after the initial random batch for iteration in range(N_BATCH): target_model = get_fitted_model(train_x, train_y, train_yvar) model_list = base_model_list + [target_model] rank_weights = compute_rank_weights( train_x, train_y, base_model_list, target_model, NUM_POSTERIOR_SAMPLES, ) # create model and acquisition function rgpe_model = RGPE(model_list, rank_weights) sampler_qnei = SobolQMCNormalSampler(num_samples=MC_SAMPLES) qNEI = qNoisyExpectedImprovement( model=rgpe_model, X_baseline=train_x, sampler=sampler_qnei, ) # optimize candidate, _ = optimize_acqf( acq_function=qNEI, bounds=torch.tensor([[0.],[1.]], dtype=dtype, device=device), q=Q_BATCH_SIZE, num_restarts=N_RESTARTS, raw_samples=N_RESTART_CANDIDATES, ) # fetch the new values new_x = candidate.detach() new_y_noiseless = f(unnormalize(new_x, bounds=BOUNDS)) new_y = new_y_noiseless + 
noise_std*torch.randn_like(new_y_noiseless) new_yvar = torch.full_like(new_y, noise_std**2) # update training points train_x = torch.cat((train_x, new_x)) train_y = torch.cat((train_y, new_y)) train_yvar = torch.cat((train_yvar, new_yvar)) random_candidate = torch.rand(1, dtype=dtype, device=device) next_random_noiseless = f(unnormalize(random_candidate, bounds=BOUNDS)) next_random = next_random_noiseless + noise_std * torch.randn_like(next_random_noiseless) next_random_best = next_random.max().item() best_random.append(max(best_random[-1], next_random_best)) # get the new best observed value best_value = train_y.max().item() best_rgpe.append(best_value) # Run Vanilla NEI for comparison vanilla_nei_model = get_fitted_model( vanilla_nei_train_x, vanilla_nei_train_y, vanilla_nei_train_yvar, ) vanilla_nei_sampler = SobolQMCNormalSampler(num_samples=MC_SAMPLES) vanilla_qNEI = qNoisyExpectedImprovement( model=vanilla_nei_model, X_baseline=vanilla_nei_train_x, sampler=vanilla_nei_sampler, ) vanilla_nei_candidate, _ = optimize_acqf( acq_function=vanilla_qNEI, bounds=torch.tensor([[0.],[1.]], dtype=dtype, device=device), q=Q_BATCH_SIZE, num_restarts=N_RESTARTS, raw_samples=N_RESTART_CANDIDATES, ) # fetch the new values vanilla_nei_new_x = vanilla_nei_candidate.detach() vanilla_nei_new_y_noiseless = f(unnormalize(vanilla_nei_new_x, bounds=BOUNDS)) vanilla_nei_new_y = vanilla_nei_new_y_noiseless + noise_std*torch.randn_like(new_y_noiseless) vanilla_nei_new_yvar = torch.full_like(vanilla_nei_new_y, noise_std**2) # update training points vanilla_nei_train_x = torch.cat([vanilla_nei_train_x, vanilla_nei_new_x]) vanilla_nei_train_y = torch.cat([vanilla_nei_train_y, vanilla_nei_new_y]) vanilla_nei_train_yvar = torch.cat([vanilla_nei_train_yvar, vanilla_nei_new_yvar]) # get the new best observed value vanilla_nei_best_value = vanilla_nei_train_y.max().item() best_vanilla_nei.append(vanilla_nei_best_value) best_rgpe_all.append(best_rgpe) best_random_all.append(best_random) 
best_vanilla_nei_all.append(best_vanilla_nei) # - # #### Plot best observed value vs iteration # + import numpy as np best_rgpe_all = np.array(best_rgpe_all) best_random_all = np.array(best_random_all) best_vanilla_nei_all = np.array(best_vanilla_nei_all) x = range(RANDOM_INITIALIZATION_SIZE, RANDOM_INITIALIZATION_SIZE + N_BATCH + 1) fig, ax = plt.subplots(1, 1, figsize=(10, 6)) # Plot RGPE - NEI ax.errorbar( x, best_rgpe_all.mean(axis=0), yerr=1.96 * best_rgpe_all.std(axis=0) / math.sqrt(N_TRIALS), label="RGPE - NEI", linewidth=3, capsize=5, capthick=3, ) # Plot FixedNoiseGP - NEI ax.errorbar( x, best_vanilla_nei_all.mean(axis=0), yerr=1.96 * best_vanilla_nei_all.std(axis=0) / math.sqrt(N_TRIALS), label="FixedNoiseGP - NEI", linewidth=3, capsize=5, capthick=3, ) # Plot Random ax.errorbar( x, best_random_all.mean(axis=0), yerr= 1.96 * best_random_all.std(axis=0) / math.sqrt(N_TRIALS), label="Random", linewidth=3, capsize=5, capthick=3, ) ax.set_ylim(bottom=0) ax.set_xlabel('Iteration', fontsize=12) ax.set_ylabel('Best Observed Value', fontsize=12) ax.set_title('Best Observed Value by Iteration', fontsize=12) ax.legend(loc="lower right", fontsize=10) plt.tight_layout()
tutorials/meta_learning_with_rgpe.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt from math import sin, cos, radians from sympy import * init_printing(use_latex='mathjax') # # Дороги в симуляторе # # Дороги представлены в виде ломанных. Ломанные обеспечивают достаточную точность, нет необходимости усложнять и применять различные кривые Безье и прочее. # # Дорогу надо не просто отрендерить, но и передать в каком-то виде в систему управления автомобилем. В качестве представления неподвижных препядствий выбран OccupancyGrid, потому что это просто и хорошо подходит для статических препятствий. Непонятно, как еще можно представить произвольное окружение. Поэтому для представления дороги в виде OccupancyGrid ее надо растрировать. # # Для растрирования дороги применяется модифицированный алгоритм Брезенхэма для рисования линий с заданной толщины. Главная проблема - это соединение двух сегментов дороги под углом. # # Угловое соединение полосы многополосной дороги с шириной полосы $width$, сдвинутой от центральной полосы на $offset$. # ![](simulator_corner_multiline.png) # Дорога определяется центральной ломаной, шириной и количеством полос. На рисунке приведены два сегмента дороги, центральная линия которых определана отрезками $B_1B_2$ и $B_2B_3$ имеющих одну полосу шириной $width$, отстаящую от центральной линии на расстояние $offset$. Полоса параллельна центральной линии ($A_1A_2 \mathbin{\|} B_1 B_2 \mathbin{\|} O_1O_2$ и $A_2A_3 \mathbin{\|} B_2B_3 \mathbin{\|} O2_O3$ соответственно). # # Необходимо найти координаты точек $A_2$, $B_2$, чтобы правильно растрировать угловое соединение сегментов. Точку $A_2$ найдем как точку пересечения прямых $A_1A_2$ и $O_2A_2$, а точку $B_2$ найдем как точку пересеячения прымых $B_1B_2$ и $O_2A_2$. 
Для этого составим канонические уравнения этих прямых. # # $\vec{v}_1$, $\vec{v}_2$ - направляющие вектора двух сегментов дороги. $\vec{n}_1$, $\vec{n}_2$ - нормальные вектора к этим сегментам: # $$ # \vec{n}_1 = \vec{v}_1 \times \vec{up} \\ # \vec{n}_2 = \vec{v}_2 \times \vec{up} # $$ # $ # \begin{align*} # \text{где } & up - \text{вектор, направленный вверх (0, 0, 1).} # \end{align*} # $ # # Координаты точек $B_1$ и $A_1$: # $$ # \vec{B}_1 = \vec{O}_1 + \frac{\vec{n}_1}{\left\lVert \vec{n}_1 \right\rVert}\cdot offset \\ # \vec{A}_1 = \vec{B}_1 + \frac{\vec{n}_1}{\left\lVert \vec{n}_1 \right\rVert}\cdot width \\ # $$ # # Каноническое уравнение прямой $A_1A_2$: # $$ # \frac{x - A_{1x}}{\vec{v}_{1x}} = \frac{y - A_{1y}}{\vec{v}_{1y}} # $$ # # Каноническое уравнение прямой $B_1B_2$: # $$ # \frac{x - B_{1x}}{\vec{v}_{1x}} = \frac{y - B_{1y}}{\vec{v}_{1y}} # $$ # # Каноническое уравнение прямой $O_2A_3$: # $$ # \frac{x - O_{2x}}{\vec{n}_x} = \frac{y - O_{2y}}{\vec{n}_x} # $$ # # # Составим систему уравнений и найдем из нее координату точку $A_2$ пересечения прямых $A_1A_2$ и $O_2A_2$: # $$ # \begin{cases} # \frac{x - A_{1x}}{v_{1x}} = \frac{y - A_{1y}}{v_{1y}} \\ # \frac{x - O_{2x}}{\vec{n}_x} = \frac{y - O_{2y}}{\vec{n}_x} # \end{cases} # $$ # # Аналогично для точки $B_2$. 
# # Решим систему символьно: # + x, y = symbols('x y') nx, ny, vx, vy = symbols('nx ny vx vy') Ax, Ay, Ox, Oy = symbols('Ax Ay Ox Oy') e1 = (x - Ax)/vx - (y - Ay)/vy e2 = (x - Ox)/nx - (y - Oy)/ny res = solve([e1, e2], [x, y]) # - e1 e2 res # Для копипаста в питон: for r in res: print(res[r]) # ### Расчет стыков сегментов # + def draw_vec(P, vec, length): P1 = P + vec/np.linalg.norm(vec) * length coords = np.vstack([P, P1]) plt.plot(coords[:,0], coords[:,1]) def draw_point(P): plt.plot([P[0]], [P[1]], 'o') def draw_line(A, B): coords = np.vstack([A, B]) plt.plot(coords[:,0], coords[:,1]) def draw_line_segment(A1, B1, A2, B2): draw_line(A1, A2) draw_line(B1, B2) draw_line(A2, B2) def draw_segment(cur_points, next_points): for i in range(len(cur_points)-1): draw_line_segment(cur_points[i], cur_points[i+1], next_points[i], next_points[i+1]) def line_intersect(P1, v1, P2, v2): x = (v2[0]*(P1[0]*v1[1] - P1[1]*v1[0]) - v1[0]*(P2[0]*v2[1] - P2[1]*v2[0]))/(v2[0]*v1[1] - v2[1]*v1[0]) y = (v2[1]*(P1[0]*v1[1] - P1[1]*v1[0]) - v1[1]*(P2[0]*v2[1] - P2[1]*v2[0]))/(v2[0]*v1[1] - v2[1]*v1[0]) return np.array([x,y]) def normalize(vec): return vec/np.linalg.norm(vec) points = np.array([ [0,0], [10, 0], [10, 10], [15,15], [30, 15], [30, 0] ]) up = np.array([0,0,1]) lines_width = 1 lines_cnt = 6 plt.plot(points[:,0], points[:,1]) # Поддерживаем только дороги с четным количеством полос, половина в одну сторону, половина в другую # Расчитываем набор оффсетов для всех точек полос относительно центральной линии # Полосы начинаем считать с самой левой. 
offsets = (np.arange(-lines_cnt/2, lines_cnt/2 + 1) * lines_width).reshape((lines_cnt+1, 1)) # Расчитываем вектор нормали к начальному сегменту и координаты начальных точек # (на чертеже - A1, B1 итп) v1 = normalize(points[1] - points[0]) n1 = np.cross(up, v1)[:2] cur_points = points[0] + n1*offsets for seg_i in range(len(points)-2): # Расчет точек на изгибе v2 = normalize(points[seg_i+2] - points[seg_i+1]) n2 = np.cross(up, v2)[:2] n = (n1 + n2)/2 next_points = np.array([line_intersect(p, v1, points[seg_i+1], n) for p in cur_points]) draw_segment(cur_points, next_points) n1 = n2 v1 = v2 cur_points = next_points next_points = points[-1] + n1*offsets draw_segment(cur_points, next_points) plt.show() # -
research/sim_road_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/deterministic-algorithms-lab/Jax-Journey/blob/main/jax_basic.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="oXjPlcOX2G3I" # A notebook for this [blog](https://roberttlange.github.io/posts/2020/03/blog-post-10/) with additional notes. Implements MLP and CNN in ```JAX```. It is suggested to read that blog side-by-side. # + id="G5Q-brUn68dX" # %matplotlib inline # %config InlineBackend.figure_format = 'retina' import numpy as onp import jax.numpy as np from jax import grad, jit, vmap, value_and_grad from jax import random # Generate key which is used to generate random numbers key = random.PRNGKey(1) #A key is always an nd-array of size (2,) # + id="U1I15wsT7C_H" # Generate a random matrix x = random.uniform(key, (1000, 1000)) # Compare running times of 3 different matrix multiplications # %time y = onp.dot(x, x) # %time y = np.dot(x, x); print(y) # %time y = np.dot(x, x).block_until_ready() # + [markdown] id="-SbZO63W7_st" # The above is due to [Asyncronous dispatch](https://jax.readthedocs.io/en/latest/async_dispatch.html). # + id="i5YUeW4V7QPR" def ReLU(x): """ Rectified Linear Unit (ReLU) activation function """ return np.maximum(0, x) jit_ReLU = jit(ReLU) # + [markdown] id="uLDbzLKu9HZQ" # JIT a simple python function using numpy to make it faster. Normally, each operation has its own kernel which are dispatched to GPU, one by one. If we have a sequence of operations, we can use the ```@jit decorator / jit()``` to compile multiple operations together using XLA. 
# + colab={"base_uri": "https://localhost:8080/"} id="dm0Oo83u8TdW" outputId="4025d3d7-6a90-4a54-f951-ff7926162976" # %time out = ReLU(x).block_until_ready() # Call jitted version to compile for evaluation time! # %time jit_ReLU(x).block_until_ready() #First time call will cause compilation, and may take longer. # %time out = jit_ReLU(x).block_until_ready() # + [markdown] id="pIMKKfKd98KT" # The ```grad()``` function takes as input a function ```f``` and returns the function ``` f' ``` . This ```f'``` should be ```jit()```-ted again. # + colab={"base_uri": "https://localhost:8080/"} id="JpKDUNP38hGD" outputId="f51a44c0-51a2-4e21-bbc6-27328acec43b" def FiniteDiffGrad(x): """ Compute the finite difference derivative approx for the ReLU""" return np.array((ReLU(x + 1e-3) - ReLU(x - 1e-3)) / (2 * 1e-3)) # Compare the Jax gradient with a finite difference approximation print("Jax Grad: ", jit(grad(jit(ReLU)))(2.)) print("FD Gradient:", FiniteDiffGrad(2.)) # + [markdown] id="VlG3QboG-dRO" # **vmap** - makes batching as easy as never before. While in PyTorch one always has to be careful over which dimension you want to perform computations, vmap lets you simply write your computations for a single sample case and afterwards wrap it to make it batch compatible. 
# + id="bfS5MpeR96Jj" batch_dim = 32 feature_dim = 100 hidden_dim = 512 # Generate a batch of vectors to process X = random.normal(key, (batch_dim, feature_dim)) # Generate Gaussian weights and biases params = [random.normal(key, (hidden_dim, feature_dim)), random.normal(key, (hidden_dim, ))] def relu_layer(params, x): """ Simple ReLu layer for single sample """ return ReLU(np.dot(params[0], x) + params[1]) def batch_version_relu_layer(params, x): """ Error prone batch version """ return ReLU(np.dot(X, params[0].T) + params[1]) def vmap_relu_layer(params, x): """ vmap version of the ReLU layer """ return jit(vmap(relu_layer, in_axes=(None, 0), out_axes=0)) out = np.stack([relu_layer(params, X[i, :]) for i in range(X.shape[0])]) out = batch_version_relu_layer(params, X) out = vmap_relu_layer(params, X) # + [markdown] id="bBgq7oJIKGC5" # ```vmap``` wraps the ```relu_layer``` function and takes as an input the axis over which to batch the inputs. In our case the first input to ```relu_layer``` are the parameters which are the same for the entire batch [```(None)```]. The second input is the feature vector, ```x```. We have stacked the vectors into a matrix such that our input has dimensions ```(batch_dim, feature_dim)```. We therefore need to provide ```vmap``` with batch dimension ```(0)``` in order to properly parallelize the computations. ```out_axes``` then specifies how to stack the individual samples' outputs. In order to keep things consistent, we choose the first dimension to remain the batch dimension. 
# + [markdown] id="ZAlWJJKWMI8q" # ## MLP # + id="rTUvezvFKnOl" from jax.scipy.special import logsumexp from jax.experimental import optimizers import torch from torchvision import datasets, transforms import time # + id="zJrb8CtwMTIg" batch_size = 100 train_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=batch_size, shuffle=True) # + colab={"base_uri": "https://localhost:8080/"} id="xZeZBPooOZLH" outputId="5ac0521b-5c56-4c62-d155-07198de28c3d" print(key) split = random.split(key, 5) #Can be split into any number of parts. New keys, along new axis print(split) print(random.split(split[0])) #Can only split "keys", i.e. , nd-array of size (2,) # + [markdown] id="cqSnsYLyR72N" # Since ```JAX``` offers only a functional programming interface, we can't write classes corresponding to modules, in ```JAX``` . We must write a function for initialization, and forward pass instead. 
# + id="Qy3Pytv4NW5g" def initialize_mlp(sizes, key): """ Initialize the weights of all layers of a linear layer network """ keys = random.split(key, len(sizes)) # Initialize a single layer with Gaussian weights - helper function def initialize_layer(m, n, key, scale=1e-2): w_key, b_key = random.split(key) return scale * random.normal(w_key, (n, m)), scale * random.normal(b_key, (n,)) return [initialize_layer(m, n, k) for m, n, k in zip(sizes[:-1], sizes[1:], keys)] layer_sizes = [784, 512, 512, 10] # Return a list of tuples of layer weights params = initialize_mlp(layer_sizes, key) # + [markdown] id="MtVhCfKlSnLQ" # The forward passs functions should take as input all the parameters(```params```) of the model, and the input(```in_array```) to it. Usually, we make a dictionary of all the parameters, so that the function can access them easily. # + id="yxHYcJ8fQSoJ" def forward_pass(params, in_array): """ Compute the forward pass for each example individually. Inputs : params: List of tuples. Tuples must be as required by relu_layer. in_array: Input array as needed by relu_layer. """ activations = in_array # Loop over the ReLU hidden layers for w, b in params[:-1]: activations = relu_layer([w, b], activations) # Perform final trafo to logits final_w, final_b = params[-1] logits = np.dot(final_w, activations) + final_b #Feel free to use any jit-numpy operations in your functions, anywhere. return logits - logsumexp(logits) #Just simple softmax, it is. # Make a batched version of the `predict` function batch_forward = vmap(forward_pass, in_axes=(None, 0), out_axes=0) # + id="ZoCF9Cu2R5Fy" def one_hot(x, k, dtype=np.float32): """Create a one-hot encoding of x of size k """ return np.array(x[:, None] == np.arange(k), dtype) def loss(params, in_arrays, targets): """ Compute the multi-class cross-entropy loss. 
Inputs : params: list of model parameters as accepted by forward_pass in_arrays: input_array as accepted by forward_pass targets: jit-numpy array containing one hot targets """ preds = batch_forward(params, in_arrays) return -np.sum(preds * targets) #Cross Entropy Loss. Divide by 784 to average. def accuracy(params, data_loader): """ Compute the accuracy for a provided dataloader """ acc_total = 0 num_classes = 10 for batch_idx, (data, target) in enumerate(data_loader): images = np.array(data).reshape(data.size(0), 28*28) #Need to make PyTorch tensors, into jit-numpy arrays targets = one_hot(np.array(target), num_classes) target_class = np.argmax(targets, axis=1) predicted_class = np.argmax(batch_forward(params, images), axis=1) acc_total += np.sum(predicted_class == target_class) return acc_total/len(data_loader.dataset) # + colab={"base_uri": "https://localhost:8080/"} id="OzuAhdz6UQlN" outputId="9ec82b1d-0077-4501-f0ba-603ca56ce437" x = np.arange(3) print(x.shape) print(x[None, :].shape) print(x[:,None].shape) print(x+x[None,:]) print(x[None,:]+x[:,None]) print(x+x[:,None]) # + [markdown] id="dzzVCvqpcWqw" # ```value_and_grad(fn)``` returns a function that takes same arguments(```x```) as ```fn``` and returns both the return value(```fn(x)```) of ```fn``` and its gradient(```fn'(x)```), as a tuple. # # The optimizer below stores its data(parameters and hyperparameters) in ```opt_state``` and its functionality is defined in ```opt_update()```, ```opt_init()``` and ```get_params()``` . Notice how there is no class. It would be better to put all 4 things in a dicionary, hence. 
# + id="39Pivo8eUk9X" @jit def update(params, x, y, opt_state): """ Compute the gradient for a batch and update the parameters Inputs : params: list of model parameters as accepted by loss function (in turn by forward_pass) x: input as accepted by loss_function(in turn by forward_pass) y: jit-numpy array containing one hot targets(as required by loss function) opt_state: as required by opt_update Returns : updated parameters, current optimizer state, computed value """ value, grads = value_and_grad(loss)(params, x, y) opt_state = opt_update(0, grads, opt_state) #opt_update is a function, not a variable, hence is available in this scope, although not defined here. return get_params(opt_state), opt_state, value #The first argument to the opt_update function is the optimizer step number. # Defining an optimizer in Jax step_size = 1e-3 opt_init, opt_update, get_params = optimizers.adam(step_size) opt_state = opt_init(params) #All the updatable parameters. First opt_state needs to be obtained this way, always. num_epochs = 10 num_classes = 10 # + [markdown] id="tnZioQ7tlGzx" # Notice how in all the above code, each function tries to make sure that its input fits well with the functions that it is calling. And this leads to a hierarchical structure, in stark comparison to the step-wise structure of PyTorch code. # + colab={"base_uri": "https://localhost:8080/"} id="nY3bya5GeQBF" outputId="0e85d070-9a6b-4784-c3e4-8fe676b4197e" def run_mnist_training_loop(num_epochs, opt_state, net_type="MLP"): """ Implements a learning loop over epochs. """ # Initialize placeholder for logging log_acc_train, log_acc_test, train_loss = [], [], [] # Get the initial set of parameters params = get_params(opt_state) #Assumes all parameters are updatable. Otherwise send as argument in this function. 
# Get initial accuracy after random init train_acc = accuracy(params, train_loader) test_acc = accuracy(params, test_loader) log_acc_train.append(train_acc) log_acc_test.append(test_acc) # Loop over the training epochs for epoch in range(num_epochs): start_time = time.time() for batch_idx, (data, target) in enumerate(train_loader): if net_type == "MLP": # Flatten the image into 784-sized vectors for the MLP x = np.array(data).reshape(data.size(0), 28*28) elif net_type == "CNN": # No flattening of the input required for the CNN x = np.array(data) y = one_hot(np.array(target), num_classes) params, opt_state, loss = update(params, x, y, opt_state) train_loss.append(loss) epoch_time = time.time() - start_time train_acc = accuracy(params, train_loader) test_acc = accuracy(params, test_loader) log_acc_train.append(train_acc) log_acc_test.append(test_acc) print("Epoch {} | T: {:0.2f} | Train A: {:0.3f} | Test A: {:0.3f}".format(epoch+1, epoch_time, train_acc, test_acc)) return train_loss, log_acc_train, log_acc_test train_loss, train_log, test_log = run_mnist_training_loop(num_epochs, opt_state, net_type="MLP") # + [markdown] id="wYmvwp9RloXc" # # CNN # + id="S8h_tPpakkU7" from jax.experimental import stax from jax.experimental.stax import (BatchNorm, Conv, Dense, Flatten, Relu, LogSoftmax) # + [markdown] id="QOMdDw6hnlsy" # The ```init_fun()``` below takes the ```key``` and the shape of input as its arguments. It returns the output shape and the randomly assigned parameters. # # The ```conv_net()``` function takes ```params``` and input of the shape specified in second argument of ```init_fun()``` and returns the result of the convolution operations specified in ```stax.serial()```. Note that if it is a function that returns ```f(x)``` , you can quickly make another one to get ```f'(x)``` . # + id="STD7OZT3l1JT" init_fun, conv_net = stax.serial(Conv(32, (5, 5), (2, 2), padding="SAME"), #First argument is number of out channels, second is filter shape, third stride. 
BatchNorm(), Relu, Conv(32, (5, 5), (2, 2), padding="SAME"), BatchNorm(), Relu, Conv(10, (3, 3), (2, 2), padding="SAME"), BatchNorm(), Relu, Conv(10, (3, 3), (2, 2), padding="SAME"), Relu, Flatten, Dense(num_classes), #Only final size needs to be specified !! LogSoftmax) output_shape, params = init_fun(key, (batch_size, 1, 28, 28)) # + [markdown] id="U_zUMJ4YqFO5" # Various types of initializations can also be specified for each layer. See [here](https://jax.readthedocs.io/en/latest/_modules/jax/experimental/stax.html#serial) for default initializations of each layer. # + id="7kH0B_kXpJ2o" def accuracy(params, data_loader): """ Compute the accuracy for the CNN case (no flattening of input)""" acc_total = 0 for batch_idx, (data, target) in enumerate(data_loader): images = np.array(data) targets = one_hot(np.array(target), num_classes) target_class = np.argmax(targets, axis=1) predicted_class = np.argmax(conv_net(params, images), axis=1) acc_total += np.sum(predicted_class == target_class) return acc_total/len(data_loader.dataset) def loss(params, images, targets): preds = conv_net(params, images) return -np.sum(preds * targets) # + id="DSD2aU40w2Ut" step_size = 1e-3 opt_init, opt_update, get_params = optimizers.adam(step_size) opt_state = opt_init(params) num_epochs = 10 train_loss, train_log, test_log = run_mnist_training_loop(num_epochs, opt_state, net_type="CNN")
jax_basic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime from datetime import timedelta from random import randrange #Import Price Data from picke files avax = pd.read_pickle("./Price_Data/1HOUR/AVAX-USD.pkl") eth = pd.read_pickle("./Price_Data/1HOUR/ETH-USD.pkl") btc = pd.read_pickle("./Price_Data/1HOUR/BTC-USD.pkl") btc.head() # + #Reduce DataFrame to close Prices btc_closes = btc[['startedAt', 'close']] eth_closes = eth[['startedAt', 'close']] avax_closes = avax[['startedAt', 'close']] #Change btc_closes.loc[:, 'close'] = btc_closes['close'].astype(float) eth_closes.loc[:, 'close'] = eth_closes['close'].astype(float) avax_closes.loc[:, 'close'] = avax_closes['close'].astype(float) btc_closes.loc[:, 'returns'] = btc_closes["close"].pct_change() eth_closes.loc[:, 'returns'] = eth_closes["close"].pct_change() avax_closes.loc[:, 'returns_avax'] = avax_closes["close"].pct_change() # + result = pd.merge(btc_closes[['startedAt','returns']], eth_closes[['startedAt','returns']], on=["startedAt"], how='inner', suffixes=('_btc', '_eth')) result = pd.merge(result, avax_closes[['startedAt','returns_avax']], on=["startedAt"], how='inner', suffixes=(tuple('_avax'))) result = result.iloc[1: , :] result.index = result['startedAt'] result = result.drop(['startedAt'], axis=1) result.head() # + #Initial Variables Initial_USDC = 8000 hours = 24 iterations = 1000 #Start prices btc_start_price = btc_closes.iloc[-1]['close'] eth_start_price = eth_closes.iloc[-1]['close'] avax_start_price = avax_closes.iloc[-1]['close'] #Position Sizes btc_position = 20000/btc_start_price eth_position = -10000/eth_start_price avax_position = -10000/avax_start_price btc_maintenence_margin = 0.03 eth_maintenence_margin 
= 0.03 avax_maintenence_margin = 0.05 Total_Maintenance_Margin_Requirement = abs(btc_position * btc_start_price * btc_maintenence_margin) + \ abs(eth_position * eth_start_price * eth_maintenence_margin) +\ abs(avax_position * avax_start_price * avax_maintenence_margin) print("The total maintenace margin of this porfolio is: $", Total_Maintenance_Margin_Requirement) # + # %%time portfolio_paths = pd.DataFrame() liquidation_scenarios = pd.DataFrame() for x in range(0,iterations): #Generate Price Paths price_paths = np.full((hours, 3), float(1)) price_paths[0] = [btc_start_price, eth_start_price, avax_start_price] for t in range(1, hours): price_paths[t] = np.array(price_paths[t-1]*(1 + result.iloc[randrange(len(result))]), dtype=float) price_paths #Calculate Maintenance Margin maintenance_margin = price_paths * np.array([abs(btc_position), abs(eth_position), abs(avax_position)]) * np.array([ btc_maintenence_margin, eth_maintenence_margin, avax_maintenence_margin]) maintenance_margin = np.sum(maintenance_margin, axis=1) maintenance_margin #Calculate Total Account Value Total_Account_Value = Initial_USDC + np.sum((price_paths - price_paths[0]) * np.array([ btc_position, eth_position, avax_position]), axis=1) Total_Account_Value portfolio_paths = pd.concat([portfolio_paths, pd.DataFrame(Total_Account_Value)], axis=1) liquidation_scenarios = pd.concat([liquidation_scenarios, pd.DataFrame(Total_Account_Value > maintenance_margin)], axis=1) # + from matplotlib.pyplot import figure figure(figsize=(8, 6), dpi=80) plt.plot(portfolio_paths) plt.show() df = liquidation_scenarios.apply(pd.Series.value_counts).T print("The portfolio would have been liquidated ", df[False].count(), " times out of 1000") # + print("The average portfolio value is: ", portfolio_paths.iloc[23].mean()) print("The median portfolio value is: ", portfolio_paths.iloc[23].median()) print("The maximum portfolio value is: ", portfolio_paths.iloc[23].max()) print("The minimum portfolio value is: ", 
portfolio_paths.iloc[23].min()) VaR = np.percentile(portfolio_paths.iloc[23], 5, axis=0) ES = portfolio_paths.iloc[23][portfolio_paths.iloc[23] <= np.percentile(portfolio_paths.iloc[23], 5, axis=0)].mean() print("\nPortfolio VaR: ", VaR, "\nA VaR of ", VaR, " suggests that we are \ 95% certain that our portfolio will be greater than ", VaR, "\n in the next 24 hours") print("\nExpected Shortfall: ", ES, "\nOn the condition that the 24h loss is greater than the 5th percentile" " of the loss distribution, it is expected that \n the portfolio will be ", ES) # - np.percentile(portfolio_paths.iloc[23], 5, axis=0) sns.displot(portfolio_paths.iloc[23]) plt.axvline(x=portfolio_paths.iloc[23].median()) plt.xlabel('Portfolio Value') # # Monte Carlo Simulation from scipy.stats import multivariate_normal from scipy.stats import norm correlations = result.corr(method='kendall') correlations random_vals = multivariate_normal(cov=correlations).rvs(24) copula = norm.cdf(random_vals) print(copula.shape) copula sns.scatterplot(x=copula[:, 0], y=copula[:, 1]) # + distribution_btc = norm(result['returns_btc'].mean(), result['returns_btc'].std()) distribution_eth = norm(result['returns_eth'].mean(), result['returns_eth'].std()) distribution_avax = norm(result['returns_avax'].mean(), result['returns_avax'].std()) btc_distribution = distribution_btc.ppf(copula[:, 0]) eth_distribution = distribution_eth.ppf(copula[:, 1]) avax_distribution = distribution_avax.ppf(copula[:, 2]) # + portfolio_paths = pd.DataFrame() liquidation_scenarios = pd.DataFrame() for x in range(0,10000): random_vals = multivariate_normal(cov=correlations).rvs(24) copula = norm.cdf(random_vals) btc_distribution = distribution_btc.ppf(copula[:, 0]) eth_distribution = distribution_eth.ppf(copula[:, 1]) avax_distribution = distribution_avax.ppf(copula[:, 2]) pct_paths = np.stack((btc_distribution, eth_distribution, avax_distribution), axis=1) price_paths = np.full((hours, 3), float(1)) price_paths[0] = [btc_start_price, 
eth_start_price, avax_start_price] for t in range(1, hours): price_paths[t] = np.array(price_paths[t-1]*(1 + pct_paths[t-1]), dtype=float) price_paths #Calculate Maintenance Margin maintenance_margin = price_paths * np.array([ abs(btc_position), abs(eth_position), abs(avax_position)]) * np.array([btc_maintenence_margin, eth_maintenence_margin, avax_maintenence_margin]) maintenance_margin = np.sum(maintenance_margin, axis=1) maintenance_margin #Calculate Total Account Value Total_Account_Value = Initial_USDC + np.sum((price_paths - price_paths[0]) * np.array([btc_position, eth_position, avax_position]), axis=1) Total_Account_Value portfolio_paths = pd.concat([portfolio_paths, pd.DataFrame(Total_Account_Value)], axis=1) liquidation_scenarios = pd.concat([liquidation_scenarios, pd.DataFrame(Total_Account_Value > maintenance_margin)], axis=1) # + from matplotlib.pyplot import figure figure(figsize=(8, 6), dpi=80) plt.plot(portfolio_paths) plt.show() df = liquidation_scenarios.apply(pd.Series.value_counts).T print("The portfolio would have been liquidated ", df[False].count(), " times out of 10000")
Monte Carlo DyDx.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import json df = pd.read_csv('./data/rev_ref.csv', encoding='utf-8') df = df.set_index('nid') cite_dict = df.to_dict('index') for key in cite_dict: print(key, cite_dict[key]) break # with open('./data/rev_ref.json', 'r') as f: # cite_dict = json.load(f, encoding='utf-8') # for idx, iid in enumerate(cite_dict): # if type(cite_dict[iid]['cited']) == str: # cite_dict[iid]['cited'] = json.loads(cite_dict[iid]['cited']) # with open('./data/rev_ref.json', 'w') as f: # json.dump(cite_dict, f, ensure_ascii=False) with open('./data/paper_author_coef.json', 'r') as f: paper_author_coef = json.load(f, encoding='utf-8') total_count = len(cite_dict) for key in cite_dict: cite_dict[key]['rank_score'] = 1/float(total_count) len(cite_dict) def paper_rank2(dic): total_count = len(dic) for _ in range(3): for idx, p in enumerate(dic): print(idx) if idx % 1000000 == 0 else None rank_sum = 0 # print(dic[p]) cited_list = json.loads(dic[p]['cited']) if cited_list: # print(cited_list) for c in cited_list: # print(c) c_row = dic[c] cite_cnt = c_row['n_ref'] rank_sum += (1/float(cite_cnt)) * c_row['rank_score'] dic[p]['rank_score'] = 0.45*(1/float(total_count)/float(paper_author_coef[p])) + 0.55*rank_sum # print(dic[p]['rank_score']) paper_rank2(cite_dict) with open('./data/pagerank_3_2.json', 'w') as f: json.dump(cite_dict, f, ensure_ascii=False) for idx, author in enumerate(author_article): for iid in author_article[author]['id']: author_article[author]['rank_score'].append(cite_dict[iid]['rank_score']) next(iter(author_article.values())) with open('./data/author_score_3.json', 'w') as f: json.dump(author_article, f, ensure_ascii=False) cite_score = pd.DataFrame.from_dict(cite_dict, orient='index') 
cite_score.rename(columns=dict(zip(cite_score.columns[[0,1,2]], ['cited', 'n_ref', 'rank_score'])),inplace=True) cite_score.head() # ### id-author coef with open('./data/mag_list_noid.json', 'r') as f: mag_list = json.load(f, encoding='utf-8') author_article = {} for idx, row in enumerate(mag_list): al = json.loads(row['authors']) for order, author in enumerate(al): if author not in author_article: author_article[author] = {'id': [], 'order': [], 'rank_score': []} author_article[author]['id'].append(idx) author_article[author]['order'].append(order) len(author_article) next(iter(author_article.values())) with open('./data/author_article.json', 'w') as f: json.dump(author_article, f, ensure_ascii=False) paper_author_coef = [] for idx, row in enumerate(mag_list): al = json.loads(row['authors']) coef = 0 for order, author in enumerate(al): # print(len(author_article[author]['id'])) coef += len(author_article[author]['id']) * (2**-order) paper_author_coef.append(coef) len(paper_author_coef) paper_author_coef[0] with open('./data/paper_author_coef.json', 'w') as f: json.dump(paper_author_coef, f, ensure_ascii=False)
pagerank.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Lab 1: Introduction to curve fitting # # In this exercise, we will explore fitting climate data using a simple linear least-squares model. The dataset consists of daily temperature records at Hubbard Brook, New Hampshire, extending back more than 60 years. Our goal is to analyze the data for any signals of long term climate trends. The exercise is divided into two components. In the first component, you will divide into groups of 2 to 4, and work together through the coding problems below. In this section, you will set up and "invert" for your best fitting model using linear least squares. The second component is meant to be completed individually as homework, and will test your ability to analyze and interpret the results. # # All answers must be provided in this notebook (either as code block or in Markdown) in the boxes provided below each question. To turn in the assignment, save the completed notebook and send in the .ipynb file (do not export to pdf!) so that all the code can be run. The lab is worth a total of 100 points (70 for part 1, 30 for part 2), with a few possible bonus points. # # ### Review of linear least-squares regression # $\color{green}{\text{*Note, this section is meant to review the material covered in the recorded lecture.}}$ # # One of the simplest approaches to curve fitting is linear least-squares regression. In this method, it is assumed that your data $\mathbf{d}$ can be modeled by a linear combination of some unknown model parameters $\mathbf{m}$. The most straightforward example of this is fitting a straight line to scattered data points.
Mathematicaly, this can be written # # $d_{i} = m_{1} + m_{2}x_{i}$ # # where $d_{i}$ is the value of the $i^{th}$ data point, $x_{i}$ is the value of the independent variable $x$ at index $i$, and $m_{1}$ and $m_{2}$ are your unknown "model parameters", which in this case represent the y-intercept and the slope of you best fitting line, respectively. This equation represents a matrix-vector system, which can be solved for the unknown model parameters. The individual elements of the matrix-vector system are shown below # # $\begin{bmatrix} d_1 \\ d_2 \\ d_3 \\ \vdots \\ d_N \end{bmatrix} = \begin{bmatrix} 1 & x_1 \\ 1 & x_2 \\ 1 & x_3 \\ \vdots & \vdots \\ 1 & x_N \end{bmatrix} \begin{bmatrix} m_1 \\ m_2 \end{bmatrix} $ # # or, rewriting the data vector as $\mathbf{d}$, the data 'kernel' (also referred to as the sensitivity matrix) as $\mathbf{G}$, and the model vector as $\mathbf{m}$: # # $\mathbf{d} = \mathbf{G} \mathbf{m} $ # # This equation is the foundation of linear inverse theory. It can be solved in a variety of ways, but here we will focus the "least-squares" solution, which is one of the most common. The least-squares solution is the solution that minimizes the sum of the squared residuals (i.e., the misfit between your model prediction and your data). Mathematically, the least squares solution to the matrix vector system above is given as # # $\mathbf{m}^{lsq} = \left[ \mathbf{G}^T \mathbf{G} \right] ^{-1} \mathbf{G}^{T}\mathbf{d}$ # # where $\mathbf{m}^{lsq}$ is the solution vector, and the superscripts $^{T}$ and $^{-1}$ indicate the matrix transpose and inverse, respectively. In this exercise, we will perform these matrix operations using the python library numpy. # # # ### Extra resources # [Essence of Linear Alebgra, by ThreeBlueOneBrown](https://www.youtube.com/playlist?list=PLZHQObOWTQDPD3MizzM2xVFitgF8hE_ab): Excellent visual overview of basic matrix and vector operations. 
# # [pythoncheatsheet.org](https://www.pythoncheatsheet.org/): Handy resource for an overview of the python language. # # [matplotlib users guide](https://matplotlib.org/stable/users/index.html): Official documentation of matplotlib, including many examples. # ## Part 1, group exercise: Modeling 60+ years of climate data from the Hubbard Brook Experimental Forest # # The Hubbard Brook Experimental Forest in the White Mountains of New Hampshire is a unique "natural laboratory" that has been used for ecological research since 1955. Starting shortly after its inception, continous records of many types of data have been made, including observations of temperature, hydrology (e.g., stream flow), soil geochemistry, and even plant and animal species inventories. This data provides an exceptional opportunity to study how the environment at a single location on Earth has changed in recent history, and can help inform us about how the planet responds to a changing climate. # # Below, we will explore some of the Hubbard Brook time series data. In particular, we will analyze continuous temperature data that has been collected since 1957, and use this data to construct a simple model of how the temperature has changed during the time of this experiment. # ### Import statements # This coding exercise will utilze three commonly used python libraries; pandas, numpy, and matplotlib. In order to use them in your code you first need to 'import' them with the following statements. import pandas as pd #toolbox for reading and manipulating spreadsheet data import numpy as np #library for basic numerical operations import matplotlib.pyplot as plt #plotting toolkit # ### Loading the data with pandas # Pandas is a powerful python toolbox for reading, writing, and manipulating data in a variety of formats. Here, the data we will use is openly available through the [Hubbard Brook Data Catalog](https://hubbardbrook.org/d/hubbard-brook-data-catalog). 
We can access the data in "comma-separated value" format (.csv) directly through pandas. Below, a code segment is provided to read the data from the online catalog into a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) object. The structure of this object basically consists of serveral named columns of data, which include the date ("date"), the name of the temperature recording station ("STA"), as well as the minimum, maximum, and average temperature recorded ("MIN", "MAX", and "AVE"). As you will see, the dataset consists of daily temperature records starting on January 1st, 1957, measured at several stations. # # $\color{green}{\text{*Note, you are not expected to be an expert on pandas data structures. Here, all code segments required to download and extract the data are provided.}}$ url='https://pasta.lternet.edu/package/data/eml/knb-lter-hbr/59/9/9723086870f14b48409869f6c06d6aa8' dset = pd.read_csv(url) #read data into a variable we will call "dset". The "type" of dset is a pandas DataFrame. print(dset.head()) #Show the first serveral lines of the data. # Now that we have read in the data, and have an idea of how it's organized, we will select a subset of the data to analyze. In particular, we will work with the daily average temperature at station "STA1" from January 1, 1957, until December 31, 2019. The code block below will select the average temperate data from STA1 and assign it to variable d. Additionally, the date strings for this time period will be assigned to the variable "dates". Note the "sampling rate" is 1 sample per day. 
# Extract the STA1 subset of the record. Note: d and dates are pandas Series
# that keep the row labels of the full dataframe, so positional access below
# must go through .iloc.
indices = np.where(dset['STA'] == 'STA1')[0] #find the indices of the data that correspond to station "STA1"

d = dset.AVE[indices] #Average temperature data at STA1
dates = dset.date[indices] #date strings

# ### Plotting the data with matplotlib
#
# ### <font color='red'>Question 1 (10 points) </font>
# #### Using matplotlib, write a block of code to plot the temperature time series at station STA1. Make sure to label the axes properly, with day number (starting with 0) on the x-axis, and temperature (in degrees C) on the y-axis.
#
# #### Bonus (3 pts): Instead of plotting the x-axis in terms of number of days since the beginning of the experiment, use the date strings that give the day in YYYY-MM-DD. Choose a reasonable increment for the labeling (e.g., every 10 years). *Hint*, you need to change the [x-ticks](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.xticks.html) attribute.

fig = plt.figure(figsize=[10,5])
# Day number of each sample: 0, 1, ..., N-1 with an exact 1-day step.
# (np.linspace(0, len(d), len(d)) would give a step of N/(N-1) days and end
# one day past the record -- an off-by-one in the time axis.)
t = np.arange(len(d))
plt.plot(t,d,c='k',linewidth=0.25)
plt.xlabel('days since 01-01-1957',fontsize=14)
plt.ylabel('temp. (deg C)',fontsize=14)
plt.xlim([0,np.max(t)])

# ### <font color='red'>Question 2 (10 points) </font>
# #### What are the minimum and maximum temperatures recorded, and on what dates did they occur?

i_min = np.argmin(d) #positional index of the coldest day
i_max = np.argmax(d) #positional index of the warmest day
# argmin/argmax return *positions*, so index the Series with .iloc; label-based
# d[i_min] would be wrong (or raise a KeyError) whenever the STA1 rows are not
# the first rows of the file.
print("The minimum temperature is {} C, and was recorded on {}".format(d.iloc[i_min],dates.iloc[i_min]))
print("The maximum temperature is {} C, and was recorded on {}".format(d.iloc[i_max],dates.iloc[i_max]))

# ### Modeling the data
#
# Our challenge is to create a simple model of this data that can separate the yearly periodic temperature fluctuations from the long term trend (if it is apparent).
The simplest model that could represent both the yearly temperature variation and the long term trend is # # $d_{i} = m_{1} + m_{2}t_{i} +m_3\cos(2 \pi t_{i}/P) + m_4\sin(2 \pi t_{i}/P) $ # # where $d_{i}$ and $t_{i}$ are the the data and time (in days) at index $i$, respsectively, $P$ is the period (in our case 365.25 days), and $m_{1-4}$ are the model 'parameters' (i.e., the coefficients that we are solving for). This model represents the superposition of a harmonic function with a period of 1 year (to represent the annual seasonal temperature change), and a straight line (to represent the long term trend). # ### <font color='red'>Question 3 (10 points) </font> # #### We will set up our least squares inversion by solving the equation G$\textbf{m}$ = $\textbf{d}$. Remember, $\textbf{d}$ is the vector that contains your temperature data, and $\textbf{m}$ is the vector that contains your model parameters. # # #### i) What are the dimensions of G (given as rows x columns)? Hint, think about what the dimenions of $\textbf{m}$ and $\textbf{d}$ are. (5 pts) # # #### ii) Are all of the rows of G the same? Explain why or why not. (5 pts) # # + #Answer Question 3 here. N = len(d) #number of data points M = 4 # number of model parameters print("i) The dimensions of the G matrix are {} x {}".format(N,M)) print("ii) The rows are not the same because the values of each column vary with the indepedent variable, t.") # - # ### <font color='red'>Question 4 (20 points) </font> # #### Build the G matrix. # # ##### Think carefully about which each of the columns represents (e.g., think about what the functional form of each column should look like). Hint, first initialize G as an empty matrix (e.g., with np.zeros) with size N x M, where N is the number of data points, and M is the number of model parameters. Next, fill in each column based on the equation of our model. # # #### Bonus (3 pts). Use matplotlib's "imshow" to visualize the G matrix. 
Make sure to adjust the colorscale so that the structure of the matrix is apparent.

# Build the N x 4 design (sensitivity) matrix for d = G m.
P = 365.25 # period of the seasonal cycle, in days (one year)

G = np.zeros((N,M))
G[:,0] = 1                    # column 1: intercept term (m1)
G[:,1] = t                    # column 2: linear trend term (m2), in days
G[:,2] = np.cos(2.*np.pi*t/P) # column 3: annual cosine term (m3)
G[:,3] = np.sin(2.*np.pi*t/P) # column 4: annual sine term (m4)

fig = plt.figure(figsize=[6,6])
# Saturate the color scale at +/-2 so the O(1) intercept and harmonic columns
# stay visible next to the trend column, whose values grow to ~len(t).
plt.imshow(G,aspect='auto',vmin=-2,vmax=2,cmap='magma')
#plt.imshow(G,aspect='auto',vmin=0,vmax=np.max(t),cmap='magma')
#plt.imshow(G,aspect='auto',vmin=0,vmax=1000)
plt.xticks([0,1,2,3])
plt.xlabel('col',fontsize=12)
plt.ylabel('row',fontsize=12)
plt.title('G matrix')
# Show only the first 1000 rows (~3 years) so the annual oscillation in
# columns 3-4 is actually resolvable on screen.
plt.ylim([0,1000])
plt.colorbar()

# ### <font color='red'>Question 5 (10 points) </font>
# #### i) Find the least squares solution using [numpy.linalg.lstsq](https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html). This function takes the G matrix and the data vector as arguments, and will calculate the least squares solution. As an output, it returns a list of items. The first item in the list is an array of the best fitting model parameters (in this case it will be an array containing $m_1, m_2, m_3$ and $m_4$). (5 pts)
#
# #### ii) What is the value of the slope of the linear trend? *Think about the units!* (5 pts)

# lstsq returns (solution, residuals, rank, singular values); we only need the
# solution vector. rcond=None selects the current numpy default for the
# singular-value cutoff.
sol = np.linalg.lstsq(G,d,rcond=None)
m_est = sol[0]   # best-fit parameters [m1, m2, m3, m4]
slope = m_est[1] # linear-trend coefficient, in deg C per day

print("m_est = {}".format(m_est))
print('The slope of the linear trend is {} C/day'.format(slope))
# +
# Synthetic (predicted) data from the best-fit model: d_pre = G m_lsq.
d_pre = np.dot(G,m_est)

fig = plt.figure(figsize=[15,10])
plt.plot(t,d,c='black',linewidth=0.25,label='data')
plt.plot(t,d_pre,c='C0',label='model',linewidth=4)
plt.legend(loc='lower left')
plt.xlabel('days since 01-01-1957',fontsize=14)
plt.xlim([0,np.max(t)])
# -

# ## Part 2, individual exercise: Interpretation of the results

# ### <font color='red'>Question 7 (10 pts) </font>
# #### i) Based on the slope of the linear trend you found, approximately how much has Hubbard Brook warmed or cooled on average between 1957 and 2020? (5 pts)
#
# #### ii) If the trend continues how much warmer / colder will Hubbard Brook be in 2050? (5 pts)

# +
#part 1
# Total change over the record = (deg C / day) * (number of days in the record).
delta_temp = slope*N
print('i) based on our linear trend of {:3.2e} C/day, Hubbard Brook warmed by {:2.2f} C between 1957 and 2020'.format(slope,delta_temp))

#part 2
# Extrapolate the same daily rate from the present (2021) out to 2050.
ndays = (2050 - 2021) * 365.25
delta_temp = slope*ndays
print('ii) if the warming continues, it will be {:2.2f} C hotter in 2050'.format(delta_temp))
# -

# ### <font color='red'>Question 8 (20 pts) </font>
#
# #### Often, we would like some statistical measure of how well our model is fitting the data. This is usually done by analyzing the "error" ($\epsilon$) of your model, which can be thought of as the difference between the model predictions and the data (i.e., $\epsilon = \mathbf{d} - \mathbf{d_{pre}}$ ). One common measure of the error is the root-mean-square error (RMSE) which is given as
#
# $ \mathrm{RMSE} = \sqrt{\overline{\left(\mathbf{d} - \mathbf{d_{pred}}\right)^2}} $
#
# #### i) Calculate the RMSE of your best fitting model (10 pts)
#
# #### ii) Calculate a new linear least squares solution, but excluding the linear trend term. i.e., your model will be described as
#
# $d_{i} = m_1\cos(2 \pi t_{i}/P) + m_2\sin(2 \pi t_{i}/P) $
#
# #### How does the error compare to the previous best fitting model? What does this tell you about the importance of the linear trend?
(10 pts) rms_err = np.sqrt(np.mean((d-d_pre)**2)) print('i) the RMSE of the best fitting model is {:2.2f} C'.format(rms_err)) P = 365.25 G_new = np.zeros((N,2)) G_new[:,0] = np.cos(2.*np.pi*t/P) G_new[:,1] = np.sin(2.*np.pi*t/P) sol_new = np.linalg.lstsq(G_new,d,rcond=None) m_est_new = sol_new[0] d_pre_new = np.dot(G_new,m_est_new) rms_err_new = np.sqrt(np.mean((d-d_pre_new)**2)) print('ii) the RMSE of the model with no linear trend is {:2.2f} C, which shows that \ including the warming trend fits the data better'.format(rms_err_new))
teaching_demo/Hubbard_Brook_Climate_KEY.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline df = pd.read_csv('../data/Credit_Card_Applications.csv') df.head() X, y = df.iloc[:,:-1].to_numpy(), df.iloc[:,-1].to_numpy() from sklearn.preprocessing import MinMaxScaler sc = MinMaxScaler() X = sc.fit_transform(X) from minisom import MiniSom som = MiniSom(x=10, y=10, input_len=X.shape[1], random_seed=42) som.random_weights_init(X) som.train_random(X, 100) import seaborn as sns plt.figure(figsize=(12,8)) sns.heatmap(som.distance_map().T, annot=True, cmap='rainbow'); # + plt.figure(figsize=(12,8)) sns.heatmap(som.distance_map().T, cmap='gray'); markers = ['o', 'x'] colors = ['r', 'g'] for i, x in enumerate(X): w = som.winner(x) plt.plot(w[0] + 0.5, w[1] + 0.5, markers[y[i]], markeredgecolor=colors[y[i]], markerfacecolor='None', markersize=10, markeredgewidth=2 ) # - mappings = som.win_map(X) frauds = mappings[(5,6)] frauds = sc.inverse_transform(frauds) print('Customer IDs:\n', frauds[:,0])
notebooks/self_organizing_map.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <center> # <img src="https://tensorflowkorea.files.wordpress.com/2020/12/4.-e18492e185a9e186abe1848ce185a1-e18480e185a9e186bce18487e185aee18492e185a1e18482e185b3e186ab-e18486e185a5e18489e185b5e186abe18485e185a5e18482e185b5e186bce18483e185b5e186b8e18485e185a5e.png?w=972" width="250" height="250"><br> # </center> # # # - (https://bit.ly/hg-5-1) # - (https://bit.ly/hg-5-2) # - (https://bit.ly/hg-5-3) # # # # # Chapter05. 트리 알고리즘 (화이트 와인을 찾아라!) # # - 학습목표 # - 성능이 좋고 이해하기 쉬운 트리 알고리즘에 대해 배운다. # - 알고리즘의 성능을 최대화하기 위한 하이퍼파라미터 튜닝을 실습한다. # - 여러 트리를 합쳐 일반화 성능을 높일 수 있는 앙상블 모델을 배운다. # # # ## 05-1 결정트리 # # - 핵심키워드 # - 결정 트리 # - 불순도 # - 정보 이득 # - 가지치기 # - 특성 중요도 # # - 결정 트리 알고리즘을 사용해 새로운 분류 문제를 다루어 봅니다. 결정 트리가 머신러닝 문제를 어떻게 해결하는지 이해한다. # # #### 문제 (화이트 와인을 찾아라!) # # - 캔에 인쇄된 알코올 도수,당도,PH 값으로 와인 종류를 구별할 수 있는 방법이 있을까? # - 알코올 도수, 당도, PH 값에 로지스틱 회귀 모델을 적용할 계획을 세운다. # # ### 로지스틱 회귀로 와인 분류하기 # # 6,497개의 와인 샘플 데이터를 받았다. 이 데이터셋을 불러와 보자. 4장에서 처럼 판다스를 사용해 인터넷에서 직접 불러오자. # - https://bit.ly/wine_csv_data (와인 데이터셋의 출처는 캐글의 Red wine Quality) import pandas as pd wine = pd.read_csv('https://bit.ly/wine_csv_data') # 와인 데이터셋을 판다스 데이터프레임으로 제대로 읽어 들였는지 head() 메서드로 처음 5개의 샘플을 확인해 보자. wine.head() # 처음 3개의 열 (alcohol, sugar, PH)은 각각 올코올 도수, 당도, PH 값을나타낸다. 네 번째 열(class)은 타깃값으로 0이면 레드 와인, 1이면 화이트 화인이라고 한다.레드 와인과 화이트 와인을 구분하는 이진 분류 문제이고, 화이트 와인이 양성 클래스이다. 즉 전체 화인 데이터에서 화이트 와인을 골라내는 문제이다. # # 로지스틱 회귀 모델을 바로 훈련하기 전에 판다스 데이터프레임의 유용한 메서드 2개를 먼저 알아보자. # - 먼저 info() 메서드이다. 이 메서드는 데이터프레임의 각 열의 데이터 타입과 누락된 데이터가 있는지 확인하는데 유용하다. wine.info() # - 출력 결과를 보면 총 6,497개의 샘플이 있고, 4개의 열은 모두 실숫값이다. Non-Null Count가 모두 6497이므로 누락된 값은 없는 것 같다. # # - 누락된 값이 있으면 어떻게 하나요? # - 누락된 값이 있다면 그 데이터를 버리거나 평균값으로 채운 후 사용할 수 있다. 어떤 방식이 최선인지는 미리 알기 어렵다. 두 가지 모두 시도해 보자. 여기에서도 항상 훈련 세트의 통계값으로 테스트 세트를 변환한다는 것을 잊지 말자. 
즉 훈련 세트의 평균값으로 테스트 세트의 누락된 값을 채워야 한다. # # 다음에 알아볼 메서드는 describe()이다. 이 메서드는 열에 대한 간략한 통계를 출력해 준다. 최소, 최대, 평균값 등을 볼 수 있다. 이 메서드를 호출해 보겠다. wine.describe() # - 사분위수는 데이터를 순서대로 4등분 한 값이다. 예를 들어 2사분위수(중간값)는 데이터를 일렬로 늘어놓았을 때 정중앙의 값이다. 만약 데이터 개수가 짝수개라 중앙값을 선택할 수 없다면 가운데 2개 값의 평균을 사용한다. # # 여기서 알 수 있는 것은 알코올 도수와 당도, PH 값의 스케일이 다르다는 것이다. 이전에 했던 것처럼 사이킷런의 StandardScaler 클래스를 사용해 특성을 표준화해야겠다. 그 전에 먼저 판다스 데이터프레임을 넘파이 배열로 바꾸고 훈련 세트와 테스트 세트로 나누자. data = wine[['alcohol', 'sugar', 'pH' ]].to_numpy() target = wine['class'].to_numpy() # wine 데이터프레임에서 처음 3개의 열을 넘파이 배열로 바꿔서 data 배열에 저장하고 마지막 class열을 넘파이 배열로 바꿔서 target 배열에 저장했다. 이제 훈련 세트와 테스트 세트로 나누어 보자. # + from sklearn.model_selection import train_test_split train_input, test_input, train_target, test_target = train_test_split(data, target, test_size=0.2, random_state=42) # - # - 실습과 결괏값이 같도록 random_state=42 로 설정 했다. # train_test_split() 함수는 설정값을 지정하지 않으면 25%를 테스트 세트로 지정한다. 샘플 개수가 충분히 많으므로 20% 정도만 테스트 세트로 나눴습니다. 코드의 test_size=0.2가 이런 의미이다. 만들어진 훈련 세트와 테스트 세트의 크기를 확인해 보자. print(train_input.shape, test_input.shape) # 훈련세트는 5,197개이고 테스트 세트는 1,300개이다. 좋다. 이제 StandardScaler 클래스를 사용해 훈련 세트를 전처리해 보자. 그다음 같은 객체를 그대로 사용해 테스트 세트를 변환하겠다. from sklearn.preprocessing import StandardScaler ss = StandardScaler() ss.fit(train_input) train_scaled = ss.transform(train_input) test_scaled = ss.transform(test_input) # 모든 준비가 끝났다. 이제 표준점수로 변환된 train_scaled 와 test_scaled를 사용해 로지스틱 회귀 모델을 훈련하자. # + from sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr.fit(train_scaled, train_target) print(lr.score(train_scaled, train_target)) print(lr.score(test_scaled, test_target)) # - # 음, 점수가 높지않다. 생각보다 화이트 와인을 골라내는게 어렵나 보다. 훈련 세트와 테스트 세트의 점수가 모두 낮으니 모델이 다소 과소적합된것 같다. 이 문제를 해결하기 위해 규제 매개변수 C의 값을 바꿔 볼까? 아니면 solver 매개변수에 다른 알고리즘을 선택할 수도 있다. 또는 다항 특성을 만들어 추가 할 수도 있다. # # > 설명하기 쉬운 모델과 어려운 모델 # # 제출한 보고서를 만들려고한다. 이 모델을 설명하기 위해 로지스틱 회귀가 학습한 계수와 절편을 출력해보자. 
# print(lr.coef_, lr.intercept_) # > 결정 트리 # # **결정 트리 Decision Tree** 모델이 "이유를 설명하기 쉽다"라고 알려주었다. 생각해 보니 언뜻 책에서 본 것도 같다. 결정 트리 모델은 스무고개와 같다. 질문을 하나씩 던져서 정답과 맞춰가는 것이다. # # 데이터를 잘 나눌 수 있는 질문을 찾는다면 계속 질문을 추가해서 분류 정확도를 높일 수 있다. 이미 예상했겠지만 사이킷런이 결정 트리 알고리즘을 제공한다. 사이킷런의 DecisionTreeClassfier 클래스를 사용해 결정 트리 모델을 훈련해 보자. 새로운 클래스이지만 사용법은 이전과 동일하다. fit() 메서드를 호출해서 모델을 훈련한 다음 score() 메서드로 정확도르 평가해 보자. # # # - 결정 트리 모델을 만들 때 왜 random_state 를 지정 하나요? # - 사이킷런의 결정 트리 알고리즘은 노드에서 최적의 분할으 찾기 전에 특성의 순서를 섞는다. 따라서 약간의 무작위성이 주입되는데 실행할 때마다 점수가 조금씩 달라질 수 있기 때문이다. 여기에서는 독자들이 실습한 결과와 책의 내용이 같도록 유지하기 위해 random_state를 지정하지만, 실전에서는 필요하지 않다. # + from sklearn.tree import DecisionTreeClassifier dt = DecisionTreeClassifier(random_state=42) dt.fit(train_scaled, train_target) print(dt.score(train_scaled, train_target)) # 훈련 세트 print(dt.score(test_scaled, test_target)) # 테스트 훈련 # - # 와우! 훈련 세트에 대한 점수가 엄청 높다. 거의 모두 맞춘 것 같다. 테스트 세트의 성능은 그에 비해 조금 낮다. 과대적합된 모델이라고 볼 수 있겠다. 그런데 이 모델을 그림으로 어떻게 표현할 수 있을까? 친절하게도 사이킷런은 plot_tree() 함수를 사용해 결정 트리를 이해하기 쉬운 트리 그림으로 출력해 준다. 위에서 만든 결정 트리 모델 객체를 plot_tree() 함수에 전달해서 어떤 트리가 만들어졌는지 그려보자. # + import matplotlib.pyplot as plt from sklearn.tree import plot_tree plt.figure(figsize=(10,7)) plot_tree(dt) plt.show() # - # 엄청난 트리가 만들어졌다. 수양버들 나뭇잎처럼 늘어졌다. 진짜 나무는 밑에서부터 하늘 위로 자라지만, 결정 트리는 위에서부터 아래로 거꾸로 자라난다. 맨 위의 노드node를 루트 노드 root node라 부르고 맨 아래 끝에 달린 노드를 리프 노드 leaf node 라고 한다. # # - 노드가 무엇인가? # - 노드는 결정 트리를 구성하는 핵심 요소이다. 노드는 훈련 데이터의 특성에 대한 테스트를 표현한다. 예를 들어 현재 샘플의 당도가 -0.239보다 작거나 같은지 테스트 한다. 가지(branch)는 테스트의 결과(True, False)를 나타내며 일반적으로 하나의 노드는 2개의 가지를 가진다. # # # 너무 복잡하니 plot_tree() 함수에서 트리의 깊이를 제한해서 출력해 보자. max_depth 매개변수를 1로 주면 루트 노드를 제외하고 하나의 노드를 더 확장하여 그린다. 또 filled 매개변수에서 클래스에 맞게 노드의 색을 칠할 수 있다. feature_names 매개변수에는 특성의 이름을 전달 할 수 있다. 이렇게 하면 노드가 어떤 특성으로 나뉘는지 좀더 잘 이해할 수 있다. 한번 이렇게 그려보자. plt.figure(figsize=(10,7)) plot_tree(dt, max_depth=1, filled=True, feature_names=['alcohol', 'sugar', 'pH']) plt.show() # 오, 훨씬 낫다! 이 그림을 읽는 방법을 알아보겠다. 기본적으로 그림이 담고 있는 정보는 다음과 같다. 
루트 노드는 당도(sugar)가 -0.239 이하인지 질문을 한다. 만약 어떤 샘플의 당도가 -.0239와 같거나 작으면 왼쪽 가지로 간다. 그렇지 않으면 오른쪽 가지로 이동한다. 즉 왼쪽이 Yes, 오른쪽이 No이다. 루트 노드의 총 샘플 수 (samples)는 5,197개 이다. 이 중에서 음성 클래스(레드와인)는 1,258개이고, 양성 클래스(화이트 와인)는 3,939개 이다. 이 값이 value에 나타나 있다. # # 이어서 왼쪽 노드를 살펴보겠다. 이 노드는 당도가 더 낮은지를 물어본다. 당도가 -0.802와 같거나 낮다면 다시 왼쪽 가지로, 그렇지 않으면 오른쪽 가지로 이동한다. 이 노드에서 음성 클래스와 양성 클래스의 샘플 개수는 각각 1,177개와 1,745개 이다. 루트 노드보다 양성 클래스, 즉 화이트 와인의 비율이 크게 줄었다. 그 이유는 오른쪽 노드를 보면 알 수 있다. # # 오른쪽 노드는 음성 클래스가 81개, 양성 클래스가 2,194개로 대부분의 화이트 와인 샘플이 이 노드로 이동했다. 노드의 바탕 색깔을 유심히 보자. 루트 노드보다 이 노드가 더 진하고, 왼쪽 노드는 더 연해지지 않았나? plot_tree() 함수에서 filled=True로 지정하면 클래스 마다 색깔을 부여하고, 어떤 클래스의 비율이 높아지면 점점 진한 색으로 표시한다. 아주 직관적이다. # # 결정 트리에서 예측하는 방법은 간단하다. 리프 노드에서 가장 많은 클래스가 예측 클래스가 된다. 앞에서 보았던 k-최근접 이웃과 매우 비슷해보인다. 만약 이 결정 트리의 성장을 여기서 멈춘다면 왼쪽 노드에 도달한 샘플과 오른쪽 노드에 도달한 샘플은 모두 양성 클래스로 예측된다. 두 노드 모두 양성 클래스의 개수가 많기 때문이다. # # - 만약 결정 트리를 회귀 문제에 적용하려면 리프 노드에 도달한 샘플의 타깃을 평균하여 예측값으로 사용한다. 사이킷런의 결정 트리 회귀 모델은 DecisionTreeRegressor 이다. # # 그런데 노드 상자 안에 gini라는 것이 있다. 이것이 무엇인지 좀 더 자세히 알아보자. # # > 불순도 # # gini는 **지니 불순도 Gini impurity**를 의미한다. DecisionTreeClassifier 클래스의 criterion 매개변수의 기본값이 'gini'이다. criterion 매개변수의 용도는 노드에서 데이터를 분할할 기준을 정하는 것이다. 앞의 그린 트리에서 루트 노드는 어떻게 당도 -0.239를 기준으로 왼쪽과 오른쪽 노드로 나우었을까? 바로 criterion 매개변수에 지정한 지니 불순도를 사용한다. 그럼 지니 불순도를 어떻게 계산하는지 알아보자. # # 지니 불순도는 클래스의 비율을 제곱해서 더한 다음 1에서 빼면 된다. # # - 지니 불순도 = 1 - (음성 클래스 비율²+ 양성 클래스 비율²) # # 이게 끝이다. 다중 클래스 문제라면 클래스가 더 많겠지만 계산하는 방법은 동일하다. 그럼 이전 트리 그림에 있던 루트 노드의 지니 불순도를 계산해 보자. 루트 노드는 총 5,197개의 샘플이 있고 그 중에 1,258개가 음성 클래스, 3,939개가 양성 클래스이다. 따라서 다음과 같이 지니 불순도를 계산할 수 있다. # # - 1 - ((1258 / 5197)²+ (3939/5197)²) = 0.367 # # 왼쪽과 오른쪽 노드의 지니 불순도도 한번 계산해 보자. 만약 100개의 샘플이 있는 어떤 노드의 두 클래스의 비율이 정확히 1/2씩 이라면 지니 불순도는 0.5가 되어 최악이 된다. # # - 1 - ((50/100)²+(50/100)²) = 0.5 # # 노드에 하나의 클래스만 있다면 지니 불순도는 0이 되어 가장 작다. 이런 노드를 순수 노드라고도 부른다. # # - 1 - ((0/100)²+(100/100)²) = 0 # # 결정 트리 모델은 부모 노드parent node와 자식 노드 child node 의 불순도 차이가 가능한 크도록 트리를 성장시킨다. 부모 노드와 자식 노드의 불순도 차이를 계산하는 방법을 알아보자. 
먼저 자식 노드의 분순도를 샘플 개수에 비례하여 모두 더한다. 그다음 부모 노드의 불순도에서 빼면 된다. # # 예를 들어 앞의 트리 그림에서 루트 노드를 부모 노드라 하면 왼쪽 노드와 오른쪽 노드가 자식 노드가 된다. 왼쪽 노드로 2,922개의 샘플이 이동했고, 오른쪽 노드로는 2,275개의 샘플이 이동했다. 그럼 불순도의 차이는 다음과 같이 계산한다. # # - 부모의 불순도 - (왼쪽 노드 샘플 수 / 부모의 샘플 수 ) x 왼쪽 노드 불순도 - (오른쪽 노드 샘플 수 / 부모의 샘플 수) x 오른쪽 노드 불순도 = 0.367 - (2922/5197) x 0.481 - (2275/5197) x 0.069 = 0.066 # # 이런 부모와 자식 노드 사이의 불순도 차이를 **정보 이득 information gain** 이라고 부른다. 좋다. 이제 결정 트리의 노드를 어떻게 나누는지 이해했다. 이 알고리즘은 정보 이득이 최대가 되도록 데이터를 나눈다. 이때 지니 불순도를 기준으로 사용한다. 그런데 사이킷런에는 또 다른 불순도 기준이 있다. # # DecisionTreeClassifier 클래스에서 criterion='entropy'를 지정하여 엔트로피 불순도를 사용할 수 있다. 엔트로피 불순도도 노드의 클래스 비율을 사용하지만 지니 불순도처럼 제곱이 아니라 밑이 2인 로그를 사용하여 곱한다. 예를 들어 루트 노드의 엔트로피 불순도는 다음과 같이 계산할 수 있다. # # - -음성 클래스 비율 x log₂(음성 클래스 비율) - 양성 클래스 비율 x log₂(양성 클래스 비율) = -(1258/5197) x log₂(3939/5197) = 0.798 # # 보통 기본값인 지니 불순도와 엔트로피 분순도가 만든 결과의 차이는 크지 않다. 여기서는 기본 값인 지니 불순도를 계속 사용하겠다. # # 이제 결정 트리 알고리즘을 확실히 이해했다. 불순도 기준을 사용해 정보 이득이 최대가 되도록 노드를 분할한다. 노드를 순수하게 나눌수록 정보 이득이 커진다. 새로운 샘플에 대해 예측할 때 에는 노드의 질문에 따라 트리를 이동한다. 그리고 마지막에 도달한 노드의 클래스 비율을 보고 예측을 만든다. # # 그런데 앞의 트리는 제한 없이 자라났기 때문에 훈련 세트보다 테스트 세트에서 점수가 크게 낮았다. 이 문제를 다루어 보자. # # > 가지치기 # # 열매를 잘 맺기 위해 과수원에서 가지치기를 하는 것처럼 결정 트리도 가지치기를 해야한다. 그렇지 않으면 무작정 끝까지 자라나는 트리가 만들어진다. 훈련 세트에는 아주 잘맞겠지만 테스트 세트에서 점수는 그에 못 미칠 것이다. 이를 두고 일반화가 잘 안 될것 같다고 말한다. 그럼 가지치기를 해보자. 결정 트리에서 가지치기를 하는 가장 간단한 방법은 자라날 수 있는 트리의 최대 깊이를 지정하는 것이다. DecisionTreeClassifier 클래스의 max_depth 매개변수를 3으로 지정하여 모델을 만들어 보겠다. 이렇게 하면 루트 노드 아래로 최대 3개의 노드까지만 성장 할 수 있다. # + dt = DecisionTreeClassifier(max_depth=3, random_state=42) dt.fit(train_scaled, train_target) print(dt.score(train_scaled, train_target)) print(dt.score(test_scaled, test_target)) # - # 훈련 세트의 성능은 낮아졌지만 테스트 세트의 성능은 거의 그대로이다. 이런 모델을 트리 그래프로 그린다면 훨씬 이해하기 쉬울 것 같다. plot_tree() 함수로 그려보자. plt.figure(figsize=(20,15)) plot_tree(dt, filled=True, feature_names=['alcohol', 'sugar', 'pH']) plt.show() # 훨씬 보기 좋다. 그래프를 따라가면서 샘플이 어떻게 나뉘는지 확인할 수 있다. 루트 노드 다음에 있는 깊이 1의 노드는 모두 당도(sugar)를 기준으로 훈련 세트를 나눈다. 
하지만 깊이 2의 노드는 맨 왼쪽의 노드만 당도를 기준으로 나누고 왼쪽에서 두 번째 노드는 알코올 도수(alcohol)를 기준으로 나눈다. 오른쪽의 두 노드는 PH를 사용한다. # # 깊이 3에 있는 노드가 최종 노드인 리프 노드이다. 왼쪽에서 세 번째에 있는 노드만 음성 클래스가 더 많다. 이 노드에 도착해야만 레드 와인으로 예측한다. 그럼 루트 노드부터 이 노드까지 도달하려면 당도는 -0.239보다 작고 또 -0.802보다 커야 한다. 그리고 알코올 도수는 0.454보다 작아야 한다. 그럼 세 번째 리프 노드에 도달한다. 즉 당도가 -0.802보다 크고 -0.239보다 작은 와인 중에 알코올 도수가 0.454와 같거나 작은 것이 레드 와인이다. # # - 실습한 내용은 트리의 깊이가 비교적 얼마 되지 않아서 해석이 쉽다. 하지만 실전에서 결정 트리를 사용할 때는 많은 특성을 사용하고 트리의 깊이도 깊어진다. 이때는 생각만큼 해석이 쉽지 않을 수 있다. # # 그런데 -0.802라는 음수로 된 당도를 이사님께 어떻게 설명해야 할까? 잠깐, 뭔가 이상하다. 앞서 불순도를 기준으로 샘플을 나눈다고 했다. 분순도는 클래스별 비율을 가지고 계산했다. 샘플을 어떤 클래스 비율로 나누는지 계산할 때 특성값의 스케일이 계산에 영향을 미칠까? 아니요. 특성값의 스케일은 결정 트리 알고리즘에 아무런 영향을 미치지 않는다. 따라서 표준화 전처리를 할 필요가 없다. 이것이 결정 트리 알고리즘의 또 다른 장점 중 하나이다. # # 그럼 앞서 전처리하기 전의 훈련 세트(train_input)와 테스트 세트(test_input)로 결정 트리 모델을 다시 훈련해 보자. # + dt = DecisionTreeClassifier (max_depth=3, random_state=42) dt.fit(train_input, train_target) print(dt.score(train_input, train_target)) print(dt.score(test_input, test_target)) # - # 결과가 정확히 같다. 이번에는 트리를 그려보자. plt.figure(figsize=(20,15)) plot_tree(dt, filled=True, feature_names=['alcohol', 'sugar', 'pH']) plt.show() # 결과를 보면 같은 트리지만, 특성값을 표준점수로 바꾸지 않은 터라 이해하기가 훨씬 쉽다. 당도가 1.625보다 크고 4.325보다 작은 와인 중에 알코올 도수가 11.025와 같거나 작은 것이 레드와인이다. 그 외에는 모두 화이트 와인으로 예측했다. # # 마지막으로 결정 트리는 어떤 특성이 가장 유용한지 나타내는 특성 중요도를 계산해 준다. 이 트리의 루트 노드와 깊이 1에서 당도를 사용했기 때문에 아마도 당도(sugar)가 가장 유용한 특성 중 하나일 것 같다. 특성 중요도는 결정 트리 모델의 feature_importtances_속성에 저장되어 있다. 이값을 출력해 확인해 보자. print(dt.feature_importances_) # 네, 역시 두 번째 특성인 당도가 0.87 정도로 특성 중요다가 가장 높다. 그 다음 알코올 도수, PH 순이다. 이 값을 모두 더하면 1이 된다. 특성 중요도는 각 노드의 정보 이득과 전체 샘플에 대한 비율을 곱한 후 특성별로 더하여 계산한다. 특성 중요도를 활용하면 결정 트리 모델을 특성 선택에 활용할 수 있다. 이것이 결정 트리 알고리즘의 또 다른 장점 중 하나이다. # # 좋다. 이 모델은 비록 테스트 세트의 성능이 아주 높지 않아 많은 화이트 와인을 완벽하게 골라내지는 못하지만, 이사님에게 보고하기에는 아주 좋은 모델이다. 조금 부정확한 면이 걱정되면 와인을 위한 럭키백을 기획해 보자. # # ### 이해하기 쉬운 결정 트리 모델 - 문제해결 과정 # # 알코올 도수, 당도, PH 데이터를 기준으로 화이트 와인을 골라내는 이진 분류 로지스틱 회귀 모델을 훈련했다. 하지만 보고서를 작성할때 도통 이해할수 없다고 했다. 
# # 그다음 결정 트리를 사용해 레드와인과 화이트 와인을 분류하는 문제를 풀었다. 특성을 더 추가하지 않고도 결정 트리의 성능이 로지스틱 회귀 모델보다 더 좋았다. 게다가 결정 트리는 깊이가 너무 깊지 않다면 비교적 설명하기 쉽다. 또 결정 트리가 어떻게 데이터를 분할하는지 이해하기 위해 분순도 개념과 정보 이득에 대해 알아보았다. # # 머신러닝 모델을 종종 블랙박스와 같다고 말한다. 실제로 모델의 계수나 절편이 왜 그렇게 학습되었는지 설명하기가 어렵다. 이에 비해 결저 트리는 비교적 비전문가에게도 설명하기 쉬운 모델을 만든다. 하지만 결정 트리는 여기에서 끝이 아니다. 결정 트리는 많은 앙상블 학습 알고리즘의 기반이 된다. 앙상블 학습은 신경망과 함께 가장 높은 성능을 내기 때문에 인기가 높은 알고리즘이다. # # 다음 절에서 결정 트리의 다양한 매개변수, 즉 하이퍼파라미터를 자동으로 찾기 위한 방법을 알아보고 그다음 앙상블 학습을 다루어 본다. # # > 전체 소스 코드 # # - https://bit.ly/hg-05-1 # # 결정 트리 # # 로지스틱 회귀로 와인 분류하기 # + import pandas as pd wine = pd.read_csv('https://bit.ly/wine_csv_data') # - wine.head() wine.info() wine.describe() data = wine[['alcohol', 'sugar', 'pH']].to_numpy() target = wine['class'].to_numpy() # + from sklearn.model_selection import train_test_split train_input, test_input, train_target, test_target = train_test_split( data, target, test_size=0.2, random_state=42) # - print(train_input.shape, test_input.shape) # + from sklearn.preprocessing import StandardScaler ss = StandardScaler() ss.fit(train_input) train_scaled = ss.transform(train_input) test_scaled = ss.transform(test_input) # + from sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr.fit(train_scaled, train_target) print(lr.score(train_scaled, train_target)) print(lr.score(test_scaled, test_target)) # - # 설명하기 쉬운 모델과 어려운 모델 print(lr.coef_, lr.intercept_) # + from sklearn.tree import DecisionTreeClassifier dt = DecisionTreeClassifier(random_state=42) dt.fit(train_scaled, train_target) print(dt.score(train_scaled, train_target)) print(dt.score(test_scaled, test_target)) # + import matplotlib.pyplot as plt from sklearn.tree import plot_tree plt.figure(figsize=(10,7)) plot_tree(dt) plt.show() # - plt.figure(figsize=(10,7)) plot_tree(dt, max_depth=1, filled=True, feature_names=['alcohol', 'sugar', 'pH']) plt.show() # 가지치기 # + dt = DecisionTreeClassifier(max_depth=3, random_state=42) dt.fit(train_scaled, train_target) 
print(dt.score(train_scaled, train_target)) print(dt.score(test_scaled, test_target)) # - plt.figure(figsize=(20,15)) plot_tree(dt, filled=True, feature_names=['alcohol', 'sugar', 'pH']) plt.show() # + dt = DecisionTreeClassifier(max_depth=3, random_state=42) dt.fit(train_input, train_target) print(dt.score(train_input, train_target)) print(dt.score(test_input, test_target)) # - plt.figure(figsize=(20,15)) plot_tree(dt, filled=True, feature_names=['alcohol', 'sugar', 'pH']) plt.show() print(dt.feature_importances_) # ## 05-2 교차 검증과 그리드 서치 # # - 핵심 키워드 # - 검증 세트 # - 교차 검증 # - 그리드 서치 # - 랜덤 서치 # - 검증 세트가 필요한 이유를 이해하고 교차 검증에 대해 배운다. 그리드 서치와 랜덤 서치를 이용해 최적의 성능을 내는 하이퍼파라미터를 찾는다. # - 문제 상황 : # # " max_depth를 3말고 다른 값으로 하면 성능이 달라지나요?" # # "네, 아마 그럴것 같습니다. 모든 값을 다 시도할 수는 없지만 시간이 허락하는 대로 테스트 하려합니다." # # "이런저런 값으로 모델을 많이 만들어서 테스트 세트로 평가하면 결국 테스트 세트에 잘 맞는 모델이 만들어지는 것 아닌가요?" # # 지금까지 우리는 훈련 세트에서 모델을 훈련하고 테스트 세트에서 모델을 평가했다. 테스트 세트에서 얻은 점수를 보고 "실전에 투입하면 이 정도 성능을 기대할 수 있겠군"이라고 생각을 한다. 즉 일반화 성능을 가늠해 볼 수 있는 것이다. 그런데 테스트 세트를 사용해 자꾸 성능을 확인하다 보면 점점 테스트 세트에 맞추게 되는 셈이다. # # 이전까지는 문제를 간단히 하려고 테스트 세트를 사용했다. 하지만 테스트 세트로 일반화 성능을 올바르게 예측하려면 가능한 한 테스트 세트를 사용하지 말아야한다. 모델을 만들고 나서 마지막에 딱 한 번만 사용하는 것이 좋다. 그렇다면 max_depth 매개변수를 사용한 하이퍼파라미터 튜닝을 어떻게 할 수 있을까? 게다가 결정 트리는 테스트해 볼 매개변수가 많다. # # ### 검증 세트 # # 테스트 세트를 사용하지 않으면 모델이 과대적합인지 과소적합인지 판단하기 어렵다. 테스트 세트를 사용하지 않고 이를 측정하는 간단한 방법은 훈련 세트를 또 나누는 것이다. 이 데이터를 **검증 세트validation set**이라고 부른다. # # 이 방법이 너무 단순해서 이상하게 들릴 수도 있겠지만, 실제로 많이 사용하는 방법이다. 1절에서 전체 데이터 중 20%를 테스트 세트로 만들고 나머지 80%를 훈련 세트로 만들었다. 이 훈련 세트 중에서 다시 20%를 떼어 내어 검증 세트로 만든다. # # - 테스트 세트와 검증 세트에 얼마나 많은 샘플을 덜어 놔야 하나요? # - 보통 20~30%를 테스트 세트와 검증 세트로 떼어 놓는다. 하지만 문제에 따라 다르다. 훈련 데이터가 아주 많다면 단 몇 %만 떼어 놓아도 전체 데이터를 대표하는 데 문제가 없다. # # # 훈련 세트에서 모델을 훈련하고 검증 세트로 모델을 평가한다. 이런 식으로 테스트하고 싶은 매개변수를 바꿔가며 가장 좋은 모델을 고른다. 그다음 이 매개변수를 사용해 훈련세트와 검증 세트를 합쳐 전체 훈련 데이터에서 모델을 다시 훈련한다. 그리고 마지막에 테스트 세트에서 최종 점수를 평가한다. 아마도 실전에 투입했을 때 테스트 세트의 점수와 비슷한 성능을 기대할 수 있을 것이다. # # 그럼 이전 절에 사용했던 데이터를 다시 불러와서 검증 세트를 만들어 보자. 
먼저 판다스로 csv 데이터를 읽자. import pandas as pd wine = pd.read_csv('https://bit.ly/wine_csv_data') # 그 다음 class 열을 타깃으로 사용하고 나머지 열은 특성 배열에 저장한다. data = wine[['alcohol', 'sugar', 'pH']].to_numpy() target = wine['class'].to_numpy() # 이제 훈련 세트와 테스트 세트를 나눌 차례이다. 방식은 이전과 동일하다. 훈련 세트의 입력 데이터와 타깃 데이터를 train_input과 train_target배열에 저장한다. from sklearn.model_selection import train_test_split train_input, test_input, train_target, test_target = train_test_split(data, target, test_size=0.2, random_state=42) print(train_input.shape, test_input.shape) # 그 다음 train_input과 train_target을 다시 train_test_split() 함수에 넣어 훈련 세트 sub_input, sub_target과 검증 세트 val_input, val_target을 만든다. 여기에서도 test_size 매개변수를 0.2로 지정하여 train_input의 약 20%를 val_input으로 만든다. sub_input, val_input, sub_target, val_target = train_test_split(train_input, train_target, test_size=0.2, random_state=42) # 어렵지 않군요. 단순히 train_test_split() 함수를 2번 적용해서 훈련 세트와 검증 세트로 나눠준 것뿐이다. 훈련 세트와 검증 세트의 크기를 확인해 보자. print(sub_input.shape, val_input.shape) # 네, 원래 5,197개 였던 훈련 세트가 4,157개로 줄고, 검증 세트는 1,040개가 되었다. 이제 sub_input, sub_target과 val_input, val_target을 사용해 모델을 만들고 평가해 보겠다. from sklearn.tree import DecisionTreeClassifier dt = DecisionTreeClassifier(random_state=42) dt.fit(sub_input, sub_target) print(dt.score(sub_input, sub_target)) print(dt.score(val_input, val_target)) # 네, 좋습니다. 이렇게 val_input, val_target을 사용해서 모델을 평가하면 됩니다. 이 모델은 확실히 훈련 세트에 과대적합되어 있다. 매개변수를 바꿔서 더 좋은 모델을 찾아야 한다. 그전에 검증 세트에 관해 좀더 알아야 할것이 있다. # # ### 교차 검증 # # 검증 세트를 만드느라 훈련 세트가 줄었다. 보통 많은 데이터를 훈련에 사용할수록 좋은 모델이 만들어진다. 그렇다고 검증 세트를 너무 조금 떼어 놓으면 검증 점수가 들쭉날쭉하고 불안정할 것이다. 이럴 때 **교차 검증 cross validation**을 이용하면 안정적인 검증 점수를 얻고 훈련에 더 많은 데이터를 사용할 수 있다. # # 교차 검증은 검증 세트를 떼어 내어 평가하는 과정을 여러 번 반복한다. 그 다음 이 점수를 평균하여 최종 검증 점수를 얻는다. 이 과정을 그림으로 보면 이해가 쉽다. 다음은 3-폴드 교차 검증 그림이다. # # - 3-폴드 교차 검증이 뭔가요? # - 훈련 세트를 세 부분으로 나눠서 교차 검증을 수행하는 것을 3-폴드 교차 검증이라고 한다. 통칭 k-폴드 교차 검증(k-fold cross validation)이라고 하며, 훈련 세트를 몇 부분으로 나누냐에 따라 다르게 부른다. k-겹 교차 검증이라고도 부른다. 
# # 이해를 돕기 위해 3-폴드 교차 검증을 예시로 들었지만, 보통 5-폴드 교차 검증이나 10-폴드 교차 검증을 많이 사용한다. 이렇게 하면 데이터의 80~90%까지 훈련에 사용할 수 있다. 검증 세트가 줄어들지만 각 폴드에서 계산한 검증 점수를 평균하기 때문에 안정된 점수로 생각 할 수 있다. # # 사이킷런에는 cross_validate()라는 교차 검증 함수가 있다. 사용법은 간단한데, 먼저 평가할 모델 객체를 첫 번째 매개변수로 전달한다. 그 다음 앞에서 처럼 직접 검증 세트를 떼어 내지 않고 훈련 세트 전체를 cross_validate() 함수에 전달한다. # # - 사이킷런에는 cross_validate() 함수의 전신인 cross_val_score() 함수도 있다. 이 함수는 cross_validate() 함수의 결과 중에서 test_score 값만 반환하게 된다. # + from sklearn.model_selection import cross_validate scores = cross_validate(dt, train_input, train_target) print(scores) # - # 이 함수는 fit_time, score_time, test_score 키를 가진 딕셔너리를 반환한다. 처음 2개의 키는 각각 모델을 훈련하는 시간과 검증하는 시간을 의미한다. 각 키마다 5개의 숫자가 담겨 있다. cross_validate()함수는 기본적으로 5-폴드 교차 검증을 수행한다. cv 매개변수에서 폴드 수를 바꿀 수도 있다. # # - 훈련과 검증 시간은 코랩에서 리소스를 사용하는 상황에 따라 달라질 수 있으므로 fit_time과 score_time 세트는 출력 결과가 책과 다를 수 있다. # # 교차 검증의 최종 점수는 test_score 키에 담긴 5개의 점수를 평균하여 얻을 수 있다. 이름은 test_score지만 검증 폴드의 점수이다. 혼동하지말자. # + import numpy as np print(np.mean(scores['test_score'])) # - # 교차 검증을 수행하면 입력한 모델에서 얻을 수 있는 최상의 검증 점수를 가늠해 볼 수 있다. # # 한 가지 주의할 점은 cross_validate()는 훈련 세트를 섞어 폴드를 나누지 않는다. 앞서 우리는 train_test_split() 함수로 전체 데이터를 섞은 후 훈련 세트를 준비했기 때문에 따로 섞을 필요가 없다. 하지만 만약 교차 검증을 할 때 훈련 세트를 섞으려면 분할기 splitter를 지정해야한다. # # 사이킷런의 분할기는 교차 검증에서 폴드를 어떻게 나눌지 결정해 준다. cross_validate() 함수는 기본적으로 회귀 모델일 경우 KFold 분할기를 사용하고 분류 모델일 경우 타깃 클래스를 골고루 나누기 위해 StratifiedkFold를 사용한다. 즉 앞서 수행한 교차 검증은 다음 코드와 동일하다. # + from sklearn.model_selection import StratifiedKFold scores = cross_validate(dt, train_input, train_target, cv=StratifiedKFold()) print(np.mean(scores['test_score'])) # - # 만약 훈련 세트를 섞은 후 10-폴드 교차 검증을 수행하려면 다음과 같이작성한다. # + splitter = StratifiedKFold(n_splits=10, shuffle=True, random_state=42) scores = cross_validate(dt, train_input, train_target, cv=splitter) print(np.mean(scores['test_score'])) # - # KFold 클래스도 동일한 방식으로 사용할 수 있다. 네, 좋다. 이제 교차 검증에 대해 이해했다. 이어서 결정 트리의 매개변수 값을 바꿔가며 가장 좋은 성능이 나오는 모델을 찾아 보겠다. 이때 테스트 세트를 사용하지 않고 교차 검증을 통해서 좋은 모델을 고르면 된다. 그럼 시작해 보자. 
# ### 하이퍼파라미터 튜닝 # # 머신러닝 모델이 학습하는 파라미터를 모델 파라미터라고 부른다고 했던 것을 기억하나? 반면에 모델이 학습할 수 없어서 사용자가 지정해야만 하는 파라미터를 하이퍼파라미터라고 한다. 사이킷런과 같은 머신러닝 라이브러리를 사용할 때 이런 하이퍼파라미터는 모두 클래스나 메서드의 매개변수로 표현된다. # # - 하이퍼파라미터는 사용자가 지정하는 파라미터이다. # # 그럼 이런 하이퍼파라미터를 튜닝하는 작업은 어떻게 진행할까? 먼저 라이브러리가 제공하는 기본값을 그대로 사용해 모델을 훈련한다. 그 다음 검증 세트의 점수나 교차 검증을 통해서 매개변수를 조금씩 바꿔 본다. 모델마다 적게는 1 ~ 2개에서, 많게는 5 ~ 6개의 매개변수를 제공한다. 이 매개변수를 바꿔가면서 모델을 훈련하고 교차 검증을 수행해야 한다. # # - 사람의 개입 없이 하이퍼파라미터 튜닝을 자동으로 수행하는 기술을 'AutoML'이라고 부른다. # # 그런데 아주 중요한 점이 있다. 가령 결정 트리 모델에서 최적의 max_depth 값을 찾았다고 가정해보자. 그다음 max_depth를 최적의 값으로 고정하고 min_samples_split을 바꿔가며 최적의 값을 찾는다. 이렇게 한 매개변수의 최적값을 찾고 다른 매개변수의 최적값을 찾아도 될까? 아니다, 틀렸다. 불행하게도 max_depth의 최적값은 min_samples_split 매개변수의 값이 바뀌면 함께 달라진다. 즉 이 두 매개변수를 동시에 바꿔가며 최적의 값을 찾아야 하는 것이다. # # 게다가 매개변수가 많아지면 문제는 더 복잡해진다. 파이썬의 for 반복문으로 이런 과정을 직접 구현할 수도 있지만, 이미 만들어진 도구를 사용하는 게 편리하겠다. 사이킷런에서 제공하는 **그리드 서치 Grid Search**를 사용하자. # # 사이킷런의 GridSearchCV 클래스는 친절하게도 하이퍼파라미터 탐색과 교차 검증을 한 번에 수행한다. 별도로 cross_validate() 함수를 호출할 필요가 없다. 그럼 어떻게 사용하는지 간단한 예를 만들어보자. 기본 매개변수를 사용할 결정 트리 모델에서 min_impurity_decrease 매개변수의 최적값을 찾아본다. 먼저 GridSearchCV 클래스를 임포트하고 탐색할 매개변수와 탐색할 값의 리스트를 딕셔너리로 만든다. # + from sklearn.model_selection import GridSearchCV params = {'min_impurity_decrease' : [0.0001, 0.0002, 0.0003, 0.0004, 0.0005]} # - # 여기서는 0.0001부터 0.0005까지 0.0001씩 증가하는 5개의 값을 시도하겠다. GridSearchCV 클래스에 탐색 대상 모델과 params 변수를 전달하여 그리드 서치 객체를 만든다. gs = GridSearchCV(DecisionTreeClassifier(random_state=42), params,n_jobs=-1) # 결정 트리 클래스의 객체를 생성하자마자 바로 전달했다. 어렵지 않다. 그다음 일반 모델을 훈련하는 것처럼 gs 객체에 fit() 메서드를 호출한다. 이 메서드를 호출하면 그리드 서치 객체는 결정 트리 모델 min_impurity_decrease 값을 바꿔가며 총 5번 실행한다. # # GridSearchCV의 cv 매개변수 기본값은 5이다. 따라서 min_impurity_decrease 값마다 5-폴드 교차 검증을 수행한다. 결국 5 x 5 = 25 개의 모델을 훈련한다. 많은 모델을 훈련하기 때문에 GridSearchCV 클래스의 n_jobs 매개변수에서 병렬 실행에 사용할 CPU 코어 수를 지정하는 것이 좋다. 이 매개변수의 기본값은 1이다. -1로 지정하면 시스템에 있는 모든 코어를 사용한다. 그럼 그리드 서치를 수행해보자. 실행 결과는 크게 중요하지 않아 건너뛴다.
gs.fit(train_input, train_target) # 교차 검증에서 최적의 하이퍼파라미터를 찾으면 전체 훈련 세트로 모델을 다시 만들어야 한다고 했던 것을 기억하나? # # 아주 편리하게도 사이킷런의 그리드 서치는 훈련이 끝나면 25개의 모델 중에서 검증 점수가 가장 높은 모델의 매개변수 조합으로 전체 훈련 세트에서 자동으로 다시 모델을 훈련한다. 이 모델은 gs 객체의 best_estimator_ 속성에 저장되어 있다. 이 모델을 일반 결정 트리 처럼 똑같이 사용 할 수 있다. dt = gs.best_estimator_ print(dt.score(train_input, train_target)) # 그리고 서치로 찾은 최적의 매개변수는 best_params_ 속성에 저장되어 있다. print(gs.best_params_) # 여기서는 0.0001이 가장 좋은 값으로 선택되었다. 각 매개변수에서 수행한 교차 검증의 평균 점수는 cv_results_ 속성의 'mean_test_score'키에 저장되어 있다. 5번의 교차 검증으로 얻은 점수를 출력해 보자. print(gs.cv_results_['mean_test_score']) # 첫 번째 값이 가장 큰 것 같다. 수동으로 고르는 것보다 넘파이 argmax() 함수를 사용하면 가장 큰 값의 인덱스를 추출할 수 있다. 그 다음 이 인덱스를 사용해 params키에 저장된 매개변수를 출력할 수 있다. 이 값이 최상의 검증 점수를 만든 매개변수 조합이다. 앞에서 출력한 gs.best_params_와 동일한지 확인해 보자. best_index = np.argmax(gs.cv_results_['mean_test_score']) print(gs.cv_results_['params'][best_index]) # 좋다. 이 과정을 정리해 보자. # # 1. 먼저 탐색할 매개변수를 지정한다. # 2. 그다음 훈련세트에서 그리드 서치를 수행하여 최상의 평균 검증 점수가 나오는 매개변수 조합을 찾는다. 이 조합은 그리드 서치 객체에 저장된다. # 3. 그리드 서치는 최상의 매개변수에서 (교차 검증에 사용한 훈련 세트가 아니라) 전체 훈련 세트를 사용해 최종 모델을 훈련한다. 이 모델도 그리드 서치 객체에 저장된다. # # 그럼 조금 더 복잡한 매개변수 조합을 탐색해보자. 결정 트리에서 min_impurity_decrease 는 노드를 분할하기 위한 불순도 감소 최소량을 지정한다. 여기에다가 max_depth로 트리의 깊이를 제한하고 min_samples_split 으로 노드를 나누기 위한 최소 샘플 수도 골라보겠다. params = {'min_impurity_decrease': np.arange(0.0001, 0.001, 0.0001), 'max_depth': range(5, 20, 1), 'min_samples_split': range(2, 100, 10)} # 넘파이 arange() 함수는 첫 번째 매개변수 값에서 시작하여 두 번째 매개변수에 도달할 때까지 세 번째 매개변수를 계속 더한 배열을 만든다. 코드에서는 0.0001 에서 시작하여 0.001이 될 때까지 0.0001을 계속 더한 배열이다. 두 번째 매개변수는 포함되지 않으므로 배열의 원소는 총 9개이다. # # 파이썬 range() 함수도 비슷하다. 하지만 이 함수는 정수만 사용할 수 있다. 이 경우 max_depth를 5에서 20까지 1씩 증가하면서 15개의 값을 만든다. min_samples_split은 2에서 100가지 10씩 증가하면서 10개의 값을 만든다. # # 따라서 이 매개변수로 수행할 교차 검증 횟수는 9 x 15 x 10 = 1,350개 이다. 기본 5-폴드 교차 검증을 수행하므로 만들어지는 모델의 수는 6,750개나 된다. n_jobs 매개변수를 -1로 설정하고 그리드 서치를 실행해 보자. 
# gs = GridSearchCV(DecisionTreeClassifier(random_state=42), params, n_jobs=-1) gs.fit(train_input, train_target) # 최상의 매개변수 조합을 확인해 보겠다. print(gs.best_params_) # 최상의 교차 검증 점수도 확인해 보겠다. print(np.max(gs.cv_results_['mean_test_score'])) # 훌륭하다. GridSearchCV 클래스를 사용하니 매개변수를 일일이 바꿔가며 교차 검증을 수행하지 않고 원하는 매개변수 값을 나열하면 자동으로 교차 검증을 수행해서 최상의 매개변수를 찾을 수 있다. # # 그런데 아직 조금 아쉬운 점이 있다. 앞에서 탐색할 매개변수의 간격을 0.0001 혹은 1로 설정했는데, 이렇게 간격을 둔 것에 특별한 근거가 없다. 이보다 더 좁거나 넓은 간격으로 시도해 볼 수 있지않을까? # # > 랜덤 서치 # # 매개변수의 값이 수치일 때 값의 범위나 간격을 미리 정하기 어려울 수 있다. 또 너무 많은 매개변수 조건이 있어 그리드 서치 수행 시간이 오래 걸릴 수 있다. 이럴때 **랜덤서치 Random Search**를 사용하면 좋다. # # 랜덤 서치에는 매개변수 값의 목록을 전달하는 것이 아니라 매개변수를 샘플링할 수 있는 확률 분포 객체를 전달한다. 확률 분포라 하니 조금 어렵게 들릴 수 있지만 간단하고 쉽다. 먼저 싸이파이에서 2개의 확률 분포 클래스를 임포트 해보자. # # - 싸이파이(scipy)는 어떤 라이브러라 인가? # - 싸이파이는 파이썬의 핵심 과학 라이브러리 중 하나이다. 적분, 보간, 선형대수, 확률 등을 포함한 수치 계산 전용 라이브러리이다. 사이킷런은 넘파이와 싸이파이 기능을 많이 사용한다. # # from scipy.stats import uniform, randint # 싸이파이의 stats 서브 패키지에 있는 uniform과 randint 클래스는 모두 주어진 범위에서 고르게 값을 뽑는다. 이를 '균등 분포에서 샘플링한다'라고 말한다. randint는 정숫값을 뽑고, uniform은 실숫값을 뽑는다. 사용하는 방법은 같다. 0에서 10 사이의 범위를 갖는 randint 객체를 만들고 10개의 숫자를 샘플링해 보자. rgen = randint(0,10) rgen.rvs(10) # - randint와 uniform은 임의로 샘플링하므로 실행 결과가 책과 다를 수 있다. 이어지는 실행 결과도 마찬가지이다. # # 10개밖에 되지 않기 때문에 고르게 샘플링되는 것 같지 않지만 샘플링 숫자를 늘리면 쉽게 확인할 수 있다 1,000개를 샘플링해서 각 숫자의 개수를 세어보겠다. np.unique(rgen.rvs(1000), return_counts=True) # 개수가 늘어나니 0에서 9까지의 숫자가 어느 정도 고르게 추출된 것을 볼 수 있다. uniform 클래스의 사용법도 동일하다. 0 ~ 1 사이에서 10개의 실수를 추출해 보자. ugen = uniform(0,1) ugen.rvs(10) # 좋다. 난수 발생기랑 유사하게 생각하면 된다. 랜덤 서치에 randint과 uniform 클래스 객체를 넘겨주고 총 몇 번을 샘플링해서 최적의 매개변수를 찾으라고 명령할 수 있다. 샘플링 횟수는 시스템 자원이 허락하는 범위 내에서 최대한 크게 하는 것이 좋다. # # 그럼 탐색할 매개변수의 딕셔너리를 만들어 보자. 여기에서는 min_samples_leaf 매개변수를 탐색 대상에 추가하겠다. 이 매개변수는 리프 노드가 되기 위한 최소 샘플의 개수이다. 어떤 노드가 분할하여 만들어질 자식 노드의 샘플 수가 이 값보다 작을 경우 분할하지 않는다. # # 탐색할 매개변수 범위는 다음과 같다. 
params = {'min_impurity_decrease': uniform(0.0001, 0.001), 'max_depth': randint(20, 50), 'min_samples_split': randint(2, 25), 'min_samples_leaf': randint(1,25), } # min_imputiry_decrease 는 0.0001에서 0.001 사이의 실숫값을 샘플링 한다. max_depth는 20에서 50 사이의 정수, min_samples_split은 2에서 25 사이의 정수, min_samples_leaf는 1에서 25 사이으이 정수를 샘플링 한다. 샘플링 횟수는 사이킷런의 랜덤 서치 클래스인 RandomizedSearchCV의 n_iter 매개변수에 지정한다. from sklearn.model_selection import RandomizedSearchCV gs = RandomizedSearchCV(DecisionTreeClassifier(random_state=42), params, n_iter=100, n_jobs=-1, random_state=42) gs.fit(train_input, train_target) # 위 params에 정의된 매개변수 범위에서 총 100번(n_iter 매개변수)을 샘플링하여 교차 검증을 수행하고 최적의 매개변수 조합을 찾는다. 앞서 그리드 서치보다 훨씬 교차 검증 수를 줄이면서 넓은 영역을 효과적으로 탐색할 수 있다. 결과를 확인해 보자. 먼저 최적의 매개변수 조합을 출력하겠다. print(gs.best_params_) # 최고의 교차 검증 점수도 확인해 보겠다. print(np.max(gs.cv_results_['mean_test_score'])) # 최적의 모델은 이미 전체 훈련 세트(train_input, train_target)로 훈련되어 best_estimator_ 속성에 저장되어있다. 이 모델을 최종 모델로 결정하고 테스트 세트의 성능을 확인해 보자. dt = gs.best_estimator_ print(dt.score(test_input, test_target)) # 테스트 세트 점수는 검증 세트에 대한 점수보다 조금 작은 것이 일반적이다. 테스트 세트 점수가 아주 만족 스럽지는 않지만 다양한 매개변수를 테스트해서 얻은 결과임을 자랑스럽게 말할 수 있을 것 같다. # # 앞으로 수동으로 매개변수를 바꾸는 대신에, 그리도 서치나 랜덤 서치를 사용해야 겠다. # # ### 최적의 모델을 위한 하이퍼파라미터 탐색 - 문제해결 과정 # # 레드 와인과 화이트 와인을 선별하는 작업의 성능을 끌어올리기 위해 결정 트리의 다양한 하이퍼파라미터를 시도해 봐야한다. 이런 과정에서 테스트 세트를 사용하면 결국 테스트 세트에 맞춰 모델을 훈련하는 효과를 만든다. # # 테스트 세트는 최종 모델을 선택할 때까지 사용하지 말아야 한다. 테스트 세트를 사용하지 않고 모델을 평가하려면 또 다른 세트가 필요하다. 이를 검증세트라고 부른다. 혹은 개발 세트 dev set 라고도 부른다. 검증 세트는 훈련세트 중 일부를 다시 덜어 내어 만든다. # # 검증 세트가 크지 않다면 어떻게 데이터를 나누었는지에 따라 검증 점수가 들쭉날쭉 할 것이다. 훈련한 모델의 성능을 안정적으로 평가하기 위해 검증 세트를 한 번 나누어 모델을 평가하는 것에 그치지 않고 여러 번 반복 할 수 있다. 이를 교차 검증 이라고 한다. # # 보통 훈련 세트를 5등분 혹은 10등분 한다. 나누어진 한 덩어리를 폴드라고 부르며 한 폴드씩 돌아가면서 검증 세트의 역할을 한다. 따라서 전체적으로 5개 혹은 10개의 모델을 만든다. 최종 검증 점수는 모든 폴드의 검증 점수를 평균하여 계산한다. # # 교차 검증을 사용해 다양한 하이퍼파라미터를 탐색한다. 머신러닝 라이브러리에서는 클래스와 메서드의 매개변수를 바꾸어 모델을 훈련하고 평가해 보는 작업이다. 이런 과정은 때론 지루하고 반복적이다. 테스트하고 싶은 매개변수 리스트를 만들어 이 과정을 자동화하는 그리드 서치를 사용하면 편리하다. 
# # 매개변수 값이 수치형이고 특히 연속적인 실숫값이라면 싸이파이의 확률 분포 객체를 전달하여 특정 범위 내에서 지정된 횟수만큼 매개변수 후보값을 샘플링하여 교차 검증을 시도할 수 있다. 이는 한정된 자원을 최대한 활용하여 효율적으로 하이퍼파라미터 공간을 탐색할 수 있는 아주 좋은 도구이다. # # 다음 절에서는 결정 트리를 확장하여 머신러닝 계를 제패한 앙상블 모델에 대해 알아보자. # # > 전체 소스 코드 # # - https://bit.ly/hg-05-2 에 접속하면 코랩에서 이 절의 코드를 바로 열어 볼 수 있다. # 교차 검증과 그리드 서치 # # 검증 세트 # + import pandas as pd wine = pd.read_csv('https://bit.ly/wine_csv_data') # - data = wine[['alcohol', 'sugar', 'pH']].to_numpy() target = wine['class'].to_numpy() # + from sklearn.model_selection import train_test_split train_input, test_input, train_target, test_target = train_test_split( data, target, test_size=0.2, random_state=42) # - sub_input, val_input, sub_target, val_target = train_test_split( train_input, train_target, test_size=0.2, random_state=42) print(sub_input.shape, val_input.shape) # + from sklearn.tree import DecisionTreeClassifier dt = DecisionTreeClassifier(random_state=42) dt.fit(sub_input, sub_target) print(dt.score(sub_input, sub_target)) print(dt.score(val_input, val_target)) # - # 교차 검증 # + from sklearn.model_selection import cross_validate scores = cross_validate(dt, train_input, train_target) print(scores) # + import numpy as np print(np.mean(scores['test_score'])) # + from sklearn.model_selection import StratifiedKFold scores = cross_validate(dt, train_input, train_target, cv=StratifiedKFold()) print(np.mean(scores['test_score'])) # - splitter = StratifiedKFold(n_splits=10, shuffle=True, random_state=42) scores = cross_validate(dt, train_input, train_target, cv=splitter) print(np.mean(scores['test_score'])) # 하이퍼파라미터 튜닝 # + from sklearn.model_selection import GridSearchCV params = {'min_impurity_decrease': [0.0001, 0.0002, 0.0003, 0.0004, 0.0005]} # - gs = GridSearchCV(DecisionTreeClassifier(random_state=42), params, n_jobs=-1) gs.fit(train_input, train_target) dt = gs.best_estimator_ print(dt.score(train_input, train_target)) print(gs.best_params_) print(gs.cv_results_['mean_test_score']) best_index = 
np.argmax(gs.cv_results_['mean_test_score']) print(gs.cv_results_['params'][best_index]) params = {'min_impurity_decrease': np.arange(0.0001, 0.001, 0.0001), 'max_depth': range(5, 20, 1), 'min_samples_split': range(2, 100, 10) } gs = GridSearchCV(DecisionTreeClassifier(random_state=42), params, n_jobs=-1) gs.fit(train_input, train_target) print(gs.best_params_) print(np.max(gs.cv_results_['mean_test_score'])) # 랜덤 서치 from scipy.stats import uniform, randint rgen = randint(0, 10) rgen.rvs(10) np.unique(rgen.rvs(1000), return_counts=True) ugen = uniform(0, 1) ugen.rvs(10) params = {'min_impurity_decrease': uniform(0.0001, 0.001), 'max_depth': randint(20, 50), 'min_samples_split': randint(2, 25), 'min_samples_leaf': randint(1, 25), } # + from sklearn.model_selection import RandomizedSearchCV gs = RandomizedSearchCV(DecisionTreeClassifier(random_state=42), params, n_iter=100, n_jobs=-1, random_state=42) gs.fit(train_input, train_target) # - print(gs.best_params_) print(np.max(gs.cv_results_['mean_test_score'])) # + dt = gs.best_estimator_ print(dt.score(test_input, test_target)) # - # ## 05-3 트리의 앙상블 # # - 핵심키워드 # - 앙상블 학습 # - 랜덤 포레스트 # - 엑스트라 트리 # - 그레이디언트 부스팅 # - 앙상블 학습이 무엇인지 이해하고 다양한 앙상블 학습 알고리즘을 실습을 통해 배운다. # # #### 문제 상황 # # "베스트 머신러닝 알고리즘을 찾아 보고하라시네" # "그건 문제마다 그때그때 다를텐데여.." # "그렇기는 한데, 그래도 대체로 성능이 좋은 알고리즘이 있지 않을까? 지난 번 결정 트리는 어때?" # "글쎄요. 논의좀 해 볼게요" # # - 가장 좋은 알고리즘이 있다고 해서 다른 알고리즘을 배울 필요가 없는 것은 아니다. 보편적으로 성능이 좋아 널리 사용되는 알고리즘이 있지만 문제마다 다를 수 있으며 어떤 알고리즘이 더 뛰어나다고 미리 판단해서는 안된다. # # ### 정형 데이터와 비정형 데이터 # # 랜덤 포레스트에 대해 배우기 전에 잠시 우리가 다루었던 데이터를 되돌아보겠다. 4장까지는 생선의 길이,높이,무게 등을 데이터로 사용했다.이 데이터는 CSV 파일에 가지런히 정리되어 있었다. 또 이번 장에서 사용한 와인 데이터도 CSV파일이 있었다. # # 이런 형태의 데이터를 **정형 데이터 structure data**라고 부른다. 쉽게 말해 어떤 구조로 되어 있다는 뜻이다. 이런 데이터는 CSV나 데이터베이스 Database, 혹은 엑셀 Excel에 저장하기 쉽다. # # 온라인 쇼핑몰에 진열된 상품과 우리가 구매한 쇼핑 정보는 모두 데이터베이스에 저장되는 정형 데이터에 속한다. 사실 프로그래머가 다루는 대부분의 데이터가 정형 데이터이다. 정형 데이터의 반대도 있겠네요? 네, 이와 반대되는 데이터를 **비정형 데이터 unstructured data**라고 부른다. 
# # 비정형 데이터는 데이터베이스나 엑셀로 표현하기 어려운 것들이다. 우리 주위에서 찾아보면 이책의 글과 같은 텍스트 데이터, 디지털 카메라로 찍은 사진, 핸드폰으로 듣느 디지털 음악 등이 있다. # # - 텍스트나 사진을 데이터베이스에 저장할 수는 없나요? # - 아니요. 저장할 수도 있다. 다만 여기에서는 보편적인 사례를 설명한 것이다. 데이터베이스 중에는 구조적이지 않은 데이터를 저장하는 데 편리하도록 발전한 것이 많다. 대표적으로 NoSQL 데이터베이스는 엑셀이나 CSV에 담기 어려운 텍스트나 JSON 데이터를 저장하는데 용이하다. # # 지금까지 배운 머신러닝 알고리즘은 정형 데이터에 잘맞는다. 그중에 정형 데이터를 다루는 데 가장 뛰어난 성과를 내는 알고리즘이 **앙상블 학습 ensemble learning** 이다. 이 알고리즘은 대부분 결정트리를 기반으로 만들어져 있다. 바로 이 절에서 배울 알고리즘들이 앙상블 학습에 속한다. # # 그럼 비정형 데이터에는 어떤 알고리즘을 사용해야 할까? 바로 7장에서 배울 신경망 알고리즘이다. 비정형 데이터는 규칙성을 찾기 어려워 전통적인 머신러닝 방법으로는 모델을 만들기 까다롭다. 하지만 신경망 알고리즘의 놀라운 발전 덕분에 사진을 인식하고 텍스트를 이해하는 모델을 만들 수 있다. # # 이제 사이킷런에서 제공하는 정형 데이터의 끝판왕인 앙상블 학습 알고리즘을 알아보겠다. # # ### 랜덤 포레스트 # # **랜덤 포레스트 Random Forest**는 앙상블 학습의 대표 주자 중 하나로 안정적인 성능 덕분에 널리 사용되고 있다. 앙상블 학습을 적용할 때 가장 먼저 랜덤 포레스트를 시도해 보길 권한다. # # 이름 자체로 유추할 수 있듯이 랜덤 포레스트는 결정 트리를 랜덤하게 만들어 결정 트리(나무)의 숲을 만든다.그리고 각 결정 트리의 에측을 사용해 최종 예측을 만든다. 그럼 랜덤 포레스트가 어떻게 숲을 구성하는지 관찰해 보자. # # - 이 절은 사이킷런에 구현된 앙상블 학습알고리즘을 기준으로 설명한다. 머신러닝 라이브러리마다 구현 방식에 조그씩 차익 있을 수 있다. # # 먼저 랜덤 포레스트는 각 트리를 훈련하기 위한 데이터를 랜덤하게 만드는데, 이 데이터를 만드는 방법이 독특하다. 우리가 입력한 훈련 데이터에서 랜덤하게 샘플을 추출하여 훈련 데이터를 만든다. 이때 한 샘플이 중복되어 추출될 수도 있다. # # 예를 들어 1,000개 가방에서 100개씩 샘플을 뽑는다면 먼저 1개를 뽑고, 뽑았던 1개를 다시 가방에 넣는다. 이런 식으로 계속해서 100개를 가방에서 뽑으면 중복된 샘플을 뽑을 수 있다. 이렇게 만들어진 샘플을 **부트스트랩 샘플 bootstrap sample**라고 부른다. 기본적으로 부트스트랩 샘플은 훈련 세트의 크기와 같게 만든다. 1,000개 가방에서 중복하여 1,000개의 샘플을 뽑기 때문에 부트스트랩 샘플은 훈련 세트와 크기가 같다. # # - 부트스트랩이 뭔가요? # - 보통 부트스트랩 방식이라고 하는데, 데이터 세트에서 중복을 허용하여 데이터를 샘플링하는 방식을 의미한다. 본문에서 설명한 것처럼 1,000개의 샘플이 있을 때 먼저 1개를 뽑고, 다시 가방에 넣어 그다음 샘플을 뽑는 방식을 뜻하는 거다. 부트스트랩 샘플이란 결국 부트스트랩 방식으로 샘플링하여 분류한 데이터라는 의미이다. # # 또한 각 노드를 분할할 때 전체 특성 중에서 일부 특성을 무작위로 고른 다음 이 중에서 최선의 분할을 찾는다. 분류 모델인 RandomForestClassifier는 기본적으로 전체 특성 개수의 제곱근만큼의 특성을 선택한다. 즉 4개의 특성이 있다면 노드마다 2개를 랜덤하게 선택하여 사용한다. 다만 회귀 모델인 RandomForestRegressor는 전체 특성을 사용한다. # # 사이킷런의 랜덤 포레스트는 기본적으로 100개의 결정 트리를 이런 방식으로 훈련한다. 그다음 분류일 때는 각 트리의 클래스별 확률을 평균하여 가장 높은 확률을 가진 클래스를 예측으로 삼는다. 회귀일 때는 단순히 각 트리의 예측을 평균한다. 
# # - 분류와 회귀를 다시 살펴보자 # - 지도 학습 알고리즘을 다루면서 분류와 회귀를 설명했다. 분류는 샘플을 몇개의 클래스 중 하나로 분류하는 문제고, 회귀는 임의의 어떤 숫자를 예측하는 문제였다. # # 랜덤 포레스트는 랜덤하게 선택한 샘플과 특성을 사용하기 때문에 훈련 세트에 과대적합되는 것을 막아주고 검증 세트와 테스트 세트에서 안정적인 성능을 얻을 수 있다. 종종 기본 매개변수 설정만으로도 아주 좋은 결과를 낸다. # # 그럼 사이킷런의 RandomForestClassifier 클래스를 화이트 와인을 분류하는 문제에 적용해 보자. 먼저 이전 절에서 했던 것처럼 와인 데이터셋을 판다스로 불러오고 훈련 세트와 테스트 세트로 나눈다. # + import numpy as np import pandas as pd from sklearn.model_selection import train_test_split wine = pd.read_csv('https://bit.ly/wine_csv_data') data = wine[['alcohol', 'sugar', 'pH']].to_numpy() target = wine['class'].to_numpy() train_input, test_input, train_target, test_target = train_test_split( data, target, test_size = 0.2, random_state=42) # - # cross_validate() 함수를 사용해 교차 검증을 수행해 보겠다. RandomForestClassifier 는 기본적으로 100개의 결정 트리를 사용하므로 n_jobs 매개변수를 -1로 지정하여 모든 CPU 코어를 사용하는 것이 좋다. cross_validate() 함수의 n_jobs 매개변수를 True로 지정하면 검증 점수뿐만 아니라 훈련 세트에 대한 점수도 같이 반환한다. 훈련 세트와 검증 세트의 점수를 비교하면 과대적합을 파악하는데 용이하다. (return_train_score 매개변수의 기본값은 False이다.) # + from sklearn.model_selection import cross_validate from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_jobs=-1, random_state=42) scores = cross_validate( rf, train_input, train_target, return_train_score=True, n_jobs=-1) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # - # 출력된 결과를 보면 훈련 세트에 다소 과대적합된 것 같다. 여기에서는 알고리즘을 조사하는 것이 목적이므로 매개변수를 더 조정하지 않도록 하겠다. # # - 사실 이 예제는 매우 간단하고 특성이 많지 않아 그리드 서치를 사용하더라도 하이퍼파라미터 튜닝의 결과가 크게 나아지지 않는다. # # 랜덤 포레스트 결정 트리의 앙상블이기 때문에 DecisionTreeClassifier가 제공하는 중요한 매개 변수를 모두 제공한다. criterion, max_depth, max_feature, min_samples_split, min_impurity_decrease, min_samples_leaf 등이다. 또한 결정 트리의 큰 장점 중 하나인 특성 중 요도를 계산한다. 랜덤 포레스트의 특성 중요도는 각 결정 트리의 특성 중요도를 취합한 것이다. 앞의 랜덤 포레스트 모델을 훈련 세트에 훈련한 후 특성 중요도를 출력해 보자. rf.fit(train_input, train_target) print(rf.feature_importances_) # 이 결과를 앞의 1절 '결정 트리'에서 만든 특성 중요도와 비교해보자. 결정트리에서 특성 중요도는 다음과 같았다. 
# # 0.12345626 , 0.86862934 , 0.0079144 # 각각 [알코올 도수, 당도, PH]였는데, 두 번째 특성인 당도의 중요도가 감소하고 알코올 도수와 PH특성의 중요도가 조금 상승했다. 이런 이유는 랜덤 포레스트가 특성의 일부를 랜덤하게 선택하여 결정 트리를 훈련하기 때문이다. 그 결과 하나의 특성에 과도하게 집중하지 않고 좀 더 많은 특성이 훈련에 기여할 기회를 얻는다. 이는 과대적합을 줄이고 일반화 성능을 높이는데 도움이 된다. # # RandomForestClassifier에는 재미있는 기능이 하나 더 있는데, 자체적으로 모델을 평가하는 점수를 얻을 수 있다. 랜덤 포레스트는 훈련 세트에서 중복을 허용하여 부트스트랩 샘플을 만들어 결정 트리를 훈련한다고 했다. 이때 부트스트랩 샘플에 포함되지 않고 남는 샘플이 있다. 이런 샘프을 OOB (out of bag) 샘플이라고 한다. 이 남는 샘플을 사용하여 부트스트랩 샘플로 훈련한 결정 트리를 평가할 수 있다. 마치 검증 세트의 역할을 하는 것이다. # # 이 점수를 얻으려면 RandomForestClassifier 클래스의 oob_score 매개변수를 True로 지정해야 한다.(이 매개변수의 기본값은 False이다.) 이렇게 하면 랜덤 포레스트는 각 결정 트리의 OOB 점수를 평균하여 출력한다. oob_score = True로 지정하고 모델을 훈련하여 OOB 점수를 출력해 보겠다. # # rf = RandomForestClassifier(oob_score=True, n_jobs= -1, random_state=42) rf.fit(train_input, train_target) print(rf.oob_score_) # 교차 검증에서 얻은 점수와 매우 비슷한 결과를 얻었다. OOB 점수를 사용하면 교차 검증을 대신할 수 있어서 결과적으로 훈련 세트에 더 많은 샘플을 사용할 수 있다. # # 다음에 알아볼 앙상블 학습은 랜덤 포레스트와 아주 비슷한 엑스트라 트리 이다. # # ### 엑스트라 트리 # # **엑스트라 트리 Extra Tree** 는 랜덤 포레스트와 매우 비슷하게 동작한다. 기본적으로 100개의 결정 트리를 훈련한다. 랜덤 포레스트와 동일하게 결정 트리가 제공하는 대부분의 매개변수를 지원한다. 또한 전체 특성 중에 일부 특성을 랜덤하게 선택하여 노드를 분할하는 데 사용 한다. # # 랜덤 포레스트와 엑스트라 트리의 차이점은 부트스트랩 샘플을 사용하지 않는다는 점이다. 즉각 결정 트리를 만들 때 전체 훈련 세트를 사용한다. 대신 노드를 분할할 때 가장 좋은 분할을 찾는 것이 아니라 무작위로 분할 하자! 실은 빼먹지 않고 책의 구석구석을 따라 읽고 실습했다면 이미 여러분은 엑스트라 트리를 조금 맛보았다. 2절의 확인 문제에서 DecisionTreeClassifier의 splitter 매개변수를 'random'으로 지정했다. 엑스트라 트리가 사용하는 결정 트리가 바로 splitter='random'인 결정 트리이다. # # 하나의 결정 트리에서 특성을 무작위로 분할 한다면 성능이 낮아지겠지만 많은 트리를 앙상블 하기때문에 과대적합을 막고 검증 세트의 점수를 높이는 효과가 있다. 사이킷런에서 제공하는 엑스트라 트리는 ExtraTreeClassifier 이다. 이 모델의 교차 검증 점수를 확인해 보자. # + from sklearn.ensemble import ExtraTreesClassifier et = ExtraTreesClassifier(n_jobs=-1, random_state=42) scores = cross_validate(et, train_input, train_target, return_train_score=True, n_jobs=-1) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # - # 랜덤 포레스트와 비슷한 결과를 얻었다. 이 예제는 특성이 많지 않아 두 모델의 차이가 크지 않다. 
보통 엑스트라 트리가 무작위성이 좀 더 크기 때문에 랜덤 포레스트보다 더 많은 결정 트리를 훈련해야 한다. 하지만 랜덤하게 노드를 분할하기 때문에 빠른 계산 속도가 엑스트라 트리의 장점이다. # # - 결정 트리는 최적의 분할을 찾는 데 시간을 많이 소모한다. 특히 고려해야 할 특성의 개수가 많을 때 더 그렇다. 만약 무작위로 나눈다면 훨씬 빨리 트리를 구성할 수 있다. # # 엑스트라 트리도 랜덤 포레스트와 마찬가지로 특성 중요도를 제공한다. 순서는 [알코올 도수, 당도, PH]인데, 결과를 보면 엑스트라 트리도 결정 트리보다 당도에 대한 의존성이 작다. et.fit(train_input, train_target) print(et.feature_importances_) # 엑스트라 트리의 회귀 버전은 ExtraTreesRegressor 클래스이다. # # 네, 좋다. 지금까지 비슷하지만 조금 다른 2개의 앙상블 학습을 알아보았다. 다음에는 이 둘과 다른 방식을 사용하는 앙상블 학습을 알아보겠다. 먼저 그레이디언트 부스팅이다. # # ### 그레이디언트 부스팅 # # **그레이디언트 부스팅 gradient boosting**은 깊이가 얕은 결정 트리를 사용하여 이전 트리의 오차를 보완하는 방식으로 앙상블 하는 방법이다. 사이킷런의 GradientBoostingClassifier는 기본적으로 깊이가 3인 결정 트리를 100개 사용한다. 깊이가 얕은 결정 트리를 사용하기 때문에 과대적합에 강하고 일반적으로 높은 일반화 성능을 기대할 수 있다. # # # 그레이디언트란 이름에서 눈치챘을지 모르지만 4장에서 배웠던 경사 하강법을 사용하여 트리를 앙상블에 추가한다. 분류에서는 로지스틱 손실 함수를 사용하고 회귀에서는 평균 제곱 오차 함수를 사용한다. # # 4장에서 경사 하강법은 손실 함수를 산으로 정의하고 가장 낮은 곳을 찾아 내려오는 과정으로 설명했다. 이때 가장 낮은 곳을 찾아 내려오는 방법은 모델의 가중치와 절편을 조금씩 바꾸는 것이다. 그레이디언트 부스팅은 결정 트리를 계속 추가하면서 가장 낮은 곳을 찾아 이동한다. 혹시 4장에서 손실 함수의 낮은 곳으로 천천히 조금씩 이동해야 한다고 말한 것을 기억하나? 그레이디언트 부스팅도 마찬가지이다. 그래서 깊이가 얕은 트리를 사용하는 거다. 또 학습률 매개변수로 속도를 조절한다. # # 그레이디언트 부스팅의 개념에 대해 살펴 보았으니 이제 사이킷런에서 제공하는 GradientBoostingClassifier를 사용해 와인 데이터셋의 교차 검증 점수를 확인해 보자. # + from sklearn.ensemble import GradientBoostingClassifier gb = GradientBoostingClassifier(random_state=42) scores = cross_validate(gb, train_input, train_target, return_train_score=True, n_jobs=-1) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # - # 와우! 거의 과대적합이 되지 않았다. 그레이디언트 부스팅은 결정 트리의 개수를 늘려도 과대적합에 매우 강하다. 학습률을 증가시키고 트리의 개수를 늘리면 조금 더 성능이 향상될 수 있다. # + gb = GradientBoostingClassifier(n_estimators=500, learning_rate=0.2, random_state=42) scores = cross_validate(gb, train_input, train_target, return_train_score=True, n_jobs=-1) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # - # 결정 트리 개수를 500개로 5배나 늘렸지만 과대적합을 잘 억제하고 있다. 학습률 learning_rate 의 기본값은 0.1이다. 그레이디언트 부스팅도 특성 중요도를 제공한다.
결과에서 볼 수 있듯이 그레이디언트 부스팅이 랜덤 포레스트보다 일부 특성(당도)에 더 집중 한다. gb.fit(train_input, train_target) print(gb.feature_importances_) # 재미있는 매개변수가 하나 있다. 트리 훈련에 사용할 훈련 세트의 비율을 정하는 subsample이다. 이 매개변수의 기본값은 1.0으로 전체 훈련 세트를 사용한다. 하지만 subsampledl 1보다 작으면 훈련 세트의 일부를 사용한다. 이는 마치 경사 하강법 단계마다 일부 샘플을 랜덤하게 선택하여 진행하는 확률적 경사 하강법이나 미니배치 경사 하강법과 비슷하다. # # 일반적으로 그레이디언트 부스팅 랜덤 포레스트보다 조금 더 높은 성능을 얻을 수 있다. 하지만 순서대로 트리를 추가하기 때문에 훈련 속도가 느리다. 즉 GradientBoostingClassifier에는 n_jobs 매개변수가 없다. 그레이디언트 부스팅의 회귀 버전은 GradientBoostingRegressor이다. 그레이디언트 부스팅의 속도와 성능을 더욱 개선한 것이 다음에 살펴볼 히스토그램 기반 그레이디언트 부스팅이다. # # ### 히스토그램 기반 그레이디언트 부스팅 # # **히스토그램 기반 그레이디언트 부스팅 Histogram based Gradient Boosting** 은 정형 데이터를 다루는 머신러닝 알고리즘 중에 가장 인기가 높은 알고리즘이다. 히스토그램 기반 그레이디언트 부스팅은 먼저 입력 특성을 256개의 구간으로 나눈다. 따라서 노드를 분할할 때 최적의 분할을 매우 빠르게 찾을 수 있다. # # 히스토그램 기반 그레이디언트 부스팅은 256개의 구간 중에서 하나를 떼어 놓고 누락된 값을 위해서 사용한다. 따라서 입력에 누락된 특성이 있더라도 이를 따로 전처리할 필요가 없다. # # 사이킷런의 히스토그램 기반 그레이디언트 부스팅 클래스는 HistGradientBoostingClassifier 이다. 일반적으로 HistGradientBoostingClassifier는 기본 매개변수에서 안정적인 성능을 얻을 수 있다. HistGradientBoostingClassifier에는 트리의 개수를 지정하는데 n_estimators 대신에 부스팅 반복 횟수를 지정하는 max_iter를 사용한다. 성능을 높이려면 max_iter 매개변수를 테스트해 보자. # # 그럼 와인 데이터세에 HistGradientBoostingClassifier 클래스를 적용해 보자. 사이킷런의 히스토그램 기반 그레이디언트 부스팅은 아직 테스트 과정에 있다. 이 클래스를 사용하려면 sklearn.experimental 패키지 아래에 있는 enable_hist_gradient_boosting 모듈을 임포트해야 한다. # + from sklearn.experimental import enable_hist_gradient_boosting from sklearn.ensemble import HistGradientBoostingClassifier hgb = HistGradientBoostingClassifier(random_state=42) scores = cross_validate(hgb, train_input, train_target, return_train_score=True) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # - # 과대적합을 잘 억제하면서 그레이디언트 부스팅보다 조금 더 높은 성능을 제공한다. 특성 중요를 확인해 보자. # # 히스토그램 기반 그레이디언트 부스팅의 특성 중요도를 계산하기 위해 permutation_importance() 함수를 사용하겠다. 이 함수는 특성을 하나씩 랜덤하게 섞어서 모델의 성능이 변화하는지를 관찰하여 어떤 특성이 중요한지를 계산한다. 훈련 세트뿐만 아니라 테스트 세트에도 적용할 수 있고 사이킷런에서 제공하는 추정기 모델에 모두 사용할 수 있다. 
# # 먼저 히스토그램 기반 그레이디언트 부스팅 모델을 훈련하고 훈련 세트에서 특성 중요도를 계산해보자. n_repeats 매개변수는 랜덤하게 섞을 횟수를 지정한다. 여기서는 10으로 지정하겠다. 기본값은 5이다. # + from sklearn.inspection import permutation_importance hgb.fit(train_input, train_target) result = permutation_importance(hgb, train_input, train_target, n_repeats=10, random_state=42, n_jobs=-1) print(result.importances_mean) # - # permutation_importance() 함수가 반화하는 객체는 반복하여 얻은 특성 중요도(importances), 평균(importances_mean),표준편차(importances_std)를 담고있다. 평균을 출력해 보면 랜덤 포레스트와 비슷한 비율임을 알 수 있다. 이번에는 테스트 세트에서 특성 중요도를 계산해 보겠다. result = permutation_importance(hgb, test_input, test_target, n_repeats=10, random_state=42, n_jobs=-1) print(result.importances_mean) # 테스트 세트의 결과를 보면 그레이디언트 부스팅과 비슷하게 조금 더 당도에 집중하고 있다는 것을 알 수 있다. 이런 분석을 통해 모델을 실전에 투입했을 때 어떤 특성에 관심을 둘지 예상할 수 있다. # # 그럼 HistGradientBoostingClassifier를 사용해 테스트 세트에서의 성능을 최종적으로 확인해보자. hgb.score(test_input, test_target) # 테스트 세트에서는 약 87% 정확도를 얻었다. 실전에 투입하면 성능은 이보다는 조금 더 낮을 것이다. 앙상블 모델은 확실히 단일 결정 트리보다 좋은 결과를 얻을 수 있다.(기억이 나지 않을 수 있는데 2절의 랜덤 서치에서 테스트 정확도는 86%였다.) # # 히스토그램 기반 그레이디언트 부스팅의 회귀 버전은 HistGradientBoostingRegressor 클래스에 구현되어 있다. 사이킷런에서 제공하는 히스토그램 기반 그레이디언트 부스팅이 비교적 새로운 기능이다. 하지만 사이킷런 말고도 히스토그램 기반 그레이디언트 부스팅 알고리즘을 구현한 라이브러리가 여럿 있다. # # 가장 대표적인 라이브러리는 XGBoost 이다. 놀랍게도 이 라이브러리도 코랩에서 사용할 수 있을 뿐만 아니라 사이킷런의 cross_validate()함수와 함께 사용할 수도 있다. XGBoost는 다양한 부스팅 알고리즘을 지원한다. tree_method 매개변수를 'hist'로 지정하면 히스토그램 기반 그레이디언트 부스팅을 사용할 수 있다. 그럼 XGBoost를 사용해 와인 데이터의 교차 검증 점수를 확인해 보자. 
# + from xgboost import XGBClassifier xgb = XGBClassifier(tree_method='hist', random_state=42) scores = cross_validate(xgb, train_input, train_target, return_train_score=True) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # + from xgboost import XGBClassifier xgb = XGBClassifier(tree_method='hist', random_state=42) scores = cross_validate(xgb, train_input, train_target, return_train_score=True, n_jobs=-1) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # - # 널리 사용하는 또 다른 히스토그램 기반 그레이디언트 부스팅 라이브러리는 마이크로소프트에서 만든 LightGBM이다.LightGBM은 빠르고 최신 기술을 많이 적용하고 있어 인기가 점점 높아지고 있다. LightGBM도 코랩에 이미 설치되어 있어 바로 테스트해 볼 수 있다. # + # #!pip3 install lightgbm # + from lightgbm import LGBMClassifier lgb = LGBMClassifier(random_state=42) scores = cross_validate(lgb, train_input, train_target, return_train_score=True, n_jobs=-1) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # - # 사실 이 사이킷런의 히스토그램 기반 그레이디언트 부스팅이 LightGBM에서 영향을 많이 받았다. 이제 히스토그램 기반 그레이디언트 부스팅까지 4개의 앙상블을 모두 다루어 보았다. # # - 좀 더 열정적인 독자에게 # # - XGBoost: https://xgboost.readthedocs.io/en/latest # - LightGBM: https://lightgbm.readthedocs.io/en/latest # # # ### 앙상블 학습을 통한 성능 향상 - 문제해결 방식 # # # 앙상블 학습은 정형 데이터에서 가장 뛰어난 성능을 내는 머신러닝 알고리즘 중 하나이다. 대표적인 앙상블 학습은 다음과 같다. # # - 사이킷런 # - 랜덤포레스트: 부트스트랩 샘플사용. 대표 앙상블 학습 알고리즘임 # - 엑스트라 트리: 결정 트리의 노드를 랜덤하게 분할함 # - 그레이디언트 부스팅: 이진 트리의 손실을 보완하는 식으로 얕은 결정 트리를 연속하여 추가함 # - 히스토그램 기반 그레이디언트 부스팅: 훈련 데이터를 256개 정수 구간으로 나누어 빠르고 높은 성능을 냄 # # - 그외 라이브러리 # - XGBoost # - LightGBM # # 이번 절에서는 앙상블 학습을 배웠다. 결정 트리 기반의 앙상블 학습은 강력하고 뛰어난 성능을 제공하기 때문에 인기가 아주 높다. 사이킷런에서 제공하는 앙상블 학습 알고리즘 중 랜덤 포레스트, 엑스트라 트리, 그레이디언트 부스팅, 히스토그램 기반 그레이디언트 부스팅을 다루었다. # # 랜덤포레스트는 가장 대표적인 앙상블 학습 알고리즘이다. 성능이 좋고 안정적이기 때문에 첫 번째로 시도해 볼 수 있는 앙상블 학습 중 하나이다. 랜덤포레스트는 결정 트리를 훈련하기 위해 부트스트랩 샘플을 만들고 전체 특성 중 일부를 랜덤하게 선택하여 결정 트리를 만든다. # # 엑스트라 트리는 랜덤 포레스트와 매우 비슷하지만 부트스트랩 샘플을 사용하지 않고 노드를 분할할때 최선이 아니라 랜덤하게 분할 한다. 이런 특징 때문에 랜덤 포레스트보다 훈련 속도가 빠르지만 보통 더 많은 트리가 필요하다. 
# # 그레이디언트 부스팅은 깊이가 얕은 트리를 연속적으로 추가하여 손실 함수를 최소화하는 앙상블 방법이다. 성능이 뛰어나지만 병렬로 훈련할 수 없기 때문에 랜덤 포레스트나 엑스트라 트리보다 훈련 속도가 조금 느리다. 그레이디언트 부스팅에서 학습률 매개변수 조정하여 모델의 복잡도를 제어할 수 있다. 학습률 매개변수가 크면 복잡하고 훈련세트에 과대적합된 모델을 얻을 수 있다. # # 끝으로 가장 뛰어난 앙상블 학습으로 평가받는 히스토그램 기반 그레이디언트 부스팅 알고리즘을 살펴보았다. 히스토그램 기반 그레이디언트 부스팅은 훈련 데이터를 256개의 구간으로 변환하여 사용하기 때문에 노드 분할 속도가 매우 빠르다. 코랩에는 사이킷런뿐만 아니라 히스토그램 기반 그레이디언트 부스팅 라이브러리인 XGBoost와 LightGBM이 이미 설치되어 있어 바로 시험해 볼 수있다. # # 이 절에서 다양한 앙상블 학습 방법을 배워 보았다. 앙상블 학습과 그리드 서치, 랜덤 서치를 사용한 하이퍼파라미터 튜닝을 사용하면 최고 수준의 성능을 내는 머신러닝 모델을 얻을 수 있다. # # 지금까지는 입력과 타깃이 준비된 문제를 풀었다. 이런 머신러닝 분야를 지도학습 supervised learning 이라고 부른다. 타깃이 없다면 어떨까? # 이때에도 유용한 무언가를 학습할 수 있을까? 다음 장에서 이에 대해 배워보겠다. # # >전체 소스 코드 # # - https://bit.ly/hg-05-3 에 접속하면 코랩에서 이 절의 코드를 바로 열어 볼 수 있다. # 트리의 앙상블 # # 랜덤포레스트 # + import numpy as np import pandas as pd from sklearn.model_selection import train_test_split wine = pd.read_csv('https://bit.ly/wine_csv_data') data = wine[['alcohol', 'sugar', 'pH']].to_numpy() target = wine['class'].to_numpy() train_input, test_input, train_target, test_target = train_test_split(data, target, test_size=0.2, random_state=42) # + from sklearn.model_selection import cross_validate from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_jobs=-1, random_state=42) scores = cross_validate(rf, train_input, train_target, return_train_score=True, n_jobs=-1) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # - rf.fit(train_input, train_target) print(rf.feature_importances_) # + rf = RandomForestClassifier(oob_score=True, n_jobs=-1, random_state=42) rf.fit(train_input, train_target) print(rf.oob_score_) # - # 엑스트라트리 # + from sklearn.ensemble import ExtraTreesClassifier et = ExtraTreesClassifier(n_jobs=-1, random_state=42) scores = cross_validate(et, train_input, train_target, return_train_score=True, n_jobs=-1) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # - et.fit(train_input, train_target) print(et.feature_importances_) # 
그레이디언트 부스팅 # + from sklearn.ensemble import GradientBoostingClassifier gb = GradientBoostingClassifier(random_state=42) scores = cross_validate(gb, train_input, train_target, return_train_score=True, n_jobs=-1) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # + gb = GradientBoostingClassifier(n_estimators=500, learning_rate=0.2, random_state=42) scores = cross_validate(gb, train_input, train_target, return_train_score=True, n_jobs=-1) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # - gb.fit(train_input, train_target) print(gb.feature_importances_) # 히스토그램 기반 부스팅 # + from sklearn.experimental import enable_hist_gradient_boosting from sklearn.ensemble import HistGradientBoostingClassifier hgb = HistGradientBoostingClassifier(random_state=42) scores = cross_validate(hgb, train_input, train_target, return_train_score=True, n_jobs=-1) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # + from sklearn.inspection import permutation_importance hgb.fit(train_input, train_target) result = permutation_importance(hgb, train_input, train_target, n_repeats=10, random_state=42, n_jobs=-1) print(result.importances_mean) # - result = permutation_importance(hgb, test_input, test_target, n_repeats=10, random_state=42, n_jobs=-1) print(result.importances_mean) hgb.score(test_input, test_target) # XGBoost # + from xgboost import XGBClassifier xgb = XGBClassifier(tree_method='hist', random_state=42) scores = cross_validate(xgb, train_input, train_target, return_train_score=True, n_jobs=-1) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # - # LightGBM # + from lightgbm import LGBMClassifier lgb = LGBMClassifier(random_state=42) scores = cross_validate(lgb, train_input, train_target, return_train_score=True, n_jobs=-1) print(np.mean(scores['train_score']), np.mean(scores['test_score'])) # - # - 출처: 혼자 공부하는 머신러닝 + 딥러닝
Chapter05.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Notebook: collocate Continuous Plankton Recorder (CPR) samples with satellite
# environmental data (CCMP winds, AVISO sea level, CMC SST), ETOPO1 bathymetry,
# and mesoscale eddy trajectories for the North Pacific, then merge everything
# into a single per-sample dataset.

import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
from math import cos, radians
import xarray as xr  # NOTE(review): duplicate import of xarray — harmless
import pandas as pd

####################you will need to change some paths here!#####################
#list of input files
filename_cpr='f:/data/project_data/NASA_biophysical/CPR_data/All CPR Sample catalogue.xlsx'
filename_northpac_eddies='F:/data/project_data/NASA_biophysical/aviso/eddy_trajectory_19930101_20170106_north_pacific_2020_10_06a.nc'
filename_cpr_eddy='F:/data/project_data/NASA_biophysical/collocated_data/CPR/eddy_cpr_data_north_pacific.nc'
filename_eddy='F:/data/project_data/NASA_biophysical/collocated_data/CPR/eddy_ranking_data_north_pacific.nc'
#output files
filename_cpr_expanded='F:/data/project_data/NASA_biophysical/collocated_data/CPR/All CPR Sample catalogue with env info_2020_10_05'
#filename_cpr_expanded='F:/data/project_data/NASA_biophysical/collocated_data/CPR/All CPR Sample catalogue with env info_2020_10_05.csv'
#filename_cpr_expanded_netcdf='F:/data/project_data/NASA_biophysical/collocated_data/CPR/All CPR Sample catalogue with env info_2020_10_05.nc'
#filename_cpr_eddy='F:/data/project_data/NASA_biophysical/collocated_data/CPR/All CPR Sample catalogue with eddy and env info_2020_10_05.csv'
#filename_cpr_eddy_netcdf='F:/data/project_data/NASA_biophysical/collocated_data/CPR/All CPR Sample catalogue with eddy and env info_2020_10_05.nc'
#################################################################################

#read in CPR data excell file using pandas library
df = pd.read_excel(filename_cpr)
# Normalize the spreadsheet column names to the cpr_sample_* convention used below.
df = df.rename(columns={'Sample ID':'cpr_sample_id','day':'cpr_sample_day', 'month':'cpr_sample_month','year':'cpr_sample_year', 'lat':'cpr_sample_lat','Long':'cpr_sample_lon','Already processed?':'cpr_sample_proc'})

ds_cpr = df.to_xarray()
ds_cpr['index']=ds_cpr.index.astype('int')
ds_cpr

# Build a datetime64 sample time from the separate year/month/day columns.
ilen = ds_cpr.cpr_sample_lat.size
tt=np.empty(ilen,dtype='datetime64[ns]')
for i in range(ilen):
    tstr=str(ds_cpr.cpr_sample_year[i].data)+'-'+str(ds_cpr.cpr_sample_month[i].data).zfill(2)+'-'+str(ds_cpr.cpr_sample_day[i].data).zfill(2)
    tem=np.datetime64(tstr)
    tt[i]=tem
ds_cpr['cpr_sample_time']=xr.DataArray(tt,dims=['index'])
ds_cpr

# Quick look at the geographic bounding box of the CPR samples.
ds_cpr.cpr_sample_lon.min().data,ds_cpr.cpr_sample_lon.max().data,ds_cpr.cpr_sample_lat.min().data,ds_cpr.cpr_sample_lat.max().data

# Eddy trajectory atlas for the North Pacific.
ds_eddy = xr.open_dataset(filename_northpac_eddies).rename({'Longitude':'lon','Latitude':'lat'})
ds_eddy

ds_eddy.cyclonic_type.plot()

# Previously computed CPR-to-eddy collocation result.
ds_eddy_cpr = xr.open_dataset(filename_cpr_eddy)
ds_eddy_cpr

#get bathymetry from ETOPO1
fname_topo = 'F:/data/topo/ETOPO1_Ice_g_gmt4.grd'
ds = xr.open_dataset(fname_topo)
ds_topo = ds.rename_dims({'x':'lon','y':'lat'}).rename({'x':'lon','y':'lat'})
# North-Pacific subset, plotted as a sanity check.
tem = ds_topo.isel(lat=slice(7000,9500),lon=slice(0,4500))
tem.z.plot()

# Nearest-neighbor bathymetry at each CPR sample position.
tt = ds_topo.z.interp(lat=ds_cpr.cpr_sample_lat,lon=ds_cpr.cpr_sample_lon,method='nearest').data
ds_cpr['ETOPO_depth']= xr.DataArray(tt, coords={'index':ds_cpr.index}, dims=["index"])
# 0-360 longitude copy for plotting across the dateline.
ds_cpr['cpr_sample_lon2'] = np.mod(ds_cpr['cpr_sample_lon'],360)
plt.scatter(ds_cpr.cpr_sample_lon2,ds_cpr.cpr_sample_lat,c=ds_cpr.ETOPO_depth,cmap='coolwarm',vmin=-8000,vmax=8000)

# +
def get_data():
    """Open the CCMP wind, AVISO sea level, and CMC SST zarr stores, tag each
    variable with a `var_name` attribute (used later as the output column name),
    and compute day-of-year climatologies over 1993-2018.

    Returns (data_dict, clim_dict) of xarray Datasets.
    NOTE(review): relies on module-level `ds_topo` for the 'topo' entry.
    """
    #climatology years
    cyr1,cyr2='1993-01-01','2018-12-31'

    # CCMP test
    dir_pattern_zarr = 'F:/data/sat_data/ccmp/zarr/'
    ds= xr.open_zarr(dir_pattern_zarr)
    ds = ds.rename({'latitude':'lat','longitude':'lon'})
    # Shift longitudes to -180..180 and sort so .sel slicing works.
    ds.coords['lon'] = (ds.coords['lon'] + 180) % 360 - 180
    ds_ccmp = ds.sortby(ds.lon)
    ds_ccmp = ds_ccmp.drop('nobs')
    for var in ds_ccmp:
        tem = ds_ccmp[var].attrs
        tem['var_name']='ccmp_'+str(var)
        ds_ccmp[var].attrs=tem
    ds_ccmp_clim = ds_ccmp.sel(time=slice(cyr1,cyr2))
    ds_ccmp_clim = ds_ccmp_clim.groupby('time.dayofyear').mean('time',keep_attrs=True,skipna=False)

    # AVISO test
    dir_pattern_zarr = 'F:/data/sat_data/aviso/zarr/'
    ds= xr.open_zarr(dir_pattern_zarr)
    ds = ds.rename({'latitude':'lat','longitude':'lon'})
    ds.coords['lon'] = (ds.coords['lon'] + 180) % 360 - 180
    ds_aviso = ds.sortby(ds.lon).drop({'lat_bnds','lon_bnds','crs','err'})
    for var in ds_aviso:
        tem = ds_aviso[var].attrs
        tem['var_name']='aviso_'+str(var)
        ds_aviso[var].attrs=tem
    ds_aviso_clim = ds_aviso.sel(time=slice(cyr1,cyr2))
    ds_aviso_clim = ds_aviso_clim.groupby('time.dayofyear').mean('time',keep_attrs=True,skipna=False)

    #sst
    dir_pattern_zarr = 'F:/data/sat_data/sst/cmc/zarr/'
    ds_sst= xr.open_zarr(dir_pattern_zarr)
    ds_sst = ds_sst.drop({'analysis_error','mask','sea_ice_fraction'})
    tem = ds_sst.analysed_sst.attrs
    tem['var_name']='cmc_sst'
    ds_sst.analysed_sst.attrs=tem
    ds_sst_clim = ds_sst.sel(time=slice(cyr1,cyr2))
    ds_sst_clim = ds_sst_clim.groupby('time.dayofyear').mean('time',keep_attrs=True,skipna=False)

    #put data into a dictionary
    data_dict={'aviso':ds_aviso,
               'wnd':ds_ccmp,
               'sst':ds_sst,
               'topo':ds_topo}
    clim_dict={'aviso_clim':ds_aviso_clim,
               'wnd_clim':ds_ccmp_clim,
               'sst_clim':ds_sst_clim}
    return data_dict,clim_dict

data,clim = get_data()
# -

# +
# Collocate each CPR sample with every satellite dataset: for each sample,
# load a +/-24 h, +/-0.5 deg window and linearly interpolate to the sample
# time/lat/lon.  Results are stored as new ds_cpr variables named by each
# variable's `var_name` attribute.
#ds_cpr = xr.open_dataset(filename_bird_out_eddy_netcdf)
ilen_bird1 = len(ds_cpr.cpr_sample_lon)
clonmin,clonmax = ds_cpr.cpr_sample_lon.min().data,ds_cpr.cpr_sample_lon.max().data
clatmin,clatmax = ds_cpr.cpr_sample_lat.min().data,ds_cpr.cpr_sample_lat.max().data
t1save=0  # cache key: reload the subset only when the time window changes
for name in data:
    ds_data=data[name]
    if name=='topo':
        continue
#    if not name==input_data:
#        continue
    print('name',name)
    # Pre-allocate one NaN-filled output column per satellite variable.
    for var in ds_data:
        var_tem=ds_data[var].attrs['var_name']
        ds_cpr[var_tem]=xr.DataArray(np.nan*np.empty((ilen_bird1), dtype=str(ds_data[var].dtype)), coords={'index': ds_cpr.index}, dims=('index'))
        ds_cpr[var_tem].attrs=ds_data[var].attrs
        print('var',var_tem)
    for i in range(ilen_bird1):
        # Skip samples with no position or outside the dataset's time range.
        if np.isnan(ds_cpr.cpr_sample_lat[i]):
            continue
        if ds_cpr.cpr_sample_time[i]<ds_data.time.min():
            continue
        if ds_cpr.cpr_sample_time[i]>ds_data.time.max():
            continue
        t1,t2 = ds_cpr.cpr_sample_time[i]-np.timedelta64(24,'h'), ds_cpr.cpr_sample_time[i]+np.timedelta64(24,'h')
        if not t1==t1save:
            tem2 = ds_data.sel(time=slice(t1,t2),lat=slice(clatmin-.5,clatmax+.5),lon=slice(clonmin-.5,clonmax+.5)).load()
            t1save=t1
            print(i,ilen_bird1)
#        lat1,lat2=ds_cpr.cpr_sample_lat[i]-.5,ds_cpr.cpr_sample_lat[i,j]+.5
#        lon1,lon2=ds_cpr.cpr_sample_lon[i]-.5,ds_cpr.cpr_sample_lon[i,j]+.5
#        tem = ds_data.sel(time=slice(t1,t2),lat=slice(lat1,lat2),lon=slice(lon1,lon2)).load()
        tem = tem2.interp(time=ds_cpr.cpr_sample_time[i],lat=ds_cpr.cpr_sample_lat[i],lon=ds_cpr.cpr_sample_lon[i])
        for var in ds_data:
            var_tem=ds_data[var].attrs['var_name']
            ds_cpr[var_tem][i]=tem[var].data
#output data
df_bird = ds_cpr.to_dataframe()
df_bird.to_csv(filename_cpr_expanded)
# NOTE(review): `filename_cpr_expanded_netcdf` is commented out in the config
# block above — this line raises NameError unless it was defined in an earlier
# session; confirm before running top-to-bottom.
ds_cpr.to_netcdf(filename_cpr_expanded_netcdf)
# -

# # Put files together

# +
# Merge the per-dataset output files (aviso/wnd/sst) into one dataset.
file1 = filename_cpr_expanded+'aviso'+'.nc'
file2 = filename_cpr_expanded+'wnd'+'.nc'
file3 = filename_cpr_expanded+'sst'+'.nc'
ds = xr.open_dataset(file1)
ds2 = xr.open_dataset(file2)
for var in ds2:
    if not var in ds:
        ds[var]=ds2[var]
ds2 = xr.open_dataset(file3)
for var in ds2:
    if not var in ds:
        ds[var]=ds2[var]
ds.to_netcdf(filename_cpr_expanded+'.nc')
df_bird = ds.to_dataframe()
df_bird.to_csv(filename_cpr_expanded+'.csv')
ds
# -

# # collocate with eddies

# Prefix all eddy-atlas variable names with cpr_eddy_data_.
ds_npac_eddy = xr.open_dataset(filename_northpac_eddies).rename({'Longitude':'lon','Latitude':'lat'})
for var in ds_npac_eddy:
    ds_npac_eddy = ds_npac_eddy.rename({var:str('cpr_eddy_data_'+var)})

# Rename the collocation-file variables to the same cpr_eddy_data_ convention.
# NOTE(review): the slicing ([10:] vs [4:]) strips an assumed fixed prefix that
# depends on the first letter — verify against the actual variable names in
# eddy_cpr_data_north_pacific.nc.
ds_cpr_eddy = xr.open_dataset(filename_cpr_eddy)
for var in ds_cpr_eddy:
    if var[0]=='s':
        ds_cpr_eddy = ds_cpr_eddy.rename({var:str('cpr_eddy_data_'+var[10:])})
    else:
        ds_cpr_eddy = ds_cpr_eddy.rename({var:str('cpr_eddy_data_'+var[4:])})
ds_npac_eddy.close()
ds_cpr_eddy.close()
print(ds_npac_eddy)
print(ds_cpr_eddy)

# # make single array with all info

# +
# Pull the eddy-atlas values (indexed by cpr_eddy_data_index) into per-sample
# arrays on the 'z' dimension of ds_cpr_eddy.
ilen = len(ds_cpr_eddy.cpr_eddy_data_index)
for var in ds_npac_eddy:
    if not var=='cpr_eddy_data_time':
        ds_cpr_eddy[var]=xr.DataArray(np.nan*np.empty(ilen, dtype=str(ds_npac_eddy[var].dtype)), dims=('z'))
        ds_cpr_eddy[var].attrs=ds_npac_eddy[var].attrs
    else:
        # time cannot hold NaN of its dtype, so allocate without the NaN fill
        ds_cpr_eddy[var]=xr.DataArray(np.empty(ilen, dtype=str(ds_npac_eddy[var].dtype)), dims=('z'))
for i in range(ilen):
    ii = ds_cpr_eddy.cpr_eddy_data_index[i]
    for var in ds_npac_eddy:
        ds_cpr_eddy[var][i]=ds_npac_eddy[var][ii]
# -

# # check where double crossing

# +
#proc_cpr ==1 whre distance is GREATER than radius of eddy
#proc_cpr = np.where(ds_cpr_eddy.cpr_eddy_data_distance>ds_cpr_eddy.cpr_eddy_data_radius,1,0)
#proc_cpr
# -

# num_cross[i] = number of distinct years in which CPR samples fell inside the
# radius of the eddy track that sample i is matched to.
ilen = len(ds_cpr_eddy.cpr_eddy_data_track)
ds_cpr_eddy['num_cross']=xr.DataArray(np.zeros(ilen, dtype='int32'), dims=('z'))
ds_cpr_eddy['num_cross'].attrs={'description':'how many times eddy crossed by cpr data'}
#calculate where cpr in eddy radius, put nan where not in eddy
subset = ds_cpr_eddy.where(ds_cpr_eddy.cpr_eddy_data_distance<ds_cpr_eddy.cpr_eddy_data_radius)
#find unique eddy track ids
u, indices = np.unique(ds_cpr_eddy.cpr_eddy_data_track, return_index=True)
#cycle through each unique eddy id to find unique years
for i in range(len(u)):
    ind = np.where(subset.cpr_eddy_data_track==u[i])
    ind_tem = np.where(ds_cpr_eddy.cpr_eddy_data_track==u[i])
    tem = subset.cpr_eddy_data_year[ind]
    u1, indices1 = np.unique(tem, return_index=True)
    ds_cpr_eddy.num_cross[ind_tem]=len(u1)

ds_cpr_eddy.num_cross.plot()

ds_cpr_eddy

# Merge the environmental collocation file back in, prefixing non-cpr variables
# with cpr_sample_.
ds_env = xr.open_dataset(filename_cpr_expanded+'.nc')
ds_env.close()
ds_env = ds_env.rename({'index':'z'})
ds_env

for var in ds_env:
    var_tem = var
    if not var_tem[0:3]=='cpr':
        var_tem = 'cpr_sample_'+var
    ds_cpr_eddy[var_tem]=xr.DataArray(ds_env[var].data, dims=('z'))
    ds_cpr_eddy[var_tem].attrs=ds_env[var].attrs

ds_cpr_eddy

# NOTE(review): output base path is re-pointed here to the "with eddy info" file.
filename_cpr_expanded='F:/data/project_data/NASA_biophysical/collocated_data/CPR/All CPR Sample catalogue with eddy info_2020_10_06'
ds_cpr_eddy.to_netcdf(filename_cpr_expanded+'.nc')
df_bird = ds_cpr_eddy.to_dataframe()
# Save the merged CPR+eddy table as CSV, then spot-check a previously written
# netCDF version of the same product.
df_bird.to_csv(filename_cpr_expanded+'.csv')

filename_cpr_expanded_netcdf='F:/data/project_data/NASA_biophysical/collocated_data/CPR/All CPR Sample catalogue with eddy info4.nc'
ds_tem = xr.open_dataset(filename_cpr_expanded_netcdf)
ds_tem.close()
ds_tem.num_cross.plot()
print(ds_tem)

# +
#chech on subset
#subset = ds_cpr_eddy.where(ds_cpr_eddy.cpr_eddy_data_distance<ds_cpr_eddy.cpr_eddy_data_radius)
#for i in range(100):
#    print(subset.cpr_eddy_data_track[i].data,ds_cpr_eddy.cpr_eddy_data_distance[i].data,ds_cpr_eddy.cpr_eddy_data_radius[i].data)
# +
#print(filename_eddy)
#ds_unique = xr.open_dataset(filename_eddy,group='eddy_data')
#ds_unique.close()
#ds_unique
# -

ds_cpr_eddy.cpr_eddy_data_cyclonic_type.plot()

plt.plot(ds_cpr_eddy.cpr_eddy_data_distance)
plt.plot(ds_cpr_eddy.cpr_eddy_data_radius)

for i in range(10):
    print(ds_cpr_eddy.cpr_eddy_data_distance[i].data,ds_cpr_eddy.cpr_eddy_data_radius[i].data)

#cpr_eddy_data_speed_radius_deg[index]=speed_radius_eddy[index_eddy]*cos(radians(lats_eddy[index_eddy]))/111.0

#proc_cpr ==1 whre distance is GREATER than radius of eddy
proc_cpr = np.where(ds_cpr_eddy.cpr_eddy_data_distance>ds_cpr_eddy.cpr_eddy_data_radius,1,0)
proc_cpr

# +
import numpy.ma as ma
from numpy import *
# Replace masked/NaN values with the -9999 sentinel before the raw netCDF write.
# NOTE(review): the cpr_sample_* source arrays (cpr_sample_ucur, cpr_sample_sst,
# ...) are not defined anywhere in this chunk — presumably created in an earlier
# notebook session; confirm before running top-to-bottom.
#remove masked values from data
data = np.ma.filled(cpr_sample_ucur, np.nan)
data[isnan(data)] = -9999
cpr_sample_ucur2=data
data = np.ma.filled(cpr_sample_vcur, np.nan)
data[isnan(data)] = -9999
cpr_sample_vcur2=data
data = np.ma.filled(cpr_sample_ucur_clim, np.nan)
data[isnan(data)] = -9999
cpr_sample_ucur_clim2=data
data = np.ma.filled(cpr_sample_vcur_clim, np.nan)
data[isnan(data)] = -9999
cpr_sample_vcur_clim2=data
data = np.ma.filled(cpr_sample_sst, np.nan)
data[isnan(data)] = -9999
cpr_sample_sst2=data
data = np.ma.filled(cpr_sample_sst_clim, np.nan)
data[isnan(data)] = -9999
cpr_sample_sst_clim2=data
data = np.ma.filled(cpr_sample_uwnd, np.nan)
data[isnan(data)] = -9999
cpr_sample_uwnd2=data
data = np.ma.filled(cpr_sample_uwnd_clim, np.nan)
data[isnan(data)] = -9999
cpr_sample_uwnd_clim2=data
data = np.ma.filled(cpr_sample_vwnd, np.nan)
data[isnan(data)] = -9999
cpr_sample_vwnd2=data
data = np.ma.filled(cpr_sample_vwnd_clim, np.nan)
data[isnan(data)] = -9999
cpr_sample_vwnd_clim2=data
# -

# +
#print(shape(df))
#print(shape(cpr_sample_jday))
#df_time=[0] * (ilen_cpr)
#print(ilen_cpr)
#for index in range(0,ilen_cpr):
#    df_time[index] = dt.datetime(cpr_sample_year[index],cpr_sample_month[index],cpr_sample_day[index])
#df_vars=['Sample ID','day','month','year','lat','lon','already processed?','ETOPO_depth (m) nearest neighbor','ETOPO_depth (m) interp','SST CMC 2.0','SST Climatology CMC 2.0','U_wnd CCMC m/s','V_wnd CCMC m/s','Climatology U_wnd CCMC m/s','Climatology V_wnd CCMC m/s','U_cur oscar m/s','V_cur oscar m/s','Climatology U_cur oscar m/s','Climatology V_cur oscar m/s']
# +
#print(shape(df_time))
#print(shape(df_vars))
#print(type(df_time))
# +
#type(df)
#print(type(df))
#print(shape(df))
##print(shape(df_time))
#print(shape(df_vars))
#df_out = xr.DataArray(df, coords=[df_time,df_vars]) #, dims=['time' 'vars'])
# -
#df_out.to_netcdf(filename_cpr_expanded_netcdf)
# +
#df_test=xr.open_dataset(filename_cpr_expanded_netcdf)
#df_test
# -

print(len(cpr_sample_sst2))
print(cpr_sample_sst2[-11:-1])

# +
#output in netcdf
# Write the fully merged per-sample product with the raw netCDF4 API.
# NOTE(review): `Dataset` here is netCDF4.Dataset, not imported in this chunk —
# presumably imported in an earlier cell; confirm.
#get the values for a given column
#f.close()
filename_cpr_expanded_netcdf='f:/data/eddy/collocated_data/All CPR Sample catalogue with eddy info4.nc'
print(type(cpr_sample_id))
print(len(cpr_sample_id))
print(cpr_sample_ucur_clim[9:10])
print(cpr_sample_ucur[9:10])
#f.close()
ilen_cpr=len(cpr_sample_id)
f = Dataset(filename_cpr_expanded_netcdf,'w', format='NETCDF4') #'w' stands for write
#tempgrp = f.createGroup('CPR_data')
f.createDimension('z', ilen_cpr)
# One variable per output column, all on the shared 'z' dimension.
cpr_sample_id_netcdf = f.createVariable('cpr_sample_id', 'str', 'z')
cpr_sample_day_netcdf = f.createVariable('cpr_sample_day', 'i4', 'z')
cpr_sample_month_netcdf = f.createVariable('cpr_sample_month', 'i4', 'z')
cpr_sample_year_netcdf =f.createVariable('cpr_sample_year', 'i4', 'z')
cpr_sample_lat_netcdf = f.createVariable('cpr_sample_lat', 'f4', 'z')
cpr_sample_lon_netcdf = f.createVariable('cpr_sample_lon', 'f4', 'z')
cpr_sample_proc_netcdf = f.createVariable('cpr_sample_proc', 'c', 'z')
eddy_dist_netcdf = f.createVariable('cpr_eddy_data_distance', 'f4', 'z')
eddy_dist_from_land_netcdf = f.createVariable('cpr_eddy_data_distance_from_land', 'f4', 'z')
eddy_rad_netcdf = f.createVariable('cpr_eddy_data_radius', 'f4', 'z')
eddy_lon_netcdf = f.createVariable('cpr_eddy_data_lons', 'f4', 'z')
eddy_lat_netcdf = f.createVariable('cpr_eddy_data_lats', 'f4', 'z')
eddy_time_netcdf = f.createVariable('cpr_eddy_data_time', 'f4', 'z')
eddy_amp_netcdf = f.createVariable('cpr_eddy_data_amplitude', 'f4', 'z')
eddy_spd_netcdf = f.createVariable('cpr_eddy_data_speed_average', 'f4', 'z')
eddy_rad2_netcdf = f.createVariable('cpr_eddy_data_speed_radius', 'f4', 'z')
eddy_cyc_netcdf = f.createVariable('cpr_eddy_data_cyclonic_type', 'i4', 'z')
eddy_id_netcdf = f.createVariable('cpr_eddy_data_track_id', 'i4', 'z')
eddy_tdy_netcdf = f.createVariable('cpr_eddy_data_total_days', 'i4', 'z')
eddy_ob_netcdf = f.createVariable('cpr_eddy_data_ob_num', 'i4', 'z')
eddy_yr_netcdf = f.createVariable('cpr_eddy_data_year', 'i4', 'z')
eddy_dy_netcdf = f.createVariable('cpr_eddy_data_idyjl', 'i4', 'z')
eddy_crossings_netcdf = f.createVariable('num_cross', 'i4', 'z')
ucur_netcdf = f.createVariable('cpr_sample_oscar_ucur', 'f4', 'z')
vcur_netcdf = f.createVariable('cpr_sample_oscar_vcur', 'f4', 'z')
ucur_clim_netcdf = f.createVariable('cpr_sample_oscar_ucur_clim', 'f4', 'z')
vcur_clim_netcdf = f.createVariable('cpr_sample_oscar_vcur_clim', 'f4', 'z')
sst_netcdf = f.createVariable('cpr_sample_cmc_sst', 'f4', 'z')
sst_clim_netcdf = f.createVariable('cpr_sample_cmc_sst_clim', 'f4', 'z')
uwnd_netcdf = f.createVariable('cpr_sample_ccmp_uwnd', 'f4', 'z')
uwnd_clim_netcdf = f.createVariable('cpr_sample_ccmp_uwnd_clim', 'f4', 'z')
vwnd_netcdf = f.createVariable('cpr_sample_ccmp_vwnd', 'f4', 'z')
vwnd_clim_netcdf = f.createVariable('cpr_sample_ccmp_vwnd_clim', 'f4', 'z')
depth_netcdf = f.createVariable('cpr_sample_ETOPO_depth', 'f4', 'z')
tem=cpr_sample_id.tolist()
print(type(tem))
print(tem[0:10])
# Fill every variable.
cpr_sample_id_netcdf[:] = cpr_sample_id #tem
cpr_sample_day_netcdf[:] = cpr_sample_day
cpr_sample_month_netcdf[:] = cpr_sample_month
cpr_sample_year_netcdf[:] = cpr_sample_year
cpr_sample_lat_netcdf[:] = cpr_sample_lat
cpr_sample_lon_netcdf[:] = cpr_sample_lon
cpr_sample_proc_netcdf[:] = cpr_sample_proc
eddy_dist_netcdf[:] = cpr_eddy_data_distance
eddy_dist_from_land_netcdf[:] = cpr_eddy_data_distance_from_land
eddy_rad_netcdf[:] = cpr_eddy_data_radius
eddy_lon_netcdf[:] = cpr_eddy_data_lons
eddy_lat_netcdf[:] = cpr_eddy_data_lats
eddy_time_netcdf[:] = cpr_eddy_data_time
eddy_amp_netcdf[:] = cpr_eddy_data_amplitude
eddy_spd_netcdf[:] = cpr_eddy_data_speed_average
eddy_rad2_netcdf[:] = cpr_eddy_data_speed_radius
eddy_cyc_netcdf[:] = cpr_eddy_data_cyclonic_type
eddy_id_netcdf[:] = cpr_eddy_data_track_id
eddy_tdy_netcdf[:] = cpr_eddy_data_total_days
eddy_ob_netcdf[:] = cpr_eddy_data_ob_num
eddy_yr_netcdf[:] = cpr_eddy_data_year
eddy_dy_netcdf[:] = cpr_eddy_data_idyjl
eddy_crossings_netcdf[:] = num_cross
ucur_netcdf[:] =cpr_sample_ucur2
vcur_netcdf[:] =cpr_sample_vcur2
ucur_clim_netcdf[:] = cpr_sample_ucur_clim2
vcur_clim_netcdf[:] = cpr_sample_vcur_clim2
sst_netcdf[:] =cpr_sample_sst2
sst_clim_netcdf[:] =cpr_sample_sst_clim2
uwnd_netcdf[:] =cpr_sample_uwnd2
uwnd_clim_netcdf[:] =cpr_sample_uwnd_clim2
vwnd_netcdf[:] =cpr_sample_vwnd2
vwnd_clim_netcdf[:] =cpr_sample_vwnd_clim2
depth_netcdf[:] =cpr_sample_depth_exact
f.close()
# -

# Round-trip check of the file just written.
df_test=xr.open_dataset(filename_cpr_expanded_netcdf)
df_test.cpr_sample_id

# +
#into excel file
#from pandas import DataFrame
#tem=cpr_sample_id.tolist()
#df = DataFrame({'CPR Sample ID': tem, 'CPR sample day': cpr_sample_day})
#print(filename_cpr_expanded)
#df.to_excel('filename_cpr_expanded,', sheet_name='sheet1', index=False)

#find number of crossings
print(cpr_eddy_data_speed_radius[1],cpr_eddy_data_speed_radius_deg[1])

filename_cpr

# +
# Write the merged per-sample table into the original CPR workbook, one column
# per variable (row 1 = headers, data starts at row 2).
# NOTE(review): `openpyxl` is not imported in this chunk — presumably imported
# in an earlier cell; confirm.
wb = openpyxl.load_workbook(filename_cpr)
sheet=wb['2000_2016']
#sheet = wb.get_sheet_by_name('2000_2016')
for i in range(0,1):
    sheet['A' + str(i + 1)].value = 'cpr_sample_id'
    sheet['B' + str(i + 1)].value = 'cpr_sample_day'
    sheet['C' + str(i + 1)].value = 'cpr_sample_month'
    sheet['D' + str(i + 1)].value = 'cpr_sample_year'
    sheet['E' + str(i + 1)].value = 'cpr_sample_lat'
    sheet['F' + str(i + 1)].value = 'cpr_sample_lon'
    sheet['G' + str(i + 1)].value = 'cpr_sample_proc'
    sheet['H' + str(i + 1)].value = 'eddy_data_track_id'
    sheet['I' + str(i + 1)].value = 'eddy_data_distance'
    sheet['J' + str(i + 1)].value = 'eddy_data_distance_from_land'
    sheet['K' + str(i + 1)].value = 'eddy_data_radius'
    sheet['L' + str(i + 1)].value = 'eddy_data_lons'
    sheet['M' + str(i + 1)].value = 'eddy_data_lats'
    sheet['N' + str(i + 1)].value = 'eddy_data_time'
    sheet['O' + str(i + 1)].value = 'eddy_data_amplitude'
    sheet['P' + str(i + 1)].value = 'eddy_data_speed_average'
    sheet['Q' + str(i + 1)].value = 'eddy_data_speed_radius'
    sheet['R' + str(i + 1)].value = 'eddy_data_cyclonic_type'
    sheet['S' + str(i + 1)].value = 'eddy_data_total_days'
    sheet['T' + str(i + 1)].value = 'eddy_data_ob_num'
    sheet['U' + str(i + 1)].value = 'eddy_data_year'
    sheet['V' + str(i + 1)].value = 'eddy_data_idyjl'
    sheet['W' + str(i + 1)].value = 'number_times_cpr_crosses_this_eddy'
    sheet['X' + str(i + 1)].value = 'cpr_sample_oscar_ucur'
    sheet['Y' + str(i + 1)].value = 'cpr_sample_oscar_vcur'
    sheet['Z' + str(i + 1)].value = 'cpr_sample_oscar_ucur_clim'
    sheet['AA' + str(i + 1)].value = 'cpr_sample_oscar_vcur_clim'
    sheet['AB' + str(i + 1)].value = 'cpr_sample_cmc_sst'
    sheet['AC' + str(i + 1)].value = 'cpr_sample_cmc_sst_clim'
    sheet['AD' + str(i + 1)].value = 'cpr_sample_ccmp_uwnd'
    sheet['AE' + str(i + 1)].value = 'cpr_sample_ccmp_uwnd_clim'
    sheet['AF' + str(i + 1)].value = 'cpr_sample_ccmp_vwnd'
    sheet['AG' + str(i + 1)].value = 'cpr_sample_ccmp_vwnd_clim'
    sheet['AH' + str(i + 1)].value = 'cpr_sample_ETOPO_depth'
ilen_cpr=len(cpr_sample_id)
# Convert eddy longitudes to -180..180 for the spreadsheet.
# NOTE(review): this assignment ALIASES cpr_eddy_data_lons (no copy) — the loop
# below also mutates the original array; use .copy() if it must be preserved.
cpr_eddy_data_lons2=cpr_eddy_data_lons
for i in range(0,ilen):
    if cpr_eddy_data_lons[i]>180.:
        cpr_eddy_data_lons2[i]=cpr_eddy_data_lons[i]-360.
for i in range(0,ilen_cpr):
    sheet['A' + str(i + 2)].value = cpr_sample_id[i]
    sheet['B' + str(i + 2)].value = cpr_sample_day[i]
    sheet['C' + str(i + 2)].value = cpr_sample_month[i]
    sheet['D' + str(i + 2)].value = cpr_sample_year[i]
    sheet['E' + str(i + 2)].value = cpr_sample_lat[i]
    sheet['F' + str(i + 2)].value = cpr_sample_lon[i]
    sheet['G' + str(i + 2)].value = cpr_sample_proc[i]
    sheet['H' + str(i + 2)].value = cpr_eddy_data_track_id[i]
    sheet['I' + str(i + 2)].value = cpr_eddy_data_distance[i]
    sheet['J' + str(i + 2)].value = cpr_eddy_data_distance_from_land[i]
    sheet['K' + str(i + 2)].value = cpr_eddy_data_radius[i]
    sheet['L' + str(i + 2)].value = cpr_eddy_data_lons2[i]
    sheet['M' + str(i + 2)].value = cpr_eddy_data_lats[i]
    sheet['N' + str(i + 2)].value = cpr_eddy_data_time[i]
    sheet['O' + str(i + 2)].value = cpr_eddy_data_amplitude[i]
    sheet['P' + str(i + 2)].value = cpr_eddy_data_speed_average[i]
    sheet['Q' + str(i + 2)].value = cpr_eddy_data_speed_radius[i]
    sheet['R' + str(i + 2)].value = cpr_eddy_data_cyclonic_type[i]
    sheet['S' + str(i + 2)].value = cpr_eddy_data_total_days[i]
    sheet['T' + str(i + 2)].value = cpr_eddy_data_ob_num[i]
    sheet['U' + str(i + 2)].value = cpr_eddy_data_year[i]
    sheet['V' + str(i + 2)].value = cpr_eddy_data_idyjl[i]
    sheet['W' + str(i + 2)].value = num_cross[i]
    sheet['X' + str(i + 2)].value = cpr_sample_ucur2[i]
    sheet['Y' + str(i + 2)].value = cpr_sample_vcur2[i]
    sheet['Z' + str(i + 2)].value = cpr_sample_ucur_clim2[i]
    sheet['AA' + str(i + 2)].value = cpr_sample_vcur_clim2[i]
    sheet['AB' + str(i + 2)].value = cpr_sample_sst2[i]
    sheet['AC' + str(i + 2)].value = cpr_sample_sst_clim2[i]
    sheet['AD' + str(i + 2)].value = cpr_sample_uwnd2[i]
    sheet['AE' + str(i + 2)].value = cpr_sample_uwnd_clim2[i]
    sheet['AF' + str(i + 2)].value = cpr_sample_vwnd2[i]
    sheet['AG' + str(i + 2)].value = cpr_sample_vwnd_clim2[i]
    sheet['AH' + str(i + 2)].value = cpr_sample_depth_exact[i]
wb.save(filename_cpr_expanded)
# -

# +
# Map of CPR sample positions that fall inside an eddy radius, colored by the
# 'Already processed?' flag (blue=Yes, red=No).
# NOTE(review): `Basemap` is not imported in this chunk — presumably from
# mpl_toolkits.basemap in an earlier cell; also note `map` shadows the builtin.
f = plt.figure()
clats=[]
clons=[]
clats2=[]
clons2=[]
for i in range(0,len(cpr_sample_lat)):
    tem=cpr_sample_proc[i]
    if cpr_eddy_data_distance[i]<=cpr_eddy_data_radius[i] and tem=='Yes' :
        clats.append(cpr_sample_lat[i])
        clons.append(cpr_sample_lon[i])
    elif cpr_eddy_data_distance[i]<=cpr_eddy_data_radius[i] and tem=='No' :
        clats2.append(cpr_sample_lat[i])
        clons2.append(cpr_sample_lon[i])
map = Basemap(projection='merc', lat_0 = 45, lon_0 = -130, resolution = 'l', area_thresh = 0.1, llcrnrlon=-180.25, llcrnrlat=30.0,urcrnrlon=-115.25, urcrnrlat=62.75)
#map.drawcoastlines()
#map.drawcountries()
map.fillcontinents(color = 'coral')
#map.drawmapboundary()
#xx=cpr_sample_lon[i]
#map.plot(xx,yy,'ko',markersize=24)
x,y = map(clons,clats)
map.plot(x, y, 'bo', markersize=.2)
x,y = map(clons2,clats2)
map.plot(x, y, 'ro', markersize=.2)
plt.show()
f.savefig("F:/data/eddy/figures/all_collocated_cpr_data.pdf", bbox_inches='tight')
# -

print(cpr_eddy_data_speed_radius[1],cpr_eddy_data_speed_radius_deg[1])

# +
# Same in-eddy selection, but also collect the matched eddy centers/radii so
# eddy circles can be drawn on the next figure.
f = plt.figure()
clats=[]
clons=[]
clats2=[]
clons2=[]
elats=[]
elons=[]
erads=[]
erads2=[]
espokes=[]
ecross=[]
for i in range(0,len(cpr_sample_lat)):
    tem=cpr_sample_proc[i]
    if cpr_eddy_data_distance[i]<=cpr_eddy_data_radius[i] and tem=='Yes' :
        clats.append(cpr_sample_lat[i])
        clons.append(cpr_sample_lon[i])
        elats.append(cpr_eddy_data_lats[i])
        elons.append(cpr_eddy_data_lons2[i])
        erads.append(cpr_eddy_data_speed_radius[i])
        erads2.append(cpr_eddy_data_speed_radius_deg[i])
        ecross.append(num_cross[i])
        espokes.append(50)
    elif cpr_eddy_data_distance[i]<=cpr_eddy_data_radius[i] and tem=='No' :
        clats2.append(cpr_sample_lat[i])
        clons2.append(cpr_sample_lon[i])
        elats.append(cpr_eddy_data_lats[i])
        elons.append(cpr_eddy_data_lons2[i])
        erads.append(cpr_eddy_data_speed_radius[i])
        erads2.append(cpr_eddy_data_speed_radius_deg[i])
        ecross.append(num_cross[i])
        espokes.append(50)
# -

print(cpr_eddy_data_speed_radius[1],cpr_eddy_data_speed_radius_deg[1])

def shoot(lon, lat, azimuth, maxdist=None):
    """Shooter Function
    Original javascript on http://williams.best.vwh.net/gccalc.htm
    Translated to python by <NAME>

    Given a start point (degrees), an azimuth (degrees) and a distance
    (`maxdist`, in km), return the end point and back azimuth
    (glon2, glat2, baz) on the WGS-84 ellipsoid (direct geodesic problem).
    """
    glat1 = lat * np.pi / 180.
    glon1 = lon * np.pi / 180.
    s = maxdist / 1.852          # km -> nautical miles
    faz = azimuth * np.pi / 180.

    EPS= 0.00000000005
    if ((np.abs(np.cos(glat1))<EPS) and not (np.abs(np.sin(faz))<EPS)):
        # NOTE(review): `alert` is a javascript leftover and is undefined in
        # Python — this branch would raise NameError if ever hit.
        alert("Only N-S courses are meaningful, starting at a pole!")

    a=6378.13/1.852              # equatorial radius in nautical miles
    f=1/298.257223563            # WGS-84 flattening
    r = 1 - f
    tu = r * np.tan(glat1)
    sf = np.sin(faz)
    cf = np.cos(faz)
    if (cf==0):
        b=0.
    else:
        b=2. * np.arctan2 (tu, cf)

    cu = 1. / np.sqrt(1 + tu * tu)
    su = tu * cu
    sa = cu * sf
    c2a = 1 - sa * sa
    x = 1. + np.sqrt(1. + c2a * (1. / (r * r) - 1.))
    x = (x - 2.) / x
    c = 1. - x
    c = (x * x / 4. + 1.) / c
    d = (0.375 * x * x - 1.) * x
    tu = s / (r * a * c)
    y = tu
    c = y + 1
    # Iterate until the arc length converges.
    while (np.abs (y - c) > EPS):
        sy = np.sin(y)
        cy = np.cos(y)
        cz = np.cos(b + y)
        e = 2. * cz * cz - 1.
        c = y
        x = e * cy
        y = e + e - 1.
        y = (((sy * sy * 4. - 3.) * y * cz * d / 6. + x) * d / 4. - cz) * sy * d + tu

    b = cu * cy * cf - su * sy
    c = r * np.sqrt(sa * sa + b * b)
    d = su * cy + cu * sy * cf
    glat2 = (np.arctan2(d, c) + np.pi) % (2*np.pi) - np.pi
    c = cu * cy - su * sy * cf
    x = np.arctan2(sy * sf, c)
    c = ((-3. * c2a + 4.) * f + 4.) * c2a * f / 16.
    d = ((e * cy * c + cz) * sy * c + y) * sa
    glon2 = ((glon1 + x - (1. - c) * d * f + np.pi) % (2*np.pi)) - np.pi
    baz = (np.arctan2(sa, b) + np.pi) % (2 * np.pi)

    glon2 *= 180./np.pi
    glat2 *= 180./np.pi
    baz *= 180./np.pi

    return (glon2, glat2, baz)

def equi(m, centerlon, centerlat, radius, *args, **kwargs):
    """Draw a geodesic circle of `radius` (km) around (centerlon, centerlat)
    on Basemap `m`, by shooting one point per degree of azimuth."""
    glon1 = centerlon
    glat1 = centerlat
    X = []
    Y = []
    for azimuth in range(0, 360):
        glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius)
        X.append(glon2)
        Y.append(glat2)
    X.append(X[0])   # close the ring
    Y.append(Y[0])
    #m.plot(X,Y,**kwargs) #Should work, but doesn't...
    X,Y = m(X,Y)
    plt.plot(X,Y,**kwargs)

# +
# Map of in-eddy CPR samples with the matched eddy circles overlaid.
fig = plt.figure(figsize=(11.7,8.3))
#Custom adjust of the subplots
plt.subplots_adjust(left=0.05,right=0.95,top=0.90,bottom=0.05,wspace=0.15,hspace=0.05)
ax = plt.subplot(111)
print(cpr_eddy_data_speed_radius[1],cpr_eddy_data_speed_radius_deg[1])
#Let's create a basemap of the world
m = Basemap(projection='merc', lat_0 = 45, lon_0 = -130, resolution = 'l', area_thresh = 0.1, llcrnrlon=-180.25, llcrnrlat=30.0,urcrnrlon=-115.25, urcrnrlat=62.75)
m.fillcontinents(color='coral',lake_color='white')
x,y = m(clons,clats)
m.plot(x, y, 'bo', markersize=.2)
x,y = m(clons2,clats2)
m.plot(x, y, 'ro', markersize=.2)
for i in range(0,len(erads)):
    centerlon = elons[i]
    centerlat = elats[i]
    radius = erads[i]
    # skip circles that would wrap around the dateline
    if abs(centerlon-erads2[i])<177:
        equi(m, centerlon, centerlat, radius,lw=1.)
plt.savefig("F:/data/eddy/figures/all_collocated_cpr_data3.pdf",dpi=300)
plt.show()
# -

#make a list of eddy id that have two visits
#make icheck have a 1 where trackid has more than two visits
icheck=[]
for i in range(0,len(cpr_sample_lat)):
    if num_cross[i]>1:
        itest=0
        for i2 in range(0,len(icheck)):
            if icheck[i2]==cpr_eddy_data_track_id[i]:
                itest=1
        if itest==0:
            icheck.append(cpr_eddy_data_track_id[i])
print(icheck)

# +
#now just do for eddies that have 2 visits
# One figure per multiply-crossed eddy: its full trajectory plus the CPR
# samples that fell inside it.
# NOTE(review): `lons_eddy`, `lats_eddy`, `track_eddy` are not defined in this
# chunk — presumably the raw eddy-atlas arrays from an earlier cell; confirm.
for i_tem in range(0,len(icheck)):
    tem_id=icheck[i_tem]
    #get all lat/lon for specific eddy to pring
    alats=[]
    alons=[]
    for i in range(0,len(lons_eddy)):
        if tem_id==track_eddy[i]:
            alats.append(lats_eddy[i])
            if lons_eddy[i]<=180:
                alons.append(lons_eddy[i])
            if lons_eddy[i]>180:
                alons.append(lons_eddy[i]-360)
    clats=[]
    clons=[]
    clats2=[]
    clons2=[]
    elats=[]
    elons=[]
    erads=[]
    erads2=[]
    for i in range(0,len(cpr_sample_lat)):
        tem=cpr_sample_proc[i]
        if cpr_sample_lon[i]>0:
            continue
        if cpr_eddy_data_distance[i]<=cpr_eddy_data_radius[i] \
        and tem=='Yes' and cpr_eddy_data_track_id[i]==tem_id:
            clats.append(cpr_sample_lat[i])
            clons.append(cpr_sample_lon[i])
            elats.append(cpr_eddy_data_lats[i])
            elons.append(cpr_eddy_data_lons2[i])
            erads.append(cpr_eddy_data_speed_radius[i])
            erads2.append(cpr_eddy_data_speed_radius_deg[i])
        elif cpr_eddy_data_distance[i]<=cpr_eddy_data_radius[i] \
        and tem=='No' and cpr_eddy_data_track_id[i]==tem_id:
            clats2.append(cpr_sample_lat[i])
            clons2.append(cpr_sample_lon[i])
            elats.append(cpr_eddy_data_lats[i])
            elons.append(cpr_eddy_data_lons2[i])
            erads.append(cpr_eddy_data_speed_radius[i])
            erads2.append(cpr_eddy_data_speed_radius_deg[i])
    if len(clons2)<1 and len(clats)<1:
        continue
    fig = plt.figure(figsize=(11.7,8.3))
    #Custom adjust of the subplots
    plt.subplots_adjust(left=0.05,right=0.95,top=0.90,bottom=0.05,wspace=0.15,hspace=0.05)
    ax = plt.subplot(111)
    print(cpr_eddy_data_speed_radius[1],cpr_eddy_data_speed_radius_deg[1])
    #Let's create a basemap of the world
    m = Basemap(projection='merc', lat_0 = 45, lon_0 = -130, resolution = 'l', area_thresh = 0.1, llcrnrlon=-180.25, llcrnrlat=30.0,urcrnrlon=-115.25, urcrnrlat=62.75)
    m.fillcontinents(color='coral',lake_color='white')
    x,y = m(clons,clats)
    m.plot(x, y, 'bo', markersize=.2)
    x,y = m(clons2,clats2)
    m.plot(x, y, 'ro', markersize=.2)
    x,y = m(alons,alats)
    m.plot(x, y, 'k')
    for i in range(0,len(erads)):
        centerlon = elons[i]
        centerlat = elats[i]
        radius = erads[i]
        if abs(centerlon-erads2[i])<177:
            equi(m, centerlon, centerlat, radius,lw=1.)
#    plt.show()
    fig_fname="F:/data/eddy/figures/all_collocated_cpr_data_doubles" + str(tem_id) + ".pdf"
    plt.savefig(fig_fname,dpi=300)
    print(fig_fname)
# -

# Assorted diagnostics of the last eddy processed above.
print(len(alons))
print(alons[1:200])

for i in range(0,len(cpr_sample_lat)):
    tem=cpr_sample_proc[i]
    if cpr_eddy_data_track_id[i]==tem_id:
        print(i,cpr_eddy_data_distance[i],cpr_eddy_data_radius[i],tem)

print('clons2',clons2)
print('clats2',clats2)
print(len(clons))
print('clons',clons)
print('clats',clats)

# Re-open the final merged product and inspect it.
filename='F:/data/eddy/collocated_data/All CPR Sample catalogue with eddy info4.nc'
ds_eddy = xr.open_dataset(filename)
ds_eddy

ds_eddy.cpr_sample_id[2].values

print(type(ds_eddy))

fig, (ax1) = plt.subplots(nrows=1, figsize=(6, 5.4))
#f = plt.figure()
#map = Basemap(projection='merc', lat_0 = 45, lon_0 = -130, resolution = 'l', area_thresh = 0.1,
#              llcrnrlon=-180.25, llcrnrlat=30.0,urcrnrlon=-115.25, urcrnrlat=62.75)
#map.fillcontinents(color = 'coral')
#x,y = map(ds_eddy.cpr_sample_lon.values,ds_eddy.cpr_sample_lat.values)
d2=ds_eddy.where(ds_eddy.cpr_sample_lon<0)
print(len(d2))
print(type(d2))
# NOTE(review): colors samples by `cpr_sample_depth_exact`, which is notebook
# state, not a variable of ds_eddy — confirm it matches d2's sample order.
ax1.scatter(d2.cpr_sample_lon.values,d2.cpr_sample_lat.values, c = cpr_sample_depth_exact,s=1)
#plt.scatter(ds_eddy.cpr_sample_lon.values,ds_eddy.cpr_sample_lat.values, c = ds_eddy.cpr_sample_ETOPO_depth.values)
#plt.plot(ds_eddy.cpr_sample_lon.values[0:1000],ds_eddy.cpr_sample_ETOPO_depth.values[0:1000],'.')
plt.show()
f.savefig('F:/data/eddy/collocated_data/depth_image.png', transparent=False, format='png')

fig, (ax1) = plt.subplots(nrows=1, figsize=(6, 5.4))
im = ax1.imshow(ds_topo.z[7000:9500,0:4500], interpolation='bilinear',vmin=-7000.0, vmax=1.0,aspect='auto',origin='lower')
plt.show()

ds_eddy.cpr_sample_ETOPO_depth.values[0:10]

ds_eddy.cpr_sample_id[0:1000]
.ipynb_checkpoints/match cpr and eddy info and output file_updatexarray-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Notebook: effect of the learning rate on gradient descent for a single-weight
# linear model, using finite-difference gradients.

# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/PacktPublishing/Hands-On-Computer-Vision-with-PyTorch/blob/master/Chapter01/Learning_rate.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + colab={} colab_type="code" id="0w-8b749EeF4"
# Toy dataset: the target is y = 3 * x.
x = [[1],[2],[3],[4]]
y = [[3],[6],[9],[12]]

# + colab={} colab_type="code" id="TkdJPAuqEe3o"
from copy import deepcopy
import numpy as np


def feed_forward(inputs, outputs, weights):
    """Run the linear model (weights[0] = weight matrix, weights[1] = bias)
    and return its mean squared error against `outputs`."""
    predictions = np.dot(inputs, weights[0]) + weights[1]
    return np.mean(np.square(predictions - outputs))


# + colab={} colab_type="code" id="QvV_SKS3Eg98"
def update_weights(inputs, outputs, weights, lr):
    """One gradient-descent step.

    Each gradient is estimated by forward finite differences: nudge a single
    weight by 1e-4, re-evaluate the loss, and divide the loss change by the
    nudge.  Returns a new weight list; `weights` is not modified.
    """
    start_weights = deepcopy(weights)               # frozen snapshot of this step
    base_loss = feed_forward(inputs, outputs, start_weights)
    new_weights = deepcopy(weights)                 # receives the updates
    for layer_pos, layer in enumerate(start_weights):
        for w_pos, _unused in np.ndenumerate(layer):
            probe = deepcopy(weights)
            probe[layer_pos][w_pos] += 0.0001       # nudge one weight only
            nudged_loss = feed_forward(inputs, outputs, probe)
            slope = (nudged_loss - base_loss)/(0.0001)
            new_weights[layer_pos][w_pos] -= slope*lr
    return new_weights


# + colab={} colab_type="code" id="ca1g0frrEizm"
# Weight and bias, both initialised to zero.
W = [np.array([[0]], dtype=np.float32), np.array([[0]], dtype=np.float32)]

# + colab={} colab_type="code" id="nR0QCz_7Ekbx"
# Train for 1000 epochs with lr = 0.01, tracking the weight after each step.
weight_value = []
for epoch in range(1000):
    W = update_weights(x,y,W,0.01)
    weight_value.append(W[0][0][0])

# + colab={"base_uri": "https://localhost:8080/", "height": 312} colab_type="code" id="056x8N01EngD" outputId="27c5128a-c8f2-4a97-c9b1-bb08854d660a"
import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(weight_value)
plt.title('Weight value over increasing epochs')
plt.xlabel('Epochs')
plt.ylabel('Weight value')

# + colab={"base_uri": "https://localhost:8080/", "height": 312} colab_type="code" id="ynL-lHkiEn6u" outputId="0fb3f07e-b202-439a-fb8c-29d9c3f302ff"
# Repeat with a 10x larger learning rate (0.1).
W = [np.array([[0]], dtype=np.float32), np.array([[0]], dtype=np.float32)]
weight_value = []
for epoch in range(1000):
    W = update_weights(x,y,W,0.1)
    weight_value.append(W[0][0][0])
import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(weight_value)
plt.title('Weight value over increasing epochs with learning rate of 0.1')
plt.xlabel('Epochs')
plt.ylabel('Weight value')

# + colab={} colab_type="code" id="lvJLj7xNEr9q"


# + colab={"base_uri": "https://localhost:8080/", "height": 312} colab_type="code" id="dkv3Qm-JEsAZ" outputId="c35f1b1c-c70d-4b02-b54b-5a6bd2b5043b"
# Repeat with lr = 1 (large enough to destabilise training).
W = [np.array([[0]], dtype=np.float32), np.array([[0]], dtype=np.float32)]
weight_value = []
for epoch in range(1000):
    W = update_weights(x,y,W,1)
    weight_value.append(W[0][0][0])
import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(weight_value)
plt.title('Weight value over increasing epochs with learning rate of 1')
plt.xlabel('Epochs')
plt.ylabel('Weight value')

# + colab={} colab_type="code" id="wndidxTwE4DF"


# + colab={} colab_type="code" id="dl1yS2wfE4Fo"


# + colab={} colab_type="code" id="VcevM3y7EsDq"
def update_weights(inputs, outputs, weights, lr):
    """Same finite-difference step as above, but prints per-update diagnostics
    for even-indexed layers (i.e. the weight matrix, layer 0)."""
    start_weights = deepcopy(weights)
    base_loss = feed_forward(inputs, outputs, start_weights)
    new_weights = deepcopy(weights)
    for layer_pos, layer in enumerate(start_weights):
        for w_pos, _unused in np.ndenumerate(layer):
            probe = deepcopy(weights)
            probe[layer_pos][w_pos] += 0.0001
            nudged_loss = feed_forward(inputs, outputs, probe)
            slope = (nudged_loss - base_loss)/(0.0001)
            new_weights[layer_pos][w_pos] -= slope*lr
            if layer_pos % 2 == 0:
                print('weight value:', np.round(start_weights[layer_pos][w_pos],2),
                      'original loss:', np.round(base_loss,2),
                      'loss_plus:', np.round(nudged_loss,2),
                      'gradient:', np.round(slope,2),
                      'updated_weights:', np.round(new_weights[layer_pos][w_pos],2))
    return new_weights


# + colab={"base_uri": "https://localhost:8080/", "height": 499} colab_type="code" id="xkNksaKxE42f" outputId="c922e251-0b59-4263-c8f8-79002688c002"
# Ten verbose epochs at lr = 0.01.
# NOTE(review): print(W) placement (inside vs after the loop) reconstructed
# from a collapsed source — confirm against the original notebook.
W = [np.array([[0]], dtype=np.float32), np.array([[0]], dtype=np.float32)]
weight_value = []
for epoch in range(10):
    W = update_weights(x,y,W,0.01)
    weight_value.append(W[0][0][0])
    print(W)
import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(weight_value[:100])
plt.title('Weight value over increasing epochs when learning rate is 0.01')
plt.xlabel('Epochs')
plt.ylabel('Weight value')

# + colab={} colab_type="code" id="pRGvkqXfE7qQ"


# + colab={"base_uri": "https://localhost:8080/", "height": 499} colab_type="code" id="mW6DSH8lFB__" outputId="c1b0a228-8ee8-49ba-bf73-b68087373fac"
# Ten verbose epochs at lr = 0.1.
W = [np.array([[0]], dtype=np.float32), np.array([[0]], dtype=np.float32)]
weight_value = []
for epoch in range(10):
    W = update_weights(x,y,W,0.1)
    weight_value.append(W[0][0][0])
    print(W)
import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(weight_value[:100])
plt.title('Weight value over increasing epochs when learning rate is 0.1')
plt.xlabel('Epochs')
plt.ylabel('Weight value')

# + colab={} colab_type="code" id="KfPSrkGCFDlg"


# + colab={"base_uri": "https://localhost:8080/", "height": 499} colab_type="code" id="cFfeONvlFERA" outputId="83639333-e51d-4bfc-886b-a0c34b42bf7c"
# Ten verbose epochs at lr = 1.
W = [np.array([[0]], dtype=np.float32), np.array([[0]], dtype=np.float32)]
weight_value = []
for epoch in range(10):
    W = update_weights(x,y,W,1)
    weight_value.append(W[0][0][0])
    print(W)
import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(weight_value[:100])
plt.title('Weight value over increasing epochs when learning rate is 1')
plt.xlabel('Epochs')
plt.ylabel('Weight value')

# + colab={} colab_type="code" id="3YI32P69FGEt"
Chapter01/Learning_rate.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:infomax]
#     language: python
#     name: conda-env-infomax-py
# ---

# Sentiment-classification pipeline: a BERT encoder with a feed-forward head,
# trained on the Fudan multi-domain review datasets.
# NOTE(review): `datasets.utils`, `datasets.fudan_reviews` and
# `models.classifier` are project-local modules under ../meta_infomax/.

# +
import os
import sys
sys.path.append("../meta_infomax/")
from typing import Iterator, List, Dict, Union
import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
torch.manual_seed(1)  # fixed seed for reproducible runs
# %load_ext autoreload
# %autoreload 2
from datasets import utils, fudan_reviews
import torchtext
from torchtext.data import Field, LabelField, TabularDataset, Dataset
from models.classifier import FeedForward, SentimentClassifier
# -

# Load pre-trained BERT plus its tokenizer; embedding_dim is presumably 768
# (used below when sizing the heads) -- confirm in utils.get_transformer.
bert, tokenizer, embedding_dim = utils.get_transformer('bert-base-uncased')

# + jupyter={"outputs_hidden": true}
# data = datasets.fudan_reviews.get_fudan_datasets(tokenizer, datasets=['apparel'], data_dir='../meta_infomax/tests/fixtures/')
data = fudan_reviews.get_fudan_datasets(tokenizer)
# -

# Drop abnormally long examples; mutates `data` in place, hence the warning.
thrown_away = utils.remove_outlier_lengths(data) # call only once!

# Report train/val/test sizes per domain.
domains = ['apparel', 'baby', 'books', 'camera_photo', 'electronics', 'health_personal_care', 'imdb',
           'kitchen_housewares', 'magazines', 'music', 'software', 'sports_outdoors', 'toys_games', 'video']
for domain in domains:
    print(len(data[domain]['train']), len(data[domain]['val']), len(data[domain]['test']))

# +
# Single shared model: BERT encoder + 3-layer feed-forward head (768->512->256->2).
ff = FeedForward(768, 3, [512, 256, 2], nn.ReLU())
feedforward_optimizer = optim.Adam(ff.parameters())
bert_optimizer = optim.AdamW(bert.parameters(), lr=2e-5)
model = SentimentClassifier(bert, ff)
# -

model.head

# Move to GPU when available.
if torch.cuda.is_available():
    cuda_device = 0
    model = model.cuda(cuda_device)
else:
    cuda_device = -1

# +
# one classifier for each domain
feedforward_classifiers = {domain: FeedForward(768, 3, [512, 256, 2], nn.ReLU()) for domain in data.keys()}
feedforward_optimizers = {domain: optim.Adam(net.parameters()) for domain, net in feedforward_classifiers.items()}
# needs to be adam warmup variant
bert_optimizer = optim.AdamW(bert.parameters(), lr=2e-5)
# All per-domain classifiers share the same BERT encoder instance.
models = {domain: SentimentClassifier(bert, ff) for domain, ff in feedforward_classifiers.items()}
# -

domains_batch = utils.sample_domains(data)

# Domain split for meta-style evaluation: train / validation / test domains.
train_domains = ['apparel', 'baby', 'books', 'camera_photo', 'health_personal_care', 'imdb',
                 'kitchen_housewares', 'magazines', 'sports_outdoors', 'toys_games']
valid_domains = ['software', 'electronics']
test_domains = ['music', 'video']

# collapse_domains=True merges the selected domains into a single 'all' iterator.
train_iter = utils.get_iterators(data, include_domains=train_domains, split='train',
                                 batch_size=8, collapse_domains=True)
valid_iter = utils.get_iterators(data, include_domains=valid_domains, split='val',
                                 batch_size=16, collapse_domains=True)
test_iter = utils.get_iterators(data, include_domains=test_domains, split='test',
                                 batch_size=16, collapse_domains=True)

batch = next(iter(train_iter['all']))
batch

# Grab one batch short enough for BERT's 512-token limit to smoke-test on.
for batch in train_iter['all']:
    (text, text_lengths), label = batch.text, batch.label
    if text.shape[1] <= 512:
        break

model(text, label), label

# Overfit the single batch for 100 steps as a sanity check: loss should drop.
losses = []
for i in range(100):
    output = model(text, label)
    logits = output['logits']
    loss = output['loss']
    bert_optimizer.zero_grad()
    feedforward_optimizer.zero_grad()
    loss.backward()
    bert_optimizer.step()
    feedforward_optimizer.step()
    print(loss.item())
    losses.append(loss.item())
notebooks/bert-pipeline.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Peer-graded Assignment: Capstone Project - The Battle of Neighborhoods (Week 2)
#
# **<NAME>**

# #### INTRODUCTION / BUSINESS PROBLEM SECTION

# New York City's demographics show that it is a large and ethnically diverse metropolis. It is the largest city in the United States, with a long history of international immigration. New York City was home to nearly 8.5 million people in 2014, accounting for over 40% of the population of New York State and a slightly lower percentage of the New York metropolitan area, home to approximately 23.6 million. Over the last decade the city has been growing faster than the region. The New York region continues to be by far the leading metropolitan gateway for legal immigrants admitted into the United States.
#
# Throughout its history, New York City has been a major point of entry for immigrants; the term "melting pot" was coined to describe densely populated immigrant neighborhoods on the Lower East Side. As many as 800 languages are spoken in New York, making it the most linguistically diverse city in the world. English remains the most widely spoken language, although there are areas in the outer boroughs in which up to 25% of people speak English as an alternate language, and/or have limited or no English language fluency. English is least spoken in neighborhoods such as Flushing, Sunset Park, and Corona.
#
# With its diverse culture comes diverse food. There are many restaurants in New York City, each belonging to a different category of cuisine, such as Chinese, Indian, or French.
#
# So, as part of this project, we will list and visualize all major parts of New York City that have great Indian restaurants.
# #### DATA
#
# For this project we need the following data:
#
# - New York City data that contains the list of boroughs and neighborhoods along with their latitudes and longitudes.
#   - Data source: https://cocl.us/new_york_dataset
#   - Description: This data set contains the required information, and we will use it to explore the various neighborhoods of New York City.
# - Indian restaurants in each neighborhood of New York City.
#   - Data source: Foursquare API
#   - Description: Using this API we will get all the venues in each neighborhood. We can then filter these venues to keep only Indian restaurants.
# - GeoSpace data
#   - Data source: https://data.cityofnewyork.us/City-Government/Borough-Boundaries/tqmj-j8zm
#   - Description: Using this geospatial data we will get the New York borough boundaries, which will help us visualize a choropleth map.

# #### APPROACH
# - Collect the New York City data from https://cocl.us/new_york_dataset
# - Using the Foursquare API, find all venues for each neighborhood.
# - Filter out all venues that are Indian restaurants.
# - Find the rating, tip count and like count for each Indian restaurant using the Foursquare API.
# - Sort the data by rating.
# - Visualize the ranking of neighborhoods using the folium library (Python).

# #### QUESTIONS THAT CAN BE ANSWERED USING THE ABOVE DATASET
# - What is the best location in New York City for Indian cuisine?
# - Which areas have a potential market for Indian restaurants?
# - Which areas lack Indian restaurants?
# - Which is the best place to stay if I prefer Indian cuisine?

# ## ANALYSIS

# #### REQUIRED LIBRARIES
#
# - pandas and numpy for handling data.
# - requests module for using the Foursquare API.
# - geopy to get the coordinates of New York City.
# - folium to visualize the results on a map # + import pandas as pd import numpy as np pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) import requests import sys from bs4 import BeautifulSoup import geocoder import os import folium # map rendering library from geopy.geocoders import Nominatim import matplotlib.pyplot as plt import matplotlib.cm as cm import matplotlib.colors as colors # %matplotlib inline print('Libraries imported.') # - # Now we define a function to get the geocodes i.e latitude and longitude of a given location using geopy. def geo_location(address): # get geo location of address geolocator = Nominatim(user_agent="ny_<PASSWORD>") location = geolocator.geocode(address) latitude = location.latitude longitude = location.longitude return latitude,longitude # We define a function to intract with FourSquare API and get top 100 venues within a radius of 1000 metres for a given latitude and longitude. Below function will return us the venue id , venue name and category. 
def get_venues(lat,lng):
    """Fetch up to 100 venues within 1 km of (lat, lng) from Foursquare.

    Returns a DataFrame with columns ID, Name and Category; venues whose
    response entry is missing any of those fields are silently skipped.
    Prints "done" once the response has been parsed.
    """
    # Query parameters and (redacted) API credentials.
    radius=1000
    LIMIT=100
    CLIENT_ID = '########################################' # redacted Foursquare ID
    CLIENT_SECRET = '##################################' # redacted Foursquare secret
    VERSION = '20180605' # Foursquare API version

    # Build the explore-endpoint URL and fetch the JSON payload.
    url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(
        CLIENT_ID, CLIENT_SECRET, VERSION, lat, lng, radius, LIMIT)
    payload = requests.get(url).json()
    items = payload["response"]['groups'][0]['items']

    # Keep only the id/name/first-category triple for each venue.
    records=[]
    for entry in items:
        try:
            records.append([entry['venue']['id'],
                            entry['venue']['name'],
                            entry['venue']['categories'][0]['name']])
        except KeyError:
            pass

    df = pd.DataFrame(records, columns=['ID','Name','Category'])
    print("done")
    return df

# Now we will define a function to get venue details such as like count, rating and tip count for a given venue id. This will be used for ranking.
def get_venue_details(venue_id):
    """Fetch like count, rating and tip count for one Foursquare venue.

    Returns a one-row DataFrame with columns ID, Name, Likes, Rating, Tips.
    If any field is missing from the response (KeyError), the returned
    DataFrame is empty -- callers detect this via an IndexError on row 0.
    NOTE: this is a Foursquare premium call (limit ~500/day).
    """
    CLIENT_ID = '###########################' # i have changed the id with ##
    CLIENT_SECRET = '############################' # i have changed the secret with ##
    VERSION = '20180605' # Foursquare API version

    #url to fetch data from foursquare api
    url = 'https://api.foursquare.com/v2/venues/{}?&client_id={}&client_secret={}&v={}'.format(
        venue_id, CLIENT_ID, CLIENT_SECRET, VERSION)

    # get all the data
    results = requests.get(url).json()
    venue_data=results['response']['venue']
    venue_details=[]
    try:
        venue_id=venue_data['id']
        venue_name=venue_data['name']
        venue_likes=venue_data['likes']['count']
        venue_rating=venue_data['rating']
        venue_tips=venue_data['tips']['count']
        venue_details.append([venue_id,venue_name,venue_likes,venue_rating,venue_tips])
    except KeyError:
        # Venue lacks likes/rating/tips (e.g. newly listed) -> empty frame.
        pass

    column_names=['ID','Name','Likes','Rating','Tips']
    df = pd.DataFrame(venue_details,columns=column_names)
    return df

# Now we define a function to get the New York City data: boroughs and neighborhoods along with their latitudes and longitudes.

def get_new_york_data():
    """Download the NYC neighborhood dataset and return it as a DataFrame.

    Returns columns Borough, Neighborhood, Latitude, Longitude -- one row
    per neighborhood (306 rows at the time of writing).
    """
    url='https://cocl.us/new_york_dataset'
    resp=requests.get(url).json()
    # all data is present in features label
    features=resp['features']

    # Collect rows in a plain list and build the frame once:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0,
    # and appending row-by-row is quadratic in the number of rows.
    column_names = ['Borough', 'Neighborhood', 'Latitude', 'Longitude']
    rows = []
    for data in features:
        neighborhood_latlon = data['geometry']['coordinates']
        rows.append({'Borough': data['properties']['borough'],
                     'Neighborhood': data['properties']['name'],
                     # GeoJSON stores [longitude, latitude]
                     'Latitude': neighborhood_latlon[1],
                     'Longitude': neighborhood_latlon[0]})
    new_york_data = pd.DataFrame(rows, columns=column_names)
    return new_york_data

# We will call the above function to get the New York City data.
# get new york data new_york_data=get_new_york_data() new_york_data.head() new_york_data.shape # So there are total of 306 different Neighborhoods in New York plt.figure(figsize=(9,5), dpi = 100) # title plt.title('Number of Neighborhood for each Borough in New York City') #On x-axis plt.xlabel('Borough', fontsize = 15) #On y-axis plt.ylabel('No.of Neighborhood', fontsize=15) #giving a bar plot new_york_data.groupby('Borough')['Neighborhood'].count().plot(kind='bar') #legend plt.legend() #displays the plot plt.show() # We see that Queens has highest number of neighborhoods # # Now we will collect Indian resturants for each Neighborhood # prepare neighborhood list that contains indian resturants column_names=['Borough', 'Neighborhood', 'ID','Name'] indian_rest_ny=pd.DataFrame(columns=column_names) count=1 for row in new_york_data.values.tolist(): Borough, Neighborhood, Latitude, Longitude=row venues = get_venues(Latitude,Longitude) indian_resturants=venues[venues['Category']=='Indian Restaurant'] print('(',count,'/',len(new_york_data),')','Indian Resturants in '+Neighborhood+', '+Borough+':'+str(len(indian_resturants))) for resturant_detail in indian_resturants.values.tolist(): id, name , category=resturant_detail indian_rest_ny = indian_rest_ny.append({'Borough': Borough, 'Neighborhood': Neighborhood, 'ID': id, 'Name' : name }, ignore_index=True) count+=1 # Now that we have got all the indian resturants in new york city , we will analyze it indian_rest_ny.head() indian_rest_ny.shape # # We got 153 Indian Resturants across New York City plt.figure(figsize=(9,5), dpi = 100) # title plt.title('Number of Indian Resturants for each Borough in New York City') #On x-axis plt.xlabel('Borough', fontsize = 15) #On y-axis plt.ylabel('No.of Indian Resturants', fontsize=15) #giving a bar plot indian_rest_ny.groupby('Borough')['ID'].count().plot(kind='bar') #legend plt.legend() #displays the plot plt.show() # # We see that Queens has the largest number of indian resturants 
plt.figure(figsize=(9,5), dpi = 100) # title plt.title('Number of Indian Resturants for each Neighborhood in New York City') #On x-axis plt.xlabel('Neighborhood', fontsize = 15) #On y-axis plt.ylabel('No.of Indian Resturants', fontsize=15) #giving a bar plot indian_rest_ny.groupby('Neighborhood')['ID'].count().nlargest(5).plot(kind='bar') #legend plt.legend() #displays the plot plt.show() indian_rest_ny[indian_rest_ny['Neighborhood']=='Floral Park'] # So Floral Park in Queens has the highest number of Indian Resturants with a total count of 9. # # Now we will get the ranking of each resturant for further analysis. # + # prepare neighborhood list that contains indian resturants column_names=['Borough', 'Neighborhood', 'ID','Name','Likes','Rating','Tips'] indian_rest_stats_ny=pd.DataFrame(columns=column_names) count=1 for row in indian_rest_ny.values.tolist(): Borough,Neighborhood,ID,Name=row try: venue_details=get_venue_details(ID) print(venue_details) id,name,likes,rating,tips=venue_details.values.tolist()[0] except IndexError: print('No data available for id=',ID) # we will assign 0 value for these resturants as they may have been #recently opened or details does not exist in FourSquare Database id,name,likes,rating,tips=[0]*5 print('(',count,'/',len(indian_rest_ny),')','processed') indian_rest_stats_ny = indian_rest_stats_ny.append({'Borough': Borough, 'Neighborhood': Neighborhood, 'ID': id, 'Name' : name, 'Likes' : likes, 'Rating' : rating, 'Tips' : tips }, ignore_index=True) count+=1 # - indian_rest_stats_ny.head() indian_rest_stats_ny.shape indian_rest_ny.shape # So we got data for all resturants Now lets save this data to a csv sheet. In case we by mistake modify it. 
# As the number of calls to get venue details are premium calls with a limit of 500 per day, we will refer to the saved CSV data sheet if required.

indian_rest_stats_ny.to_csv('indian_rest_stats_ny.csv', index=False)

# Let's verify the data from the saved CSV file.

indian_rest_stats_ny_csv=pd.read_csv('indian_rest_stats_ny.csv')

# +
indian_rest_stats_ny_csv.shape
# -

indian_rest_stats_ny_csv.head()

indian_rest_stats_ny.info()

# We see that values like Likes and Tips are string values. We need to convert them to float for further analysis.

indian_rest_stats_ny['Likes']=indian_rest_stats_ny['Likes'].astype('float64')

# +
indian_rest_stats_ny['Tips']=indian_rest_stats_ny['Tips'].astype('float64')

# +
indian_rest_stats_ny.info()
# -

# Now the data types look correct.

# Restaurant with maximum Likes
indian_rest_stats_ny.iloc[indian_rest_stats_ny['Likes'].idxmax()]

# Restaurant with maximum Rating
indian_rest_stats_ny.iloc[indian_rest_stats_ny['Rating'].idxmax()]

# Restaurant with maximum Tips
indian_rest_stats_ny.iloc[indian_rest_stats_ny['Tips'].idxmax()]

# Average rating per neighborhood.
ny_neighborhood_stats=indian_rest_stats_ny.groupby('Neighborhood',as_index=False).mean()[['Neighborhood','Rating']]
ny_neighborhood_stats.columns=['Neighborhood','Average Rating']
ny_neighborhood_stats.sort_values(['Average Rating'],ascending=False).head(10)

# Above are the top neighborhoods by average Indian-restaurant rating.

# Average rating per borough.
ny_borough_stats=indian_rest_stats_ny.groupby('Borough',as_index=False).mean()[['Borough','Rating']]
ny_borough_stats.columns=['Borough','Average Rating']
ny_borough_stats.sort_values(['Average Rating'],ascending=False).head()

# Similarly, these are the average ratings of Indian restaurants for each borough.
#
# Let's visualize it.

plt.figure(figsize=(9,5), dpi = 100)
# title
plt.title('Average rating of Indian Resturants for each Borough')
#On x-axis
plt.xlabel('Borough', fontsize = 15)
#On y-axis
plt.ylabel('Average Rating', fontsize=15)
#giving a bar plot
indian_rest_stats_ny.groupby('Borough').mean()['Rating'].plot(kind='bar')
#legend
plt.legend()
#displays the plot
plt.show()

# We will consider all the neighborhoods with an average rating greater than or equal to 9.0 to visualize on the map.

ny_neighborhood_stats=ny_neighborhood_stats[ny_neighborhood_stats['Average Rating']>=9.0]

# +
ny_neighborhood_stats
# -

# We will join this dataset to the original New York data to get longitude and latitude.

ny_neighborhood_stats=pd.merge(ny_neighborhood_stats,new_york_data, on='Neighborhood')

ny_neighborhood_stats=ny_neighborhood_stats[['Borough','Neighborhood','Latitude','Longitude','Average Rating']]

ny_neighborhood_stats

# Now we will show this data on a map.

# create map and display it
ny_map = folium.Map(location=geo_location('New York'), zoom_start=12)

# +
# instantiate a feature group for the incidents in the dataframe
incidents = folium.map.FeatureGroup()

# Add one circle marker per top-rated neighborhood.
for lat, lng, in ny_neighborhood_stats[['Latitude','Longitude']].values:
    incidents.add_child(
        folium.CircleMarker(
            [lat, lng],
            radius=10, # define how big you want the circle markers to be
            color='yellow',
            fill=True,
            fill_color='blue',
            fill_opacity=0.6
        )
    )
# -

# Let's add a new field to the dataframe for labeling purposes.
ny_neighborhood_stats['Label']=ny_neighborhood_stats['Neighborhood']+', '+ny_neighborhood_stats['Borough']+'('+ny_neighborhood_stats['Average Rating'].map(str)+')'

# +
# add pop-up text to each marker on the map
for lat, lng, label in ny_neighborhood_stats[['Latitude','Longitude','Label']].values:
    folium.Marker([lat, lng], popup=label).add_to(ny_map)
# add incidents to map
ny_map.add_child(incidents)
# -

# Now that we have visualized the neighborhoods,
# let's visualize boroughs based on average rating.

# +
ny_map = folium.Map(location=geo_location('New York'), zoom_start=12)
# Borough-boundary GeoJSON downloaded from NYC Open Data (see DATA section).
ny_geo = r'Borough Boundaries.geojson'

# Choropleth keyed on the borough name in the GeoJSON properties.
ny_map.choropleth(
    geo_data=ny_geo,
    data=ny_borough_stats,
    columns=['Borough', 'Average Rating'],
    key_on='feature.properties.boro_name',
    fill_color='YlOrRd',
    fill_opacity=0.7,
    line_opacity=0.2,
    legend_name='Average Rating'
)
# display map
# as this is huge map data , we will save it to a file
ny_map.save('borough_rating.html')
# -

# ### Conclusion
# - Astoria (Queens), Blissville (Queens) and Civic Center (Manhattan) are some of the best neighborhoods for Indian cuisine.
# - Manhattan has a potential Indian-restaurant market.
# - Staten Island ranks last in average rating of Indian restaurants.
# - Manhattan is the best place to stay if you prefer Indian cuisine.

# ### Limitations
# - The ranking is purely based on restaurant ratings.
# - The accuracy of the results depends entirely on the data provided by Foursquare.
The Battle of Neighborhoods/IBM Capstone Project - The Battle of Neighborhoods (Week 2).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Introdução # Aqui estão algumas possíveis soluções para os exercícios propostos nos cadernos. Fique a vontade para criar a sua própria solução e tenha em mente que as linguagens de programação normalmente já tem funções prontas para boa parte dos problemas apresentados aqui. # # Use esse caderno como consulta para corrigir as suas soluções. Evite usar o caminho da cópia se quer realmente aprender os conceitos :-) # ## Pilhas # ** Ex 1: Implemente uma pilha que armazenará 3 notas ** pilha = [10,4,8] # ** Ex 2: Agora crie a pilha a partir de 3 valores digitados pelo usuário. Que operação (pop, push) você irá usar? ** # + pilha = [] for i in range(3): pilha.append(int(input("Digite uma nota: "))) print(pilha) # - # ** Ex 3: Faça o cálculo da média das 3 notas armazenadas. Você deve usar a operação de saída da pilha (pop) para retirar cada nota da pilha. ** # + nota1 = int(input("Digite uma nota: ")) nota2 = int(input("Digite uma nota: ")) nota3 = int(input("Digite uma nota: ")) pilha = [nota1, nota2, nota3] soma = 0 for i in range(3): soma +=pilha.pop() media = soma/3 print("A média é: ", media) # - # ** Ex 4: O seu desafio é alterar o código do exercício anterior para usar o mínimo possível de variáveis. ** # + pilha = [] for i in range(3): pilha.append(int(input("Digite uma nota: "))) print("A média é: ", (pilha[0]+pilha[1]+pilha[2])/3) # - # ** Ex 5: Escreva um programa que imprima seu nome de maneira reversa. Use pilhas na sua implementação ** # + nome = input("Qual é o seu nome? 
") pilha = [] for letra in nome: pilha.append(letra) invertido = "" for i in range(len(pilha)): invertido += pilha.pop() print(invertido) # - # ** Ex 6 (Adaptado de [Sedgewick - Algorithms, 4th Edition](https://algs4.cs.princeton.edu/home/)): Uma letra significa push e um asterisco significa pop na sequência seguinte. Dê a sequência de valores devolvidos pelas operações pop quando esta seqüência de operações é realizada em uma pilha LIFO inicialmente vazia.** # # E U V \* O \* U P \* A S S\* A \* \* \* R \* \* # V O P S A S A R U # texto = "E U V * O * U P * A S S* A * * * R * *" pilha = [] for l in texto: if l==" ": continue elif l=="*": print(pilha.pop()) else: pilha.append(l) # **Ex 7: Considerando o código abaixo, implemente as operações de push e pop para que a pilha inicial seja transformada na pilha "6 7 1 9 3", onde 3 representa o topo da pilha** pilha = [6,7,1,8,2] print("Pilha Inicial:", pilha) #implemente a partir daqui pilha.pop() pilha.pop() pilha.append(9) pilha.append(3) # a saida deve ser 6 7 1 9 3 print("Pilha Final:", pilha) # ** Ex 8 (Adaptado de Concurso da Petrobras): Considere que:** # # - P1 é uma pilha com 5 posições, v(1) a v(5), na qual v(5) é o topo. # - De v(1) até v(5), a pilha P1 está preenchida, respectivamente, com os símbolos Q5, Q3, Q1, Q4, Q2. # - Há ainda mais duas pilhas, inicialmente vazias, P2 e P3, com o mesmo tamanho. # # Implemente um código em Python de modo que a pilha P1 esteja preenchida de v(5) até v(1), respectivamente, com os símbolos Q1, Q2, Q3, Q4, Q5 # + p1 = ["Q5","Q3","Q1","Q4","Q2"] p2 = [] p3 = [] #implemente sua solução aqui p2.append(p1.pop()) p3.append(p1.pop()) p2.append(p1.pop()) p2.append(p1.pop()) p1.append(p3.pop()) p1.append(p2.pop()) p3.append(p2.pop()) p1.append(p2.pop()) p1.append(p3.pop()) #deve imprimir Q5, Q4, Q3, Q2, Q1 print(p1) # - # ** DESAFIO: Implemente o problema da Torre de Hanoi usando pilhas em Python. O conceito que foi dado nessa aula é suficiente para resolver o problema. 
Então, esforce-se primeiro para resolver o problema sem consultas externas. Lembre-se de desafiar a si próprio para ajudar na criação de novas conexões neurais e, com isso, aumentar o seu poder de raciocínio. ** # # Sugestão: estabeleça um tempo para si próprio para descobrir como resolver o problema em Python. Caso não consiga nesse tempo, aí sim pode buscar soluções já implementadas. Digamos que menos de 10 minutos é desistir fácil :-) # + p1 = [5,4,3,2,1] p2 = [] p3 = [] p3.append(p1.pop()) p2.append(p1.pop()) p2.append(p3.pop()) p3.append(p1.pop()) p1.append(p2.pop()) p3.append(p2.pop()) p3.append(p1.pop()) p2.append(p1.pop()) p2.append(p3.pop()) p1.append(p3.pop()) p1.append(p2.pop()) p2.append(p3.pop()) p3.append(p1.pop()) p2.append(p1.pop()) p2.append(p3.pop()) p3.append(p1.pop()) p1.append(p2.pop()) p3.append(p2.pop()) p3.append(p1.pop()) p1.append(p2.pop()) p2.append(p3.pop()) p1.append(p3.pop()) p1.append(p2.pop()) p3.append(p2.pop()) p3.append(p1.pop()) p2.append(p1.pop()) p2.append(p3.pop()) p3.append(p1.pop()) p1.append(p2.pop()) p3.append(p2.pop()) p3.append(p1.pop()) print(p1) print(p2) print(p3) # - # ## Filas # **Ex 1: Crie uma fila e faça a inserção de 3 elementos.** # + import queue fila = queue.Queue() fila.put("A") fila.put("B") fila.put("C") # - # **Ex 2: Crie uma fila, faça a inserção de 3 elementos. Depois implemente a saída de cada elemento da fila, imprimindo o nome do elemento que saiu.** # + import queue fila = queue.Queue() fila.put("A") fila.put("B") fila.put("C") for i in range(fila.qsize()): print(fila.get()) # - # **Ex 3: Crie uma fila a partir da digitação de 3 nomes digitados pelo usuário.** import queue tamanho = 3 fila = queue.Queue(maxsize=tamanho) for i in range(tamanho): fila.put(input("Digite um nome: ")) # ** Ex 4: Considere que você está implementando um sistema que imprima em um visor o nome das pessoas que serão atendidas em um banco. Durante um período, qualquer pessoa que chegar, será adicionada em uma fila. 
Após esse período, o atendimento é iniciado e seu sistema deverá imprimir o nome de cada uma, imprimindo o nome e o seu número de atendimento. (O primeiro a ser atendido recebe o número 1, o segundo o número 2 e assim pode diante). ** # + import queue fila = queue.Queue() while True: op = input("Digite o nome de uma pessoa ou x para sair: ") if op=="x": break else: fila.put(op) print("Iniciando o atendimento...") for i in range(fila.qsize()): print("Atendendo pessoa número {}: {}".format(i+1, fila.get())) # - # **Ex 5: Crie uma fila com o máximo de 3 elementos e crie um código para, caso a fila esteja cheia, imprimir uma mensagem "Fila Cheia"**. import queue fila = queue.Queue(maxsize=3) while True: op = input("Digite o nome de uma pessoa ou x para sair: ") if op=="x": break else: if fila.full(): print("Fila cheia") break else: fila.put(op) # **Ex 6: Crie uma fila com o máximo de 3 elementos e crie um código para que, caso a fila esteja cheia, remova um elemento da fila para abrir vaga na fila para mais um elemento. ** import queue fila = queue.Queue(maxsize=3) while True: op = input("Digite o nome de uma pessoa ou x para sair: ") if op=="x": break else: if fila.full(): print("Fila cheia. Vamos atender a primeira pessoa...", fila.get()) fila.put(op) else: fila.put(op) # ** Ex 7: Considere que você está implementando um servidor que vai gerenciar requisições de serviços de diversas aplicações. Inicialmente, cada aplicação poderá fazer várias requisições ao servidor e o seu sistema atenderá uma por vez por ordem de chegada. Ocorre que, ao criar a lista inicial de requisições, o programdor anterior criou uma pilha e não uma fila. Dessa forma, você precisará continuará implementar o sistema, transportando (usando código) as requisições da pilha inicial para a nova fila, certificando-se de respeitar a ordem de uma fila (FIFO)** # + # crie sua fila aqui. Não esqueça de importar o módulo e iniciar uma fila vazia. 
import queue

fila = queue.Queue()

# Initial stack of requests left behind by the previous programmer
# (the top of the stack is the END of the list).
pilhaInicial = ["app1", "app2", "app3", "app1", "app4", "app3", "app2", "app5"]

# Drain the stack (LIFO) into a temporary list: the first arrival, which sits
# at the bottom of the stack, ends up LAST in this reversed copy.
ordem_reversa = []
while pilhaInicial:
    ordem_reversa.append(pilhaInicial.pop())
print(ordem_reversa)

# Drain the reversed copy the same way: popping it restores arrival order,
# so the queue is filled FIFO (app1 first, app5 last).
while ordem_reversa:
    fila.put(ordem_reversa.pop())
# -

# ## Listas

# ** Ex 1: Crie uma lista com 4 nomes. Sua solução pode usar a inicialização da variável ou
# com as funções append ou insert**

lista = ["Pessoa A", "Pessoa B", "Pessoa C", "Pessoa D"]

# ** Ex 2: Altere o exemplo anterior para que, ao invés de valores fixos, o usuário digite
# 4 nomes. Use 4 instruções para leitura dos valores**

# +
nome1 = input("Digite um nome: ")
nome2 = input("Digite um nome: ")
nome3 = input("Digite um nome: ")
nome4 = input("Digite um nome: ")

lista = [nome1, nome2, nome3, nome4]
# -

# ** Ex 3: A partir da lista criada no exercício anterior, informe ao usuário a lista
# ordenada dos valores**

# +
nome1 = input("Digite um nome: ")
nome2 = input("Digite um nome: ")
nome3 = input("Digite um nome: ")
nome4 = input("Digite um nome: ")

# sorted() builds the ordered list directly from the four names.
lista = sorted([nome1, nome2, nome3, nome4])
print(lista)
# -

# ** Ex 4: Altere a solução do exercício 2 para que seja usada uma estrutura de repetição
# para ler os nomes digitados pelo usuário.
# **

# +
lista = []
for i in range(4):
    lista.append(input("Digite um nome: "))
lista.sort()
print(lista)
# -

# ** Ex 5: Mude a solução do exercício 5, considerando os novos requisitos**
#
# - será perguntado ao usuário quantos nomes deseja digitar (x)
# - o sistema perguntará x vezes para que o usuário digite um nome
# - quando o usuário digitar o último nome, o sistema deve encerrar e imprimir a lista
#   ordenada de valores
#

# +
lista = []
qt = int(input("Quantos números deseja digitar? "))
for i in range(qt):
    lista.append(input("Digite um nome: "))
lista.sort()
print(lista)
# -

# ** Ex 6: A partir da lista abaixo, continue a implementação para que seja perguntado ao
# usuário em qual posição deseja inserir o novo valor. Após o usuário informar a posição,
# deverá ser pedido que digite um novo nome. Seu sistema deverá inserir esse novo nome na
# posição informada pelo usuário.**

# +
# implement your solution here
linguagens = ["Python", "Java", "Lua"]

pos = int(input("Em que posição? "))
nome = input("Qual o nome da linguagem? ")
# list.insert clamps out-of-range positions, so any integer is accepted.
linguagens.insert(pos, nome)
print(linguagens)
# -

# ** Ex 7: A partir da lista abaixo, implemente o mesmo exemplo do exercício anterior,
# dessa vez pedindo ao usuário que informe de qual posição será excluído o registro e
# implemente a remoção do valor da posição informada pelo usuário. **

# +
# implement your solution here
linguagens = ["Python", "Java", "Lua"]

pos = int(input("Em que posição? "))
# FIX: the original looped over the indices and called
# `linguagens.remove(linguagens[i])`, which deletes the FIRST occurrence of the
# VALUE — the wrong element whenever the list holds duplicates.  pop(pos)
# removes exactly the requested position.  The bounds check preserves the
# original behaviour of silently ignoring an out-of-range position.
if 0 <= pos < len(linguagens):
    linguagens.pop(pos)
print(linguagens)
# -

# ** Ex 8: Implemente um sistema que pergunte ao usuário qual opção gostaria de usar para
# remoção: por posição ou por valor informado. De acordo com a opção escolhida, implemente
# a remoção. Exemplo: se o usuário escolheu remover por posição e informou a posição 2, o
# sistema deverá remover o valor Lua.
# Se escolher remover por valor e informou Python, então o sistema deverá remover o item
# Python da lista.**

# +
# implement your solution here
linguagens = ["Python", "Java", "Lua"]

posicao = None
nome = None
op = int(input("Remover por 1) Posição; 2) Valor "))
if op == 1:
    posicao = int(input("Qual posição? "))
elif op == 2:
    nome = input("Qual nome? ")
else:
    print("Opção inválida")

if nome is not None:
    # Removal by value: drops the first occurrence of `nome`.
    linguagens.remove(nome)
elif posicao is not None and 0 <= posicao < len(linguagens):
    # FIX: the original compared against an undefined variable `pos`
    # (`if i==pos`), so removal by position always raised a NameError.
    # Use the variable that was actually read (`posicao`) and remove the
    # element at that exact index with pop().
    linguagens.pop(posicao)
print(linguagens)
# -

# ** Ex 9: Dada a lista abaixo, implemente uma solução que pergunte ao usuário qual valor
# deseja localizar. O seu sistema deverá informar quantas vezes o valor procurado foi
# achado na lista. Note que, caso o valor não esteja na lista, seu sistema deverá informar
# uma mensagem clara, sem ser a mensagem de erro do Python. Para isso você usará uma
# estrutura de try-except mostrada na parte de explicações do assunto neste caderno**

# implement your solution here
linguagens = ["Python", "Java", "Lua", "Java", "C", "Python", "JS"]

nome = input("Qual linguagem? ")
qt = 0
for l in linguagens:
    if l == nome:
        qt += 1
print("A linguagem {} foi encontrada {} vezes na lista".format(nome, qt))

# implement your solution here — same exercise solved with list.count(),
# which simply returns 0 when the value is absent (no exception needed).
linguagens = ["Python", "Java", "Lua"]

nome = input("Qual linguagem? ")
print(linguagens.count(nome))

# ** Ex 10: Crie um sistema interativo que forneça as seguintes opções ao usuário:**
#
# - inserir valores em uma lista
# - remover valor de uma lista
# - ordenar valores de uma lista
# - localização valores de uma lista
# - sair

# +
lista = []
while True:
    op = int(input("Digite 1) inserir; 2) remover; 3) ordenar; 4) buscar; 5) sair"))
    if op == 5:
        break
    elif op == 1:
        pos = int(input("Posição? "))
        valor = input("Valor? ")
        lista.insert(pos, valor)
    elif op == 2:
        valor = input("Valor? ")
        # FIX (robustness): remove()/index() raise ValueError when the value is
        # missing; Ex 9's own statement asks for a clear message instead of a
        # Python traceback.
        try:
            lista.remove(valor)
        except ValueError:
            print("Valor não encontrado na lista")
    elif op == 3:
        lista.sort()
        print(lista)
    elif op == 4:
        valor = input("Valor? ")
        try:
            print(lista.index(valor))
        except ValueError:
            print("Valor não encontrado na lista")
Fontes-UDF/datastructures/Gabaritos.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + slideshow={"slide_type": "skip"} import os import numpy as np try: os.makedirs("fake_files") except: pass for idx in range(100_000): fname = "fake_files/file_{}.npy".format(idx) if not os.path.exists(fname): np.save(fname, np.random.normal(size=100000)) # + [markdown] slideshow={"slide_type": "slide"} # # Diseño de software para cómputo científico # # --- # ## Unidad 4: Optimización, paralelismo, concurrencia y cómputo distribuido en alto nivel. # # **NO BORRAR OUTPUT** # + [markdown] slideshow={"slide_type": "slide"} # ## Agenda de la Unidad 4 # # - **Multithreading, Paralelismo, Concurrencia.** # - Optimización y Optimización prematura. Cuellos de botella, Legibilidad vs. Optimización. # - Profiling. # - Compiladores justo a tiempo (numba). # - Cómputo distribuido con Dask. # + [markdown] slideshow={"slide_type": "slide"} # ## Concurrencia # # > se refiere a la habilidad de distintas partes de un programa, algoritmo, o problema de ser ejecutado en desorden o en orden parcial, sin afectar el resultado final. # # > Los cálculos (operaciones) pueden ser ejecutados en múltiples procesadores, o ejecutados en procesadores separados físicamente o virtualmente en distintos hilos de ejecución. # # ![image.png](attachment:image.png) # + [markdown] slideshow={"slide_type": "slide"} # ## Hilos vs Procesos # # ![image.png](attachment:image.png) # # - **Hilos:** # - Se ejecutan en el mismo proceso. # - Comparten memoria. # - **Procesos:** # - **NO** comparten memoria. # - Pueden ejecutarse en diferentes procesadores. # + [markdown] slideshow={"slide_type": "slide"} # ## Qué es un sistema operativo? # # Es una **colección** de programas que funciona como: # # - **Una máquina extendida** (top-down): Es una abstracción sobre el hardware. 
# - **Un manejador de recursos** (bottom-up): Un proveedor de servicios solicitados por el software. # # ![image.png](attachment:image.png) # + [markdown] slideshow={"slide_type": "slide"} # ## Si nos centramos en el kernel # # Linux tiene estos subsistemas # # ![image.png](attachment:image.png) # # https://www.makelinux.net/kernel_map/ # + [markdown] slideshow={"slide_type": "slide"} # ## Llamadas al sistema # # - Es el mecanismo usado por una aplicación para solicitar un servicio al sistema operativo. # # ![image.png](attachment:image.png) # + [markdown] slideshow={"slide_type": "slide"} # ## Interrupción # # Es una señal recibida por el procesador de una computadora, para indicarle que debe «interrumpir» el curso de ejecución actual y pasar a ejecutar código específico para tratar esta situación. # # ![image.png](attachment:image.png) # + [markdown] slideshow={"slide_type": "slide"} # ## Python - Multithreaded Programming # # Es **similar** a ejecutar varios programas diferentes al mismo tiempo, pero con los siguientes beneficios: # # - Comparten el mismo espacio de datos con el subproceso principal y, por lo tanto, pueden compartir información o comunicarse entre ellos fácilmente. # - No requieren mucha sobrecarga de memoria; son más baratos que los procesos. # # **Problema:** EL &Ç@$!! GIL # + [markdown] slideshow={"slide_type": "slide"} # ## Global interpreter lock (GIL) # # - Es un mecanismo utilizado en los intérpretes de lenguaje de computadora para sincronizar la ejecución de threads para que solo **un** subproceso nativo pueda ejecutarse a la vez. # # **Pero... WHY?** # # - Mayor velocidad de los programas de un solo subproceso (no es necesario adquirir o liberar bloqueos en todas las estructuras de datos por separado). # - Fácil integración de bibliotecas C que generalmente no son seguras para subprocesos, # - Facilidad de implementación. 
# # + [markdown] slideshow={"slide_type": "slide"} # ## Python `threading` # # Vamos con un ejemplo secuencial # - import numpy as np # + # %%time def sts(name, nums, verbose=True): mean, std = np.mean(nums), np.std(nums) if verbose: print("Name {}".format(name)) print("\t Mean {}".format(mean)) print("\t Std {}".format(std)) for idx in range(10): name = "F{}".format(idx) nums = np.random.normal(size=1000) sts(name, nums, False) # + [markdown] slideshow={"slide_type": "slide"} # ## Python `threading` # + import numpy as np import threading class MyThread(threading.Thread): def __init__(self, name, nums): super().__init__() self.name = name self.nums = nums def run(self): print("Starting ", self.name) self.mean = np.mean(self.nums) self.std = np.std(self.nums) # + [markdown] slideshow={"slide_type": "slide"} # ## Python `threading` # + # %%time threads = [] for tidx in range(10): thread = MyThread( name="T{}".format(tidx), nums=np.random.normal(size=1000)) thread.start() threads.append(thread) print("-" * 10) for thread in threads: thread.join() # + [markdown] slideshow={"slide_type": "slide"} # ### Threading que si sirve # + # %%time def open_file(idx): fname = "fake_files/file_{}.npy".format(idx) with open(fname, "rb") as fp: nums = np.load(fp) for idx in range(12_000): open_file(idx) # + threads = [] def open_file(idx): fname = "fake_files/file_{}.npy".format(idx) with open(fname, "rb") as fp: nums = np.load(fp) for idx in range(12_000): thread = threading.Thread(target=open_file, kwargs={"idx": idx}) threads.append(thread) # + # %%time for thread in threads: thread.start() for thread in threads: thread.join() # + [markdown] slideshow={"slide_type": "slide"} # ## Multiprocessing # # Es un paquete que admite la generación de procesos utilizando una API similar al módulo de multithreading. 
# # ![image.png](attachment:image.png) # + [markdown] slideshow={"slide_type": "slide"} # ## Multiprocessing # + import numpy as np import multiprocessing as mp class MyProc(mp.Process): def __init__(self, name, nums): super().__init__() self.name = name self.nums = nums def run(self): print("Starting ", self.name) self.mean = np.mean(self.nums) self.std = np.std(self.nums) # + [markdown] slideshow={"slide_type": "slide"} # ## Multiprocessing # + procs = [] for pidx in range(4): proc = MyProc( name="P{}".format(pidx), nums=np.random.normal(size=(100, 10))) proc.start() procs.append(proc) print("-" * 10) for proc in procs: proc.join() print("\t Mean {}".format(proc.mean)) print("\t Std {}".format(proc.std)) # + [markdown] slideshow={"slide_type": "slide"} # ## Multiprocessing comunication # + import numpy as np import multiprocessing as mp class MyProc(mp.Process): def __init__(self, name, nums): super().__init__() self.name = name self.nums = nums self._q = mp.Queue() def run(self): print("Starting ", self.name) self._q.put_nowait({"mean": np.mean(self.nums), "std": np.std(self.nums)}) def communicate(self): if not hasattr(self, "_mean"): data = self._q.get_nowait() self._mean = data["mean"] self._std = data["std"] @property def mean(self): self.communicate() return self._mean @property def std(self): self.communicate() return self._std # + [markdown] slideshow={"slide_type": "slide"} # ## Multiprocessing comunication # + procs = [] for pidx in range(8): proc = MyProc( name="P{}".format(pidx), nums=np.random.normal(size=(100, 10))) proc.start() procs.append(proc) print("-" * 10) for proc in procs: proc.join() print("Proc {}".format(proc.name)) print("\t Mean {}".format(proc.mean)) print("\t Std {}".format(proc.std)) # + [markdown] slideshow={"slide_type": "slide"} # ## Joblib # # ![image.png](attachment:image.png) # # Ayudante vergonzosamente simple para facilitar la escritura de código paralelo legible y depurarlo rápidamente. 
# # ```bash # $ pip install joblib # ``` # + [markdown] slideshow={"slide_type": "slide"} # ## Joblib - Multiprocessing # + # %%time import joblib # import Parallel, delayed def open_file(idx): fname = "fake_files/file_{}.npy".format(idx) with open(fname, "rb") as fp: nums = np.load(fp) return nums with joblib.Parallel(n_jobs=-1) as parallel: results = parallel( joblib.delayed(open_file)(idx) for idx in range(10_000)) # + [markdown] slideshow={"slide_type": "slide"} # ## Joblib - Multi-Threading # + # %%time import joblib # import Parallel, delayed def open_file(idx): fname = "fake_files/file_{}.npy".format(idx) with open(fname, "rb") as fp: nums = np.load(fp) return nums with joblib.Parallel(n_jobs=1, prefer="threads") as parallel: results = parallel( joblib.delayed(open_file)(idx) for idx in range(10_000)) # + [markdown] slideshow={"slide_type": "slide"} # ## Referencias # # - https://es.wikipedia.org/wiki/Concurrencia_(inform%C3%A1tica) # - http://materias.fi.uba.ar/7508/MOS4/Operating.Systems.4th.Edi.pdf # - https://es.wikipedia.org/wiki/Interrupci%C3%B3n # - https://es.wikipedia.org/wiki/Llamada_al_sistema # - https://www.tutorialspoint.com/python/python_multithreading.htm # - https://docs.python.org/2/library/multiprocessing.html # - https://www.tutorialspoint.com/concurrency_in_python/concurrency_in_python_multiprocessing.htm # - https://joblib.readthedocs.io/en/latest/ # -
legacy/unidad_4/00_thread_process.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext autoreload import os import time from fastai.text import * import sys sys.path.append('./imdb_scripts/') from create_toks import * import sentencepiece as spm from fastai.text import * # ### define common variables such as path, file prefixes... currvocab = 8 is_lowercase = True curr_vocab_str = f"{currvocab}k" if is_lowercase: curr_vocab_str = curr_vocab_str + '_lowercase' train_dir_str = 'data/aclImdb/train/all/' all_name = 'train_all_lower.txt' BOS = 'xbos' # beginning-of-sentence tag FLD = 'xfld' # data field tag SPM_MODEL_PATH=Path(f'data/aclImdb_spm/{curr_vocab_str}/') PATH=Path('data/aclImdb/') CLAS_PATH=Path('data/imdb_clas/') LM_PATH=Path('data/imdb_lm/') CLAS_PATH_SPM=Path('data/imdb_clas_spm/') LM_PATH_SPM=Path('data/imdb_lm_spm/') chunksize=24000 # ## First of all, all training and testing datasets are lowercased. # This step is ommited for its simplicity. (There's literally 1000 ways to do it!) full_p = train_dir_str + all_name to_txt = 'train_all_lower.txt' arg_str = '--input='+ train_dir_str + to_txt + f' --model_prefix=model_' + curr_vocab_str + f' --vocab_size={currvocab}000' print(arg_str) # ### This is where google SentencePiece tokenizes the texts. (<5 min) # It can be called directly from commandline or inside python. spm.SentencePieceTrainer.Train(arg_str) # ## modify get_texts and get_all for SentencePiece, to turn dataframe into tokens # ### google already threads it for you, so one less thing to worry about! 
def get_texts_spm(spm_model, df, n_lbls):
    """Encode a DataFrame chunk into SentencePiece id sequences.

    Each row's text columns are concatenated behind BOS/field markers
    (``\\n{BOS} {FLD} 1 ...``), passed through ``fixup`` (presumably the text
    normalizer from create_toks — TODO confirm) and encoded with
    ``spm_model.EncodeAsIds``.

    Parameters:
        spm_model: a loaded sentencepiece.SentencePieceProcessor.
        df: pandas DataFrame chunk. A single-column frame is treated as
            unlabeled text; otherwise the first ``n_lbls`` columns are labels
            and the remaining columns are text fields.
        n_lbls: number of leading label columns.

    Returns:
        (tok, labels): list of token-id lists, and list of label rows
        (empty when the frame has a single column).
    """
    tstart = time.time()
    if len(df.columns) == 1:
        # Unlabeled data: the lone column is the text itself.
        labels = []
        texts = f'\n{BOS} {FLD} 1 ' + df[0].astype(str)
        texts = texts.apply(fixup).values.astype(str)
    else:
        labels = df.iloc[:,range(n_lbls)].values.astype(np.int64)
        texts = f'\n{BOS} {FLD} 1 ' + df[n_lbls].astype(str)
        # Any further columns become extra numbered fields (xfld 2, 3, ...).
        for i in range(n_lbls+1, len(df.columns)):
            texts += f' {FLD} {i-n_lbls} ' + df[i].astype(str)
        texts = texts.apply(fixup).values.astype(str)
    # Original multi-process tokenizer kept for reference; SentencePiece does
    # its own threading, so the plain loop below suffices (see markdown above).
    #tok = proc_all_mp_smp(partition_by_cores(texts))
    tok = [spm_model.EncodeAsIds(t) for t in texts]
    tend = time.time()
    print(f'{(tend-tstart)/(len(texts)/1000):.2f}sec per 1k rows')
    return tok, list(labels)

def get_all_spm(spm_model, df, n_lbls):
    """Run get_texts_spm over every chunk of a chunked pandas reader and
    concatenate the per-chunk tokens and labels."""
    tok, labels = [], []
    for i, r in enumerate(df):
        print(i)  # progress indicator: one line per chunk
        tok_, labels_ = get_texts_spm(spm_model, r, n_lbls)
        tok += tok_; labels += labels_
    return tok, labels

# Load the 8k-vocab lowercase SentencePiece model and sanity-check how it
# segments a few deliberately awkward strings.
sp8_lower = spm.SentencePieceProcessor()
sp8_lower.Load('data/imdb_lm_spm/model_8k_lowercase.model')

print(sp8_lower.EncodeAsPieces("I wish I knew what the FUCKingHELL is up with the thingy".lower()))
print(sp8_lower.EncodeAsPieces("Shittingduckcrappoopercrackingjack".lower()))
print(sp8_lower.EncodeAsPieces("reaaaaaaaaaaaaaaaaaally".lower()))
print(sp8_lower.EncodeAsPieces('ElectricDildoInYourButt'.lower()))
print(sp8_lower.EncodeAsPieces('There is value to simplicity\nWhich offers more explicitly\nSoundings more exquisitely\nWhen words are not too long\n'.lower()))

# Chunked readers (chunksize rows per chunk) over the language-model corpora.
df_trn = pd.read_csv(LM_PATH/'train_lower.csv', header=None, chunksize=chunksize)
df_val = pd.read_csv(LM_PATH/'test_lower.csv', header=None, chunksize=chunksize)

# +
#get_all_spm(sp32_lower, df_trn, 1)
# -

tok_trn_spm, trn_labels_spm = get_all_spm(sp8_lower, df_trn, 1)

tok_val_spm, val_labels_spm = get_all_spm(sp8_lower, df_val, 1)

len(tok_trn_spm), len(tok_val_spm)

# +
# NOTE(review): the directory created is under CLAS_PATH but the arrays are
# saved under LM_PATH/'tmp' — presumably LM_PATH/'tmp' already exists from an
# earlier step; confirm, otherwise np.save below will fail.
(CLAS_PATH/'tmp').mkdir(exist_ok=True)

np.save(LM_PATH/'tmp'/'tok_trn_spm8_lower_ids.npy', tok_trn_spm)
np.save(LM_PATH/'tmp'/'tok_val_spm8_lower_ids.npy', tok_val_spm)
# -

# # now tokenize for
classification df_trn = pd.read_csv(CLAS_PATH/'train_lower.csv', header=None, chunksize=chunksize) df_val = pd.read_csv(CLAS_PATH/'test_lower.csv', header=None, chunksize=chunksize) tok_trn_spm, trn_labels_spm = get_all_spm(sp8_lower, df_trn, 1) tok_val_spm, val_labels_spm = get_all_spm(sp8_lower, df_val, 1) len(tok_trn_spm), len(tok_val_spm) np.save(CLAS_PATH/'tmp'/'tok_trn_spm8_lower_ids.npy', tok_trn_spm) np.save(CLAS_PATH/'tmp'/'tok_val_spm8_lower_ids.npy', tok_val_spm) # + # labels are no good. ignore. np.save(CLAS_PATH/'tmp'/'trn_labels_lower_spm8.npy', trn_labels_spm) np.save(CLAS_PATH/'tmp'/'val_labels_lower_spm8.npy', val_labels_spm) # -
_jupyter/blog_spm_tokenizer_8k-lowercase.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:anaconda2] # language: python # name: conda-env-anaconda2-py # --- # # pyPCGA-MARE2DEM tutorial example - resistivity estimation for MT survey # ## Problem description # + Domain: 2 x 2 km # + 10000 (nx, nz = 100, 100) log-normal resistivity using Gaussian covariance kernel with scale parameter of [500, 200] m # + Number of unkonwns: 10,000 resistivities # + Measurements: 5,248 (21 recievers x 32 frequency bands, EM amplitude and phase) and noise level 0.1 # %matplotlib inline # - Test environments import sys print(sys.version) import platform print(platform.platform()) # ubuntu 18.04 import numpy as np print(np.__version__) import psutil print('# of physcial CPU cores %s' % (psutil.cpu_count(logical=False))) import matplotlib.pyplot as plt import numpy as np import mare2dem from pyPCGA import PCGA import math import warnings; warnings.simplefilter('ignore') # model domain and discretization nx = ny = 100 m = nx*ny N = np.array([nx,ny]) xmin = np.array([0,0]) xmax = np.array([2000.0,2000.0]) pts = None # While MARE2DEM supports unstructured grids, here we use equally spaced points interpolated from FE grids to use FFT. 
will update it later # load observations and synthetic true and define intial guess s_true = np.loadtxt('true_100x100.txt') obs = np.loadtxt('obs.txt') s_init = np.mean(s_true) * np.ones((m, 1)) # - True resistivity field (nx = 100, nz = 100) # - At the ground surface (z = 0 m), 21 recievers were employed to measure EM amplitude and phase (with 32 frequency bands) plt.title(r'True resistivity ' + r'$[\Omega \cdot m]$') im0 = plt.imshow(s_true.reshape(ny,nx), vmin = 0, vmax = np.log(1000), cmap=plt.get_cmap('jet'),extent=(0,2000,2000,0)) plt.xticks(np.linspace(0,2000,3)) plt.yticks(np.linspace(0,2000,3)) plt.xlabel('x [m]') plt.ylabel('depth [m]') plt.gca().set_aspect('equal','box-forced') cbar = plt.colorbar(im0, ticks=[np.log(1),np.log(10), np.log(100),np.log(1000)]) cbar.ax.set_yticklabels(['1','10','100','1000']) plt.tight_layout() plt.show() # + # covairance kernel and scale parameters prior_std = 1.0 prior_cov_scale = np.array([500.0,200.0]) def kernel(r): return (prior_std ** 2) * np.exp(-r**2) # - # forward model wrapper for pyPCGA # prepare interface to run as a function def forward_model(s, parallelization, ncores=None): params = {'nx':nx,'ny':ny} model = mare2dem.Model(params) if parallelization: simul_obs = model.run(s, parallelization, ncores) else: simul_obs = model.run(s, parallelization) return simul_obs # PCGA parameters params = {'R': (0.1) ** 2, 'n_pc': 50, 'maxiter': 4, 'restol': 0.1, 'matvec': 'FFT', 'xmin': xmin, 'xmax': xmax, 'N': N, 'prior_std': prior_std, 'prior_cov_scale': prior_cov_scale, 'kernel': kernel, 'post_cov': "diag", 'precond': True, 'LM': True, 'parallel': True, 'linesearch': True, 'forward_model_verbose': False, 'verbose': False, 'iter_save': True, 'precision':1.E-4} # # Inversion # - intialize pyPCGA # initialize prob = PCGA(forward_model, s_init, pts, params, s_true, obs) # - run pyPCGA # run inversion s_hat, simul_obs, post_diagv, iter_best = prob.Run() # # Post-processing # + fig, ax = 
plt.subplots(nrows=1,ncols=2,sharey=True) im0 = ax[0].imshow(s_true.reshape(ny,nx),vmin=0,vmax=np.log(1000), cmap=plt.get_cmap('jet'),extent=(0,2000,2000,0)) ax[0].set_title('true') ax[0].set_aspect('equal','box-forced') ax[1].imshow(s_hat.reshape(ny,nx),vmin=0,vmax=np.log(1000), cmap=plt.get_cmap('jet'),extent=(0,2000,2000,0)) ax[1].set_title('estimated') ax[1].set_aspect('equal','box-forced') ax[0].set_xticks(np.linspace(0,2000,3)) ax[0].set_yticks(np.linspace(0,2000,3)) ax[1].set_xticks(np.linspace(0,2000,3)) ax[0].set_ylabel('depth [m]') ax[0].set_xlabel('x [m]') ax[1].set_xlabel('x [m]') cbar = fig.colorbar(im0, ticks=[np.log(1),np.log(10), np.log(100),np.log(1000)],ax=ax.ravel().tolist(),shrink=0.5) cbar.ax.set_yticklabels(['1','10','100','1000']) # + post_std = np.sqrt(post_diagv) plt.title(r'Uncertainty - posterior std(estimate $\ln \; \rho$)') im0 = plt.imshow(post_std.reshape(ny,nx), cmap=plt.get_cmap('jet'),extent=(0,2000,2000,0)) plt.gca().set_aspect('equal','box-forced') plt.xticks(np.linspace(0,2000,3)) plt.yticks(np.linspace(0,2000,3)) plt.xlabel('x [m]') plt.ylabel('depth [m]') plt.colorbar(im0) plt.tight_layout() # - nobs = obs.shape[0] plt.title('obs. vs simul.') plt.plot(obs, simul_obs, '.') plt.xlabel('observed') plt.ylabel('simulated') minobs = np.hstack((obs, simul_obs.reshape(-1))).min() maxobs = np.hstack((obs, simul_obs.reshape(-1))).max() plt.plot(np.linspace(minobs, maxobs, 20), np.linspace(minobs, maxobs, 20), 'k-') axes = plt.gca() axes.set_xlim([math.floor(minobs), math.ceil(maxobs)]) axes.set_ylim([math.floor(minobs), math.ceil(maxobs)]) plt.gca().set_aspect('equal','box-forced') plt.xticks(np.linspace(0,60,3)) plt.yticks(np.linspace(0,60,3)) plt.tight_layout()
examples/mare2dem_MT/inversion_mare2dem.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # According to forums, the VGGish model code was not the original code used to generate the released embeddings. This code serves to compare the output of the VGGish model against the embedding. # # See https://groups.google.com/forum/#!topic/audioset-users/EITl3rcNDI8 # + import os import sys import pandas as pd import numpy as np from scipy.io import wavfile from scipy.spatial.distance import euclidean, cosine from sklearn.externals import joblib import matplotlib.pyplot as plt import six import tensorflow as tf from youtube_audioset import get_data, get_recursive_sound_names, get_all_sound_names from youtube_audioset import explosion_sounds, motor_sounds, wood_sounds, human_sounds, nature_sounds from youtube_audioset import download_clip # + sys.path.append(os.path.abspath('./externals/tensorflow_models/research/audioset/')) from vggish_input import wavfile_to_examples, waveform_to_examples import vggish_input import vggish_params import vggish_postprocess import vggish_slim # + # This is an all silent clip ytid = 'RhSLUvQ_LuM' yt_start = 30 yt_end = 40 audio_file_path = 'sounds/audioset/'+ ytid+'-'+str(yt_start)+'-'+str(yt_end)+'.wav' download_clip(ytid, yt_start, yt_end) # - examples_batch = wavfile_to_examples(audio_file_path) # + sr, wav_data = wavfile.read(audio_file_path) print "Energy of signal:", np.square(wav_data).sum() # - # It is confirmed that the audio signal only contains zero samples. # + # Copied from https://github.com/tensorflow/models/blob/master/research/audioset/vggish_inference_demo.py flags = tf.app.flags flags.DEFINE_string( 'wav_file', None, 'Path to a wav file. Should contain signed 16-bit PCM samples. 
' 'If none is provided, a synthetic sound is used.') flags.DEFINE_string( 'checkpoint', './externals/tensorflow_models/research/audioset/vggish_model.ckpt', 'Path to the VGGish checkpoint file.') flags.DEFINE_string( 'pca_params', './externals/tensorflow_models/research/audioset/vggish_pca_params.npz', 'Path to the VGGish PCA parameters file.') flags.DEFINE_string( 'tfrecord_file', None, 'Path to a TFRecord file where embeddings will be written.') FLAGS = flags.FLAGS # + # Copied from https://github.com/tensorflow/models/blob/master/research/audioset/vggish_inference_demo.py # Prepare a postprocessor to munge the model embeddings. pproc = vggish_postprocess.Postprocessor(FLAGS.pca_params) # If needed, prepare a record writer to store the postprocessed embeddings. writer = tf.python_io.TFRecordWriter( FLAGS.tfrecord_file) if FLAGS.tfrecord_file else None with tf.Graph().as_default(), tf.Session() as sess: # Define the model in inference mode, load the checkpoint, and # locate input and output tensors. vggish_slim.define_vggish_slim(training=False) vggish_slim.load_vggish_slim_checkpoint(sess, FLAGS.checkpoint) features_tensor = sess.graph.get_tensor_by_name( vggish_params.INPUT_TENSOR_NAME) embedding_tensor = sess.graph.get_tensor_by_name( vggish_params.OUTPUT_TENSOR_NAME) # Run inference and postprocessing. [embedding_batch] = sess.run([embedding_tensor], feed_dict={features_tensor: examples_batch}) print(embedding_batch) postprocessed_batch = pproc.postprocess(embedding_batch) print(postprocessed_batch) # Write the postprocessed embeddings as a SequenceExample, in a similar # format as the features released in AudioSet. Each row of the batch of # embeddings corresponds to roughly a second of audio (96 10ms frames), and # the rows are written as a sequence of bytes-valued features, where each # feature value contains the 128 bytes of the whitened quantized embedding. 
seq_example = tf.train.SequenceExample( feature_lists=tf.train.FeatureLists( feature_list={ vggish_params.AUDIO_EMBEDDING_FEATURE_NAME: tf.train.FeatureList( feature=[ tf.train.Feature( bytes_list=tf.train.BytesList( value=[embedding.tobytes()])) for embedding in postprocessed_batch ] ) } ) ) print(seq_example) # - # The results should be identical for each embedding of every 1 second interval. So let's just look at the first result. # + processed_embedding = postprocessed_batch[0,:] processed_embedding # + silence_embedding = joblib.load('parameter/silence_embedding.pkl') silence_embedding.round() # - cosine(silence_embedding, processed_embedding) euclidean(silence_embedding, processed_embedding) # Cosine distance is low but euclidean distance is very high.
ipython_notebooks/compare_vggish_out_vs_released_embedding.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ReLeaSE # language: python # name: release # --- # # JAK2 activity optimization with ReLeaSE algorithm # In this experiment we will optimized parameters of pretrained generative RNN to produce molecules with maximized and minimized pIC50 for JAK2. We use policy gradient algorithm with custom reward function to bias the properties of generated molecules aka Reinforcement Learninf for Structural Evolution (ReLeaSE) as was proposed in **<NAME>., <NAME>., & <NAME>. (2018). *Deep reinforcement learning for de novo drug design*. Science advances, 4(7), eaap7885.** # ## Imports # %env CUDA_VISIBLE_DEVICES=0 # %load_ext autoreload # %autoreload 2 import sys sys.path.append('./release/') import torch import torch.nn as nn from torch.optim.lr_scheduler import ExponentialLR, StepLR import torch.nn.functional as F use_cuda = torch.cuda.is_available() use_cuda import numpy as np from tqdm import tqdm, trange import pickle from rdkit import Chem, DataStructs from stackRNN import StackAugmentedRNN from data import GeneratorData from utils import canonical_smiles # + import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns # - # ## Setting up the generator # ### Loading data for the generator gen_data_path = './data/test2.smi' tokens = ['<', '>', '#', '%', ')', '(', '+', '-', '/', '.', '1', '0', '3', '2', '5', '4', '7', '6', '9', '8', '=', 'A', '@', 'C', 'B', 'F', 'I', 'H', 'O', 'N', 'P', 'S', '[', ']', '\\', 'c', 'e', 'i', 'l', 'o', 'n', 'p', 's', 'r', '\n'] gen_data = GeneratorData(training_data_path=gen_data_path, delimiter='\t', cols_to_read=[0], keep_header=True, tokens=tokens) # ## Util functions # **plot_hist** function plots histogram of predicted properties and a vertical line for thershold. 
def plot_hist(prediction, n_to_generate):
    """Print summary statistics and plot a KDE of predicted pIC50 values.

    Parameters:
        prediction: array of predicted pIC50 values for the VALID generated
            SMILES only (invalid ones were filtered out upstream).
        n_to_generate: total number of generation attempts, so
            len(prediction)/n_to_generate is the fraction of valid molecules.
    """
    print("Mean value of predictions:", prediction.mean())
    print("Proportion of valid SMILES:", len(prediction)/n_to_generate)
    ax = sns.kdeplot(prediction, shade=True)
    ax.set(xlabel='Predicted pIC50',
           title='Distribution of predicted pIC50 for generated molecules')
    plt.show()

# **estimate_and_update** function:
#
# 1) generates n_to_generate number of SMILES strings
#
# 2) filters invalid SMILES
#
# 3) predicts pIC50 for valid SMILES
#
# 4) plots histogram of predicted pIC50
#
# 5) Returns valid SMILES and their predicted pIC50s

def estimate_and_update(generator, predictor, n_to_generate, **kwargs):
    """Sample molecules from `generator`, score them with `predictor`, plot
    the score distribution and return (smiles, prediction).

    **kwargs is accepted but unused — NOTE(review): kept for interface
    compatibility with callers; confirm before removing.
    """
    generated = []
    pbar = tqdm(range(n_to_generate))
    for i in pbar:
        pbar.set_description("Generating molecules...")
        # evaluate() appears to return a string wrapped in start/end tokens
        # ('<' ... '>' per the generator token list); [1:-1] strips them —
        # TODO confirm delimiters.
        generated.append(generator.evaluate(gen_data, predict_len=120)[1:-1])

    # NOTE(review): [:-1] drops the last entry from canonical_smiles and [1:]
    # drops the first unique entry — presumably sentinel/empty-string
    # artifacts; verify against canonical_smiles and np.unique behaviour.
    sanitized = canonical_smiles(generated, sanitize=False, throw_warning=False)[:-1]
    unique_smiles = list(np.unique(sanitized))[1:]
    # nan_smiles (entries the predictor could not featurize) is ignored here.
    smiles, prediction, nan_smiles = predictor.predict(unique_smiles, get_features=get_fp)
    plot_hist(prediction, n_to_generate)
    return smiles, prediction

# ## Initializing and training the generator

# We will use stack augmented generative GRU as a generator. The model was trained to
# predict the next symbol from SMILES alphabet using the already generated prefix. Model
# was trained to minimize the cross-entropy loss between predicted symbol and ground
# truth symbol.
Scheme of the generator when inferring new SMILES is shown below: # # <img src="./figures/generator.png"> # Initialize stack-augmented generative RNN: # + hidden_size = 1500 stack_width = 1500 stack_depth = 200 layer_type = 'GRU' lr = 0.001 optimizer_instance = torch.optim.Adadelta my_generator = StackAugmentedRNN(input_size=gen_data.n_characters, hidden_size=hidden_size, output_size=gen_data.n_characters, layer_type=layer_type, n_layers=1, is_bidirectional=False, has_stack=True, stack_width=stack_width, stack_depth=stack_depth, use_cuda=use_cuda, optimizer_instance=optimizer_instance, lr=lr) # - # If you want train the model from scratch, uncomment the lines below: model_path = './checkpoints/generator/checkpoint_biggest_rnn' # + #losses = my_generator.fit(gen_data, 1500000) # + #plt.plot(losses) # + #my_generator.evaluate(gen_data) # + #my_generator.save_model(model_path) # - !"C://Program Files/NVIDIA Corporation/NVSMI/nvidia-smi.exe" # Alternatively, you can skip the process of training and load the pretrained parameters into the model: my_generator.load_model(model_path) # ## Setting up the predictor # For this demo we will use Random Forest predictor instead of Recurrent Neural Network, since the availability of the dataset with JAK2 activity data used in the "Deep Reinforcement Learning for de novo Drug Design" paper is restricted under the license agreement. Here instead we use the JAK2 activity data downladed from ChEMBL. The size of this dataset is ~2000 data points, which is not enough to build a reliable deep neural network. Is you want to see a demo with RNN, please checkout logP optimization demo. 
# Project-local data/feature helpers plus mordred for descriptor calculation.
from data import PredictorData
from utils import get_desc, get_fp
from mordred import Calculator, descriptors

# NOTE(review): `calc` is never used in this chunk -- the get_fp fingerprints
# are what feed the predictor. Confirm before removing.
calc = Calculator(descriptors, ignore_3D=True)
pred_data = PredictorData(path='./data/jak2_data.csv', get_features=get_fp)

from predictor import VanillaQSAR
from sklearn.ensemble import RandomForestRegressor as RFR

# Random-forest regression QSAR model wrapped in the project's VanillaQSAR
# interface; 250 trees, 10 parallel jobs.
model_instance = RFR
model_params = {'n_estimators': 250, 'n_jobs': 10}

my_predictor = VanillaQSAR(model_instance=model_instance,
                           model_params=model_params,
                           model_type='regressor')

my_predictor.fit_model(pred_data, cv_split='random')

# Here we produce the unbiased distribution of the property:
smiles_unbiased, prediction_unbiased = estimate_and_update(my_generator,
                                                           my_predictor,
                                                           n_to_generate=10)

# ## Biasing the distribution of the generator with reinforcement learning (policy gradient)
# We combine the generator and the predictor into a single pipeline. The
# generator produces new SMILES string, which is then evaluated by the
# predictor. Based on the obtained prediction and our goal, we assign a
# numerical reward value and update the parameters of the generator using
# the policy gradient algorithm.
#
# <img src="./figures/rl_pipeline.png">
#
# Policy gradient loss is defined as:
# $$
# L(S|\theta) = -\dfrac{1}{n}\sum_{i=1}^{|S|} \sum_{j=1}^{length(s_i)} R_i\cdot \gamma^i \cdot \log p(s_i|s_0 \dots s_{i-1}, \theta),
# $$
#
# where $R_i$ is the reward obtained at time step $i$, $\gamma$ is the
# discount factor and $p(s_i|s_0 \dots s_{i-1}, \theta)$ is the probability
# of the next character given the prefix, which we obtain from the generator.
#
# In our case the reward is the same for every time step and is equal to the
# reward for the whole molecule. Discount factor $\gamma$ is a number close
# to $1.0$ (it could be $1.0$).
# ### Maximizing pIC50 for JAK2
from reinforcement import Reinforcement

# Making a copy of the generator that will be optimized
# +
my_generator_max = StackAugmentedRNN(input_size=gen_data.n_characters,
                                     hidden_size=hidden_size,
                                     output_size=gen_data.n_characters,
                                     layer_type=layer_type,
                                     n_layers=1, is_bidirectional=False,
                                     has_stack=True,
                                     stack_width=stack_width,
                                     stack_depth=stack_depth,
                                     use_cuda=use_cuda,
                                     optimizer_instance=optimizer_instance,
                                     lr=lr)
my_generator_max.load_model(model_path)
# -

# Setting up some parameters for the experiment
n_to_generate = 200
n_policy_replay = 10
n_policy = 15
n_iterations = 100

def simple_moving_average(previous_values, new_value, ma_window_size=10):
    # Moving average over the last (ma_window_size - 1) history values plus
    # the incoming one; used to smooth the reward / loss curves.
    value_ma = np.sum(previous_values[-(ma_window_size-1):]) + new_value
    value_ma = value_ma/(len(previous_values[-(ma_window_size-1):]) + 1)
    return value_ma

def get_reward_max(smiles, predictor, invalid_reward=0.0, get_features=get_fp):
    # Reward for maximization: exp(pIC50 / 3) for a valid SMILES,
    # `invalid_reward` when the predictor flags the molecule as invalid.
    mol, prop, nan_smiles = predictor.predict([smiles], get_features=get_features)
    if len(nan_smiles) == 1:
        return invalid_reward
    return np.exp(prop[0]/3)

# The reward function we will use here is
# $$
# R(s) = \exp(\dfrac{predictor(s)}{3})
# $$
x = np.linspace(0, 12)
y = np.exp(x/3)
plt.plot(x, y)
plt.xlabel('pIC50 value')
plt.ylabel('Reward value')
plt.title('Reward function for JAK2 activity maximization')
plt.show()

RL_max = Reinforcement(my_generator_max, my_predictor, get_reward_max)

rewards_max = []
rl_losses_max = []

# NOTE(review): indentation below is reconstructed from a flattened notebook
# dump -- confirm which statements sit inside the outer iteration loop.
for i in range(n_iterations):
    for j in trange(n_policy, desc='Policy gradient...'):
        cur_reward, cur_loss = RL_max.policy_gradient(gen_data, get_features=get_fp)
        rewards_max.append(simple_moving_average(rewards_max, cur_reward))
        rl_losses_max.append(simple_moving_average(rl_losses_max, cur_loss))

    plt.plot(rewards_max)
    plt.xlabel('Training iteration')
    plt.ylabel('Average reward')
    plt.show()
    plt.plot(rl_losses_max)
    plt.xlabel('Training iteration')
    plt.ylabel('Loss')
    plt.show()

    smiles_cur, prediction_cur = estimate_and_update(RL_max.generator,
                                                     my_predictor,
                                                     n_to_generate,
                                                     get_features=get_fp)
    print('Sample trajectories:')
    for sm in smiles_cur[:5]:
        print(sm)

# Large sample from the biased (maximized) generator for the final comparison.
smiles_biased_max, prediction_biased_max = estimate_and_update(RL_max.generator,
                                                               my_predictor,
                                                               n_to_generate=10000)

sns.kdeplot(prediction_biased_max, label='Maximized', shade=True, color='red')
sns.kdeplot(prediction_unbiased, label='Unbiased', shade=True, color='grey')
plt.xlabel('pIC50 values')
plt.show()

# ### Minimizing pIC50 for JAK2
# Next we will minimize the pIC50 for JAK2.
#
# The reward function we will use here is
# $$
# R(s) = \exp(\dfrac{-predictor(s)}{3} + 3)
# $$
def get_reward_min(smiles, predictor, invalid_reward=0.0, get_features=get_fp):
    # Mirror of get_reward_max: high reward for LOW predicted pIC50.
    mol, prop, nan_smiles = predictor.predict([smiles], get_features=get_features)
    if len(nan_smiles) == 1:
        return invalid_reward
    return np.exp(-prop[0]/3 + 3)

x = np.linspace(0, 12)
y = np.exp(-x/3 + 3)
plt.plot(x, y)
plt.xlabel('pIC50 value')
plt.ylabel('Reward value')
plt.title('Reward function for JAK2 activity minimization')
plt.show()

# Making a copy of the generator that will be optimized
my_generator_min = StackAugmentedRNN(input_size=gen_data.n_characters,
                                     hidden_size=hidden_size,
                                     output_size=gen_data.n_characters,
                                     layer_type=layer_type,
                                     n_layers=1, is_bidirectional=False,
                                     has_stack=True,
                                     stack_width=stack_width,
                                     stack_depth=stack_depth,
                                     use_cuda=use_cuda,
                                     optimizer_instance=optimizer_instance,
                                     lr=lr)
my_generator_min.load_model(model_path)

RL_min = Reinforcement(my_generator_min, my_predictor, get_reward_min)

rewards_min = []
rl_losses_min = []

for i in range(n_iterations):
    for j in trange(n_policy, desc='Policy gradient...'):
        cur_reward, cur_loss = RL_min.policy_gradient(gen_data, get_features=get_fp)
        rewards_min.append(simple_moving_average(rewards_min, cur_reward))
        rl_losses_min.append(simple_moving_average(rl_losses_min, cur_loss))

    plt.plot(rewards_min)
    plt.xlabel('Training iteration')
    plt.ylabel('Average reward')
    plt.show()
    plt.plot(rl_losses_min)
    plt.xlabel('Training iteration')
    plt.ylabel('Loss')
    plt.show()

    smiles_cur, prediction_cur = estimate_and_update(RL_min.generator,
                                                     my_predictor,
                                                     n_to_generate)
    print('Sample trajectories:')
    for sm in smiles_cur[:5]:
        print(sm)

smiles_biased_min, prediction_biased_min = estimate_and_update(RL_min.generator,
                                                               my_predictor,
                                                               n_to_generate=10000)

# Overlay the three predicted-pIC50 distributions.
sns.kdeplot(prediction_biased_max, label='Maximized', shade=True, color='red')
sns.kdeplot(prediction_biased_min, label='Minimized', shade=True, color='blue')
sns.kdeplot(prediction_unbiased, label='Unbiased', shade=True, color='grey')
plt.xlabel('pIC50 values')
plt.title('Distributions of predicted pIC50 for unbiased,' +
          ' maximized and minimized generator')
plt.show()

# ## Drawing random molecules
# Now we will draw some random compounds from the biased library:
from rdkit.Chem.Draw import DrawingOptions
from rdkit.Chem import Draw
DrawingOptions.atomLabelFontSize = 50
DrawingOptions.dotsPerAngstrom = 100
DrawingOptions.bondLineWidth = 3

# ### Molecules with maximized pIC50
# NOTE(review): `Chem` is used below but only rdkit.Chem.Draw is imported in
# this cell -- Chem must be imported earlier in the notebook.
generated_mols_max = [Chem.MolFromSmiles(sm, sanitize=True) for sm in smiles_biased_max]
# Keep only molecules RDKit could parse (non-None entries).
sanitized_gen_mols_max = [generated_mols_max[i]
                          for i in np.where(np.array(generated_mols_max) != None)[0]]
n_to_draw = 20
ind = np.random.randint(0, len(sanitized_gen_mols_max), n_to_draw)
mols_to_draw_max = [sanitized_gen_mols_max[i] for i in ind]
legends = ['pIC50 = ' + str(prediction_biased_max[i]) for i in ind]
Draw.MolsToGridImage(mols_to_draw_max, molsPerRow=5, subImgSize=(300,300), legends=legends)

# ### Molecules with minimized pIC50
generated_mols_min = [Chem.MolFromSmiles(sm, sanitize=True) for sm in smiles_biased_min]
sanitized_gen_mols_min = [generated_mols_min[i]
                          for i in np.where(np.array(generated_mols_min) != None)[0]]
n_to_draw = 20
ind = np.random.randint(0, len(sanitized_gen_mols_min), n_to_draw)
mols_to_draw_min = [sanitized_gen_mols_min[i] for i in ind]
legends = ['pIC50 = ' + str(prediction_biased_min[i]) for i in ind]
Draw.MolsToGridImage(mols_to_draw_min, molsPerRow=5,
                     subImgSize=(300,300), legends=legends)
JAK2_min_max_demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Couple the PRMS surface and soil components: drive the surface model with
# gridMET forcings each day, then feed its potential ET into the soil model.
from pymt.models import PRMSSurface, PRMSSoil
import gridmet_etl

msurf = PRMSSurface()
msoil = PRMSSoil()
# NOTE(review): config_surf, config_soil, run_dir and wght are not defined in
# this notebook -- they must come from an earlier cell or be added here.
msurf.initialize(config_surf, run_dir)
msoil.initialize(config_soil, run_dir)

# NOTE(review): gridmet_etl is imported as a module but called like a
# function; this likely needs `from gridmet_etl import gridmet_etl` (or a
# call to the callable inside the module) -- confirm against the package.
gmdata = gridmet_etl(msurf.start_time, end_date=msurf.end_time, map=True,
                     hru_id=msurf.get_value('nhm_id'), wght_file=wght)

for i in range(int(msurf.start_time), int(msurf.end_time)):
    # BUG FIX: the original read `gmdata.prcp.[i,:]` (likewise for tmax and
    # tmin), which is a SyntaxError; index the attribute directly instead.
    msurf.set_value('hru_ppt', gmdata.prcp[i, :])
    msurf.set_value('tmax', gmdata.tmax[i, :])
    msurf.set_value('tmin', gmdata.tmin[i, :])
    msurf.update()
    # One-way coupling: pass potential evapotranspiration to the soil model.
    msoil.set_value('potet', msurf.get_value('potet'))
    msoil.update()

msurf.finalize()
msoil.finalize()
notebooks/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:tf2] # language: python # name: conda-env-tf2-py # --- # ### Flatten np arrays and plot PCA(2) # + import os import sys import numpy as np import pandas as pd import matplotlib.pyplot as plt import librosa import librosa.display from pathlib import Path from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler, MinMaxScaler sys.path.append('../src') from datagen import DataGenerator # %matplotlib inline # %load_ext autoreload # %autoreload 2 # - DATA = Path('../data') datagen = DataGenerator(DATA/'training', include=['Rock', 'Hip-Hop'], batch_size=1000) datagen.label_dict X, y = datagen.__getitem__(0) y = np.argmax(y, axis=1) X.shape X[:,:,1].shape y.shape X_flat = X.reshape(X.shape[0], -1) X_flat.shape # scaler = StandardScaler() # X = scaler.fit_transform(flat) X = X_flat pca = PCA(2).fit(X) Xt = pca.transform(X) # + fig, ax = plt.subplots(figsize=(7,7)) labels = datagen.label_dict for k, v in labels.items(): ax.scatter(Xt[y==v, 0], Xt[y==v, 1], alpha=.5, label=k) ax.legend(loc='best', shadow=True) ax.set_title('PCA of Genres') # - def plot_pca(n=2000, labels=['Rock', 'Hip-Hop', 'Instrumental']): fig, ax = plt.subplots(figsize=(6,6)) datagen = DataGenerator(DATA/'training', include=labels, batch_size=n) X, y = datagen.__getitem__(0) X = X.reshape(X.shape[0], -1) y = np.argmax(y, axis=1) scaler = MinMaxScaler() X = scaler.fit_transform(X) pca = PCA(2).fit(X) X = pca.transform(X) labels = datagen.label_dict for k, v in labels.items(): ax.scatter(X[y==v, 0], X[y==v, 1], alpha=.3, label=k) ax.legend(loc='best', shadow=True) ax.set_title('PCA of Genres') return ax ax = plot_pca(n=1500, labels=['Rock', 'Hip-Hop', 'Instrumental']) ax.set_ylim(-75,75) ax.set_xlim(-100,120) plt.savefig('../images/PCA_rock_hiphop_inst.png', dpi=200) 
plot_pca(labels=['Rock','Hip-Hop', 'Folk', 'Instrumental'])
nbs/PCA_plots.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Shuvamjoy34/NumberCrunchers/blob/master/CNN_car_vs_bike_vs_random_Image_Classification_Final%20with%20own%20architecture%2C%20Alex%20Net%2C%20VGG16%20and%20Lenet-5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="anIWn3g4IiBr"
# # **Convolutional Neural Network Based Car vs Bike vs Random Images Multi Class Classification**(Own Architechture,AlexNet,VGG16 and LeNet-5)

# + [markdown] id="vHQrsv4mJQSW"
# **Importing required python packages**

# + id="1IwzPSB_5BdE"
# NOTE(review): this cell repeats several imports (numpy, pandas, os, keras,
# PIL) -- harmless but worth de-duplicating.
from PIL import Image
import numpy as np
import os
import cv2
import keras
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Conv2D,MaxPooling2D,Dense,Flatten,Dropout
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import matplotlib.pyplot as plt
# %matplotlib inline
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
import seaborn as sns
from keras.utils.data_utils import Sequence
from imblearn.over_sampling import RandomOverSampler
from imblearn.keras import balanced_batch_generator
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import keras
from tqdm import tqdm
import os
from sklearn.model_selection import train_test_split
from cv2 import cv2
from PIL import Image
import tensorflow as tf
from matplotlib import pyplot as plt
from keras.layers import Dense, Dropout, Flatten, Input, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.preprocessing import image
from keras.utils import plot_model
from keras.models import Model
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from numpy import array
from keras import regularizers
from keras import optimizers
from keras.models import load_model
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from PIL import ImageFile

# Allow PIL to load images whose file contents are truncated.
ImageFile.LOAD_TRUNCATED_IMAGES = True
#config = tf.ConfigProto()
#config.gpu_options.allow_growth = True
#sess = tf.Session(config=config)

# + [markdown] id="49hqPMWwL2gC"
# **Connecting google colab notebook to my google drive**

# + id="Ws4O_ErfJ2Dh" outputId="f332eea8-e21a-4618-c4cf-5ad1705e967b" colab={"base_uri": "https://localhost:8080/", "height": 35}
from google.colab import drive
drive.mount('/content/drive')

# + [markdown] id="V4vi8xaKMvbj"
# **Importing the image files with JPG format**
#
# **Resizing and Labelling the images**

# + id="xvMbsJ3t5Pec"
# Load car / bike / random images from Drive, resize to 128x128 and label
# them 0 / 1 / 2 respectively.  cv2.imread returns None for unreadable files,
# which makes Image.fromarray raise AttributeError -- those files are skipped.
data=[]
labels=[]
car=os.listdir("/content/drive/My Drive/Images/car/")
for cars in car:
    try:
        image=cv2.imread("/content/drive/My Drive/Images/car/"+ cars)
        image_from_array = Image.fromarray(image, 'RGB')
        size_image = image_from_array.resize((128,128))
        data.append(np.array(size_image))
        labels.append(0)
    except AttributeError:
        print("")
bike=os.listdir("/content/drive/My Drive/Images/bike/")
for bikes in bike:
    try:
        image=cv2.imread("/content/drive/My Drive/Images/bike/"+ bikes)
        image_from_array = Image.fromarray(image, 'RGB')
        size_image = image_from_array.resize((128,128))
        data.append(np.array(size_image))
        labels.append(1)
    except AttributeError:
        print("")
random=os.listdir("/content/drive/My Drive/Images/random/")
for randoms in random:
    try:
        image=cv2.imread("/content/drive/My Drive/Images/random/"+ randoms)
        image_from_array = Image.fromarray(image, 'RGB')
        size_image = image_from_array.resize((128,128))
        data.append(np.array(size_image))
        labels.append(2)
    except AttributeError:
        print("")

# + id="e5VbX_iG5tDU"
images=np.array(data)
labels=np.array(labels)

# + id="BpIgw9cX7Brg"
# Shuffle images and labels with the same random permutation.
s=np.arange(images.shape[0])
np.random.shuffle(s)
images=images[s]
labels=labels[s]
num_classes=len(np.unique(labels))
len_data=len(images)

# + [markdown] id="2Ul0FTN2OW71"
# **Diving the image dataset into training, validation and testing dataset**

# + id="8KuERdWF7C2h" outputId="6fb0090b-b51c-4ade-9c31-d0b632e1c3c7" colab={"base_uri": "https://localhost:8080/", "height": 336}
# 80% train, 10% eval, 10% test; pixel values scaled to [0, 1].
images =images.astype(np.float32)
images = images/255
train_x , x , train_y , y = train_test_split(images , labels , test_size = 0.2 , random_state = 111)
eval_x , test_x , eval_y , test_y = train_test_split(x , y , test_size = 0.5 , random_state = 111)
plt.figure(1 , figsize = (15 ,5))
n = 0
for z , j in zip([train_y , eval_y , test_y] , ['train labels','eval labels','test labels']):
    n += 1
    plt.subplot(1 , 3 , n)
    sns.countplot(x = z )
    plt.title(j)
plt.show()

# + id="m7FXYtXDRP6d"
# Flatten to 2-D (n_samples, n_features) for the sklearn-style searches below.
# NOTE(review): the Conv2D models later expect 4-D image tensors, so these
# flattened arrays cannot be fed to them directly -- confirm intent.
x=x.reshape(x.shape[0], -1)
y=y.reshape(y.shape[0], -1)
train_x=train_x.reshape(train_x.shape[0], -1)
train_y=train_y.reshape(train_y.shape[0], -1)
eval_x=eval_x.reshape(eval_x.shape[0], -1)
eval_y=eval_y.reshape(eval_y.shape[0], -1)

# + id="e4WA6XozR4RF" outputId="4a56bd61-32f3-4be1-8eae-e8ba2b2a32a3" colab={"base_uri": "https://localhost:8080/", "height": 126}
#from imblearn.over_sampling import SMOTE
#Over-sampling: SMOTE
#SMOTE (Synthetic Minority Oversampling TEchnique) consists of synthesizing elements for the minority class,
#based on those that already exist.
# It works randomly picking a point from the minority class and computing
#the k-nearest neighbors for this point.The synthetic points are added between the chosen point and its neighbors.
#We'll use ratio='minority' to resample the minority class.
# NOTE(review): SMOTE is never imported (the import above is commented out)
# and the fit_sample call is commented too, so this cell raises NameError and
# train_x_sm / train_y_sm used further below are never defined -- confirm.
smote = SMOTE('minority',kind='regular',k_neighbors=2)
#train_x_sm, train_y_sm = smote.fit_sample(train_x,train_y)
#print(train_x_sm.shape, train_y_sm.shape)

# + [markdown] id="4GuPx6wdTv3Y"
# **Encoding the labels of the target class of training,validation and testing dataset from(0,1,2) to (0,1) where 1 pops up when there is a class in the array else 0 pops up**

# + id="OqGsbD8a_DsY"
# One-hot encode the integer class labels.
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer()
train_y = lb.fit_transform(train_y)
test_y = lb.transform(test_y)
eval_y = lb.transform(eval_y)

# + id="_QuZ9pYQLquy"
from tensorflow.python.keras import regularizers
from keras.layers.normalization import BatchNormalization
#kernel_regularizer=regularizers.l2(0.05)

# + [markdown] id="ACWg91qrU_Qf"
# **Creating a basic Convolutional Neural Network architecture and using Random Search and Grid Search to find the best Hyper Parameters**

# + id="RTaPHgBu_ULu"
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier

def create_classifier(activation,dropout_rate_opts,optimizer='adam'):
    # Basic ConvNet builder used by RandomizedSearchCV.
    # NOTE(review): the bare keras.layers.BatchNormalization() calls create a
    # layer object but never add it to the model; `optimizer` is accepted but
    # the compile call hard-codes "adam".
    #BasicConvNet
    classifier = Sequential()
    classifier.add(Conv2D(filters=32,kernel_size=(3,3),padding='same',activation=activation, input_shape=(128,128,3)))
    keras.layers.BatchNormalization()
    classifier.add(MaxPooling2D(pool_size=(2,2)))
    classifier.add(Conv2D(filters=48,kernel_size=(3,3),padding='valid',activation=activation, kernel_regularizer=regularizers.l2(0.05)))
    keras.layers.BatchNormalization()
    classifier.add(MaxPooling2D(pool_size=(2,2)))
    classifier.add(Conv2D(filters=256,kernel_size=(3,3),padding='valid',activation=activation, kernel_regularizer=regularizers.l2(0.05)))
    keras.layers.BatchNormalization()
    classifier.add(MaxPooling2D(pool_size=(2,2)))
    classifier.add(Dropout(dropout_rate_opts))
    classifier.add(Flatten())# Flattening the
    #classifier.add(Dense(200,activation="relu")) #200 is the number of nuerons in hidden layers
    classifier.add(Dense(256,activation=activation))
    classifier.add(Dense(84,activation=activation))
    #classifier.add(Dense(32,activation=activation))
    classifier.add(Dropout(dropout_rate_opts))
    classifier.add(Dense(3,activation="softmax")) #3 represent output layer neurons for three different classes
    classifier.summary()
    classifier.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
    return classifier

# + id="FdAM3WSHNyEV"
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import RandomizedSearchCV

# + id="KqrIJvn2Og0O" outputId="163ec931-aa8c-4f3d-e746-91c6a8f9266a" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Search space for the randomized hyper-parameter search.
_activations=['tanh','relu','selu']
_optimizers=['sgd', 'adam']
dropout_rate_opts = [0, 0.2, 0.5]
_batch_size=[16,32,64]
params=dict(activation=_activations, optimizer=_optimizers, batch_size=_batch_size, dropout_rate_opts=dropout_rate_opts )
print(params)

# + id="Un-Pv1-XHvBQ" outputId="eae69a44-440a-49d1-ed3e-351f5c869848" colab={"base_uri": "https://localhost:8080/", "height": 35}
classifier = KerasClassifier(build_fn=create_classifier,epochs=100,batch_size=16)
classifier

# + id="H47ucQsgIvj_" outputId="04ee3432-f416-48be-82ab-048dc01f634f" colab={"base_uri": "https://localhost:8080/", "height": 1000}
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import RandomizedSearchCV
np.random.seed(42)
rscv = RandomizedSearchCV(classifier, param_distributions=params, cv=3, n_iter=10)
# NOTE(review): train_x_sm / train_y_sm come from the commented-out SMOTE
# resampling above and are undefined as the notebook stands.
rscv_results = rscv.fit(train_x_sm,train_y_sm)
print('Best score is: {} using {}'.format(rscv_results.best_score_, rscv_results.best_params_))

# + id="aJeljKDG5hjo"
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier

def create_classifier():
    # Second builder (256x256 input, fixed relu) for the batch-size/epoch
    # grid search; redefines the earlier create_classifier.
    #BasicConvNet
    classifier = Sequential()
    classifier.add(Conv2D(filters=64,kernel_size=(3,3),padding='valid',activation="relu", input_shape=(256,256,3)))
    #keras.layers.BatchNormalization()
    classifier.add(MaxPooling2D(pool_size=(2,2)))
    classifier.add(Conv2D(filters=64,kernel_size=(3,3),padding='valid',activation="relu", kernel_regularizer=regularizers.l2(0.05)))
    #keras.layers.BatchNormalization()
    classifier.add(MaxPooling2D(pool_size=(2,2)))
    classifier.add(Conv2D(filters=64,kernel_size=(3,3),padding='valid',activation="relu", kernel_regularizer=regularizers.l2(0.05)))
    #keras.layers.BatchNormalization()
    classifier.add(MaxPooling2D(pool_size=(2,2)))
    #classifier.add(Dropout(dropout_rate_opts))
    classifier.add(Flatten())# Flattening the
    #classifier.add(Dense(200,activation="relu")) #200 is the number of nuerons in hidden layers
    classifier.add(Dense(128,activation="relu"))
    classifier.add(Dense(64,activation="relu"))
    classifier.add(Dense(32,activation="relu"))
    #classifier.add(Dropout(dropout_rate_opts))
    classifier.add(Dense(3,activation="softmax")) #3 represent output layer neurons for three different classes
    classifier.summary()
    classifier.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
    return classifier

# + id="20l9gZwspu_6" outputId="10c5b549-96a2-4a86-9438-abb9d5889c56" colab={"base_uri": "https://localhost:8080/", "height": 443}
# create model
classifier = KerasClassifier(build_fn=create_classifier, verbose=0)
# define the grid search parameters
batch_size = [10, 20, 30, 40, 50]
epochs = [10, 50, 100, 150, 200]
param_grid = dict(batch_size=batch_size, epochs=epochs)
grid = GridSearchCV(estimator=classifier, param_grid=param_grid, n_jobs=-1, cv=3)
grid_result = grid.fit(x,y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))

# + id="K7RnJ1QXrtcY"
def create_classifier(optimizer='adam'):
    # Third builder: same 256x256 ConvNet but with the optimizer as the
    # tuned hyper-parameter.
    #BasicConvNet
    classifier = Sequential()
    classifier.add(Conv2D(filters=64,kernel_size=(3,3),padding='valid',activation='relu',input_shape=(256,256,3)))
    #keras.layers.BatchNormalization()
    classifier.add(MaxPooling2D(pool_size=(2,2)))
    classifier.add(Conv2D(filters=64,kernel_size=(3,3),padding='valid',activation='relu',kernel_regularizer=regularizers.l2(0.05)))
    #keras.layers.BatchNormalization()
    classifier.add(MaxPooling2D(pool_size=(2,2)))
    classifier.add(Conv2D(filters=64,kernel_size=(3,3),padding='valid',activation='relu',kernel_regularizer=regularizers.l2(0.05)))
    #keras.layers.BatchNormalization()
    classifier.add(MaxPooling2D(pool_size=(2,2)))
    #classifier.add(Dropout(0.5))
    classifier.add(Flatten())# Flattening the
    #classifier.add(Dense(200,activation="relu")) #200 is the number of nuerons in hidden layers
    classifier.add(Dense(128,activation="relu"))
    classifier.add(Dense(64,activation="relu"))
    classifier.add(Dense(32,activation="relu"))
    #classifier.add(Dropout(0.5))
    classifier.add(Dense(3,activation="softmax")) #3 represent output layer neurons for three different classes
    classifier.summary()
    classifier.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return classifier

# + id="0kBKxU63sABc" outputId="91fcf788-cadd-4847-866e-277e87367f63" colab={"base_uri": "https://localhost:8080/", "height": 692}
classifier = KerasClassifier(build_fn=create_classifier, epochs=50, batch_size=10, verbose=0)
# define the grid search parameters
optimizer = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']
param_grid = dict(optimizer=optimizer)
grid = GridSearchCV(estimator=classifier, param_grid=param_grid, n_jobs=-1, cv=3)
grid_result = grid.fit(x,y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))

# + id="pfTCoSnxtDxF"
from keras.optimizers import Adamax

def create_classifier(learn_rate=0.01, beta_1=0):
    # Fourth builder: tunes the Adamax learning rate and beta_1.
    #BasicConvNet
    classifier = Sequential()
    classifier.add(Conv2D(filters=64,kernel_size=(3,3),padding='valid',activation='relu',input_shape=(256,256,3)))
    #keras.layers.BatchNormalization()
    classifier.add(MaxPooling2D(pool_size=(2,2)))
    classifier.add(Conv2D(filters=64,kernel_size=(3,3),padding='valid',activation='relu',kernel_regularizer=regularizers.l2(0.05)))
    #keras.layers.BatchNormalization()
    classifier.add(MaxPooling2D(pool_size=(2,2)))
    classifier.add(Conv2D(filters=64,kernel_size=(3,3),padding='valid',activation='relu',kernel_regularizer=regularizers.l2(0.05)))
    #keras.layers.BatchNormalization()
    classifier.add(MaxPooling2D(pool_size=(2,2)))
    #classifier.add(Dropout(0.5))
    classifier.add(Flatten())# Flattening the
    #classifier.add(Dense(200,activation="relu")) #200 is the number of nuerons in hidden layers
    classifier.add(Dense(128,activation="relu"))
    classifier.add(Dense(64,activation="relu"))
    classifier.add(Dense(32,activation="relu"))
    #classifier.add(Dropout(0.5))
    classifier.add(Dense(3,activation="softmax")) #3 represent output layer neurons for three different classes
    classifier.summary()
    optimizer = Adamax(lr=learn_rate, beta_1=beta_1)
    classifier.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return classifier

# + id="_M-zBULS0adN" outputId="85067b23-980d-440d-c54e-6bd0553ecc07" colab={"base_uri": "https://localhost:8080/", "height": 1000}
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
classifier = KerasClassifier(build_fn=create_classifier, epochs=50, batch_size=10, verbose=0)
# define the grid search parameters
learn_rate = [0.001, 0.01, 0.1, 0.2, 0.3]
beta_1 = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9]
param_grid = dict(learn_rate=learn_rate, beta_1=beta_1)
grid = GridSearchCV(estimator=classifier, param_grid=param_grid, n_jobs=1, cv=3)
grid_result = grid.fit(x,y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))

# + id="8SCq_1GKM5Lb" outputId="60de8e24-d50d-4037-bca7-13f57fec055a" colab={"base_uri": "https://localhost:8080/", "height": 117}
# ! pip install keras

# + id="QY7i_KRdO7Qy"
#,kernel_regularizer=regularizers.l2(0.05)

# + id="stqCn0cw8Ez6" outputId="c5e7ffa8-41bf-4d00-896a-202aed634905" colab={"base_uri": "https://localhost:8080/", "height": 586}
# Final hand-tuned ConvNet (128x128 input) built at notebook top level.
from tensorflow.keras.activations import selu
#BasicConvNet
classifier = Sequential()
classifier.add(Conv2D(filters=96,kernel_size=(3,3),padding='valid',activation="relu",input_shape=(128,128,3)))
#keras.layers.BatchNormalization()
classifier.add(MaxPooling2D(pool_size=(2,2)))
classifier.add(Conv2D(filters=128,kernel_size=(3,3),padding='valid',activation="relu",kernel_regularizer=regularizers.l2(0.05)))
#keras.layers.BatchNormalization()
classifier.add(MaxPooling2D(pool_size=(2,2)))
classifier.add(Conv2D(filters=256,kernel_size=(3,3),padding='valid',activation="relu",kernel_regularizer=regularizers.l2(0.05)))
keras.layers.BatchNormalization()
classifier.add(MaxPooling2D(pool_size=(2,2)))
classifier.add(Dropout(0.5))
classifier.add(Flatten())# Flattening
#classifier.add(Dense(200,activation="relu"))
classifier.add(Dense(512,activation="relu"))
classifier.add(Dense(256,activation="relu"))
#classifier.add(Dense(128,activation="relu")) #200 is the number of nuerons in hidden layers
classifier.add(Dropout(0.5))
classifier.add(Dense(3,activation="softmax")) #3 represent output layer neurons for three different classes
classifier.summary()

# + id="p_c3aQTCBO0W" outputId="ceb9f27e-81ee-4cc2-f8ca-22e921b58f1e" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Train the hand-tuned ConvNet with checkpointing and early stopping on
# validation accuracy.
from keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.activations import selu
checkpoint = ModelCheckpoint("convnet_1.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')
#from sklearn.utils import class_weight
#class_weight = class_weight.compute_class_weight('balanced' ,np.unique(y_train_labels) ,y_train_labels)
from keras.optimizers import Adam
#from keras.wrappers.scikit_learn import KerasClassifier
#from keras.optimizers import SGD
#sgd=SGD(lr=0.01,momentum=0.6,nesterov=False)
#adam = Adam(lr=0.01)
classifier.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
# NOTE(review): train_x / eval_x were flattened earlier, but this model
# expects (128,128,3) image tensors -- confirm the arrays fed here are 4-D.
conv= classifier.fit(train_x,train_y,batch_size =32,epochs=100,verbose=1,validation_data=(eval_x, eval_y),callbacks=[checkpoint,early])
conv

# + id="jjJBZEdUszUB" outputId="a995c8d6-1130-463d-b281-5eef2e5966ae" colab={"base_uri": "https://localhost:8080/", "height": 282}
# plot the training and validation accuracy
acc = conv.history['accuracy']
val_acc = conv.history['val_accuracy']
epochs = range(len(acc))
plt.plot(epochs, acc, label='Training acc')
plt.plot(epochs, val_acc, label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.ylim(0.0,1)
plt.show()

# + id="Qk4iuwnVC-TV" outputId="c354f027-3787-4b40-c28e-588723f90403" colab={"base_uri": "https://localhost:8080/", "height": 299}
#Loss and Accuracy in one graph(Training & Validation)
import matplotlib.pyplot as plt
#plt.plot(conv.history["accuracy"])
#plt.plot(conv.history['val_accuracy'])
plt.plot(conv.history['loss'])
plt.plot(conv.history['val_loss'])
plt.title("Training and Validation Loss ")
plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.legend(["Training loss","Validation Loss"])
plt.show()

# + id="Qgj8LbNMAtzE" outputId="f40bfbea-5b14-44c2-eeeb-64ef9a10e711" colab={"base_uri": "https://localhost:8080/", "height": 299}
# NOTE(review): duplicate of the loss-plot cell above.
import matplotlib.pyplot as plt
#plt.plot(conv.history["accuracy"])
#plt.plot(conv.history['val_accuracy'])
plt.plot(conv.history['loss'])
plt.plot(conv.history['val_loss'])
plt.title("Training and Validation Loss ")
plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.legend(["Training loss","Validation Loss"])
plt.show()

# + id="WnZST5sdSHhf"
classifier.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])

# + id="kv1rm3NOSUHc" outputId="cc39ec6e-6c9a-4d8a-a936-90193cdd2bbe" colab={"base_uri": "https://localhost:8080/", "height": 992}
#STEP_SIZE_TRAIN=balanced_gen.n//balanced_gen.batch_size
#STEP_SIZE_VALID=balanced_gen.n//balanced_gen.batch_size
#STEP_SIZE_TEST=balanced_gen.n//balanced_gen.batch_size
#ep = 100
#batch_size=32
# NOTE(review): balanced_gen and steps_per_epoch are not defined anywhere in
# this chunk, so this call raises NameError as written.
classifier.fit_generator(balanced_gen,steps_per_epoch, )

# + id="SeoWYDPiBVTM"
#using Image Augmentation Approach(By using Image Data Generator)
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1/255., horizontal_flip = True, zoom_range = 0.3, rotation_range = 30)
val_datagen= ImageDataGenerator(rescale=1./255)
train_gen = train_datagen.flow(train_x,train_y, batch_size=32)
val_gen = val_datagen.flow(eval_x,eval_y, batch_size=32)
test_gen = val_datagen.flow(test_x,test_y, batch_size =32, shuffle = False)

# + id="fmrxG-bLv_tB" outputId="4f7a725c-ecf6-4451-c7fd-f4bb3b610726" colab={"base_uri": "https://localhost:8080/", "height": 1000}
STEP_SIZE_TRAIN=train_gen.n//train_gen.batch_size
STEP_SIZE_VALID=val_gen.n//val_gen.batch_size
STEP_SIZE_TEST = test_gen.n // test_gen.batch_size
ep = 100
# Train the hand-built convnet on the augmented generators.
classifier.fit_generator(generator=train_gen,
                         validation_data=val_gen,
                         steps_per_epoch=STEP_SIZE_TRAIN,
                         epochs=ep,
                         validation_steps=STEP_SIZE_VALID)

# + id="c4OOZImRwOZW" outputId="f6aacd68-9155-4002-fc14-d4e3746b11c9" colab={"base_uri": "https://localhost:8080/", "height": 52}
# With training finished, score the convnet on the held-out test set.
score = classifier.evaluate(test_x, test_y, verbose=0)
print('Test Loss:', score[0])
print('Test accuracy:', score[1])

# + id="DfJUUhfA2RM2" outputId="7e41919b-2878-495a-ad6a-da582903c68f" colab={"base_uri": "https://localhost:8080/", "height": 248}
# Per-class precision/recall/F1 plus the confusion matrix.
from sklearn.metrics import classification_report, confusion_matrix
Y_pred = classifier.predict(test_x)
y_pred = np.argmax(Y_pred, axis=1)
target_names = ['class 0(car)', 'class 1(bike)', 'class 2(random)']
print(classification_report(np.argmax(test_y, axis=1), y_pred, target_names=target_names))
print(confusion_matrix(np.argmax(test_y, axis=1), y_pred))

# + id="6YIr9Px0pjcc"
# AlexNet-style network, declared in one go as a layer list.
model = keras.models.Sequential([
    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(128,128,3)),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same", kernel_regularizer=regularizers.l2(0.05)),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=384, kernel_size=(1,1), strides=(1,1), activation='relu', padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=256, kernel_size=(1,1), strides=(1,1), activation='relu', padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    # keras.layers.Dropout(0.5),
    keras.layers.Flatten(),
    keras.layers.Dense(1024, activation='relu'),
    # keras.layers.Dropout(0.5),
    keras.layers.Dense(1024, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(1000, activation='relu'),
    # keras.layers.Dropout(0.5),
    keras.layers.Dense(3, activation='softmax'),
])

# + id="ZvzMvEJPj-n4" outputId="037ffff5-17fb-4b6e-f683-3eb0c21ad8a8" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Second AlexNet variant, built layer by layer with model.add().
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
import numpy as np
np.random.seed(1000)

AlexNet = Sequential()

# Conv block 1
# NOTE(review): input_shape here is (227,227,3) while the data fed elsewhere
# in this notebook is 128x128 -- confirm which size is intended.
AlexNet.add(Conv2D(filters=96, input_shape=(227,227,3), kernel_size=(11,11), strides=(4,4), padding='same'))
# AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'))

# Conv block 2
AlexNet.add(Conv2D(filters=256, kernel_size=(5, 5), strides=(1,1), padding='same'))
# AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'))

# Conv block 3
AlexNet.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='same'))
# AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))

# Conv block 4
AlexNet.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='same'))
# AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))

# Conv block 5
AlexNet.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='same'))
# AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'))

# Hand over to the fully connected head.
AlexNet.add(Dropout(0.4))
AlexNet.add(Flatten())
# 1st Fully Connected Layer
# Fully connected head of the layer-by-layer AlexNet.
# FIX: dropped the `input_shape=(128,128,3,)` argument from this Dense layer.
# Keras ignores input_shape on any non-first layer, and after Flatten the
# input is 1-D anyway, so the argument was misleading dead weight.
AlexNet.add(Dense(1024))
# AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
# Dropout to prevent overfitting
AlexNet.add(Dropout(0.4))

# 2nd Fully Connected Layer
AlexNet.add(Dense(1024))
# AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(Dropout(0.5))

# 3rd Fully Connected Layer
AlexNet.add(Dense(1000))
# AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(Dropout(0.5))

# Output Layer
AlexNet.add(Dense(3))
# AlexNet.add(BatchNormalization())
AlexNet.add(Activation('softmax'))

# Model Summary
AlexNet.summary()

# + id="e0YQ7j7Lpvv_" outputId="fc4ab179-fdb8-4757-fbe0-1f875a89ffa3" colab={"base_uri": "https://localhost:8080/", "height": 835}
model.summary()

# + id="CW43wFVHw5GL"
# callbacks=[checkpoint,early]

# + id="Xg64Pmo1qFhM" outputId="e2e79f64-7860-4b96-f42c-f2b8358e31bd" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Train the list-form AlexNet (`model`) with checkpointing and early stopping.
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("alexnet_1.h5", monitor='val_accuracy', verbose=1,
                             save_best_only=True, save_weights_only=False,
                             mode='auto', period=1)
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20,
                      verbose=1, mode='auto')
# from keras.optimizers import Adam
# from keras.optimizers import SGD
# opt = Adam(lr=0.001)
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
lizon = model.fit(train_x, train_y, batch_size=64, epochs=50, verbose=1,
                  validation_data=(eval_x, eval_y), callbacks=[checkpoint, early])
lizon

# + id="NWTB5BinJv2k" outputId="0e491d0c-0d81-4df6-f053-139f45796913" colab={"base_uri": "https://localhost:8080/", "height": 299}
# Accuracy and loss curves for the AlexNet training run.
import matplotlib.pyplot as plt
plt.plot(lizon.history["accuracy"])
plt.plot(lizon.history['val_accuracy'])
plt.plot(lizon.history['loss'])
plt.plot(lizon.history['val_loss'])
plt.title("model accuracy")
plt.ylabel("Loss/Accuracy")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","loss","Validation Loss"])
plt.show()

# + id="MF1B1r82sUEx"
# Augmented generators (same recipe as for the hand-built convnet).
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1/255.,
                                   horizontal_flip=True,
                                   zoom_range=0.3,
                                   rotation_range=30)
val_datagen = ImageDataGenerator(rescale=1./255)
train_gen = train_datagen.flow(train_x, train_y, batch_size=32)
val_gen = val_datagen.flow(eval_x, eval_y, batch_size=32)
test_gen = val_datagen.flow(test_x, test_y, batch_size=32, shuffle=False)

# + id="CPHolMmlsWT0" outputId="0347631f-701b-4dd5-9617-5364c38dfd66" colab={"base_uri": "https://localhost:8080/", "height": 1000}
STEP_SIZE_TRAIN = train_gen.n // train_gen.batch_size
STEP_SIZE_VALID = val_gen.n // val_gen.batch_size
STEP_SIZE_TEST = test_gen.n // test_gen.batch_size
ep = 100
# NOTE(review): this trains `classifier` (the hand-built convnet) again, not
# the AlexNet `model` this section is about -- looks like a copy-paste slip;
# confirm which network the augmented run was meant for.  Left as-is.
classifier.fit_generator(generator=train_gen,
                         validation_data=val_gen,
                         steps_per_epoch=STEP_SIZE_TRAIN,
                         epochs=ep,
                         validation_steps=STEP_SIZE_VALID)

# + id="OHAob_P6qpO6" outputId="f9d611c1-ad71-4a8d-c272-4fa872f6b139" colab={"base_uri": "https://localhost:8080/", "height": 52}
# Once the model is trained we can evaluate it on test data.
# Evaluating the AlexNet section on the test set.
# FIX: the original called `AlexNet.evaluate(...)`, but the AlexNet built
# cell-by-cell was never compiled or trained -- the compiled and trained
# network is `model` (see `lizon = model.fit(...)` above), and the
# classification report below already uses `model.predict`.  Evaluating an
# uncompiled model raises a RuntimeError.
alexscore = model.evaluate(test_x, test_y, verbose=0)
print('Test Loss:', alexscore[0])
print('Test accuracy:', alexscore[1])

# + id="dZAcXDv4qtnc" outputId="803f5dd5-7ed8-40ae-95db-c94e2f0a8ff4" colab={"base_uri": "https://localhost:8080/", "height": 304}
from sklearn.metrics import classification_report, confusion_matrix
Y_pred = model.predict(test_x)
y_pred = np.argmax(Y_pred, axis=1)
target_names = ['class 0(car)', 'class 1(bike)', 'class 2(random)']
print(classification_report(np.argmax(test_y, axis=1), y_pred, target_names=target_names))
print(confusion_matrix(np.argmax(test_y, axis=1), y_pred))

# + id="HH-ZuqHW2L3x"
import keras, os
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten
from keras.preprocessing.image import ImageDataGenerator
import numpy as np

# + id="nA5xkUha1BUo"
# VGG16-style network: five conv blocks (64-64 / 128-128 / 256x3 / 512x3 /
# 512x3), each followed by 2x2 max pooling.
vggmodel = Sequential()
vggmodel.add(Conv2D(input_shape=(128,128,3), filters=64, kernel_size=(3,3), padding="same", activation="relu"))
vggmodel.add(Conv2D(filters=64, kernel_size=(3,3), padding="same", activation="relu"))
vggmodel.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
vggmodel.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
vggmodel.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
vggmodel.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
vggmodel.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
vggmodel.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
vggmodel.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
vggmodel.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
vggmodel.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vggmodel.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vggmodel.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vggmodel.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
vggmodel.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vggmodel.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vggmodel.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vggmodel.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))

# + id="l-Oa35rl2Q5M"
# Classifier head.
# FIX: the original wrote bare `keras.layers.Dropout(0.5)` statements, which
# construct Dropout layers and immediately discard them -- they were never
# added to the model, so no dropout was actually applied.  Add them properly.
vggmodel.add(keras.layers.Dropout(0.5))
vggmodel.add(Flatten())
vggmodel.add(Dense(units=200, activation="relu"))
vggmodel.add(keras.layers.Dropout(0.5))
vggmodel.add(Dense(units=200, activation="relu"))
vggmodel.add(keras.layers.Dropout(0.5))
vggmodel.add(Dense(units=3, activation="softmax"))

# + id="vj3OkIly2xbF" outputId="bab21a80-4753-4cb8-e402-dab2f4d4bec2" colab={"base_uri": "https://localhost:8080/", "height": 941}
vggmodel.summary()

# + id="-97kUBD_ZfaH"
# from keras.optimizers import Adam
# opt = Adam(lr=0.001)
vggmodel.compile(optimizer="adam", loss='categorical_crossentropy', metrics=['accuracy'])

# + id="fGczUBGS24ft" outputId="3dc2a05c-b0f8-4012-f698-138767b0192f" colab={"base_uri": "https://localhost:8080/", "height": 959}
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_accuracy', verbose=1,
                             save_best_only=True, save_weights_only=False,
                             mode='auto', period=1)
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20,
                      verbose=1, mode='auto')
vggmodel.compile(optimizer="adam", loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
VGG = vggmodel.fit(train_x, train_y, batch_size=32, epochs=100, verbose=1,
                   validation_data=(eval_x, eval_y), callbacks=[checkpoint, early])
VGG

# + id="U3VfSOlu5Fd_" outputId="c7e9742a-2213-45f0-8703-96836a75e412" colab={"base_uri": "https://localhost:8080/", "height": 50}
# Once the model is trained we can evaluate it on test data.
# Evaluating the trained VGG16 on the test set.
vggscore = vggmodel.evaluate(test_x, test_y, verbose=0)
print('Test Loss:', vggscore[0])
print('Test accuracy:', vggscore[1])

# + id="6xb0R3Ul5OzS" outputId="da427bc1-89f1-47de-9516-0944cdb43da6" colab={"base_uri": "https://localhost:8080/", "height": 233}
from sklearn.metrics import classification_report, confusion_matrix
Y_pred = vggmodel.predict(test_x)
y_pred = np.argmax(Y_pred, axis=1)
target_names = ['class 0(car)', 'class 1(bike)', 'class 2(random)']
print(classification_report(np.argmax(test_y, axis=1), y_pred, target_names=target_names))
print(confusion_matrix(np.argmax(test_y, axis=1), y_pred))

# + id="DEu6Rpr-BrvZ" outputId="22222774-df71-4b81-ec8f-9dac557e417e" colab={"base_uri": "https://localhost:8080/", "height": 295}
# VGG training curves.
# FIX: the original plotted `vgg.history[...]`, but the History object
# returned by vggmodel.fit() is bound to `VGG` -- `vgg` was a NameError.
import matplotlib.pyplot as plt
plt.plot(VGG.history["accuracy"])
plt.plot(VGG.history['val_accuracy'])
plt.plot(VGG.history['loss'])
plt.plot(VGG.history['val_loss'])
plt.title("model accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","loss","Validation Loss"])
plt.show()

# + id="0PA5jf1eNZUu" outputId="1a30cea5-b6b1-4fee-f5bd-a2635d7437bf" colab={"base_uri": "https://localhost:8080/", "height": 132}
# LeNet-5-style network in list form.
# FIX: the original list contained `keras.add(Dropout(0.5))` (no such
# function, and no separating commas), which is a SyntaxError.  The intended
# layers are keras.layers.Dropout entries in the layer list.
lenet_model = keras.models.Sequential([
    keras.layers.Conv2D(6, kernel_size=5, strides=1, activation='relu', input_shape=train_x[0].shape, padding='same'),  # C1
    keras.layers.AveragePooling2D(),  # S2
    keras.layers.Conv2D(16, kernel_size=5, strides=1, activation='relu', padding='same'),  # C3
    keras.layers.AveragePooling2D(),  # S4
    keras.layers.Flatten(),           # Flatten
    keras.layers.Dropout(0.5),
    keras.layers.Dense(120, activation='relu'),  # C5
    keras.layers.Dense(84, activation='relu'),   # F6
    keras.layers.Dropout(0.5),
    keras.layers.Dense(3, activation='softmax'), # Output layer
])

# + id="nMadyQkS35ei" outputId="b8977571-f71b-41e0-8f90-db57a1d4e9fb" colab={"base_uri": "https://localhost:8080/", "height": 515}
from keras.models import Sequential
from keras import models, layers
import keras
# Build LeNet-5 layer by layer.
lenet = Sequential()

# C1: 6 feature maps, 5x5 kernels, stride 1.
lenet.add(layers.Conv2D(6, kernel_size=(5, 5), strides=(1, 1), activation='relu',
                        input_shape=(128,128,3), padding='same'))
# S2: average pooling.
lenet.add(layers.AveragePooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid'))
# C3: 16 feature maps.
lenet.add(layers.Conv2D(16, kernel_size=(5, 5), strides=(1, 1), activation='relu', padding='valid'))
# S4: average pooling.
lenet.add(layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
# C5: fully connected convolutional layer.
lenet.add(layers.Conv2D(120, kernel_size=(5, 5), strides=(1, 1), activation='relu', padding='valid'))
lenet.add(Dropout(0.5))
# Flatten so the conv output can feed the dense layers.
lenet.add(layers.Flatten())
# F6: fully connected layer.
lenet.add(layers.Dense(84, activation='relu'))
lenet.add(Dropout(0.5))
# Softmax output over the three classes.
lenet.add(layers.Dense(3, activation='softmax'))
lenet.summary()

# + id="VBlH2QRBOelA" outputId="db62895d-0aeb-4b29-a21a-2caaf0bf6182" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Train LeNet with checkpointing / early stopping, same recipe as before.
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("lenet_1.h5", monitor='val_accuracy', verbose=1,
                             save_best_only=True, save_weights_only=False,
                             mode='auto', period=1)
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20,
                      verbose=1, mode='auto')
# from keras.optimizers import Adam
# opt = Adam(lr=0.001)
lenet.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
lenetmodel = lenet.fit(train_x, train_y, batch_size=32, epochs=100, verbose=1,
                       validation_data=(eval_x, eval_y), callbacks=[checkpoint, early])

# + id="yd4Rz9K4O4_i" outputId="e5004503-df2f-4095-ae35-e852443882fd" colab={"base_uri": "https://localhost:8080/", "height": 52}
# Score the trained LeNet on the held-out test set.
lenetscore = lenet.evaluate(test_x, test_y, verbose=0)
print('Test Loss:', lenetscore[0])
print('Test accuracy:', lenetscore[1])

# + id="b4ymiRWGO-kY" outputId="6d0b5600-5b57-43e4-dd52-68161925532e" colab={"base_uri": "https://localhost:8080/", "height": 248}
from sklearn.metrics import classification_report, confusion_matrix
lenet_pred = lenet.predict(test_x)
y_pred = np.argmax(lenet_pred, axis=1)
target_names = ['class 0(car)', 'class 1(bike)', 'class 2(random)']
print(classification_report(np.argmax(test_y, axis=1), y_pred, target_names=target_names))
print(confusion_matrix(np.argmax(test_y, axis=1), y_pred))

# + id="TqsC1L2uNES5" outputId="98b0d02a-e531-45ba-8983-09572bbc1508" colab={"base_uri": "https://localhost:8080/", "height": 212}
# !pip install scikit-plot
from scikitplot.metrics import plot_roc_curve

# + id="CiWvGyjTQKSz" outputId="481df0d3-b970-4045-8055-654855d12ac0" colab={"base_uri": "https://localhost:8080/", "height": 279}
# LeNet loss curves (accuracy curves left commented out).
import matplotlib.pyplot as plt
# plt.plot(lenetmodel.history["accuracy"])
# plt.plot(lenetmodel.history['val_accuracy'])
plt.plot(lenetmodel.history['loss'])
plt.plot(lenetmodel.history['val_loss'])
# plt.title("model accuracy")
plt.ylabel("Training vs Validation Loss")
plt.xlabel("Epoch")
plt.legend([" Training loss","Validation Loss"])
plt.show()
CNN_car_vs_bike_vs_random_Image_Classification_Final with own architecture, Alex Net, VGG16 and Lenet-5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Face landmarks Detection Starter Pack by Bongsang # ### Step 1. Data Analysis and dataset making # # The first step is to load in the images of faces and their keypoints and visualize them. # This set of image data has been extracted from the [YouTube Faces Dataset](https://www.cs.tau.ac.il/~wolf/ytfaces/), which includes videos of people in YouTube videos. These videos have been fed through some processing steps and turned into sets of image frames containing one face and the associated keypoints. # # <img src="https://www.cs.tau.ac.il/~wolf/ytfaces/logo.jpg" width=50%> # # ### Training and Testing Data # # This facial landmarks dataset consists of 5,770 images. All of these images are separated into either a training or a test set of data. # # Total | Training | Testing # ----- | -------- | ------- # 5,770 | 3,462 | 2,308 # 100% | 60% | 40% # # The information about the images and keypoints in this dataset are summarized in CSV files, which we can read in using `pandas`. Let's read the training CSV and get the annotations in an (N, 2) array where N is the number of keypoints and 2 is the dimension of the keypoint coordinates (x, y). 
#
# ---

# + pycharm={"is_executing": true}
import pandas as pd
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt

# import glob
# import skimage.io as io
# from torch.utils.data import DataLoader
# from torchvision import transforms, utils

# # Bongsang's personal pacakges
# from landmarks_dataset import LandmarksDataset
# from landmarks_transform import Rescale, RandomCrop, Normalize, ToTensor

# %matplotlib inline
# -

## Debug On/Off
# debug = True
debug = False

# + pycharm={"is_executing": true}
landmarks_frame = pd.read_csv('data/training_landmarks.csv')
print(f'The number of training dataset is {len(landmarks_frame)}. {3462/5770*100}% of 5,770')
landmarks_frame.head()
# -

# #### Plotting some images from data frame

# + pycharm={"is_executing": true}
def get_image_name(frame, idx):
    """Return the image file name stored in column 0 of row *idx*."""
    return frame.iloc[idx, 0]


def get_landmarks(frame, idx):
    """Return the keypoints of row *idx* as an (N, 2) float array."""
    return frame.iloc[idx, 1:].to_numpy().astype('float').reshape(-1, 2)


def show_landmarks(image, points, name):
    """Display *image* titled *name* with its keypoints overlaid in magenta."""
    plt.title(name)
    plt.imshow(image)
    plt.scatter(points[:, 0], points[:, 1], s=20, marker='.', c='m')
    plt.pause(0.1)


# + pycharm={"is_executing": true}
# Show two random training samples with their annotations.
for _ in range(2):
    idx = np.random.randint(0, len(landmarks_frame))
    image_name = get_image_name(landmarks_frame, idx)
    image_path = os.path.join('data/training/', image_name)
    image = cv2.imread(image_path)
    image_data = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR
    landmarks = get_landmarks(landmarks_frame, idx)
    show_landmarks(image_data, landmarks, image_name)
# -

# ## Dataset class and Transformations
#
# To prepare our data for training, we'll be using PyTorch's Dataset class.
# Much of this code is a modified version of what can be found in the
# [PyTorch data loading tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).
#
# <img src="https://pytorch.org/tutorials/_images/landmarked_face2.png" width=25%>
#
# ### PyTorch Dataset class
#
# ``torch.utils.data.Dataset`` is an abstract class representing a dataset.
# It lets us load batches of image/keypoint data and uniformly apply
# transformations such as rescaling and normalizing for training.
#
# A custom dataset inherits ``Dataset`` and overrides:
#
# - ``__len__`` so that ``len(dataset)`` returns the size of the dataset.
# - ``__getitem__`` so that ``dataset[i]`` returns the i-th sample.
#
# The CSV is read once in ``__init__``; images are read lazily in
# ``__getitem__`` so the whole dataset never sits in memory at once.
# Each sample is a dict ``{'image': image, 'landmarks': key_pts, 'name': ...}``,
# and an optional ``transform`` argument lets callers plug in preprocessing.

# + pycharm={"is_executing": true}
import torch
from torch.utils.data import Dataset


class LandmarksDataset(Dataset):
    """Face Landmarks dataset."""

    def __init__(self, csv_file, root_dir, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.landmarks_frame = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.landmarks_frame)

    def __getitem__(self, idx):
        # DataLoader may hand us a tensor index; unwrap it first.
        if torch.is_tensor(idx):
            idx = idx.tolist()

        image_name = self.landmarks_frame.iloc[idx, 0]
        image_path = os.path.join(self.root_dir, image_name)
        image = cv2.imread(image_path)
        image_data = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR

        # Remaining columns are flat (x, y) pairs; reshape to (N, 2).
        landmarks = self.landmarks_frame.iloc[idx, 1:]
        landmarks = np.array([landmarks])
        landmarks = landmarks.astype('float').reshape(-1, 2)

        sample = {'image': image_data, 'landmarks': landmarks, 'name': image_name}
        if debug:
            print(f'LandmarksDataset shape, file={image_name}, image={image_data.shape}, landmarks={landmarks.shape}')

        if self.transform:
            sample = self.transform(sample)
        return sample
# -

# #### Plotting some images from PyTorch dataset

# + pycharm={"is_executing": true}
landmarks_dataset = LandmarksDataset(csv_file='data/training_landmarks.csv',
                                     root_dir='data/training',
                                     transform=None)

for _ in range(2):
    idx = np.random.randint(0, len(landmarks_dataset))
    sample = landmarks_dataset[idx]
    show_landmarks(sample['image'], sample['landmarks'], sample['name'])
# -

# ## PyTorch Transforms
#
# The images above are not all the same size, and neural networks expect
# standardized input: a fixed size, a normalized range for colors and
# coordinates, and (for PyTorch) Tensors instead of numpy arrays.  We
# therefore need four preprocessing transforms:
#
# - ``Normalize``: to convert a color image to grayscale values with a range of [0,1] and normalize the keypoints to be in a range of about [-1, 1]
# - ``Rescale``: to rescale an image to a desired size.
# - ``RandomCrop``: to crop an image randomly.
# - ``ToTensor``: to convert numpy images to torch images.
#
# We write the transforms as callable classes instead of simple functions so
# that transform parameters need not be passed every time they are used: we
# implement ``__call__`` and (when parameters are required) ``__init__``.
# Usage:
#
#     tx = Transform(params)
#     transformed_sample = tx(sample)
#
# Note how each transform is applied to both the image and its keypoints.

# + pycharm={"is_executing": true}
class Normalize(object):
    """Convert a color image to grayscale and normalize the color range to [0,1]."""

    def __call__(self, sample):
        image, landmarks, name = sample['image'], sample['landmarks'], sample['name']

        image_copy = np.copy(image)
        landmarks_copy = np.copy(landmarks)

        # convert image to grayscale
        image_copy = cv2.cvtColor(image_copy, cv2.COLOR_RGB2GRAY)

        # scale color range from [0, 255] to [0, 1]
        image_copy = image_copy / 255.0

        # scale landmarks to be centered around 0 with a range of about [-1, 1];
        # constants assume coords cluster around 100 px with spread ~50 px --
        # TODO(review): confirm against the dataset statistics
        landmarks_copy = (landmarks_copy - 100) / 50.0

        if debug:
            print(f'Normalized LandmarksDataset, file={name}, rescaled shape = {image_copy.shape}')

        return {'image':image_copy, 'landmarks':landmarks_copy, 'name': name}


class Rescale(object):
    """Rescale the image in a sample to a given size.

    Args:
        output_size (tuple or int): Desired output size. If tuple, output is
            matched to output_size. If int, smaller of image edges is matched
            to output_size keeping aspect ratio the same.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        image, landmarks, name = sample['image'], sample['landmarks'], sample['name']

        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            # int: match the smaller edge, preserving aspect ratio
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * w / h
        else:
            new_h, new_w = self.output_size

        new_h, new_w = int(new_h), int(new_w)
        image_copy = cv2.resize(image, (new_w, new_h))

        # landmarks scale with the image
        landmarks_copy = landmarks * [new_w / w, new_h / h]

        if debug:
            print(f'Rescaled shape, file={name}, rescaled shape = {image_copy.shape}')

        return {'image':image_copy, 'landmarks':landmarks_copy, 'name': name}


class RandomCrop(object):
    """Crop randomly the image in a sample.

    Args:
        output_size (tuple or int): Desired output size. If int, square crop
            is made.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size

    def __call__(self, sample):
        image, landmarks, name = sample['image'], sample['landmarks'], sample['name']

        h, w = image.shape[:2]
        new_h, new_w = self.output_size

        # FIX: use an inclusive upper bound.  np.random.randint(0, h - new_h)
        # raised ValueError when the image was exactly the crop size
        # (h == new_h) and could never pick the bottom/right-most crop
        # because randint's high end is exclusive.
        top = np.random.randint(0, h - new_h + 1)
        left = np.random.randint(0, w - new_w + 1)

        image_copy = image[top: top + new_h, left: left + new_w]
        # shift landmarks into the crop's coordinate frame
        landmarks_copy = landmarks - [left, top]

        if debug:
            print(f'Croped shape, file={name}, rescaled shape = {image_copy.shape}')

        return {'image':image_copy, 'landmarks':landmarks_copy, 'name': name}


class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        image, landmarks, name = sample['image'], sample['landmarks'], sample['name']

        # After Normalize the image is grayscale (H x W) with no color
        # channel, so add a singleton channel dimension first.
        if(len(image.shape) == 2):
            image = image.reshape(image.shape[0], image.shape[1], 1)

        # swap color axis:
        # numpy image: H x W x C  ->  torch image: C x H x W
        image_copy = image.transpose((2, 0, 1))

        if debug:
            print(f'Transformed and changed to Tensor, file={name}, rescaled shape = {image_copy.shape}')

        return {'image': torch.from_numpy(image_copy),
                'landmarks': torch.from_numpy(landmarks),
                'name': name}
# -

# ### Test out the transforms: Rescale any images to 250 size (height)

# + pycharm={"is_executing": true}
from torchvision import transforms

rescale = Rescale(250)
idx = np.random.randint(0, len(landmarks_dataset))
sample = landmarks_dataset[idx]
transformed_sample = rescale(sample)
show_landmarks(transformed_sample['image'], transformed_sample['landmarks'], transformed_sample['name'])
# -

# ### Test out the transforms: Crop any images to 80 size

# + pycharm={"is_executing": true}
from torchvision import transforms

crop = RandomCrop(80)
idx = np.random.randint(0, len(landmarks_dataset))
sample = landmarks_dataset[idx]
transformed_sample = crop(sample)
show_landmarks(transformed_sample['image'], transformed_sample['landmarks'], transformed_sample['name'])
# -

# ### Test out the transforms: Rescale to 250 size and then crop 224 size

# + pycharm={"is_executing": true}
from torchvision import transforms

composed = transforms.Compose([Rescale(250), RandomCrop(224)])
idx = np.random.randint(0, len(landmarks_dataset))
sample = landmarks_dataset[idx]
transformed_sample = composed(sample)
show_landmarks(transformed_sample['image'], transformed_sample['landmarks'], transformed_sample['name'])
# -

# ## Create the transformed dataset
#
# Apply the transforms in order to get grayscale images of the same shape.
# Verify that your transform works by printing out the shape of the resulting
# data (printing out a few examples should show you a consistent tensor size).

# + pycharm={"is_executing": true}
# define the data transform
# order matters! i.e. rescaling should come before a smaller crop
data_transform = transforms.Compose([Rescale(250),
                                     RandomCrop(224),
                                     Normalize(),  # grayscale + [0,1] for faster training
                                     ToTensor()])

# create the transformed dataset
transformed_dataset = LandmarksDataset(csv_file='data/training_landmarks.csv',
                                       root_dir='data/training/',
                                       transform=data_transform)
# -

# ### Check all sizes

# + pycharm={"is_executing": true}
for i in range(3):
    idx = np.random.randint(0, len(transformed_dataset))
    sample = transformed_dataset[idx]
    print(f"{i}: {sample['name']}, image size={sample['image'].size()}, landmark size={sample['landmarks'].size()}")
    print('-'*100)
# -

# ## Data Iteration and Batch
#
# Right now, we are iterating over this data using a ``for`` loop, but we are
# missing out on a lot of PyTorch's dataset capabilities, specifically the
# abilities to:
#
# - Batch the data
# - Shuffle the data
# - Load the data in parallel using ``multiprocessing`` workers.
#
# ``torch.utils.data.DataLoader`` is an iterator which provides all these
# features, and we'll see this in use in the *next* notebook, Notebook 2,
# when we load data in batches to train a neural network!
# +
from torch.utils.data import DataLoader

batch_num = 8
worker_num = 4
dataloader = DataLoader(transformed_dataset,
                        batch_size=batch_num,
                        shuffle=True,
                        num_workers=worker_num)
# -

# ### Batch sample check

# +
from torchvision import utils

# Helper function to show a batch
def show_landmarks_batch(sample_batch):
    """Show image with landmarks for a batch of samples."""
    images_batch, landmarks_batch = sample_batch['image'], sample_batch['landmarks']
    batch_size = len(images_batch)
    im_size = images_batch.size(2)
    print(f'batch shape = {images_batch.shape}')

    grid_border_size = 2
    grid = utils.make_grid(images_batch)
    plt.imshow(grid.numpy().transpose((1, 2, 0)))  # C*W*H --> W*H*C

    # Shift each sample's landmarks right by its position in the grid.
    for i in range(batch_size):
        plt.scatter(landmarks_batch[i, :, 0].numpy() + i * im_size + (i + 1) * grid_border_size,
                    landmarks_batch[i, :, 1].numpy() + grid_border_size,
                    s=10, marker='.', c='r')
    plt.title('Batch from dataloader')
# -

# Iterate until the last batch of the first pass, visualize it, then stop.
for i_batch, sample_batch in enumerate(dataloader):
    print(i_batch, sample_batch['image'].size(), sample_batch['landmarks'].size())
    if i_batch == batch_num-1:
        plt.figure(figsize=(15, 10))
        show_landmarks_batch(sample_batch)
        plt.axis('off')
        plt.ioff()
        plt.show()
        break

# ---
# ## Step 2. Convolutional Neural Network
#
# 1. Define a CNN with images as input and landmarks as output
# 2. Train the CNN on the training data tracking loss
# 3. Evaluate how the trained model performs on test data
# 4. Trial and error! Update the CNN structure and model hyperparameters, so that it performs well enough.
# ---

# +
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I

"""
https://pytorch.org/docs/stable/nn.html#conv2d
torch.nn.Conv2d(in_channels, out_channels, kernel_size,
                stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros')
"""

def layer_out(inputs, kernel, stride=1, padding=0, dilation=1):
    """Spatial output size of a conv layer: (W - K + 2P) // S + 1."""
    return (inputs - kernel + 2*padding)//stride + 1


class Net(nn.Module):
    """Two conv/pool stages followed by three fully connected layers that
    regress 136 values (68 facial keypoints as (x, y) pairs) from a single
    grayscale 224x224 input image."""

    def __init__(self):
        super(Net, self).__init__()

        # Input is a square grayscale image, 224 x 224 pixels.
        self.image_width = 224
        self.n_classes = 136  # 2 values for each of the 68 keypoints
        fc_inputs = 0
        self.kernel = 5
        self.stride = 2

        self.conv1 = nn.Conv2d(1, 32, self.kernel)
        # conv output width = (W - K + 2P)/S + 1 = (224 - 5)/1 + 1 = 220
        fc_inputs = layer_out(self.image_width, self.kernel)
        self.pool1 = nn.MaxPool2d(self.stride, self.stride)
        # pooling halves the width: 220 // 2 = 110
        fc_inputs = fc_inputs // self.stride

        self.conv2 = nn.Conv2d(32, 64, 5)
        # (110 - 5)/1 + 1 = 106
        fc_inputs = layer_out(fc_inputs, self.kernel)
        self.pool2 = nn.MaxPool2d(2, 2)
        # 106 // 2 = 53
        fc_inputs = fc_inputs // self.stride

        print(f"final fully connected layer's input = {fc_inputs}")
        self.fc1 = nn.Linear(64*fc_inputs*fc_inputs, 1000)
        self.fc2 = nn.Linear(1000, 500)
        self.fc3 = nn.Linear(500, self.n_classes)
        self.dropout = nn.Dropout(p=0.2)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.pool2(x)
        # FIX: flatten the (N, 64, 53, 53) conv output before the linear
        # layers -- the original passed the 4-D tensor straight into fc1,
        # which fails with a shape mismatch at runtime.
        x = x.view(x.size(0), -1)
        # FIX: apply non-linearities between the fully connected layers; the
        # original chained three Linear layers with no activations, which
        # collapses them into a single linear map.
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # raw regression output, no activation
        return x
# -

# +
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cnn = Net()
cnn.to(device)
# cnn.cuda()
print(cnn)
# -

# ## Step 3. Apply the model on a test sample
#
# To test the model on a test sample of data, you have to follow these steps:
# 1. Extract the image and ground truth keypoints from a sample
# 2. Make sure the image is a FloatTensor, which the model expects.
# 3. Forward pass the image through the net to get the predicted, output keypoints.
#
# This function tests how the network performs on the first batch of test
# data. It returns the images, the predicted keypoints (produced by the
# model), and the ground truth keypoints.
#
# ---

# + pycharm={"is_executing": true}
# test the model on a batch of test images
def net_sample_output():
    """Run one batch through the net; return (images, predictions, ground truth)."""
    for i, sample in enumerate(dataloader):
        # get sample data: images and ground truth keypoints
        images = sample['image']
        landmarks = sample['landmarks']

        # FIX: a torch.device is not iterable, so the original
        # `if "GPU" in device:` raised TypeError -- test device.type instead.
        if device.type == 'cuda':
            images = images.type(torch.cuda.FloatTensor)
        else:
            images = images.type(torch.FloatTensor)

        # forward pass to get net output
        outputs = cnn(images)

        # FIX: was `output.view(output.size()[0], 68, -1)` -- `output` was
        # never defined (NameError).  Reshape to batch_size x 68 x 2.
        outputs = outputs.view(outputs.size(0), 68, -1)

        # break after first batch is tested
        if i == 0:
            return images, outputs, landmarks

# returns: test images, test predicted keypoints, test ground truth keypoints
test_images, test_outputs, ground = net_sample_output()

# print out the dimensions of the data to see if they make sense
print(test_images.data.size())
print(test_outputs.data.size())
# FIX: was `gt_pts.size()` -- undefined name; the ground truth is `ground`.
print(ground.size())
# -

# ## Ready to Train!
# # Now that you've seen how to load and transform the data and have defined a CNN for facial keypoint detection, the model is ready to train on this data. # # The next step is to write the training loop, track the loss, and evaluate the trained network on held-out test data. # ### <NAME> # - homepage: https://bongsang.github.io # - Linkedin: https://www.linkedin.com/in/bongsang
Bongsang-FaceLandmarksDetectionStarterPack.ipynb