code
stringlengths
2.5k
150k
kind
stringclasses
1 value
# DATASET 2 - GPU Runtime ``` import pandas as pd import numpy as np import seaborn as sns #visualisation import matplotlib.pyplot as plt #visualisation %matplotlib inline import pandas as pd import numpy as np from sklearn import tree from sklearn.tree import DecisionTreeClassifier,export_graphviz from sklearn.model_selection import train_test_split from matplotlib import pyplot as plt import seaborn as sns import graphviz import pydotplus import io from scipy import misc from sklearn.ensemble import AdaBoostClassifier #importing the data dt = pd.read_csv("C:\\ITM SPRING 2020\\ML\\sgemm_product_dataset\\processed_sgemm_product.csv") df = dt.copy() #classifying numeric runtime into two classes. Run time higher than 250 is 0 and lower will be 1 df['target'] = np.where(df['MeanRun']>250, 0, 1) #dropping the numeric target column df.drop('MeanRun',axis=1,inplace=True) #As SVM takes long time to run, sampling only 25000 records for running this algorithm dt = df.sample(n = 50000) # X_dataset=dt.drop(columns=['target']) y=dt['target'] from sklearn import preprocessing X = preprocessing.scale(X_dataset) from sklearn.model_selection import cross_val_score tree_g = DecisionTreeClassifier() array_scores_g = cross_val_score(tree_g,X,y,cv=5) array_mean_g = array_scores_g.mean() tree_e = DecisionTreeClassifier(criterion='entropy') array_scores_e = cross_val_score(tree_e,X,y,cv=5) array_mean_e = array_scores_e.mean() print("GINI : Accuracy of decision tree without hyperparameter tuning: ",array_mean_g) print(tree_g) print() print() print() print("ENTROPY :Accuracy of decision tree without hyperparameter tuning: ",array_mean_e) print(tree_e) from sklearn.model_selection import cross_val_score from sklearn.metrics import accuracy_score max_depth_list = [5,10,15,20,25,30] max_depth_ =[] accuracy_list_g = [] accuracy_list_e = [] for i in max_depth_list: tree_g = DecisionTreeClassifier(criterion='gini',max_depth=i) array_scores_g = cross_val_score(tree_g,X,y,cv=5) array_mean_g = 
array_scores_g.mean() accuracy_list_g.append(array_mean_g) tree_e = DecisionTreeClassifier(criterion='entropy',max_depth=i) array_scores_e = cross_val_score(tree_e,X,y,cv=5) array_mean_e = array_scores_e.mean() accuracy_list_e.append(array_mean_e) max_depth_.append(i) plt.plot(max_depth_,accuracy_list_g,label='Gini') plt.plot(max_depth_,accuracy_list_e,label='Entropy') plt.xlabel('Maximum Depth') plt.ylabel('Accuracy') plt.title('Accuracy vs Minimum Sample Leaf') plt.legend() plt.show plt.rcParams['figure.figsize']=(8,6) from sklearn.model_selection import cross_val_score from sklearn.metrics import accuracy_score max_leaf_nodes_list = [5,10,15,20,25,30] max_leaf_nodes_ =[] accuracy_list_g = [] accuracy_list_e = [] C_params = [] for i in max_leaf_nodes_list: tree_g = DecisionTreeClassifier(criterion='gini',max_leaf_nodes=i) array_scores_g = cross_val_score(tree_g,X,y,cv=5) array_mean_g = array_scores_g.mean() accuracy_list_g.append(array_mean_g) tree_e = DecisionTreeClassifier(criterion='entropy',max_leaf_nodes=i) array_scores_e = cross_val_score(tree_e,X,y,cv=5) array_mean_e = array_scores_e.mean() accuracy_list_e.append(array_mean_e) max_leaf_nodes_.append(i) plt.plot(max_leaf_nodes_,accuracy_list_g,label='Gini') plt.plot(max_leaf_nodes_,accuracy_list_e,label='Entropy') plt.xlabel('Maximum Leaf node') plt.ylabel('Accuracy') plt.title('Accuracy vs Maximum Leaf Nodes') plt.legend() plt.show plt.rcParams['figure.figsize']=(8,6) from sklearn.model_selection import cross_val_score from sklearn.metrics import accuracy_score min_samples_leaf_list = [5,10,15,20,25,30] min_samples_leaf_ =[] accuracy_list_g = [] accuracy_list_e = [] for i in max_leaf_nodes_list: tree_g = DecisionTreeClassifier(criterion='gini',min_samples_leaf=i) array_scores_g = cross_val_score(tree_g,X,y,cv=5) array_mean_g = array_scores_g.mean() accuracy_list_g.append(array_mean_g) tree_e = DecisionTreeClassifier(criterion='entropy',min_samples_leaf=i) array_scores_e = cross_val_score(tree_e,X,y,cv=5) 
array_mean_e = array_scores_e.mean() accuracy_list_e.append(array_mean_e) min_samples_leaf_.append(i) plt.plot(min_samples_leaf_,accuracy_list_g,label='Gini') plt.plot(min_samples_leaf_,accuracy_list_e,label='Entropy') plt.xlabel('Minimum Sample Leaf ') plt.ylabel('Accuracy') plt.title('Accuracy vs Minimum Sample Leaf') plt.legend() plt.show plt.rcParams['figure.figsize']=(8,6) ``` # ADAPTIVE BOOSTING ``` accuracy_list = [] learning_rates =[.001,.01,.1,1] for i in learning_rates: model = DecisionTreeClassifier(criterion='entropy',max_depth =15,min_samples_leaf=15,max_leaf_nodes=15) Adaboost = AdaBoostClassifier(base_estimator=model,n_estimators=10,learning_rate=i) #boostmodel = Adaboost.fit(X_train,y_train) array_scores = cross_val_score(Adaboost,X,y,cv=5) array_mean = array_scores.mean() print("for learning rate= ",i," Accuracy is : ",array_mean) accuracy_list.append(array_mean) plt.plot(np.log10(learning_rates),accuracy_list) #plt.plot(min_samples_leaf_,accuracy_list_e,label='Entropy') plt.xlabel('Learning Rates ') plt.ylabel('Accuracy') plt.title('Accuracy vs Learning Rates') plt.legend() plt.show plt.rcParams['figure.figsize']=(8,6) ``` # ADAPTIVE BOOSTING - Pruned ``` from sklearn.model_selection import cross_val_score from sklearn.metrics import accuracy_score min_samples_leaf_list = [5,10,15,20,25,30] min_samples_leaf_ =[] accuracy_list_g = [] accuracy_list_e = [] for i in min_samples_leaf_list: model_g = DecisionTreeClassifier(criterion='gini',min_samples_leaf =i) Adaboost_g = AdaBoostClassifier(base_estimator=model_g,n_estimators=100,learning_rate=1) array_scores_g = cross_val_score(Adaboost_g,X,y,cv=5) array_mean_g = array_scores_g.mean() print('GINI: for minimum sample = ',i,' mean accuracy is ',array_mean_g) accuracy_list_g.append(array_mean_g) model_e = DecisionTreeClassifier(criterion='entropy',min_samples_leaf =i) Adaboost_e = AdaBoostClassifier(base_estimator=model_e,n_estimators=100,learning_rate=1) array_scores_e = 
cross_val_score(Adaboost_e,X,y,cv=5) array_mean_e = array_scores_e.mean() print('ENTROPY: for minimum sample = ',i,' mean accuracy is ',array_mean_e) accuracy_list_e.append(array_mean_e) min_samples_leaf_.append(i) plt.plot(min_samples_leaf_,accuracy_list_g,label='Gini') plt.plot(min_samples_leaf_,accuracy_list_e,label='Entropy') plt.xlabel('Minimum Sample Leaf ') plt.ylabel('Accuracy') plt.title('Accuracy vs Minimum Sample Leaf') plt.legend() plt.show plt.rcParams['figure.figsize']=(8,6) from sklearn.model_selection import cross_val_score from sklearn.metrics import accuracy_score max_leaf_nodes_list = [5,10,15,20,25,30] max_leaf_nodes_ =[] accuracy_list_g = [] accuracy_list_e = [] for i in max_leaf_nodes_list: model_g = DecisionTreeClassifier(criterion='gini',max_leaf_nodes=i) Adaboost_g = AdaBoostClassifier(base_estimator=model_g,n_estimators=100,learning_rate=1) array_scores_g = cross_val_score(Adaboost_g,X,y,cv=5) array_mean_g = array_scores_g.mean() print('GINI: for maximum leaf node = ',i,' mean accuracy is ',array_mean_g) accuracy_list_g.append(array_mean_g) model_e = DecisionTreeClassifier(criterion='entropy',min_samples_leaf =i) Adaboost_e = AdaBoostClassifier(base_estimator=model_e,n_estimators=100,learning_rate=1) array_scores_e = cross_val_score(Adaboost_e,X,y,cv=5) array_mean_e = array_scores_e.mean() print('ENTROPY: for maximum leaf nodes = ',i,' mean accuracy is ',array_mean_e) accuracy_list_e.append(array_mean_e) max_leaf_nodes_.append(i) plt.plot(max_leaf_nodes_,accuracy_list_g,label='Gini') plt.plot(max_leaf_nodes_,accuracy_list_e,label='Entropy') plt.xlabel('Maximum Leaf Nodes ') plt.ylabel('Accuracy') plt.title('Accuracy vs Maximum Lead Nodes') plt.legend() plt.show plt.rcParams['figure.figsize']=(8,6) from sklearn.model_selection import cross_val_score from sklearn.metrics import accuracy_score max_depth_list = [5,10,15,20,25,30] max_depth_ =[] accuracy_list_g = [] accuracy_list_e = [] for i in max_leaf_nodes_list: model_g = 
DecisionTreeClassifier(criterion='gini',max_depth=i) Adaboost_g = AdaBoostClassifier(base_estimator=model_g,n_estimators=100,learning_rate=1) array_scores_g = cross_val_score(Adaboost_g,X,y,cv=5) array_mean_g = array_scores_g.mean() print('GINI: for maximum depth = ',i,' mean accuracy is ',array_mean_g) accuracy_list_g.append(array_mean_g) model_e = DecisionTreeClassifier(criterion='entropy',max_depth =i) Adaboost_e = AdaBoostClassifier(base_estimator=model_e,n_estimators=100,learning_rate=1) array_scores_e = cross_val_score(Adaboost_e,X,y,cv=5) array_mean_e = array_scores_e.mean() print('ENTROPY: for maximum depth = ',i,' mean accuracy is ',array_mean_e) accuracy_list_e.append(array_mean_e) max_depth_.append(i) plt.plot(max_leaf_nodes_,accuracy_list_g,label='Gini') plt.plot(max_leaf_nodes_,accuracy_list_e,label='Entropy') plt.xlabel('Maximum Depth ') plt.ylabel('Accuracy') plt.title('Accuracy vs Maximum Depth') plt.legend() plt.show plt.rcParams['figure.figsize']=(8,6) ``` # Finding Best Parameters for Decision Tree using Grid Search ``` from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import GridSearchCV def dtree_grid_search(X,y,nfolds): #create a dictionary of all values we want to test param_grid = { 'criterion':['gini','entropy'], 'max_depth': np.arange(5, 15), 'max_leaf_nodes':np.arange(15,30), 'min_samples_leaf':np.arange(15,30) } # decision tree model dtree_model=DecisionTreeClassifier() #use gridsearch to test all values dtree_gscv = GridSearchCV(dtree_model, param_grid, cv=nfolds) #fit model to data dtree_gscv.fit(X, y) return dtree_gscv.best_params_ dtree_grid_search(X,y,3) import pandas as pd import numpy as np import seaborn as sns import graphviz import pydotplus import io from scipy import misc from sklearn import tree from sklearn.tree import DecisionTreeClassifier,export_graphviz from sklearn.model_selection import train_test_split from matplotlib import pyplot as plt %matplotlib inline df.columns features = 
['MWG_Ordinal', 'NWG_Ordinal', 'KWG_Ordinal', 'MDIMC_ordinal', 'NDIMC_ordinal', 'MDIMA_ordinal', 'NDIMB_ordinal', 'KWI_ordinal', 'VWM_ordinal', 'VWN_ordinal', 'STRM_1', 'STRN_1', 'SA_1', 'SB_1'] c=DecisionTreeClassifier(criterion='gini',max_depth= 8,max_leaf_nodes= 29,min_samples_leaf=15) array_scores = cross_val_score(c,X,y,cv=5) Accuracy = array_scores.mean() print(Accuracy) d_t = c.fit(X,y) def show_tree(tree, features, path): f = io.StringIO() export_graphviz(tree, out_file=f, feature_names=features) pydotplus.graph_from_dot_data(f.getvalue()).write_png(path) img = misc.imread(path) plt.rcParams["figure.figsize"]=(20,20) plt.imshow(img) show_tree(d_t,features,'DT_Dataset2.png') ADAPTIVE BOOSTING - GRIDSEARCH from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import GridSearchCV def dtree_grid_search(X,y,nfolds): #create a dictionary of all values we want to test param_grid = { 'criterion':['gini','entropy'], 'max_depth': np.arange(5, 15), 'max_leaf_nodes':np.arange(15,30), 'min_samples_leaf':np.arange(15,30) } # decision tree model dtree_model=DecisionTreeClassifier() Adaboost_e = AdaBoostClassifier(base_estimator=dtree_model,n_estimators=100,learning_rate=1) #use gridsearch to test all values dtree_gscv = GridSearchCV(dtree_model, param_grid, cv=4) #fit model to data dtree_gscv.fit(X, y) return dtree_gscv.best_params_ dtree_grid_search(X,y,3) c=DecisionTreeClassifier(criterion='gini',max_depth=8 ,max_leaf_nodes=29 ,min_samples_leaf=15) Adaboost = AdaBoostClassifier(base_estimator=c,n_estimators=100,learning_rate=1) array_scores = cross_val_score(Adaboost,X,y,cv=3) Accuracy = array_scores.mean() print(Accuracy) ```
github_jupyter
# Horovod ## Introduction This recipe shows how to run [Horovod](https://github.com/uber/horovod) distributed training framework using Batch AI. Currently Batch AI has no native support for Horovod framework, but it's easy to run it using customtoolkit and job preparation command line. ## Details - Standard Horovod [tensorflow_mnist.py](https://github.com/uber/horovod/blob/v0.9.10/examples/tensorflow_mnist.py) example will be used; - tensorflow_mnist.py downloads training data on its own during execution; - The job will be run on standard tensorflow container tensorflow/tensorflow:1.4.0-gpu; - Horovod framework will be installed in the container using job preparation command line. Note, you can build your own docker image containing tensorflow and horovod instead. - Standard output of the job will be stored on Azure File Share. ## Instructions ### Install Dependencies and Create Configuration file. Follow [instructions](/recipes) to install all dependencies and create configuration file. ### Read Configuration and Create Batch AI client ``` from __future__ import print_function from datetime import datetime import sys from azure.storage.file import FileService import azure.mgmt.batchai.models as models # utilities.py contains helper functions used by different notebooks sys.path.append('../../') import utilities cfg = utilities.Configuration('../../configuration.json') client = utilities.create_batchai_client(cfg) ``` ## 1. Prepare Training Dataset and Script in Azure Storage ### Create File Share For this example we will create a new File Share with name `batchaisample` under your storage account. This share will be populated with sample scripts and will contain job's output. **Note** You don't need to create new file share for every cluster. We are doing this in this sample to simplify resource management for you. 
``` azure_file_share_name = 'batchaisample' service = FileService(cfg.storage_account_name, cfg.storage_account_key) service.create_share(azure_file_share_name, fail_on_exist=False) print('Done') ``` ### Deploy Sample Script and Configure the Input Directories - Download original sample script: ``` sample_script_url = 'https://raw.githubusercontent.com/uber/horovod/v0.9.10/examples/tensorflow_mnist.py' utilities.download_file(sample_script_url, 'tensorflow_mnist.py') ``` - Create a folder in the file share and upload the sample script to it. ``` samples_dir = 'horovod_samples' service = FileService(cfg.storage_account_name, cfg.storage_account_key) service.create_directory( azure_file_share_name, samples_dir, fail_on_exist=False) service.create_file_from_path( azure_file_share_name, samples_dir, 'tensorflow_mnist.py', 'tensorflow_mnist.py') ``` ## 2. Create Azure Batch AI Compute Cluster ### Configure Compute Cluster - For this example we will use a GPU cluster of `STANDARD_NC6` nodes. Number of nodes in the cluster is configured with `nodes_count` variable; - We will mount file share at folder with name `afs`. Full path of this folder on a computer node will be `$AZ_BATCHAI_MOUNT_ROOT/afs`; - We will call the cluster `nc6`. 
So, the cluster will have the following parameters: ``` azure_file_share = 'afs' nodes_count = 2 cluster_name = 'nc6' volumes = models.MountVolumes( azure_file_shares=[ models.AzureFileShareReference( account_name=cfg.storage_account_name, credentials=models.AzureStorageCredentialsInfo( account_key=cfg.storage_account_key), azure_file_url = 'https://{0}.file.core.windows.net/{1}'.format( cfg.storage_account_name, azure_file_share_name), relative_mount_path=azure_file_share) ] ) parameters = models.ClusterCreateParameters( location=cfg.location, vm_size="STANDARD_NC6", virtual_machine_configuration=models.VirtualMachineConfiguration( image_reference=models.ImageReference( publisher="microsoft-ads", offer="linux-data-science-vm-ubuntu", sku="linuxdsvmubuntu", version="latest")), scale_settings=models.ScaleSettings( manual=models.ManualScaleSettings(target_node_count=nodes_count) ), node_setup=models.NodeSetup( mount_volumes=volumes ), user_account_settings=models.UserAccountSettings( admin_user_name=cfg.admin, admin_user_password=cfg.admin_password, admin_user_ssh_public_key=cfg.admin_ssh_key ) ) ``` ### Create Compute Cluster ``` _ = client.clusters.create(cfg.resource_group, cluster_name, parameters).result() ``` ### Monitor Cluster Creation utilities.py contains a helper function allowing to wait for the cluster to become available - all nodes are allocated and finished preparation. ``` cluster = client.clusters.get(cfg.resource_group, cluster_name) utilities.print_cluster_status(cluster) ``` ## 3. Run Azure Batch AI Training Job ### Configure Input Directories The job needs to know where to find mnist_replica.py and input MNIST dataset. We will create two input directories for this: The job needs to know where to find train_mnist.py script (the chainer will download MNIST dataset on its own). 
So, we will configure an input directory for the script: ``` input_directories = [ models.InputDirectory( id='SCRIPTS', path='$AZ_BATCHAI_MOUNT_ROOT/{0}/{1}'.format(azure_file_share, samples_dir))] ``` The job will be able to reference those directories using ```$AZ_BATCHAI_INPUT_SCRIPTS``` environment variable. ### Configure Output Directories We will store standard and error output of the job in File Share: ``` std_output_path_prefix = '$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(azure_file_share) ``` The model output will be stored in File Share: ``` output_directories = [ models.OutputDirectory( id='MODEL', path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(azure_file_share), path_suffix='Models')] ``` ### Configure Job - Will use configured previously input and output directories; - We will use custom toolkit job to run tensorflow_mnist.py on multiple nodes (use node_count parameter to specify number of nodes). Note, Batch AI will create a hostfile for the job, it can be found via ```$AZ_BATCHAI_MPI_HOST_FILE``` environment variable; - Horovod framework will be installed by job preparation command line; - Will output standard output and error streams to file share. You can delete ```container_settings``` from the job definition to run the job directly on host DSVM. 
``` parameters = models.job_create_parameters.JobCreateParameters( location=cfg.location, cluster=models.ResourceId(id=cluster.id), node_count=2, input_directories=input_directories, output_directories=output_directories, std_out_err_path_prefix=std_output_path_prefix, container_settings=models.ContainerSettings( image_source_registry=models.ImageSourceRegistry(image='tensorflow/tensorflow:1.4.0-gpu')), job_preparation=models.JobPreparation( command_line='apt update; apt install mpi-default-dev mpi-default-bin -y; pip install horovod'), custom_toolkit_settings = models.CustomToolkitSettings( command_line='mpirun -mca btl_tcp_if_exclude docker0,lo --allow-run-as-root --hostfile $AZ_BATCHAI_MPI_HOST_FILE python $AZ_BATCHAI_INPUT_SCRIPTS/tensorflow_mnist.py')) ``` ### Create a training Job and wait for Job completion ``` job_name = datetime.utcnow().strftime('horovod_%m_%d_%Y_%H%M%S') job = client.jobs.create(cfg.resource_group, job_name, parameters).result() print('Created Job: {}'.format(job.name)) ``` ### Wait for Job to Finish The job will start running when the cluster will have enough idle nodes. The following code waits for job to start running printing the cluster state. During job run, the code prints current content of stderr.txt. **Note** Execution may take several minutes to complete. ``` utilities.wait_for_job_completion(client, cfg.resource_group, job_name, cluster_name, 'stdouterr', 'stderr.txt') ``` ### Download stdout.txt and stderr.txt files for the Job and job preparation command ``` files = client.jobs.list_output_files(cfg.resource_group, job_name, models.JobsListOutputFilesOptions(outputdirectoryid='stdouterr')) for f in list(files): if f.download_url: utilities.download_file(f.download_url, f.name) print('All files downloaded') ``` ## 4. 
Clean Up (Optional) ### Delete the Job ``` _ = client.jobs.delete(cfg.resource_group, job_name) ``` ### Delete the Cluster When you are finished with the sample and don't want to submit any more jobs you can delete the cluster using the following code. ``` _= client.clusters.delete(cfg.resource_group, cluster_name) ``` ### Delete File Share When you are finished with the sample and don't want to submit any more jobs you can delete the file share completely with all files using the following code. ``` service = FileService(cfg.storage_account_name, cfg.storage_account_key) service.delete_share(azure_file_share_name) ```
github_jupyter
To aid autoassociative recall (sparse recall using partial pattern), we need to two components - 1. each pattern remembers a soft mask of the contribution of each element in activating it. For example, if an element varies a lot at high activation levels, that element should be masked out when determining activation. On the other hand, if an element has a very specific value every time the element has high activation, then that element is important and should be considered (masked-in). 2. Among the masked-in elements for a pattern, even a small subset (say 20%) perfect match should be able to activate the pattern. This can be achieved by considering number of elements that have similarity above a threshold, say 0.9. Sum up similarity of this subset and apply an activation curve that is a sharp sigmoid centered at a value that represents (# of masked-in element) * 0.2 * 0.9. ``` import math import torch import matplotlib.pyplot as plt import pdb import pandas as pd import seaborn as sns import numpy as np # import plotly.graph_objects as go from matplotlib.patches import Ellipse %matplotlib inline device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print(device) from sklearn.datasets import load_boston def normalize(df): df1 = (df - df.mean())/df.std() return df1 def scale(df): min = df.min() max = df.max() df1 = (df - min) / (max - min) return df1 dataset = load_boston() dataset = pd.DataFrame(dataset.data, columns=dataset.feature_names) dataset = pd.DataFrame(np.c_[scale(normalize(dataset['LSTAT'])), scale(normalize(dataset['RM']))], columns = ['LSTAT','RM']) dataset = torch.tensor(dataset.to_numpy()).float().to(device) dataset1 = dataset[dataset[:,0] < 0.33] dataset2 = dataset[(dataset[:,0] >= 0.33) & (dataset[:,0] < 0.66)] dataset3 = dataset[dataset[:,0] >= 0.66] # dataset = [[0.25, 0.4], [0.75, 0.75], [0.85, 0.65]] original_dataset = dataset print("dataset", dataset.shape) # from 
https://kornia.readthedocs.io/en/latest/_modules/kornia/utils/grid.html from typing import Optional def create_meshgrid( height: int, width: int, normalized_coordinates: Optional[bool] = True, device: Optional[torch.device] = torch.device('cpu')) -> torch.Tensor: """Generates a coordinate grid for an image. When the flag `normalized_coordinates` is set to True, the grid is normalized to be in the range [-1,1] to be consistent with the pytorch function grid_sample. http://pytorch.org/docs/master/nn.html#torch.nn.functional.grid_sample Args: height (int): the image height (rows). width (int): the image width (cols). normalized_coordinates (Optional[bool]): whether to normalize coordinates in the range [-1, 1] in order to be consistent with the PyTorch function grid_sample. Return: torch.Tensor: returns a grid tensor with shape :math:`(1, H, W, 2)`. """ # generate coordinates xs: Optional[torch.Tensor] = None ys: Optional[torch.Tensor] = None if normalized_coordinates: xs = torch.linspace(-1, 1, width, device=device, dtype=torch.float) ys = torch.linspace(-1, 1, height, device=device, dtype=torch.float) else: xs = torch.linspace(0, width - 1, width, device=device, dtype=torch.float) ys = torch.linspace(0, height - 1, height, device=device, dtype=torch.float) # generate grid by stacking coordinates base_grid: torch.Tensor = torch.stack( torch.meshgrid([xs, ys])).transpose(1, 2) # 2xHxW return torch.unsqueeze(base_grid, dim=0).permute(0, 2, 3, 1) # 1xHxWx2 def add_gaussian_noise(tensor, mean=0., std=1.): t = tensor + torch.randn(tensor.size()).to(device) * std + mean t.to(device) return t def plot_patterns(patterns, pattern_lr, pattern_var, dataset): patterns = patterns.cpu() dataset = dataset.cpu() assert len(patterns.shape) == 2 # (pattern count, 2) assert patterns.shape[1] == 2 # 2D rgba_colors = torch.zeros((patterns.shape[0], 4)) # for blue the last column needs to be one rgba_colors[:,2] = 1.0 # the fourth column needs to be your alphas alpha = (1.2 - 
pattern_lr.cpu()).clamp(0, 1) * 0.2 rgba_colors[:, 3] = alpha # make ellipses marker_list = [] min_size = 0.02 max_size = 2.0 for i in range(patterns.shape[0]): pattern = patterns[i] var = pattern_var[i].clamp(0.01, 2.0) marker_list.append(Ellipse((pattern[0], pattern[1]), var[0], var[1], edgecolor='none', facecolor=rgba_colors[i], fill=True)) plt.figure(figsize=(7,7), dpi=100) ax = plt.gca() ax.cla() # clear things for fresh plot ax.scatter(patterns[:, 0], patterns[:, 1], marker='.', c='b') ax.scatter(dataset[:, 0], dataset[:, 1], marker='.', c='r', s=10) ax.set_xlim(0, 1) ax.set_ylim(0, 1) for marker in marker_list: ax.add_artist(marker) plt.show() grid_size = 4 patterns = create_meshgrid(grid_size, grid_size, normalized_coordinates=False).reshape(-1, 2) / (grid_size-1) patterns = patterns.to(device) pattern_lr = torch.ones((patterns.shape[0],)).to(device) pattern_var = torch.ones_like(patterns).to(device) * 0 # start with high var indicating no specificity to any value # patterns = torch.rand((50, 2)) # patterns = torch.tensor([[0.25, 0.30]]) # patterns plot_patterns(patterns, pattern_lr, pattern_var, dataset) original_patterns = patterns.clone().to(device) def similarity(x, patterns, subset_threshold=0.2): # Formula derivation https://www.desmos.com/calculator/iokn9kyuaq # print("x", x) dist_i = ((x - patterns) ** 2) dist = dist_i.sum(dim=-1) # print("patterns", patterns) # print("dist", dist) #dist = dist.sum(dim=-1) # TODO: use subset activation # TODO: apply mask (inverse variance) winner_index = dist.min(dim=0)[1] # print("winner_index", winner_index) winning_pattern = patterns[winner_index] a_scale = 0.2 a = a_scale * ((x - winning_pattern) ** -2) a[a > 15000.0] = 15000.0 # print("a", a) s = 0.8 sim = (-a * ((x - patterns) ** 2)).mean(dim=-1) # print("sim1", sim) # scale = 0.685 scale = 1.0 sim = (torch.exp(sim) - s * torch.exp(sim * 0.9)) / ((1 - s) * scale) sim[sim>1.0] = 1.0 # print("sim", sim) return sim, winner_index, dist, dist_i sim, winner_index, 
dist, dist_i = similarity(dataset[0], patterns) patterns = original_patterns pattern_lr = torch.ones((patterns.shape[0],)).to(device) pattern_var = torch.ones_like(patterns).to(device) * 10 # start with high var indicating no specificity to any value def run_dataset(dataset, patterns, pattern_lr): for x in dataset: # print("-------") sim, winner_index, dist, dist_i = similarity(x, patterns) sim = sim.unsqueeze(-1) # print("dist[winner_index]", dist[winner_index] * 100) pattern_lr[winner_index] = 0.9 * pattern_lr[winner_index] + 0.1 * (1.0 - torch.exp(-dist[winner_index])) pattern_var[winner_index] = 0.9 * pattern_var[winner_index] + 0.1 * (1.0 - torch.exp(-dist_i[winner_index])) * 100 # print("x", x) # print("(x - patterns)", (x - patterns)) # print("sim", sim) delta = (x - patterns) * sim * lr * pattern_lr.unsqueeze(-1) # print("delta", delta) patterns = patterns + delta patterns.clamp_(0, 1) pattern_lr.clamp(0, 1) # print("patterns", patterns) # print("pattern_lr", pattern_lr) # print("pattern_var", pattern_var) return patterns, pattern_lr lr = 1 epochs = 10 noise = 0.0 for _ in range(2): for i in range(epochs): dataset = add_gaussian_noise(dataset1, std=noise) if i % int(epochs / 2) == 0: print("Iteration ", i) plot_patterns(patterns, pattern_lr, pattern_var, dataset) patterns, pattern_lr = run_dataset(dataset, patterns, pattern_lr) for i in range(epochs): dataset = add_gaussian_noise(dataset2, std=noise) if i % int(epochs / 2) == 0: print("Iteration ", i) plot_patterns(patterns, pattern_lr, pattern_var, dataset) patterns, pattern_lr = run_dataset(dataset, patterns, pattern_lr) for i in range(epochs): dataset = add_gaussian_noise(dataset3, std=noise) if i % int(epochs / 2) == 0: print("Iteration ", i) plot_patterns(patterns, pattern_lr, pattern_var, dataset) patterns, pattern_lr = run_dataset(dataset, patterns, pattern_lr) plot_patterns(patterns, pattern_lr, pattern_var, original_dataset) ``` Notes - - Patterns that see data (are winners) become "sticky", while 
rest of the pattern-pool remains more fluid to move towards subspaces that were previously unused. For example, learning an unrelated task. This could implications on meta-learning. - Available pattern pool gets used to locally optimally represent data. This can be seen by using a small number of patterns (say 3x3) or a large number of patterns (say 100x100). The fact that a dense grid is not required should come in handy to fight the curse of dimentionality.
github_jupyter
Original samples in https://fslab.org/FSharp.Charting/FurtherSamples.html ``` #load "FSharp.Charting.Paket.fsx" #load "FSharp.Charting.fsx" ``` ## Sample data ``` open FSharp.Charting open System open System.Drawing let data = [ for x in 0 .. 99 -> (x,x*x) ] let data2 = [ for x in 0 .. 99 -> (x,sin(float x / 10.0)) ] let data3 = [ for x in 0 .. 99 -> (x,cos(float x / 10.0)) ] let timeSeriesData = [ for x in 0 .. 99 -> (DateTime.Now.AddDays (float x),sin(float x / 10.0)) ] let rnd = new System.Random() let rand() = rnd.NextDouble() let pointsWithSizes = [ for i in 0 .. 30 -> (rand() * 10.0, rand() * 10.0, rand() / 100.0) ] let pointsWithSizes2 = [ for i in 0 .. 10 -> (rand() * 10.0, rand() * 10.0, rand() / 100.0) ] let timeHighLowOpenClose = [ for i in 0 .. 10 -> let mid = rand() * 10.0 (DateTime.Now.AddDays (float i), mid + 0.5, mid - 0.5, mid + 0.25, mid - 0.25) ] let timedPointsWithSizes = [ for i in 0 .. 30 -> (DateTime.Now.AddDays(rand() * 10.0), rand() * 10.0, rand() / 100.0) ] ``` ## Examples ``` Chart.Line(data).WithXAxis(MajorGrid=ChartTypes.Grid(Enabled=false)) Chart.Line [ DateTime.Now, 1; DateTime.Now.AddDays(1.0), 10 ] Chart.Line [ for h in 1 .. 50 -> DateTime.Now.AddHours(float h), sqrt (float h) ] Chart.Line [ for h in 1 .. 
50 -> DateTime.Now.AddMinutes(float h), sqrt (float h) ] Chart.Line(data,Title="Test Title") Chart.Line(data,Title="Test Title").WithTitle(InsideArea=false) Chart.Line(data,Title="Test Title").WithTitle(InsideArea=true) Chart.Line(data,Title="Test Title") |> Chart.WithTitle(InsideArea=true) Chart.Line(data,Name="Test Data") |> Chart.WithXAxis(Enabled=true,Title="X Axis") Chart.Line(data,Name="Test Data") |> Chart.WithXAxis(Enabled=false,Title="X Axis") Chart.Line(data,Name="Test Data") .WithXAxis(Enabled=false,Title="X Axis") Chart.Line(data,Name="Test Data") .WithXAxis(Enabled=true,Title="X Axis",Max=10.0, Min=0.0) .WithYAxis(Max=100.0,Min=0.0) Chart.Line(data,Name="Test Data").WithLegend(Title="Hello") Chart.Line(data,Name="Test Data").WithLegend(Title="Hello",Enabled=false) Chart.Line(data,Name="Test Data").With3D() // TODO: x/y axis labels are a bit small by default Chart.Line(data,Name="Test Data",XTitle="hello", YTitle="goodbye") Chart.Line(data,Name="Test Data").WithXAxis(Title="XXX") Chart.Line(data,Name="Test Data").WithXAxis(Title="XXX",Max=10.0,Min=4.0) .WithYAxis(Title="YYY",Max=100.0,Min=4.0,Log=true) Chart.Combine [ Chart.Line(data,Name="Test Data 1 With Long Name") Chart.Line(data2,Name="Test Data 2") ] |> Chart.WithLegend(Enabled=true,Title="Hello",Docking=ChartTypes.Docking.Left) Chart.Combine [ Chart.Line(data,Name="Test Data 1") Chart.Line(data2,Name="Test Data 2") ] |> Chart.WithLegend(Docking=ChartTypes.Docking.Left, InsideArea=true) Chart.Combine [ Chart.Line(data,Name="Test Data 1") Chart.Line(data2,Name="Test Data 2") ] |> Chart.WithLegend(InsideArea=true) Chart.Rows [ Chart.Line(data,Title="Chart 1", Name="Test Data 1") Chart.Line(data2,Title="Chart 2", Name="Test Data 2") ] |> Chart.WithLegend(Title="Hello",Docking=ChartTypes.Docking.Left) // TODO: this title and docking left doesn't work Chart.Columns [ Chart.Line(data,Name="Test Data 1") Chart.Line(data2,Name="Test Data 2")] |> 
Chart.WithLegend(Title="Hello",Docking=ChartTypes.Docking.Left) Chart.Combine [ Chart.Line(data,Name="Test Data 1") Chart.Line(data2,Name="Test Data 2") ] |> Chart.WithLegend(Title="Hello",Docking=ChartTypes.Docking.Bottom) Chart.Line(data,Name="Test Data") Chart.Line(data,Name="Test Data").WithLegend(Enabled=false) Chart.Line(data,Name="Test Data").WithLegend(InsideArea=true) Chart.Line(data,Name="Test Data").WithLegend(InsideArea=false) Chart.Line(data).WithLegend().CopyAsBitmap() Chart.Line(data) Chart.Line(data,Name="Test Data").WithLegend(InsideArea=false) Chart.Area(data) Chart.Area(timeSeriesData) Chart.Line(data) Chart.Bar(data) Chart.Bar(timeSeriesData) Chart.Spline(data) Chart.Spline(timeSeriesData) Chart.Bubble(pointsWithSizes) Chart.Bubble(pointsWithSizes) .WithMarkers(Style=ChartTypes.MarkerStyle.Star10) Chart.Bubble(pointsWithSizes) .WithMarkers(Style=ChartTypes.MarkerStyle.Diamond) Chart.Bubble(pointsWithSizes) .WithMarkers(Style=ChartTypes.MarkerStyle.Cross,Color=Color.Red) Chart.Bubble(pointsWithSizes) .WithMarkers(Style=ChartTypes.MarkerStyle.Cross,Color=Color.Red,MaxPixelPointWidth=3) Chart.Bubble(pointsWithSizes) .WithMarkers(Style=ChartTypes.MarkerStyle.Cross,Size=3) Chart.Bubble(pointsWithSizes) .WithMarkers(Style=ChartTypes.MarkerStyle.Cross,PointWidth=0.1) Chart.Bubble(pointsWithSizes) .WithMarkers(Style=ChartTypes.MarkerStyle.Cross,PixelPointWidth=3) Chart.Bubble(pointsWithSizes).WithMarkers(Style=ChartTypes.MarkerStyle.Circle) Chart.Bubble(pointsWithSizes).WithMarkers(Style=ChartTypes.MarkerStyle.Square) Chart.Bubble(pointsWithSizes).WithMarkers(Style=ChartTypes.MarkerStyle.Star6) Chart.Combine [ Chart.Bubble(pointsWithSizes,UseSizeForLabel=true) .WithMarkers(Style=ChartTypes.MarkerStyle.Circle) Chart.Bubble(pointsWithSizes2).WithMarkers(Style=ChartTypes.MarkerStyle.Star10) ] Chart.Bubble(timedPointsWithSizes) Chart.Candlestick(timeHighLowOpenClose) Chart.Column(data) Chart.Column(timeSeriesData) Chart.Pie(Name="Pie", data=[ for i in 0 .. 
10 -> i, i*i ]) Chart.Pie(Name="Pie", data=timeSeriesData) Chart.Doughnut(data=[ for i in 0 .. 10 -> i, i*i ]) Chart.Doughnut(timeSeriesData) Chart.FastPoint [ for x in 1 .. 10000 -> (rand(), rand()) ] Chart.FastPoint timeSeriesData Chart.Polar ([ for x in 1 .. 100 -> (360.0*rand(), rand()) ] |> Seq.sortBy fst) Chart.Pyramid ([ for x in 1 .. 100 -> (360.0*rand(), rand()) ] |> Seq.sortBy fst) Chart.Radar ([ for x in 1 .. 100 -> (360.0*rand(), rand()) ] |> Seq.sortBy fst) Chart.Range ([ for x in 1.0 .. 10.0 -> (x, x + rand(), x-rand()) ]) Chart.RangeBar ([ for x in 1.0 .. 10.0 -> (x, x + rand(), x-rand()) ]) Chart.RangeColumn ([ for x in 1.0 .. 10.0 -> (x, x + rand(), x-rand()) ]) Chart.SplineArea ([ for x in 1.0 .. 10.0 -> (x, x + rand()) ]) Chart.SplineRange ([ for x in 1.0 .. 10.0 -> (x, x + rand(), x - rand()) ]) Chart.StackedBar ([ [ for x in 1.0 .. 10.0 -> (x, x + rand()) ]; [ for x in 1.0 .. 10.0 -> (x, x + rand()) ] ]) Chart.StackedColumn ([ [ for x in 1.0 .. 10.0 -> (x, x + rand()) ]; [ for x in 1.0 .. 10.0 -> (x, x + rand()) ] ]) Chart.StackedArea ([ [ for x in 1.0 .. 10.0 -> (x, x + rand()) ]; [ for x in 1.0 .. 10.0 -> (x, x + rand()) ] ]) Chart.StackedArea ([ [ for x in 1.0 .. 10.0 -> (DateTime.Now.AddDays x, x + rand()) ]; [ for x in 1.0 .. 10.0 -> (DateTime.Now.AddDays x, x + rand()) ] ]) Chart.StepLine(data,Name="Test Data").WithLegend(InsideArea=false) Chart.StepLine(timeSeriesData,Name="Test Data").WithLegend(InsideArea=false) Chart.Line(data,Name="SomeData").WithDataPointLabels(PointToolTip="Hello, I am #SERIESNAME") Chart.Stock(timeHighLowOpenClose) Chart.ThreeLineBreak(data,Name="SomeData").WithDataPointLabels(PointToolTip="Hello, I am #SERIESNAME") Chart.Histogram([for x in 1 .. 100 -> rand()*10.],LowerBound=0.,UpperBound=10.,Intervals=10.) // Example of .ApplyToChart() used to alter the settings on the window chart and to access the chart child objects. 
// This can normally be done manually, in the chart property grid (right click the chart, then "Show Property Grid"). // This is useful when you want to try out various settings first. But once you know what you want, .ApplyToChart() // allows programmatic access to the window properties. The two examples below are: IsUserSelectionEnabled essentially // allows zooming in and out along the given axes, and the longer fiddly example below does the same work as .WithDataPointLabels() // but across all series objects. [ Chart.Column(data); Chart.Column(data2) |> Chart.WithSeries.AxisType( YAxisType = Windows.Forms.DataVisualization.Charting.AxisType.Secondary ) ] |> Chart.Combine |> fun c -> c.WithLegend() .ApplyToChart( fun c -> c.ChartAreas.[0].CursorX.IsUserSelectionEnabled <- true ) .ApplyToChart( fun c -> let _ = [0 .. c.Series.Count-1] |> List.map ( fun s -> c.Series.[ s ].ToolTip <- "#SERIESNAME (#VALX, #VAL{0:00000})" ) in () ) ```
github_jupyter
<a href="https://colab.research.google.com/github/c-w-m/btap/blob/master/ch02/API_Data_Extraction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Original source: [**Blueprints for Text Analysis Using Python**](https://github.com/blueprints-for-text-analytics-python/blueprints-text)<br> Jens Albrecht, Sidharth Ramachandran, Christian Winkler # Chapter 2: API Data Extraction<div class='tocSkip'/> ## Remark<div class='tocSkip'/> The code in this notebook differs slightly from the printed book. For example we frequently use pretty print (`pp.pprint`) instead of `print` and `tqdm`'s `progress_apply` instead of Pandas' `apply`. Moreover, several layout and formatting commands, like `figsize` to control figure size or subplot commands are removed in the book. You may also find some lines marked with three hashes ###. Those are not in the book either, as they do not contribute to the concept. All of this is done to simplify the code in the book and put the focus on the important parts instead of formatting. ## Setup<div class='tocSkip'/> Set directory locations. If working on Google Colab: copy files and install required libraries. ``` import sys, os ON_COLAB = 'google.colab' in sys.modules if ON_COLAB: GIT_ROOT = 'https://github.com/c-w-m/btap/raw/master' os.system(f'wget {GIT_ROOT}/ch02/setup.py') %run -i setup.py ``` ## Load Python Settings<div class="tocSkip"/> Common imports, defaults for formatting in Matplotlib, Pandas etc. 
``` %run "$BASE_DIR/settings.py" if ON_COLAB: %reload_ext autoreload %autoreload 2 %config InlineBackend.figure_format = 'png' # to print output of all statements and not just the last from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # otherwise text between $ signs will be interpreted as formula and printed in italic pd.set_option('display.html.use_mathjax', False) # path to import blueprints packages sys.path.append(BASE_DIR + '/packages') # adjust matplotlib resolution for book version matplotlib.rcParams.update({'figure.dpi': 200 }) ``` # How to use APIs to extract and derive insights from text data # Application Programming Interface # Blueprint - Extracting data from an API using the requests module ``` import requests response = requests.get('https://api.github.com/repositories', headers={'Accept': 'application/vnd.github.v3+json'}) print(response.status_code) print('encoding: {}'.format(response.encoding)) print('Content-Type: {}'.format(response.headers['Content-Type'])) print('server: {}'.format(response.headers['server'])) response.headers import json print(json.dumps(response.json()[0], indent=2)[:200]) response = requests.get('https://api.github.com/search/repositories') print(response.status_code) response = requests.get('https://api.github.com/search/repositories', params={'q': 'data_science+language:python'}, headers={'Accept': 'application/vnd.github.v3.text-match+json'}) print(response.status_code) from IPython.display import Markdown, display ### def printmd(string): ### display(Markdown(string)) ### for item in response.json()['items'][:5]: printmd('**' + item['name'] + '**' + ': repository ' + item['text_matches'][0]['property'] + ' - \"*' + item['text_matches'][0]['fragment'] + '*\" matched with ' + '**' + item['text_matches'][0]['matches'][0]['text'] + '**') response = requests.get( 'https://api.github.com/repos/pytorch/pytorch/issues/comments') print('Response Code', 
response.status_code) print('Number of comments', len(response.json())) response.links def get_all_pages(url, params=None, headers=None): output_json = [] response = requests.get(url, params=params, headers=headers) if response.status_code == 200: output_json = response.json() if 'next' in response.links: next_url = response.links['next']['url'] if next_url is not None: output_json += get_all_pages(next_url, params, headers) return output_json out = get_all_pages( "https://api.github.com/repos/pytorch/pytorch/issues/comments", params={ 'since': '2020-07-01T10:00:01Z', 'sorted': 'created', 'direction': 'desc' }, headers={'Accept': 'application/vnd.github.v3+json'}) df = pd.DataFrame(out) pd.set_option('display.max_colwidth', -1) if ('body' in df.index): print(df['body'].count()) print(df[['id','created_at','body']].sample(1, random_state=42)) response = requests.head( 'https://api.github.com/repos/pytorch/pytorch/issues/comments') print('X-Ratelimit-Limit', response.headers['X-Ratelimit-Limit']) print('X-Ratelimit-Remaining', response.headers['X-Ratelimit-Remaining']) # Converting UTC time to human-readable format import datetime print( 'Rate Limits reset at', datetime.datetime.fromtimestamp(int( response.headers['X-RateLimit-Reset'])).strftime('%c')) from datetime import datetime import time def handle_rate_limits(response): now = datetime.now() reset_time = datetime.fromtimestamp( int(response.headers['X-RateLimit-Reset'])) remaining_requests = response.headers['X-Ratelimit-Remaining'] remaining_time = (reset_time - now).total_seconds() intervals = remaining_time / (1.0 + int(remaining_requests)) print('Sleeping for', intervals) time.sleep(intervals) return True from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry retry_strategy = Retry( total=5, status_forcelist=[500, 503, 504], backoff_factor=1 ) retry_adapter = HTTPAdapter(max_retries=retry_strategy) http = requests.Session() http.mount("https://", retry_adapter) 
http.mount("http://", retry_adapter) response = http.get('https://api.github.com/search/repositories', params={'q': 'data_science+language:python'}) for item in response.json()['items'][:5]: print(item['name']) from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry retry_strategy = Retry( total=5, status_forcelist=[500, 503, 504], backoff_factor=1 ) retry_adapter = HTTPAdapter(max_retries=retry_strategy) http = requests.Session() http.mount("https://", retry_adapter) http.mount("http://", retry_adapter) def get_all_pages(url, param=None, header=None): output_json = [] response = http.get(url, params=param, headers=header) if response.status_code == 200: output_json = response.json() if 'next' in response.links: next_url = response.links['next']['url'] if (next_url is not None) and (handle_rate_limits(response)): output_json += get_all_pages(next_url, param, header) return output_json out = get_all_pages("https://api.github.com/repos/pytorch/pytorch/issues/comments", param={'since': '2020-04-01T00:00:01Z'}) df = pd.DataFrame(out) ``` # Blueprint - Extracting Twitter data with Tweepy ``` import tweepy app_api_key = 'YOUR_APP_KEY_HERE' app_api_secret_key = 'YOUR_APP_SECRET_HERE' app_api_key = 'CWIBFKPrcOU4GsdRr6J5fpaps' app_api_secret_key = 'SghP0LINUECDj0PzIi1vmDfRtNopqJNfb5xd3fH7XpO9ZaEtme' auth = tweepy.AppAuthHandler(app_api_key, app_api_secret_key) api = tweepy.API(auth) print('API Host: {}'.format(api.host)) print('API Version: {}'.format(api.api_root)) pd.set_option('display.max_colwidth', None) search_term = 'cryptocurrency' tweets = tweepy.Cursor(api.search, q=search_term, lang="en").items(100) retrieved_tweets = [tweet._json for tweet in tweets] df = pd.json_normalize(retrieved_tweets) df[['text']].sample(3) api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, retry_count=5, retry_delay=10) search_term = 'cryptocurrency OR crypto -filter:retweets' tweets = tweepy.Cursor(api.search, 
q=search_term, lang="en", tweet_mode='extended', count=30).items(12000) # Note: the following code might return 'Rate limit reached. Sleeping for: 750 retrieved_tweets = [tweet._json for tweet in tweets] df = pd.json_normalize(retrieved_tweets) print('Number of retrieved tweets {}'.format(len(df))) df[['created_at','full_text','entities.hashtags']].sample(2) def extract_entities(entity_list): entities = set() if len(entity_list) != 0: for item in entity_list: for key,value in item.items(): if key == 'text': entities.add(value.lower()) return list(entities) df['Entities'] = df['entities.hashtags'].apply(extract_entities) pd.Series(np.concatenate(df['Entities'])).value_counts()[:25].plot(kind='barh') api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True) tweets = tweepy.Cursor(api.user_timeline, screen_name='MercedesAMGF1', lang="en", tweet_mode='extended', count=100).items(5000) retrieved_tweets = [tweet._json for tweet in tweets] df = pd.io.json.json_normalize(retrieved_tweets) print('Number of retrieved tweets {}'.format(len(df))) def get_user_timeline(screen_name): api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True) tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, lang="en", tweet_mode='extended', count=200).items() retrieved_tweets = [tweet._json for tweet in tweets] df = pd.io.json.json_normalize(retrieved_tweets) df = df[~df['retweeted_status.id'].isna()] return df df_mercedes = get_user_timeline('MercedesAMGF1') print('Number of Tweets from Mercedes {}'.format(len(df_mercedes))) df_ferrari = get_user_timeline('ScuderiaFerrari') print('Number of Tweets from Ferrari {}'.format(len(df_ferrari))) import regex as re import nltk from collections import Counter from wordcloud import WordCloud stopwords = set(nltk.corpus.stopwords.words('english')) RE_LETTER = re.compile(r'\b\p{L}{2,}\b') def tokenize(text): return RE_LETTER.findall(text) def remove_stop(tokens): return [t for t in tokens if 
t.lower() not in stopwords] pipeline = [str.lower, tokenize, remove_stop] def prepare(text): tokens = text for transform in pipeline: tokens = transform(tokens) return tokens def count_words(df, column='tokens', preprocess=None, min_freq=2): # process tokens and update counter def update(doc): tokens = doc if preprocess is None else preprocess(doc) counter.update(tokens) # create counter and run through all data counter = Counter() df[column].map(update) # transform counter into data frame freq_df = pd.DataFrame.from_dict(counter, orient='index', columns=['freq']) freq_df = freq_df.query('freq >= @min_freq') freq_df.index.name = 'token' return freq_df.sort_values('freq', ascending=False) def wordcloud(word_freq, title=None, max_words=200, stopwords=None): wc = WordCloud(width=800, height=400, background_color= "black", colormap="Paired", max_font_size=150, max_words=max_words) # convert data frame into dict if type(word_freq) == pd.Series: counter = Counter(word_freq.fillna(0).to_dict()) else: counter = word_freq # filter stop words in frequency counter if stopwords is not None: counter = {token:freq for (token, freq) in counter.items() if token not in stopwords} wc.generate_from_frequencies(counter) plt.title(title) plt.imshow(wc, interpolation='bilinear') plt.axis("off") def wordcloud_blueprint(df, colName, max_words, num_stopwords): # Step 1: Convert input text column into tokens df['tokens'] = df[colName].map(prepare) # Step 2: Determine the frequency of each of the tokens freq_df = count_words(df) # Step 3: Generate the wordcloud using the frequencies controlling for stopwords wordcloud(freq_df['freq'], max_words, stopwords=freq_df.head(num_stopwords).index) plt.figure(figsize=(12, 4)) plt.subplot(1, 2, 1) wordcloud_blueprint(df_mercedes, 'full_text', max_words=100, num_stopwords=5) plt.subplot(1, 2, 2) wordcloud_blueprint(df_ferrari, 'full_text', max_words=100, num_stopwords=5) from datetime import datetime import math class 
FileStreamListener(tweepy.StreamListener): def __init__(self, max_tweets=math.inf): self.num_tweets = 0 self.TWEETS_FILE_SIZE = 10 self.num_files = 0 self.tweets = [] self.max_tweets = max_tweets def on_data(self, data): while (self.num_files * self.TWEETS_FILE_SIZE < self.max_tweets): self.tweets.append(json.loads(data)) self.num_tweets += 1 if (self.num_tweets < self.TWEETS_FILE_SIZE): return True else: filename = 'Tweets_' + str(datetime.now().time()) + '.txt' print(self.TWEETS_FILE_SIZE, 'Tweets saved to', filename) file = open(filename, "w") json.dump(self.tweets, file) file.close() self.num_files += 1 self.tweets = [] self.num_tweets = 0 return True return False def on_error(self, status_code): if status_code == 420: print('Too many requests were made, please stagger requests') return False else: print('Error {}'.format(status_code)) return False user_access_token = 'YOUR_USER_ACCESS_TOKEN_HERE' user_access_secret = 'YOUR_USER_ACCESS_SECRET_HERE' app_api_key = 'CWIBFKPrcOU4GsdRr6J5fpaps' app_api_secret_key = 'SghP0LINUECDj0PzIi1vmDfRtNopqJNfb5xd3fH7XpO9ZaEtme' auth = tweepy.OAuthHandler(app_api_key, app_api_secret_key) auth.set_access_token(user_access_token, user_access_secret) api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True) fileStreamListener = FileStreamListener(20) fileStream = tweepy.Stream(auth=api.auth, listener=fileStreamListener, tweet_mode='extended') fileStream.filter(track=['cryptocurrency']) if ON_COLAB: df = pd.json_normalize(json.load(open('ch02/Tweets_01:01:36.656960.txt'))) else: df = pd.json_normalize(json.load(open('Tweets_01:01:36.656960.txt'))) df.head(2) import wikipediaapi wiki_wiki = wikipediaapi.Wikipedia( language='en', extract_format=wikipediaapi.ExtractFormat.WIKI ) p_wiki = wiki_wiki.page('Cryptocurrency') print(p_wiki.text[:200], '....') ``` # Closing Remarks
github_jupyter
# Examples for Bounded Innovation Propagation (BIP) MM ARMA parameter estimation ``` import numpy as np import scipy.signal as sps import robustsp as rsp import matplotlib.pyplot as plt import matplotlib # Fix random number generator for reproducibility np.random.seed(1) ``` ## Example 1: AR(1) with 30 percent isolated outliers ``` # Generate AR(1) observations N = 300 a = np.random.randn(N) x = sps.lfilter([1],[1,-.8],a) p = 1 q = 0 ``` ### Generate isolated Outliers ``` cont_prob = 0.3 # outlier contamination probability outlier_ind = np.where(np.sign(np.random.rand(N)-cont_prob)<0)# outlier index outlier = 100*np.random.randn(N) # contaminating process v = np.zeros(N) # additive outlier signal v[outlier_ind] = outlier[outlier_ind] v[0] = 0 # first sample should not be an outlier x_ao = x+v # 30% of isolated additive outliers ``` ### BIP MM Estimation ``` result = rsp.arma_est_bip_mm(x_ao,p,q) print('Example: AR(1) with ar_coeff = -0.8') print('30% of isolated additive outliers') print('estimaed coefficients: %.3f' % result['ar_coeffs']) %matplotlib inline matplotlib.rcParams['figure.figsize'] = [10, 10] plt.subplot(2,1,1) plt.plot(x_ao,'-',lw=2,label='outlier contaminated AR(1)') plt.plot(result['cleaned_signal'],'-.',c='y',label='cleaned') plt.xlabel('samples') plt.ylabel('Amplitude') plt.title('BIP-AR(1) cleaned signal') plt.legend() plt.subplot(2,1,2) plt.plot(x,lw=2,label='original AR(1)') plt.plot(result['cleaned_signal'],'-.',label='cleaned') plt.xlabel('samples') plt.ylabel('Amplitude') plt.title('BIP-AR(1) cleaned signal') plt.legend() plt.show() ``` # Example 2: ARMA(1,1) with 10% patchy outliers ## Generate ARMA(1,1) observations ``` N = 1000 a = np.random.randn(N) x = sps.lfilter([1, 0.2],[1, -.8],a) p = 1 q = 1 ``` ## Generate a patch of outliers of length 101 samples ``` v = 1000*np.random.randn(101) ``` ## 10% of patch additive outliers ``` x_ao = np.array(x) x_ao[99:200] += v ``` ### BIP-MM estimation ``` result = rsp.arma_est_bip_mm(x_ao,p,q) 
print('''Example 2: ARMA(1,1) with ar_coeff = -0.8, ma_coeff 0.2' \n 10 percent patchy additive outliers \n estimated coefficients: \n ar_coeff_est = %.3f \n ma_coeff_est = %.3f''' %(result['ar_coeffs'],result['ma_coeffs'])) plt.subplot(2,1,1) plt.plot(x_ao,'-',lw=2,label='outlier contaminated AR(1)') plt.plot(result['cleaned_signal'],label='cleaned') plt.xlabel('samples') plt.ylabel('Amplitude') plt.title('BIP-ARMA(1,1) cleaned signal') plt.legend() plt.subplot(2,1,2) plt.plot(x,lw=2,label='original ARMA(1,1)') plt.plot(result['cleaned_signal'],label='cleaned') plt.xlabel('samples') plt.ylabel('Amplitude') plt.title('BIP-ARMA(1,1) cleaned signal') plt.legend() plt.show() ``` # Example 3: MA(2) with 20 % isolated Outliers ## Generate MA(2) observations ``` N = 500 a = np.random.randn(N) x = sps.lfilter([1,-.7,.5],[1],a) p=0 q=2 ``` ## Generate isolated Outliers ``` cont_prob = 0.2 outlier_ind = np.where(np.sign(np.random.rand(N)-(cont_prob))<0) outlier = 100*np.random.randn(N) v = np.zeros(N) v[outlier_ind] = outlier[outlier_ind] v[:2] = 0 ``` ## 20 % of isolated additive Outliers ``` x_ao = x+v ``` ## BIP MM estimation ``` result = rsp.arma_est_bip_mm(x_ao,p,q) print('''Example 3: MA(2) ma_coeff [-0.7 0.5]' \n 20 % of isolated additive Outliers \n estimated coefficients: \n ma_coeff_est = ''',result['ma_coeffs']) plt.subplot(2,1,1) plt.plot(x_ao,'-',lw=2,label='outlier contaminated AR(1)') plt.plot(result['cleaned_signal'],label='cleaned') plt.xlabel('samples') plt.ylabel('Amplitude') plt.title('BIP-MA(2) cleaned signal') plt.legend() plt.subplot(2,1,2) plt.plot(x,lw=2,label='original MA(2)') plt.plot(result['cleaned_signal'],label='cleaned') plt.xlabel('samples') plt.ylabel('Amplitude') plt.title('BIP-MA(2) cleaned signal') plt.legend() plt.show() ```
github_jupyter
# Perceptron ### TODO - **[ok]** Ajouter dans le code la fonction d'évaluation du réseau - **[ok]** Plot de $\sum |E|$ par itération (i.e. num updates par itération) - Critère d'arrêt + générale - Lire l'article de référence - Ajouter la preuve de convergence - Ajouter notations et explications - Tester l'autre version de la règle de mise à jour de $w$: if err then ... - **[ok]** Décrire et illustrer les deux fonctions de transfert: signe et heaviside - Plot de l'évolution de la courbe de niveau ($x_1 w_1 + x_2 w_2 + ... = 0$) dans l'espace des entrées: illustration avec 2 entrées seulement ou faire un graph de projection de type *scatter plot matrix* - Plot de l'évolution de $w$ dans l'espace des $w$ illustration avec 2 entrées seulement ou faire un graph de projection de type *scatter plot matrix* - Ajouter "Les limites du Perceptron" Définition des macros LaTeX... $$ \newcommand{\activthres}{\theta} \newcommand{\activfunc}{f} \newcommand{\pot}{p} \newcommand{\learnrate}{\eta} \newcommand{\it}{t} \newcommand{\sigin}{s_i} \newcommand{\sigout}{s_j} \newcommand{\sigoutdes}{d_j} \newcommand{\wij}{w_{ij}} $$ Auteur: F. Rosenblatt Reference: F. Rosenblatt 1958 *The Perceptron: a Probabilistic Model for Information Storage and Organization in the Brain* Psychological Review, 65, 386-408 Le modèle est constitué des éléments suivants: - des *unités sensitives (S-units)*: réagissent à un stimulus extérieur (lumière, son, touché, ...) 
- retournent `0` ou `1`: - `1` si le signal d'entrée dépasse un seuil $\activthres$ - `0` sinon - des *unités d'associations (A-units)* - retournent `0` ou `1`: - `1` si la somme des signaux d'entrée dépasse un seuil $\activthres$ - `0` sinon - des *unités de réponse (R-units)*: sortie du réseau - retournent `1`, `-1` ou une valeur indéterminée: - `1` si la somme des signaux d'entrée est positive - `-1` si elle est négative - une valeur indéterminée si elle est égale à 0 - une *matrice d'intéractions* Evaluation de la fonction: $$ \pot = \sum \sigin \wij $$ $$ \sigout = \activfunc(\pot - \activthres) $$ Fonction de transfert: signe et heaviside ``` %matplotlib inline #x = np.linspace(-5, 5, 300) #y = np.array([-1 if xi < 0 else 1 for xi in x]) #plt.plot(x, y) plt.hlines(y=-1, xmin=-5, xmax=0, color='red') plt.hlines(y=1, xmin=0, xmax=5, color='red') plt.hlines(y=0, xmin=-5, xmax=5, color='gray', linestyles='dotted') plt.vlines(x=0, ymin=-2, ymax=2, color='gray', linestyles='dotted') plt.title("Fonction signe") plt.axis([-5, 5, -2, 2]) #x = np.linspace(-5, 5, 300) #y = (x > 0).astype('float') #plt.plot(x, y) plt.hlines(y=0, xmin=-5, xmax=0, color='red') plt.hlines(y=1, xmin=0, xmax=5, color='red') plt.hlines(y=0, xmin=-5, xmax=5, color='gray', linestyles='dotted') plt.vlines(x=0, ymin=-2, ymax=2, color='gray', linestyles='dotted') plt.title("Fonction heaviside") plt.axis([-5, 5, -2, 2]) ``` Règle du Perceptron (mise à jour des poid $\wij$): $$ \wij(\it + 1) = \wij(\it) + \learnrate (\sigoutdes - \sigout) \sigin $$ * $\learnrate$: pas d'apprentissage, $\learnrate \in [0, 1]$. Géneralement, on lui donne une valeur proche de 1 au début de l'apprentissage et on diminue sa valeur à chaque itération. 
Poids de depart des synapses du réseau Nombre de neurones associatifs (A-units) Nombre d'unités sensitives Motif à apprendre ``` %matplotlib inline import numpy as np import matplotlib import matplotlib.pyplot as plt import matplotlib.patches as patches import matplotlib.lines as mlines import matplotlib.patches as mpatches import itertools # https://github.com/jeremiedecock/neural-network-figures.git import nnfigs.core as fig fig.draw_neural_network(); # Poids de depart des synapses du réseau initial_weights = np.array([0., 0., 0., 0., 2.]) # Pas d'apprentissage eta=1 learning_rate = 1. class Log: def __init__(self): self.input_signal = [] self.output_signal = [] self.desired_output_signal = [] self.error = [] self.weights = [] self.iteration = [] self.current_iteration = 0 def log(self, input_signal, output_signal, desired_output_signal, error, weights): self.input_signal.append(input_signal) self.output_signal.append(output_signal) self.desired_output_signal.append(desired_output_signal) self.error.append(error) self.weights.append(weights) self.iteration.append(self.current_iteration) log = Log() def sign_function(x): y = 1. if x >= 0. else -1. return y def heaviside_function(x): y = 1. if x >= 0. else 0. 
return y def activation_function(p): return heaviside_function(p) def evaluate_network(weights, input_signal): # TODO: find a better name p = np.sum(input_signal * weights) output_signal = activation_function(p) return output_signal def update_weights(weights, input_signal, desired_output_signal): output_signal = evaluate_network(weights, input_signal) error = desired_output_signal - output_signal weights = weights + learning_rate * error * input_signal log.log(input_signal, output_signal, desired_output_signal, error, weights) return weights def learn_examples(example_list, label_list, weights, num_iterations): for it in range(num_iterations): log.current_iteration = it for input_signal, desired_output_signal in zip(example_list, label_list): weights = update_weights(weights, np.array(input_signal + (-1,)), desired_output_signal) return weights ``` Rappel: $\sigin \in \{0, 1\}$ ``` example_list = tuple(reversed(tuple(itertools.product((0., 1.), repeat=4)))) # Motif à apprendre: (1 0 0 1) label_list = [1. if x == (1., 0., 0., 1.) else 0. for x in example_list] print(example_list) print(label_list) weights = learn_examples(example_list, label_list, initial_weights, 5) weights for input_signal, output_signal, desired_output_signal, error, weights, iteration in zip(log.input_signal, log.output_signal, log.desired_output_signal, log.error, log.weights, log.iteration): print(iteration, input_signal, output_signal, desired_output_signal, error, weights) plt.plot(log.error) import pandas as pd df = pd.DataFrame(np.array([log.iteration, log.error]).T, columns=["Iteration", "Error"]) abs_err_per_it = abs(df).groupby(["Iteration"]).sum() abs_err_per_it.plot(title="Sum of absolute errors per iteration") ```
github_jupyter
## Load Weight ``` import torch import numpy as np path = './output/0210/Zero/checkpoint_400.pth' import os assert(os.path.isfile(path)) weight = torch.load(path) input_dim = weight['input_dim'] branchNum = weight['branchNum'] IOScale = weight['IOScale'] state_dict = weight['state_dict'] # n_layers = weight['n_layers'] n_layers = 6 ``` ## Load Model ``` from model import Model model = Model(branchNum, input_dim, n_layers) model.load_state_dict(weight['state_dict']) model = model.q_layer.layers model.eval() ``` ## Save to mat file ``` from inspect import isfunction from scipy.io import savemat name = 'SMINet' v_names,d = [],{} hdims = [] dim = 0 firstflag = False for idx,layer in enumerate(model): # handle Linear layer if isinstance(layer,torch.nn.Linear): layername = 'F_hid_lin_{dim}_kernel'.format(dim=dim) d[layername] = layer.weight.detach().numpy().T hdims.append(layer.weight.detach().numpy().T.shape[1]) layername = 'F_hid_lin_{dim}_bias'.format(dim=dim) d[layername] = layer.bias.detach().numpy().T lastlayer = idx dim = dim+1 # find fist layer if firstflag == False: firstlayer = idx firstflag = True # handle normalization layer if isinstance(layer,torch.nn.BatchNorm1d): layername = 'F_bn_{dim}_mean'.format(dim=dim-1) d[layername] = layer.running_mean.detach().numpy() layername = 'F_bn_{dim}_sigma'.format(dim=dim-1) sigma = torch.sqrt(layer.running_var+1e-5) d[layername] = sigma.detach().numpy() layername = 'F_bn_{dim}_kernel'.format(dim=dim-1) d[layername] = layer.weight.detach().numpy() layername = 'F_bn_{dim}_bias'.format(dim=dim-1) d[layername] = layer.bias.detach().numpy() # change name in last layer lastlayername = 'F_hid_lin_{dim}_kernel'.format(dim=dim-1) newlayername = 'F_y_pred_kernel' d[newlayername] = d[lastlayername] del d[lastlayername] lastlayername = 'F_hid_lin_{dim}_bias'.format(dim=dim-1) newlayername = 'F_y_pred_bias' d[newlayername] = d[lastlayername] del d[lastlayername] xdim = model[firstlayer].weight.detach().numpy().shape[1] ydim = 
model[lastlayer].weight.detach().numpy().shape[0] d['xdim'] = xdim d['ydim'] = ydim d['name'] = name d['hdims'] = np.array(hdims[:-1]) d['actv'] = 'leaky_relu' d # fix random seeds for reproducibility SEED = 1 torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(SEED) from dataloader import * data_path = './data/SorosimGrid' train_data_loader = iter(ToyDataloader(os.path.join(data_path,'train'), IOScale, n_workers=1, batch=1)) x_vald = np.zeros((10,xdim)) y_vald = np.zeros((10,ydim)) for i in range(10): (input,label) = next(train_data_loader) output = model(input) x_vald[i,:] = input.detach().numpy() y_vald[i,:] = output.detach().numpy() d['x_vald'] = x_vald d['y_vald'] = y_vald y_vald[-1,:],label dir_path = 'nets/%s'%(name) mat_path = os.path.join(dir_path,'weights.mat') if not os.path.exists(dir_path): os.makedirs(dir_path) print ("[%s] created."%(dir_path)) savemat(mat_path,d) # save to a mat file print ("[%s] saved. Size is[%.3f]MB."%(mat_path,os.path.getsize(mat_path) / 1000000)) ```
github_jupyter
# OneHotEncoder Performs One Hot Encoding. The encoder can select how many different labels per variable to encode into binaries. When top_categories is set to None, all the categories will be transformed in binary variables. However, when top_categories is set to an integer, for example 10, then only the 10 most popular categories will be transformed into binary, and the rest will be discarded. The encoder has also the possibility to create binary variables from all categories (drop_last = False), or remove the binary for the last category (drop_last = True), for use in linear models. ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from feature_engine.encoding import OneHotEncoder # Load titanic dataset from OpenML def load_titanic(): data = pd.read_csv('https://www.openml.org/data/get_csv/16826755/phpMYEkMl') data = data.replace('?', np.nan) data['cabin'] = data['cabin'].astype(str).str[0] data['pclass'] = data['pclass'].astype('O') data['age'] = data['age'].astype('float') data['fare'] = data['fare'].astype('float') data['embarked'].fillna('C', inplace=True) data.drop(labels=['boat', 'body', 'home.dest'], axis=1, inplace=True) return data data = load_titanic() data.head() X = data.drop(['survived', 'name', 'ticket'], axis=1) y = data.survived # we will encode the below variables, they have no missing values X[['cabin', 'pclass', 'embarked']].isnull().sum() ''' Make sure that the variables are type (object). if not, cast it as object , otherwise the transformer will either send an error (if we pass it as argument) or not pick it up (if we leave variables=None). 
''' X[['cabin', 'pclass', 'embarked']].dtypes # let's separate into training and testing set X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) X_train.shape, X_test.shape ``` One hot encoding consists in replacing the categorical variable by a combination of binary variables which take value 0 or 1, to indicate if a certain category is present in an observation. Each one of the binary variables are also known as dummy variables. For example, from the categorical variable "Gender" with categories 'female' and 'male', we can generate the boolean variable "female", which takes 1 if the person is female or 0 otherwise. We can also generate the variable male, which takes 1 if the person is "male" and 0 otherwise. The encoder has the option to generate one dummy variable per category, or to create dummy variables only for the top n most popular categories, that is, the categories that are shown by the majority of the observations. If dummy variables are created for all the categories of a variable, you have the option to drop one category not to create information redundancy. That is, encoding into k-1 variables, where k is the number if unique categories. The encoder will encode only categorical variables (type 'object'). A list of variables can be passed as an argument. If no variables are passed as argument, the encoder will find and encode categorical variables (object type). #### Note: New categories in the data to transform, that is, those that did not appear in the training set, will be ignored (no binary variable will be created for them). ### All binary, no top_categories ``` ''' Parameters ---------- top_categories: int, default=None If None, a dummy variable will be created for each category of the variable. Alternatively, top_categories indicates the number of most frequent categories to encode. Dummy variables will be created only for those popular categories and the rest will be ignored. 
Note that this is equivalent to grouping all the remaining categories in one group. variables : list The list of categorical variables that will be encoded. If None, the encoder will find and select all object type variables. drop_last: boolean, default=False Only used if top_categories = None. It indicates whether to create dummy variables for all the categories (k dummies), or if set to True, it will ignore the last variable of the list (k-1 dummies). ''' ohe_enc = OneHotEncoder(top_categories=None, variables=['pclass', 'cabin', 'embarked'], drop_last=False) ohe_enc.fit(X_train) ohe_enc.encoder_dict_ train_t = ohe_enc.transform(X_train) test_t = ohe_enc.transform(X_train) test_t.head() ``` ### Selecting top_categories to encode ``` ohe_enc = OneHotEncoder(top_categories=2, variables=['pclass', 'cabin', 'embarked'], drop_last=False) ohe_enc.fit(X_train) ohe_enc.encoder_dict_ train_t = ohe_enc.transform(X_train) test_t = ohe_enc.transform(X_train) test_t.head() ``` ### Dropping the last category for linear models ``` ohe_enc = OneHotEncoder(top_categories=None, variables=['pclass', 'cabin', 'embarked'], drop_last=True) ohe_enc.fit(X_train) ohe_enc.encoder_dict_ train_t = ohe_enc.transform(X_train) test_t = ohe_enc.transform(X_train) test_t.head() ``` ### Automatically select categorical variables This encoder selects all the categorical variables, if None is passed to the variable argument when calling the encoder. ``` ohe_enc = OneHotEncoder(top_categories=None, drop_last=True) ohe_enc.fit(X_train) train_t = ohe_enc.transform(X_train) test_t = ohe_enc.transform(X_train) test_t.head() ```
github_jupyter
``` from eva_cttv_pipeline.clinvar_xml_utils import * from consequence_prediction.repeat_expansion_variants.clinvar_identifier_parsing import parse_variant_identifier import os import sys import urllib import requests import xml.etree.ElementTree as ElementTree from collections import Counter import hgvs.parser from hgvs.exceptions import HGVSParseError import numpy as np import pandas as pd sys.path.append('../') from gather_stats import counts %matplotlib inline import matplotlib.pyplot as plt parser = hgvs.parser.Parser() PROJECT_ROOT = '/home/april/projects/opentargets' # dump of all records with no functional consequences: June consequence pred + ClinVar 6/26/2021 no_consequences_path = os.path.join(PROJECT_ROOT, 'no-consequences.xml.gz') dataset = ClinVarDataset(no_consequences_path) ``` ## Gather counts Among records with no functional consequences * how many of each variant type * how many have hgvs, sequence location w/ start/stop position at least, cytogenic location * of those with hgvs, how many can the library parse? * how many can our code parse? 
``` total_count, variant_type_hist, other_counts, exclusive_counts = counts(no_consequences_path, PROJECT_ROOT) print(total_count) plt.figure(figsize=(15,7)) plt.xticks(rotation='vertical') plt.title('Variant Types (no functional consequences and incomplete coordinates)') plt.bar(variant_type_hist.keys(), variant_type_hist.values()) variant_type_hist plt.figure(figsize=(15,7)) plt.xticks(rotation='vertical') plt.title('Variant Descriptors (no functional consequences and incomplete coordinates)') plt.bar(other_counts.keys(), other_counts.values()) other_counts def print_link_for_type(variant_type, min_score=-1): for record in dataset: if record.measure: m = record.measure if m.has_complete_coordinates: continue if m.variant_type == variant_type and record.score >= min_score: print(f'https://www.ncbi.nlm.nih.gov/clinvar/{record.accession}/') print_link_for_type('Microsatellite', min_score=1) ``` ### Examples Some hand-picked examples of complex variants from ClinVar. For each type I tried to choose at least one that seemed "typical" and one that was relatively high quality to get an idea of the variability, but no guarantees for how representative these are. 
* Duplication * https://www.ncbi.nlm.nih.gov/clinvar/variation/1062574/ * https://www.ncbi.nlm.nih.gov/clinvar/variation/89496/ * Deletion * https://www.ncbi.nlm.nih.gov/clinvar/variation/1011851/ * Inversion * https://www.ncbi.nlm.nih.gov/clinvar/variation/268016/ * https://www.ncbi.nlm.nih.gov/clinvar/variation/90611/ * Translocation * https://www.ncbi.nlm.nih.gov/clinvar/variation/267959/ * https://www.ncbi.nlm.nih.gov/clinvar/variation/267873/ * https://www.ncbi.nlm.nih.gov/clinvar/variation/1012364/ * copy number gain * https://www.ncbi.nlm.nih.gov/clinvar/variation/523250/ * https://www.ncbi.nlm.nih.gov/clinvar/variation/870516/ * copy number loss * https://www.ncbi.nlm.nih.gov/clinvar/variation/1047901/ * https://www.ncbi.nlm.nih.gov/clinvar/variation/625801/ * Complex * https://www.ncbi.nlm.nih.gov/clinvar/variation/267835/ * https://www.ncbi.nlm.nih.gov/clinvar/variation/585332/ ### Appendix A: Marcos' questions * What do the HGVS parser numbers mean? * This is the number of records which had at least one HGVS descriptor for which the specified parser was able to extract _some_ information. For the official parser this means not throwing an exception; for our parser this means returning some non-`None` properties (though note our parser was originally written for the repeat expansion pipeline). * What's the total number of HGVS we can parse with either parser? * added to the above chart. * From the variants with cytogenetic location, how many did not have any of the other descriptors, if any? * see below ``` plt.figure(figsize=(10,7)) plt.title('Variant Descriptors (no functional consequences and incomplete coordinates)') plt.bar(exclusive_counts.keys(), exclusive_counts.values()) exclusive_counts ``` ### Appendix B: More HGVS parsing exploration HGVS python library [doesn't support ranges](https://github.com/biocommons/hgvs/issues/225). [VEP API](https://rest.ensembl.org/#VEP) has some limited support for HGVS. 
``` def try_to_parse(hgvs): try: parser.parse_hgvs_variant(hgvs) print(hgvs, 'SUCCESS') except: print(hgvs, 'FAILED') try_to_parse('NC_000011.10:g.(?_17605796)_(17612832_?)del') try_to_parse('NC_000011.10:g.(17605790_17605796)_(17612832_1761283)del') try_to_parse('NC_000011.10:g.17605796_17612832del') try_to_parse('NC_000011.10:g.?_17612832del') def try_to_vep(hgvs): safe_hgvs = urllib.parse.quote(hgvs) vep_url = f'https://rest.ensembl.org/vep/human/hgvs/{safe_hgvs}?content-type=application/json' resp = requests.get(vep_url) print(resp.json()) try_to_vep('NC_000011.10:g.(?_17605796)_(17612832_?)del') try_to_vep('NC_000011.10:g.(17605790_17605796)_(17612832_1761283)del') try_to_vep('NC_000011.10:g.17605796_17612832del') try_to_vep('NC_000011.10:g.?_17612832del') ```
github_jupyter
# GPU ``` gpu_info = !nvidia-smi gpu_info = '\n'.join(gpu_info) print(gpu_info) ``` # CFG ``` CONFIG_NAME = 'config41.yml' debug = False from google.colab import drive, auth # ドライブのマウント drive.mount('/content/drive') # Google Cloudの権限設定 auth.authenticate_user() def get_github_secret(): import json with open('/content/drive/MyDrive/config/github.json') as f: github_config = json.load(f) return github_config github_config = get_github_secret() ! rm -r kaggle-cassava user_name = github_config["user_name"] password = github_config["password"] ! git clone https://{user_name}:{password}@github.com/raijin0704/kaggle-cassava.git import sys sys.path.append('./kaggle-cassava') from src.utils.envs.main import create_env env_dict = create_env() env_dict # ==================================================== # CFG # ==================================================== import yaml CONFIG_PATH = f'./kaggle-cassava/config/{CONFIG_NAME}' with open(CONFIG_PATH) as f: config = yaml.load(f) INFO = config['info'] TAG = config['tag'] CFG = config['cfg'] DATA_PATH = env_dict["data_path"] env = env_dict["env"] NOTEBOOK_PATH = env_dict["notebook_dir"] OUTPUT_DIR = env_dict["output_dir"] TITLE = env_dict["title"] CFG['train'] = True CFG['inference'] = False CFG['debug'] = debug if CFG['debug']: CFG['epochs'] = 1 # 環境変数 import os os.environ["GCLOUD_PROJECT"] = INFO['PROJECT_ID'] # 間違ったバージョンを実行しないかチェック # assert INFO['TITLE'] == TITLE, f'{TITLE}, {INFO["TITLE"]}' TITLE = INFO["TITLE"] import os if env=='colab': !rm -r /content/input ! cp /content/drive/Shareddrives/便利用/kaggle/cassava/input.zip /content/input.zip ! unzip input.zip > /dev/null ! rm input.zip train_num = len(os.listdir(DATA_PATH+"/train_images")) assert train_num == 21397 ``` # install apex ``` if CFG['apex']: try: import apex except Exception: ! git clone https://github.com/NVIDIA/apex.git % cd apex !pip install --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" . %cd .. 
``` # Library ``` # ==================================================== # Library # ==================================================== import os import datetime import math import time import random import glob import shutil from pathlib import Path from contextlib import contextmanager from collections import defaultdict, Counter import scipy as sp import numpy as np import pandas as pd from matplotlib import pyplot as plt import seaborn as sns from sklearn import preprocessing from sklearn.metrics import accuracy_score from sklearn.model_selection import StratifiedKFold from tqdm.auto import tqdm from functools import partial import cv2 from PIL import Image import torch import torch.nn as nn import torch.nn.functional as F from torch.optim import Adam, SGD import torchvision.models as models from torch.nn.parameter import Parameter from torch.utils.data import DataLoader, Dataset from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CosineAnnealingLR, ReduceLROnPlateau from albumentations import ( Compose, OneOf, Normalize, Resize, RandomResizedCrop, RandomCrop, HorizontalFlip, VerticalFlip, RandomBrightness, RandomContrast, RandomBrightnessContrast, Rotate, ShiftScaleRotate, Cutout, IAAAdditiveGaussianNoise, Transpose, CenterCrop ) from albumentations.pytorch import ToTensorV2 from albumentations import ImageOnlyTransform import timm import mlflow import warnings warnings.filterwarnings('ignore') if CFG['apex']: from apex import amp if CFG['debug']: device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') else: device = torch.device('cuda') from src.utils.logger import init_logger from src.utils.utils import seed_torch, EarlyStopping from src.utils.loss.bi_tempered_logistic_loss import bi_tempered_logistic_loss from src.utils.augments.randaugment import RandAugment from src.utils.augments.augmix import RandomAugMix start_time = datetime.datetime.now() start_time_str = start_time.strftime('%m%d%H%M') ``` # Directory settings ``` # 
==================================================== # Directory settings # ==================================================== if os.path.exists(OUTPUT_DIR): shutil.rmtree(OUTPUT_DIR) if not os.path.exists(OUTPUT_DIR): os.makedirs(OUTPUT_DIR) ``` # save basic files ``` # with open(f'{OUTPUT_DIR}/{start_time_str}_TAG.json', 'w') as f: # json.dump(TAG, f, indent=4) # with open(f'{OUTPUT_DIR}/{start_time_str}_CFG.json', 'w') as f: # json.dump(CFG, f, indent=4) import shutil notebook_path = f'{OUTPUT_DIR}/{start_time_str}_{TITLE}.ipynb' shutil.copy2(NOTEBOOK_PATH, notebook_path) ``` # Data Loading ``` train = pd.read_csv(f'{DATA_PATH}/train.csv') test = pd.read_csv(f'{DATA_PATH}/sample_submission.csv') label_map = pd.read_json(f'{DATA_PATH}/label_num_to_disease_map.json', orient='index') if CFG['debug']: train = train.sample(n=1000, random_state=CFG['seed']).reset_index(drop=True) ``` # Utils ``` # ==================================================== # Utils # ==================================================== def get_score(y_true, y_pred): return accuracy_score(y_true, y_pred) logger_path = OUTPUT_DIR+f'{start_time_str}_train.log' LOGGER = init_logger(logger_path) seed_torch(seed=CFG['seed']) def remove_glob(pathname, recursive=True): for p in glob.glob(pathname, recursive=recursive): if os.path.isfile(p): os.remove(p) def rand_bbox(size, lam): W = size[2] H = size[3] cut_rat = np.sqrt(1. 
- lam) cut_w = np.int(W * cut_rat) cut_h = np.int(H * cut_rat) # uniform cx = np.random.randint(W) cy = np.random.randint(H) bbx1 = np.clip(cx - cut_w // 2, 0, W) bby1 = np.clip(cy - cut_h // 2, 0, H) bbx2 = np.clip(cx + cut_w // 2, 0, W) bby2 = np.clip(cy + cut_h // 2, 0, H) return bbx1, bby1, bbx2, bby2 ``` # CV split ``` folds = train.copy() Fold = StratifiedKFold(n_splits=CFG['n_fold'], shuffle=True, random_state=CFG['seed']) for n, (train_index, val_index) in enumerate(Fold.split(folds, folds[CFG['target_col']])): folds.loc[val_index, 'fold'] = int(n) folds['fold'] = folds['fold'].astype(int) print(folds.groupby(['fold', CFG['target_col']]).size()) ``` # Dataset ``` # ==================================================== # Dataset # ==================================================== class TrainDataset(Dataset): def __init__(self, df, transform=None): self.df = df self.file_names = df['image_id'].values self.labels = df['label'].values self.transform = transform def __len__(self): return len(self.df) def __getitem__(self, idx): file_name = self.file_names[idx] file_path = f'{DATA_PATH}/train_images/{file_name}' image = cv2.imread(file_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if self.transform: augmented = self.transform(image=image) image = augmented['image'] label = torch.tensor(self.labels[idx]).long() return image, label class TestDataset(Dataset): def __init__(self, df, transform=None): self.df = df self.file_names = df['image_id'].values self.transform = transform def __len__(self): return len(self.df) def __getitem__(self, idx): file_name = self.file_names[idx] file_path = f'{DATA_PATH}/test_images/{file_name}' image = cv2.imread(file_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if self.transform: augmented = self.transform(image=image) image = augmented['image'] return image # train_dataset = TrainDataset(train, transform=None) # for i in range(1): # image, label = train_dataset[i] # plt.imshow(image) # plt.title(f'label: {label}') # 
plt.show() ``` # Transforms ``` def _get_train_augmentations(aug_list): process = [] for aug in aug_list: if aug == 'Resize': process.append(Resize(CFG['size'], CFG['size'])) elif aug == 'RandomResizedCrop': process.append(RandomResizedCrop(CFG['size'], CFG['size'])) elif aug =='CenterCrop': process.append(CenterCrop(CFG['size'], CFG['size'])) elif aug == 'Transpose': process.append(Transpose(p=0.5)) elif aug == 'HorizontalFlip': process.append(HorizontalFlip(p=0.5)) elif aug == 'VerticalFlip': process.append(VerticalFlip(p=0.5)) elif aug == 'ShiftScaleRotate': process.append(ShiftScaleRotate(p=0.5)) elif aug == 'RandomBrightness': process.append(RandomBrightness(limit=(-0.2,0.2), p=1)) elif aug == 'Cutout': process.append(Cutout(max_h_size=CFG['CutoutSize'], max_w_size=CFG['CutoutSize'], p=0.5)) elif aug == 'RandAugment': process.append(RandAugment(CFG['RandAugmentN'], CFG['RandAugmentM'], p=0.5)) elif aug == 'RandomAugMix': process.append(RandomAugMix(severity=CFG['AugMixSeverity'], width=CFG['AugMixWidth'], alpha=CFG['AugMixAlpha'], p=0.5)) elif aug == 'Normalize': process.append(Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], )) elif aug in ['mixup', 'cutmix', 'fmix']: pass else: raise ValueError(f'{aug} is not suitable') process.append(ToTensorV2()) return process def _get_valid_augmentations(aug_list): process = [] for aug in aug_list: if aug == 'Resize': process.append(Resize(CFG['size'], CFG['size'])) elif aug == 'RandomResizedCrop': process.append(OneOf( [RandomResizedCrop(CFG['size'], CFG['size'], p=0.5), Resize(CFG['size'], CFG['size'], p=0.5)], p=1)) elif aug =='CenterCrop': process.append(OneOf( [CenterCrop(CFG['size'], CFG['size'], p=0.5), Resize(CFG['size'], CFG['size'], p=0.5)], p=1)) # process.append( # CenterCrop(CFG['size'], CFG['size'], p=1.)) elif aug == 'Transpose': process.append(Transpose(p=0.5)) elif aug == 'HorizontalFlip': process.append(HorizontalFlip(p=0.5)) elif aug == 'VerticalFlip': 
process.append(VerticalFlip(p=0.5)) elif aug == 'ShiftScaleRotate': process.append(ShiftScaleRotate(p=0.5)) elif aug == 'RandomBrightness': process.append(RandomBrightness(limit=(-0.2,0.2), p=1)) elif aug == 'Cutout': process.append(Cutout(max_h_size=CFG['CutoutSize'], max_w_size=CFG['CutoutSize'], p=0.5)) elif aug == 'RandAugment': process.append(RandAugment(CFG['RandAugmentN'], CFG['RandAugmentM'], p=0.5)) elif aug == 'RandomAugMix': process.append(RandomAugMix(severity=CFG['AugMixSeverity'], width=CFG['AugMixWidth'], alpha=CFG['AugMixAlpha'], p=0.5)) elif aug == 'Normalize': process.append(Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], )) elif aug in ['mixup', 'cutmix', 'fmix']: pass else: raise ValueError(f'{aug} is not suitable') process.append(ToTensorV2()) return process # ==================================================== # Transforms # ==================================================== def get_transforms(*, data): if data == 'train': return Compose( _get_train_augmentations(TAG['augmentation']) ) elif data == 'valid': try: augmentations = TAG['valid_augmentation'] except KeyError: augmentations = ['Resize', 'Normalize'] return Compose( _get_valid_augmentations(augmentations) ) num_fig = 5 train_dataset = TrainDataset(train, transform=get_transforms(data='train')) valid_dataset = TrainDataset(train, transform=get_transforms(data='valid')) origin_dataset = TrainDataset(train, transform=None) fig, ax = plt.subplots(num_fig, 3, figsize=(10, num_fig*3)) for j, dataset in enumerate([train_dataset, valid_dataset, origin_dataset]): for i in range(num_fig): image, label = dataset[i] if j < 2: ax[i,j].imshow(image.transpose(0,2).transpose(0,1)) else: ax[i,j].imshow(image) ax[i,j].set_title(f'label: {label}') ``` # MODEL ``` # ==================================================== # MODEL # ==================================================== class CustomModel(nn.Module): def __init__(self, model_name, pretrained=False): super().__init__() 
self.model = timm.create_model(model_name, pretrained=pretrained) if hasattr(self.model, 'classifier'): n_features = self.model.classifier.in_features self.model.classifier = nn.Linear(n_features, CFG['target_size']) elif hasattr(self.model, 'fc'): n_features = self.model.fc.in_features self.model.fc = nn.Linear(n_features, CFG['target_size']) elif hasattr(self.model, 'head'): n_features = self.model.head.in_features self.model.head = nn.Linear(n_features, CFG['target_size']) def forward(self, x): x = self.model(x) return x model = CustomModel(model_name=TAG['model_name'], pretrained=False) train_dataset = TrainDataset(train, transform=get_transforms(data='train')) train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=0, pin_memory=True, drop_last=True) for image, label in train_loader: output = model(image) print(output) break ``` # Helper functions ``` # ==================================================== # Helper functions # ==================================================== class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def asMinutes(s): m = math.floor(s / 60) s -= m * 60 return '%dm %ds' % (m, s) def timeSince(since, percent): now = time.time() s = now - since es = s / (percent) rs = es - s return '%s (remain %s)' % (asMinutes(s), asMinutes(rs)) # ==================================================== # loss # ==================================================== def get_loss(criterion, y_preds, labels): if TAG['criterion']=='CrossEntropyLoss': loss = criterion(y_preds, labels) elif TAG['criterion'] == 'bi_tempered_logistic_loss': loss = criterion(y_preds, labels, t1=CFG['bi_tempered_loss_t1'], t2=CFG['bi_tempered_loss_t2']) return loss # 
==================================================== # Helper functions # ==================================================== def train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() scores = AverageMeter() # switch to train mode model.train() start = end = time.time() global_step = 0 for step, (images, labels) in enumerate(train_loader): # measure data loading time data_time.update(time.time() - end) images = images.to(device) labels = labels.to(device) batch_size = labels.size(0) r = np.random.rand(1) is_aug = r < 0.5 # probability of augmentation if is_aug & ('cutmix' in TAG['augmentation']) & (epoch+1>=CFG['heavy_aug_start_epoch']): # generate mixed sample # inference from https://github.com/clovaai/CutMix-PyTorch/blob/master/train.py lam = np.random.beta(CFG['CutmixAlpha'], CFG['CutmixAlpha']) rand_index = torch.randperm(images.size()[0]).to(device) labels_a = labels labels_b = labels[rand_index] bbx1, bby1, bbx2, bby2 = rand_bbox(images.size(), lam) images[:, :, bbx1:bbx2, bby1:bby2] = images[rand_index, :, bbx1:bbx2, bby1:bby2] # adjust lambda to exactly match pixel ratio lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (images.size()[-1] * images.size()[-2])) # compute output y_preds = model(images) loss = get_loss(criterion, y_preds, labels_a) * lam + \ get_loss(criterion, y_preds, labels_b) * (1. 
- lam) else: y_preds = model(images) loss = get_loss(criterion, y_preds, labels) # record loss losses.update(loss.item(), batch_size) if CFG['gradient_accumulation_steps'] > 1: loss = loss / CFG['gradient_accumulation_steps'] if CFG['apex']: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() # clear memory del loss, y_preds torch.cuda.empty_cache() grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), CFG['max_grad_norm']) if (step + 1) % CFG['gradient_accumulation_steps'] == 0: optimizer.step() optimizer.zero_grad() global_step += 1 # measure elapsed time batch_time.update(time.time() - end) end = time.time() if step % CFG['print_freq'] == 0 or step == (len(train_loader)-1): print('Epoch: [{0}][{1}/{2}] ' 'Data {data_time.val:.3f} ({data_time.avg:.3f}) ' 'Elapsed {remain:s} ' 'Loss: {loss.val:.4f}({loss.avg:.4f}) ' 'Grad: {grad_norm:.4f} ' #'LR: {lr:.6f} ' .format( epoch+1, step, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, remain=timeSince(start, float(step+1)/len(train_loader)), grad_norm=grad_norm, #lr=scheduler.get_lr()[0], )) return losses.avg def valid_fn(valid_loader, model, criterion, device): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() scores = AverageMeter() # switch to evaluation mode model.eval() preds = [] start = end = time.time() for step, (images, labels) in enumerate(valid_loader): # measure data loading time data_time.update(time.time() - end) images = images.to(device) labels = labels.to(device) batch_size = labels.size(0) # compute loss with torch.no_grad(): y_preds = model(images) loss = get_loss(criterion, y_preds, labels) losses.update(loss.item(), batch_size) # record accuracy preds.append(y_preds.softmax(1).to('cpu').numpy()) if CFG['gradient_accumulation_steps'] > 1: loss = loss / CFG['gradient_accumulation_steps'] # measure elapsed time batch_time.update(time.time() - end) end = time.time() if step % CFG['print_freq'] 
== 0 or step == (len(valid_loader)-1): print('EVAL: [{0}/{1}] ' 'Data {data_time.val:.3f} ({data_time.avg:.3f}) ' 'Elapsed {remain:s} ' 'Loss: {loss.val:.4f}({loss.avg:.4f}) ' .format( step, len(valid_loader), batch_time=batch_time, data_time=data_time, loss=losses, remain=timeSince(start, float(step+1)/len(valid_loader)), )) predictions = np.concatenate(preds) return losses.avg, predictions def inference(model, states, test_loader, device): model.to(device) tk0 = tqdm(enumerate(test_loader), total=len(test_loader)) probs = [] for i, (images) in tk0: images = images.to(device) avg_preds = [] for state in states: # model.load_state_dict(state['model']) model.load_state_dict(state) model.eval() with torch.no_grad(): y_preds = model(images) avg_preds.append(y_preds.softmax(1).to('cpu').numpy()) avg_preds = np.mean(avg_preds, axis=0) probs.append(avg_preds) probs = np.concatenate(probs) return probs ``` # Train loop ``` # ==================================================== # scheduler # ==================================================== def get_scheduler(optimizer): if TAG['scheduler']=='ReduceLROnPlateau': scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=CFG['factor'], patience=CFG['patience'], verbose=True, eps=CFG['eps']) elif TAG['scheduler']=='CosineAnnealingLR': scheduler = CosineAnnealingLR(optimizer, T_max=CFG['T_max'], eta_min=CFG['min_lr'], last_epoch=-1) elif TAG['scheduler']=='CosineAnnealingWarmRestarts': scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=CFG['T_0'], T_mult=1, eta_min=CFG['min_lr'], last_epoch=-1) return scheduler # ==================================================== # criterion # ==================================================== def get_criterion(): if TAG['criterion']=='CrossEntropyLoss': criterion = nn.CrossEntropyLoss() elif TAG['criterion'] == 'bi_tempered_logistic_loss': criterion = bi_tempered_logistic_loss return criterion # ==================================================== # Train loop # 
==================================================== def train_loop(folds, fold): LOGGER.info(f"========== fold: {fold} training ==========") if not CFG['debug']: mlflow.set_tag('running.fold', str(fold)) # ==================================================== # loader # ==================================================== trn_idx = folds[folds['fold'] != fold].index val_idx = folds[folds['fold'] == fold].index train_folds = folds.loc[trn_idx].reset_index(drop=True) valid_folds = folds.loc[val_idx].reset_index(drop=True) train_dataset = TrainDataset(train_folds, transform=get_transforms(data='train')) valid_dataset = TrainDataset(valid_folds, transform=get_transforms(data='valid')) train_loader = DataLoader(train_dataset, batch_size=CFG['batch_size'], shuffle=True, num_workers=CFG['num_workers'], pin_memory=True, drop_last=True) valid_loader = DataLoader(valid_dataset, batch_size=CFG['batch_size'], shuffle=False, num_workers=CFG['num_workers'], pin_memory=True, drop_last=False) # ==================================================== # model & optimizer & criterion # ==================================================== best_model_path = OUTPUT_DIR+f'{TAG["model_name"]}_fold{fold}_best.pth' latest_model_path = OUTPUT_DIR+f'{TAG["model_name"]}_fold{fold}_latest.pth' model = CustomModel(TAG['model_name'], pretrained=True) model.to(device) # # 学習途中の重みがあれば読み込み # if os.path.isfile(latest_model_path): # state_latest = torch.load(latest_model_path) # state_best = torch.load(best_model_path) # model.load_state_dict(state_latest['model']) # epoch_start = state_latest['epoch']+1 # # er_best_score = state_latest['score'] # er_counter = state_latest['counter'] # er_best_score = state_best['best_score'] # if 'val_loss_history' in state_latest.keys(): # val_loss_history = state_latest['val_loss_history'] # else: # val_loss_history = [] # LOGGER.info(f'Load training model in epoch:{epoch_start}, best_score:{er_best_score:.3f}, counter:{er_counter}') # # 学習済みモデルを再学習する場合 # elif 
os.path.isfile(best_model_path): if os.path.isfile(best_model_path): state_best = torch.load(best_model_path) model.load_state_dict(state_best['model']) epoch_start = 0 # epochは0からカウントしなおす er_counter = 0 er_best_score = state_best['best_score'] val_loss_history = [] # 過去のval_lossも使用しない LOGGER.info(f'Retrain model, best_score:{er_best_score:.3f}') else: epoch_start = 0 er_best_score = None er_counter = 0 val_loss_history = [] optimizer = Adam(model.parameters(), lr=CFG['lr'], weight_decay=CFG['weight_decay'], amsgrad=False) scheduler = get_scheduler(optimizer) criterion = get_criterion() # 再開時のepochまでschedulerを進める # assert len(range(epoch_start)) == len(val_loss_history) for _, val_loss in zip(range(epoch_start), val_loss_history): if isinstance(scheduler, ReduceLROnPlateau): scheduler.step(val_loss) elif isinstance(scheduler, CosineAnnealingLR): scheduler.step() elif isinstance(scheduler, CosineAnnealingWarmRestarts): scheduler.step() # ==================================================== # apex # ==================================================== if CFG['apex']: model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0) # ==================================================== # loop # ==================================================== # best_score = 0. 
# best_loss = np.inf early_stopping = EarlyStopping( patience=CFG['early_stopping_round'], eps=CFG['early_stopping_eps'], verbose=True, save_path=best_model_path, counter=er_counter, best_score=er_best_score, val_loss_history = val_loss_history, save_latest_path=latest_model_path) for epoch in range(epoch_start, CFG['epochs']): start_time = time.time() # train avg_loss = train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device) # eval avg_val_loss, preds = valid_fn(valid_loader, model, criterion, device) valid_labels = valid_folds[CFG['target_col']].values # scoring score = get_score(valid_labels, preds.argmax(1)) # get learning rate if hasattr(scheduler, 'get_last_lr'): last_lr = scheduler.get_last_lr()[0] else: # ReduceLROnPlateauには関数get_last_lrがない last_lr = optimizer.param_groups[0]['lr'] # log mlflow if not CFG['debug']: mlflow.log_metric(f"fold{fold} avg_train_loss", avg_loss, step=epoch) mlflow.log_metric(f"fold{fold} avg_valid_loss", avg_val_loss, step=epoch) mlflow.log_metric(f"fold{fold} score", score, step=epoch) mlflow.log_metric(f"fold{fold} lr", last_lr, step=epoch) # early stopping early_stopping(avg_val_loss, model, preds, epoch) if early_stopping.early_stop: print(f'Epoch {epoch+1} - early stopping') break if isinstance(scheduler, ReduceLROnPlateau): scheduler.step(avg_val_loss) elif isinstance(scheduler, CosineAnnealingLR): scheduler.step() elif isinstance(scheduler, CosineAnnealingWarmRestarts): scheduler.step() elapsed = time.time() - start_time LOGGER.info(f'Epoch {epoch+1} - avg_train_loss: {avg_loss:.4f} avg_val_loss: {avg_val_loss:.4f} time: {elapsed:.0f}s') LOGGER.info(f'Epoch {epoch+1} - Accuracy: {score}') # log mlflow if not CFG['debug']: mlflow.log_artifact(best_model_path) if os.path.isfile(latest_model_path): mlflow.log_artifact(latest_model_path) check_point = torch.load(best_model_path) valid_folds[[str(c) for c in range(5)]] = check_point['preds'] valid_folds['preds'] = check_point['preds'].argmax(1) return 
valid_folds def get_trained_fold_preds(folds, fold, best_model_path): val_idx = folds[folds['fold'] == fold].index valid_folds = folds.loc[val_idx].reset_index(drop=True) check_point = torch.load(best_model_path) valid_folds[[str(c) for c in range(5)]] = check_point['preds'] valid_folds['preds'] = check_point['preds'].argmax(1) return valid_folds def save_confusion_matrix(oof): from sklearn.metrics import confusion_matrix cm_ = confusion_matrix(oof['label'], oof['preds'], labels=[0,1,2,3,4]) label_name = ['0 (CBB)', '1 (CBSD)', '2 (CGM)', '3 (CMD)', '4 (Healthy)'] cm = pd.DataFrame(cm_, index=label_name, columns=label_name) cm.to_csv(OUTPUT_DIR+'oof_confusion_matrix.csv', index=True) # ==================================================== # main # ==================================================== def get_result(result_df): preds = result_df['preds'].values labels = result_df[CFG['target_col']].values score = get_score(labels, preds) LOGGER.info(f'Score: {score:<.5f}') return score def main(): """ Prepare: 1.train 2.test 3.submission 4.folds """ if CFG['train']: # train oof_df = pd.DataFrame() for fold in range(CFG['n_fold']): best_model_path = OUTPUT_DIR+f'{TAG["model_name"]}_fold{fold}_best.pth' if fold in CFG['trn_fold']: _oof_df = train_loop(folds, fold) elif os.path.exists(best_model_path): _oof_df = get_trained_fold_preds(folds, fold, best_model_path) else: _oof_df = None if _oof_df is not None: oof_df = pd.concat([oof_df, _oof_df]) LOGGER.info(f"========== fold: {fold} result ==========") _ = get_result(_oof_df) # CV result LOGGER.info(f"========== CV ==========") score = get_result(oof_df) # save result oof_df.to_csv(OUTPUT_DIR+'oof_df.csv', index=False) save_confusion_matrix(oof_df) # log mlflow if not CFG['debug']: mlflow.log_metric('oof score', score) mlflow.delete_tag('running.fold') mlflow.log_artifact(OUTPUT_DIR+'oof_df.csv') if CFG['inference']: # inference model = CustomModel(TAG['model_name'], pretrained=False) states = 
[torch.load(OUTPUT_DIR+f'{TAG["model_name"]}_fold{fold}_best.pth') for fold in CFG['trn_fold']] test_dataset = TestDataset(test, transform=get_transforms(data='valid')) test_loader = DataLoader(test_dataset, batch_size=CFG['batch_size'], shuffle=False, num_workers=CFG['num_workers'], pin_memory=True) predictions = inference(model, states, test_loader, device) # submission test['label'] = predictions.argmax(1) test[['image_id', 'label']].to_csv(OUTPUT_DIR+'submission.csv', index=False) ``` # rerun ``` def _load_save_point(run_id): # どこで中断したか取得 stop_fold = int(mlflow.get_run(run_id=run_id).to_dictionary()['data']['tags']['running.fold']) # 学習対象のfoldを変更 CFG['trn_fold'] = [fold for fold in CFG['trn_fold'] if fold>=stop_fold] # 学習済みモデルがあれば.pthファイルを取得(学習中も含む) client = mlflow.tracking.MlflowClient() artifacts = [artifact for artifact in client.list_artifacts(run_id) if ".pth" in artifact.path] for artifact in artifacts: client.download_artifacts(run_id, artifact.path, OUTPUT_DIR) def check_have_run(): results = mlflow.search_runs(INFO['EXPERIMENT_ID']) run_id_list = results[results['tags.mlflow.runName']==TITLE]['run_id'].tolist() # 初めて実行する場合 if len(run_id_list) == 0: run_id = None # 既に実行されている場合 else: assert len(run_id_list)==1 run_id = run_id_list[0] _load_save_point(run_id) return run_id def push_github(): ! cp {NOTEBOOK_PATH} kaggle-cassava/notebook/{TITLE}.ipynb !git config --global user.email "raijin.1059@gmail.com" ! 
git config --global user.name "Raijin Shibata" !cd kaggle-cassava ;git add .; git commit -m {TITLE}; git pull; git remote set-url origin https://{user_name}:{password}@github.com/raijin0704/kaggle-cassava.git; git push origin master def _load_save_point_copy(run_id): # # どこで中断したか取得 # stop_fold = int(mlflow.get_run(run_id=run_id).to_dictionary()['data']['tags']['running.fold']) # # 学習対象のfoldを変更 # CFG['trn_fold'] = [fold for fold in CFG['trn_fold'] if fold>=stop_fold] # 学習済みモデルがあれば.pthファイルを取得(学習中も含む) client = mlflow.tracking.MlflowClient() artifacts = [artifact for artifact in client.list_artifacts(run_id) if ".pth" in artifact.path] for artifact in artifacts: client.download_artifacts(run_id, artifact.path, OUTPUT_DIR) def check_have_run_copy(copy_from): results = mlflow.search_runs(INFO['EXPERIMENT_ID']) run_id_list = results[results['tags.mlflow.runName']==copy_from]['run_id'].tolist() # 初めて実行する場合 if len(run_id_list) == 0: run_id = None # 既に実行されている場合 else: assert len(run_id_list)==1 run_id = run_id_list[0] _load_save_point_copy(run_id) return run_id if __name__ == '__main__': if CFG['debug']: mlflow.set_tracking_uri(INFO['TRACKING_URI']) # 指定したrun_nameの学習済みモデルを取得 _ = check_have_run_copy(TAG['trained']) main() else: mlflow.set_tracking_uri(INFO['TRACKING_URI']) mlflow.set_experiment('single model') # 指定したrun_nameの学習済みモデルを取得 _ = check_have_run_copy(TAG['trained']) # 既に実行済みの場合は続きから実行する run_id = check_have_run() with mlflow.start_run(run_id=run_id, run_name=TITLE): if run_id is None: mlflow.log_artifact(CONFIG_PATH) mlflow.log_param('device', device) mlflow.set_tag('env', env) mlflow.set_tags(TAG) mlflow.log_params(CFG) mlflow.log_artifact(notebook_path) main() mlflow.log_artifacts(OUTPUT_DIR) # remove_glob(f'{OUTPUT_DIR}/*latest.pth') push_github() if env=="kaggle": shutil.copy2(CONFIG_PATH, f'{OUTPUT_DIR}/{CONFIG_NAME}') ! 
rm -r kaggle-cassava elif env=="colab": shutil.copytree(OUTPUT_DIR, f'{INFO["SHARE_DRIVE_PATH"]}/{TITLE}') shutil.copy2(CONFIG_PATH, f'{INFO["SHARE_DRIVE_PATH"]}/{TITLE}/{CONFIG_NAME}') ```
github_jupyter
# Transfer Learning Template ``` %load_ext autoreload %autoreload 2 %matplotlib inline import os, json, sys, time, random import numpy as np import torch from torch.optim import Adam from easydict import EasyDict import matplotlib.pyplot as plt from steves_models.steves_ptn import Steves_Prototypical_Network from steves_utils.lazy_iterable_wrapper import Lazy_Iterable_Wrapper from steves_utils.iterable_aggregator import Iterable_Aggregator from steves_utils.ptn_train_eval_test_jig import PTN_Train_Eval_Test_Jig from steves_utils.torch_sequential_builder import build_sequential from steves_utils.torch_utils import get_dataset_metrics, ptn_confusion_by_domain_over_dataloader from steves_utils.utils_v2 import (per_domain_accuracy_from_confusion, get_datasets_base_path) from steves_utils.PTN.utils import independent_accuracy_assesment from torch.utils.data import DataLoader from steves_utils.stratified_dataset.episodic_accessor import Episodic_Accessor_Factory from steves_utils.ptn_do_report import ( get_loss_curve, get_results_table, get_parameters_table, get_domain_accuracies, ) from steves_utils.transforms import get_chained_transform ``` # Allowed Parameters These are allowed parameters, not defaults Each of these values need to be present in the injected parameters (the notebook will raise an exception if they are not present) Papermill uses the cell tag "parameters" to inject the real parameters below this cell. 
Enable tags to see what I mean ``` required_parameters = { "experiment_name", "lr", "device", "seed", "dataset_seed", "n_shot", "n_query", "n_way", "train_k_factor", "val_k_factor", "test_k_factor", "n_epoch", "patience", "criteria_for_best", "x_net", "datasets", "torch_default_dtype", "NUM_LOGS_PER_EPOCH", "BEST_MODEL_PATH", "x_shape", } from steves_utils.CORES.utils import ( ALL_NODES, ALL_NODES_MINIMUM_1000_EXAMPLES, ALL_DAYS ) from steves_utils.ORACLE.utils_v2 import ( ALL_DISTANCES_FEET_NARROWED, ALL_RUNS, ALL_SERIAL_NUMBERS, ) standalone_parameters = {} standalone_parameters["experiment_name"] = "STANDALONE PTN" standalone_parameters["lr"] = 0.001 standalone_parameters["device"] = "cuda" standalone_parameters["seed"] = 1337 standalone_parameters["dataset_seed"] = 1337 standalone_parameters["n_way"] = 8 standalone_parameters["n_shot"] = 3 standalone_parameters["n_query"] = 2 standalone_parameters["train_k_factor"] = 1 standalone_parameters["val_k_factor"] = 2 standalone_parameters["test_k_factor"] = 2 standalone_parameters["n_epoch"] = 50 standalone_parameters["patience"] = 10 standalone_parameters["criteria_for_best"] = "source_loss" standalone_parameters["datasets"] = [ { "labels": ALL_SERIAL_NUMBERS, "domains": ALL_DISTANCES_FEET_NARROWED, "num_examples_per_domain_per_label": 100, "pickle_path": os.path.join(get_datasets_base_path(), "oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl"), "source_or_target_dataset": "source", "x_transforms": ["unit_mag", "minus_two"], "episode_transforms": [], "domain_prefix": "ORACLE_" }, { "labels": ALL_NODES, "domains": ALL_DAYS, "num_examples_per_domain_per_label": 100, "pickle_path": os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"), "source_or_target_dataset": "target", "x_transforms": ["unit_power", "times_zero"], "episode_transforms": [], "domain_prefix": "CORES_" } ] standalone_parameters["torch_default_dtype"] = "torch.float32" standalone_parameters["x_net"] = [ {"class": "nnReshape", 
"kargs": {"shape":[-1, 1, 2, 256]}}, {"class": "Conv2d", "kargs": { "in_channels":1, "out_channels":256, "kernel_size":(1,7), "bias":False, "padding":(0,3), },}, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features":256}}, {"class": "Conv2d", "kargs": { "in_channels":256, "out_channels":80, "kernel_size":(2,7), "bias":True, "padding":(0,3), },}, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features":80}}, {"class": "Flatten", "kargs": {}}, {"class": "Linear", "kargs": {"in_features": 80*256, "out_features": 256}}, # 80 units per IQ pair {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm1d", "kargs": {"num_features":256}}, {"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}}, ] # Parameters relevant to results # These parameters will basically never need to change standalone_parameters["NUM_LOGS_PER_EPOCH"] = 10 standalone_parameters["BEST_MODEL_PATH"] = "./best_model.pth" # Parameters parameters = { "experiment_name": "tl_3A:cores+wisig -> oracle.run1.framed", "device": "cuda", "lr": 0.001, "x_shape": [2, 200], "n_shot": 3, "n_query": 2, "train_k_factor": 3, "val_k_factor": 2, "test_k_factor": 2, "torch_default_dtype": "torch.float32", "n_epoch": 50, "patience": 3, "criteria_for_best": "target_loss", "x_net": [ {"class": "nnReshape", "kargs": {"shape": [-1, 1, 2, 200]}}, { "class": "Conv2d", "kargs": { "in_channels": 1, "out_channels": 256, "kernel_size": [1, 7], "bias": False, "padding": [0, 3], }, }, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features": 256}}, { "class": "Conv2d", "kargs": { "in_channels": 256, "out_channels": 80, "kernel_size": [2, 7], "bias": True, "padding": [0, 3], }, }, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features": 80}}, {"class": "Flatten", "kargs": {}}, {"class": "Linear", "kargs": {"in_features": 16000, "out_features": 256}}, 
{"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm1d", "kargs": {"num_features": 256}}, {"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}}, ], "NUM_LOGS_PER_EPOCH": 10, "BEST_MODEL_PATH": "./best_model.pth", "n_way": 16, "datasets": [ { "labels": [ "1-10.", "1-11.", "1-15.", "1-16.", "1-17.", "1-18.", "1-19.", "10-4.", "10-7.", "11-1.", "11-14.", "11-17.", "11-20.", "11-7.", "13-20.", "13-8.", "14-10.", "14-11.", "14-14.", "14-7.", "15-1.", "15-20.", "16-1.", "16-16.", "17-10.", "17-11.", "17-2.", "19-1.", "19-16.", "19-19.", "19-20.", "19-3.", "2-10.", "2-11.", "2-17.", "2-18.", "2-20.", "2-3.", "2-4.", "2-5.", "2-6.", "2-7.", "2-8.", "3-13.", "3-18.", "3-3.", "4-1.", "4-10.", "4-11.", "4-19.", "5-5.", "6-15.", "7-10.", "7-14.", "8-18.", "8-20.", "8-3.", "8-8.", ], "domains": [1, 2, 3, 4, 5], "num_examples_per_domain_per_label": 100, "pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/cores.stratified_ds.2022A.pkl", "source_or_target_dataset": "source", "x_transforms": ["unit_power", "take_200"], "episode_transforms": [], "domain_prefix": "C_A_", }, { "labels": [ "1-10", "1-12", "1-14", "1-16", "1-18", "1-19", "1-8", "10-11", "10-17", "10-4", "10-7", "11-1", "11-10", "11-19", "11-20", "11-4", "11-7", "12-19", "12-20", "12-7", "13-14", "13-18", "13-19", "13-20", "13-3", "13-7", "14-10", "14-11", "14-12", "14-13", "14-14", "14-19", "14-20", "14-7", "14-8", "14-9", "15-1", "15-19", "15-6", "16-1", "16-16", "16-19", "16-20", "17-10", "17-11", "18-1", "18-10", "18-11", "18-12", "18-13", "18-14", "18-15", "18-16", "18-17", "18-19", "18-2", "18-20", "18-4", "18-5", "18-7", "18-8", "18-9", "19-1", "19-10", "19-11", "19-12", "19-13", "19-14", "19-15", "19-19", "19-2", "19-20", "19-3", "19-4", "19-6", "19-7", "19-8", "19-9", "2-1", "2-13", "2-15", "2-3", "2-4", "2-5", "2-6", "2-7", "2-8", "20-1", "20-12", "20-14", "20-15", "20-16", "20-18", "20-19", "20-20", "20-3", "20-4", "20-5", "20-7", "20-8", "3-1", "3-13", "3-18", "3-2", 
"3-8", "4-1", "4-10", "4-11", "5-1", "5-5", "6-1", "6-15", "6-6", "7-10", "7-11", "7-12", "7-13", "7-14", "7-7", "7-8", "7-9", "8-1", "8-13", "8-14", "8-18", "8-20", "8-3", "8-8", "9-1", "9-7", ], "domains": [1, 2, 3, 4], "num_examples_per_domain_per_label": 100, "pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/wisig.node3-19.stratified_ds.2022A.pkl", "source_or_target_dataset": "source", "x_transforms": ["unit_power", "take_200"], "episode_transforms": [], "domain_prefix": "W_A_", }, { "labels": [ "3123D52", "3123D65", "3123D79", "3123D80", "3123D54", "3123D70", "3123D7B", "3123D89", "3123D58", "3123D76", "3123D7D", "3123EFE", "3123D64", "3123D78", "3123D7E", "3124E4A", ], "domains": [32, 38, 8, 44, 14, 50, 20, 26], "num_examples_per_domain_per_label": 2000, "pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl", "source_or_target_dataset": "target", "x_transforms": ["unit_power", "take_200", "resample_20Msps_to_25Msps"], "episode_transforms": [], "domain_prefix": "ORACLE.run1_", }, ], "seed": 1337, "dataset_seed": 1337, } # Set this to True if you want to run this template directly STANDALONE = False if STANDALONE: print("parameters not injected, running with standalone_parameters") parameters = standalone_parameters if not 'parameters' in locals() and not 'parameters' in globals(): raise Exception("Parameter injection failed") #Use an easy dict for all the parameters p = EasyDict(parameters) if "x_shape" not in p: p.x_shape = [2,256] # Default to this if we dont supply x_shape supplied_keys = set(p.keys()) if supplied_keys != required_parameters: print("Parameters are incorrect") if len(supplied_keys - required_parameters)>0: print("Shouldn't have:", str(supplied_keys - required_parameters)) if len(required_parameters - supplied_keys)>0: print("Need to have:", str(required_parameters - supplied_keys)) raise RuntimeError("Parameters are incorrect") ################################### # Set the 
RNGs and make it all deterministic ################################### np.random.seed(p.seed) random.seed(p.seed) torch.manual_seed(p.seed) torch.use_deterministic_algorithms(True) ########################################### # The stratified datasets honor this ########################################### torch.set_default_dtype(eval(p.torch_default_dtype)) ################################### # Build the network(s) # Note: It's critical to do this AFTER setting the RNG ################################### x_net = build_sequential(p.x_net) start_time_secs = time.time() p.domains_source = [] p.domains_target = [] train_original_source = [] val_original_source = [] test_original_source = [] train_original_target = [] val_original_target = [] test_original_target = [] # global_x_transform_func = lambda x: normalize(x.to(torch.get_default_dtype()), "unit_power") # unit_power, unit_mag # global_x_transform_func = lambda x: normalize(x, "unit_power") # unit_power, unit_mag def add_dataset( labels, domains, pickle_path, x_transforms, episode_transforms, domain_prefix, num_examples_per_domain_per_label, source_or_target_dataset:str, iterator_seed=p.seed, dataset_seed=p.dataset_seed, n_shot=p.n_shot, n_way=p.n_way, n_query=p.n_query, train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor), ): if x_transforms == []: x_transform = None else: x_transform = get_chained_transform(x_transforms) if episode_transforms == []: episode_transform = None else: raise Exception("episode_transforms not implemented") episode_transform = lambda tup, _prefix=domain_prefix: (_prefix + str(tup[0]), tup[1]) eaf = Episodic_Accessor_Factory( labels=labels, domains=domains, num_examples_per_domain_per_label=num_examples_per_domain_per_label, iterator_seed=iterator_seed, dataset_seed=dataset_seed, n_shot=n_shot, n_way=n_way, n_query=n_query, train_val_test_k_factors=train_val_test_k_factors, pickle_path=pickle_path, x_transform_func=x_transform, ) train, val, test = eaf.get_train(), 
eaf.get_val(), eaf.get_test() train = Lazy_Iterable_Wrapper(train, episode_transform) val = Lazy_Iterable_Wrapper(val, episode_transform) test = Lazy_Iterable_Wrapper(test, episode_transform) if source_or_target_dataset=="source": train_original_source.append(train) val_original_source.append(val) test_original_source.append(test) p.domains_source.extend( [domain_prefix + str(u) for u in domains] ) elif source_or_target_dataset=="target": train_original_target.append(train) val_original_target.append(val) test_original_target.append(test) p.domains_target.extend( [domain_prefix + str(u) for u in domains] ) else: raise Exception(f"invalid source_or_target_dataset: {source_or_target_dataset}") for ds in p.datasets: add_dataset(**ds) # from steves_utils.CORES.utils import ( # ALL_NODES, # ALL_NODES_MINIMUM_1000_EXAMPLES, # ALL_DAYS # ) # add_dataset( # labels=ALL_NODES, # domains = ALL_DAYS, # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"), # source_or_target_dataset="target", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"cores_{u}" # ) # from steves_utils.ORACLE.utils_v2 import ( # ALL_DISTANCES_FEET, # ALL_RUNS, # ALL_SERIAL_NUMBERS, # ) # add_dataset( # labels=ALL_SERIAL_NUMBERS, # domains = list(set(ALL_DISTANCES_FEET) - {2,62}), # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"), # source_or_target_dataset="source", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"oracle1_{u}" # ) # from steves_utils.ORACLE.utils_v2 import ( # ALL_DISTANCES_FEET, # ALL_RUNS, # ALL_SERIAL_NUMBERS, # ) # add_dataset( # labels=ALL_SERIAL_NUMBERS, # domains = list(set(ALL_DISTANCES_FEET) - {2,62,56}), # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"), # 
source_or_target_dataset="source", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"oracle2_{u}" # ) # add_dataset( # labels=list(range(19)), # domains = [0,1,2], # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "metehan.stratified_ds.2022A.pkl"), # source_or_target_dataset="target", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"met_{u}" # ) # # from steves_utils.wisig.utils import ( # # ALL_NODES_MINIMUM_100_EXAMPLES, # # ALL_NODES_MINIMUM_500_EXAMPLES, # # ALL_NODES_MINIMUM_1000_EXAMPLES, # # ALL_DAYS # # ) # import steves_utils.wisig.utils as wisig # add_dataset( # labels=wisig.ALL_NODES_MINIMUM_100_EXAMPLES, # domains = wisig.ALL_DAYS, # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "wisig.node3-19.stratified_ds.2022A.pkl"), # source_or_target_dataset="target", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"wisig_{u}" # ) ################################### # Build the dataset ################################### train_original_source = Iterable_Aggregator(train_original_source, p.seed) val_original_source = Iterable_Aggregator(val_original_source, p.seed) test_original_source = Iterable_Aggregator(test_original_source, p.seed) train_original_target = Iterable_Aggregator(train_original_target, p.seed) val_original_target = Iterable_Aggregator(val_original_target, p.seed) test_original_target = Iterable_Aggregator(test_original_target, p.seed) # For CNN We only use X and Y. And we only train on the source. # Properly form the data using a transform lambda and Lazy_Iterable_Wrapper. 
Finally wrap them in a dataloader transform_lambda = lambda ex: ex[1] # Original is (<domain>, <episode>) so we strip down to episode only train_processed_source = Lazy_Iterable_Wrapper(train_original_source, transform_lambda) val_processed_source = Lazy_Iterable_Wrapper(val_original_source, transform_lambda) test_processed_source = Lazy_Iterable_Wrapper(test_original_source, transform_lambda) train_processed_target = Lazy_Iterable_Wrapper(train_original_target, transform_lambda) val_processed_target = Lazy_Iterable_Wrapper(val_original_target, transform_lambda) test_processed_target = Lazy_Iterable_Wrapper(test_original_target, transform_lambda) datasets = EasyDict({ "source": { "original": {"train":train_original_source, "val":val_original_source, "test":test_original_source}, "processed": {"train":train_processed_source, "val":val_processed_source, "test":test_processed_source} }, "target": { "original": {"train":train_original_target, "val":val_original_target, "test":test_original_target}, "processed": {"train":train_processed_target, "val":val_processed_target, "test":test_processed_target} }, }) from steves_utils.transforms import get_average_magnitude, get_average_power print(set([u for u,_ in val_original_source])) print(set([u for u,_ in val_original_target])) s_x, s_y, q_x, q_y, _ = next(iter(train_processed_source)) print(s_x) # for ds in [ # train_processed_source, # val_processed_source, # test_processed_source, # train_processed_target, # val_processed_target, # test_processed_target # ]: # for s_x, s_y, q_x, q_y, _ in ds: # for X in (s_x, q_x): # for x in X: # assert np.isclose(get_average_magnitude(x.numpy()), 1.0) # assert np.isclose(get_average_power(x.numpy()), 1.0) ################################### # Build the model ################################### # easfsl only wants a tuple for the shape model = Steves_Prototypical_Network(x_net, device=p.device, x_shape=tuple(p.x_shape)) optimizer = Adam(params=model.parameters(), lr=p.lr) 
################################### # train ################################### jig = PTN_Train_Eval_Test_Jig(model, p.BEST_MODEL_PATH, p.device) jig.train( train_iterable=datasets.source.processed.train, source_val_iterable=datasets.source.processed.val, target_val_iterable=datasets.target.processed.val, num_epochs=p.n_epoch, num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH, patience=p.patience, optimizer=optimizer, criteria_for_best=p.criteria_for_best, ) total_experiment_time_secs = time.time() - start_time_secs ################################### # Evaluate the model ################################### source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test) target_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test) source_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val) target_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val) history = jig.get_history() total_epochs_trained = len(history["epoch_indices"]) val_dl = Iterable_Aggregator((datasets.source.original.val,datasets.target.original.val)) confusion = ptn_confusion_by_domain_over_dataloader(model, p.device, val_dl) per_domain_accuracy = per_domain_accuracy_from_confusion(confusion) # Add a key to per_domain_accuracy for if it was a source domain for domain, accuracy in per_domain_accuracy.items(): per_domain_accuracy[domain] = { "accuracy": accuracy, "source?": domain in p.domains_source } # Do an independent accuracy assesment JUST TO BE SURE! 
# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device) # _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device) # _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device) # _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device) # assert(_source_test_label_accuracy == source_test_label_accuracy) # assert(_target_test_label_accuracy == target_test_label_accuracy) # assert(_source_val_label_accuracy == source_val_label_accuracy) # assert(_target_val_label_accuracy == target_val_label_accuracy) experiment = { "experiment_name": p.experiment_name, "parameters": dict(p), "results": { "source_test_label_accuracy": source_test_label_accuracy, "source_test_label_loss": source_test_label_loss, "target_test_label_accuracy": target_test_label_accuracy, "target_test_label_loss": target_test_label_loss, "source_val_label_accuracy": source_val_label_accuracy, "source_val_label_loss": source_val_label_loss, "target_val_label_accuracy": target_val_label_accuracy, "target_val_label_loss": target_val_label_loss, "total_epochs_trained": total_epochs_trained, "total_experiment_time_secs": total_experiment_time_secs, "confusion": confusion, "per_domain_accuracy": per_domain_accuracy, }, "history": history, "dataset_metrics": get_dataset_metrics(datasets, "ptn"), } ax = get_loss_curve(experiment) plt.show() get_results_table(experiment) get_domain_accuracies(experiment) print("Source Test Label Accuracy:", experiment["results"]["source_test_label_accuracy"], "Target Test Label Accuracy:", experiment["results"]["target_test_label_accuracy"]) print("Source Val Label Accuracy:", experiment["results"]["source_val_label_accuracy"], "Target Val Label Accuracy:", experiment["results"]["target_val_label_accuracy"]) json.dumps(experiment) ```
github_jupyter
# Cat Dog Classification ## 1. 下载数据 我们将使用包含猫与狗图片的数据集。它是Kaggle.com在2013年底计算机视觉竞赛提供的数据集的一部分,当时卷积神经网络还不是主流。可以在以下位置下载原始数据集: `https://www.kaggle.com/c/dogs-vs-cats/data`。 图片是中等分辨率的彩色JPEG。看起来像这样: ![cats_vs_dogs_samples](https://s3.amazonaws.com/book.keras.io/img/ch5/cats_vs_dogs_samples.jpg) 不出所料,2013年的猫狗大战的Kaggle比赛是由使用卷积神经网络的参赛者赢得的。最佳成绩达到了高达95%的准确率。在本例中,我们将非常接近这个准确率,即使我们将使用不到10%的训练集数据来训练我们的模型。 原始数据集的训练集包含25,000张狗和猫的图像(每个类别12,500张),543MB大(压缩)。 在下载并解压缩之后,我们将创建一个包含三个子集的新数据集: * 每个类有1000个样本的训练集, * 每个类500个样本的验证集, * 最后是每个类500个样本的测试集。 数据已经提前处理好。 ### 1.1 加载数据集目录 ``` import os, shutil # The directory where we will # store our smaller dataset base_dir = './data/cats_and_dogs_small' # Directories for our training, # validation and test splits train_dir = os.path.join(base_dir, 'train') validation_dir = os.path.join(base_dir, 'validation') test_dir = os.path.join(base_dir, 'test') # Directory with our training cat pictures train_cats_dir = os.path.join(train_dir, 'cats') # Directory with our training dog pictures train_dogs_dir = os.path.join(train_dir, 'dogs') # Directory with our validation cat pictures validation_cats_dir = os.path.join(validation_dir, 'cats') # Directory with our validation dog pictures validation_dogs_dir = os.path.join(validation_dir, 'dogs') # Directory with our validation cat pictures test_cats_dir = os.path.join(test_dir, 'cats') # Directory with our validation dog pictures test_dogs_dir = os.path.join(test_dir, 'dogs') ``` ## 2. 
模型一 ### 2.1 数据处理 ``` from keras.preprocessing.image import ImageDataGenerator # All images will be rescaled by 1./255 train_datagen = ImageDataGenerator(rescale=1./255) validation_datagen = ImageDataGenerator(rescale=1./255) test_datagen = ImageDataGenerator(rescale=1./255) # 150*150 train_generator = train_datagen.flow_from_directory( # This is the target directory train_dir, # All images will be resized to 150x150 target_size=(150, 150), batch_size=20, # Since we use binary_crossentropy loss, we need binary labels class_mode='binary') validation_generator = validation_datagen.flow_from_directory( validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary') test_generator = test_datagen.flow_from_directory( test_dir, target_size=(150, 150), batch_size=20, class_mode='binary') print('train_dir: ',train_dir) print('validation_dir: ',validation_dir) print('test_dir: ',test_dir) for data_batch, labels_batch in train_generator: print('data batch shape:', data_batch.shape) print('labels batch shape:', labels_batch.shape) break labels_batch ``` ### 2.2 构建模型 ``` from keras import layers from keras import models model = models.Sequential() model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(128, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(128, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Flatten()) model.add(layers.Dense(512, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) from keras import optimizers model.compile(optimizer=optimizers.RMSprop(lr=1e-4), loss='binary_crossentropy', metrics=['acc']) history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=30, validation_data=validation_generator, validation_steps=50) import matplotlib.pyplot as plt 
import seaborn as sns %matplotlib inline plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper right') plt.show() plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper right') plt.show() val_loss_min = history.history['val_loss'].index(min(history.history['val_loss'])) val_acc_max = history.history['val_acc'].index(max(history.history['val_acc'])) print('validation set min loss: ', val_loss_min) print('validation set max accuracy: ', val_acc_max) from keras import layers from keras import models # vgg的做法 model = models.Sequential() model.add(layers.Conv2D(32, 3, activation='relu', padding="same", input_shape=(64, 64, 3))) model.add(layers.Conv2D(32, 3, activation='relu', padding="same")) model.add(layers.MaxPooling2D(pool_size=(2, 2))) model.add(layers.Conv2D(64, 3, activation='relu', padding="same")) model.add(layers.Conv2D(64, 3, activation='relu', padding="same")) model.add(layers.MaxPooling2D(pool_size=(2, 2))) model.add(layers.Conv2D(128, 3, activation='relu', padding="same")) model.add(layers.Conv2D(128, 3, activation='relu', padding="same")) model.add(layers.MaxPooling2D(pool_size=(2, 2))) model.add(layers.Conv2D(256, 3, activation='relu', padding="same")) model.add(layers.Conv2D(256, 3, activation='relu', padding="same")) model.add(layers.MaxPooling2D(pool_size=2)) model.add(layers.Flatten()) model.add(layers.Dense(256, activation='relu')) model.add(layers.Dropout(0.5)) model.add(layers.Dense(256, activation='relu')) model.add(layers.Dropout(0.5)) model.add(layers.Dense(1, activation='sigmoid')) model.summary() from keras import optimizers model.compile(optimizer=optimizers.RMSprop(lr=1e-4), loss='binary_crossentropy', metrics=['acc']) # model.compile(loss='binary_crossentropy', # optimizer='adam', # 
metrics=['accuracy']) ``` ### 2.3 训练模型 ``` history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=30, validation_data=validation_generator, validation_steps=50) ``` ### 2.4 画出表现 ``` import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() val_loss_min = val_loss.index(min(val_loss)) val_acc_max = val_acc.index(max(val_acc)) print('validation set min loss: ', val_loss_min) print('validation set max accuracy: ', val_acc_max) ``` ### 2.5 测试集表现 ``` scores = model.evaluate_generator(test_generator, verbose=0) print("Large CNN Error: %.2f%%" % (100 - scores[1] * 100)) ``` ## 3. 
模型二 使用数据增强来防止过拟合 ### 3.1 数据增强示例 ``` datagen = ImageDataGenerator( rotation_range=40, # 角度值(在 0~180 范围内),表示图像随机旋转的角度范围 width_shift_range=0.2, # 图像在水平或垂直方向上平移的范围 height_shift_range=0.2, # (相对于总宽度或总高度的比例) shear_range=0.2, # 随机错切变换的角度 zoom_range=0.2, # 图像随机缩放的范围 horizontal_flip=True, # 随机将一半图像水平翻转 fill_mode='nearest') # 用于填充新创建像素的方法, # 这些新像素可能来自于旋转或宽度/高度平移 # This is module with image preprocessing utilities from keras.preprocessing import image fnames = [os.path.join(train_cats_dir, fname) for fname in os.listdir(train_cats_dir)] # We pick one image to "augment" img_path = fnames[3] # Read the image and resize it img = image.load_img(img_path, target_size=(150, 150)) imgplot_oringe = plt.imshow(img) # Convert it to a Numpy array with shape (150, 150, 3) x = image.img_to_array(img) # Reshape it to (1, 150, 150, 3) x = x.reshape((1,) + x.shape) # The .flow() command below generates batches of randomly transformed images. # It will loop indefinitely, so we need to `break` the loop at some point! i = 0 for batch in datagen.flow(x, batch_size=1): plt.figure(i) imgplot = plt.imshow(image.array_to_img(batch[0])) i += 1 if i % 4 == 0: break plt.show() ``` ### 3.2 定义数据增强 ``` train_datagen = ImageDataGenerator( rescale=1./255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True,) # Note that the validation data should not be augmented! 
test_datagen = ImageDataGenerator(rescale=1./255) # 注意,不能增强验证数据 train_generator = train_datagen.flow_from_directory( # This is the target directory train_dir, # All images will be resized to 150x150 target_size=(150, 150), batch_size=32, # Since we use binary_crossentropy loss, we need binary labels class_mode='binary') validation_generator = test_datagen.flow_from_directory( validation_dir, target_size=(150, 150), batch_size=32, class_mode='binary') ``` ### 3.3 训练网络 ``` model = models.Sequential() model.add(layers.Conv2D(32, 3, activation='relu', padding="same", input_shape=(150, 150, 3))) model.add(layers.Conv2D(32, 3, activation='relu', padding="same")) model.add(layers.MaxPooling2D(pool_size=(2, 2))) model.add(layers.Conv2D(64, 3, activation='relu', padding="same")) model.add(layers.Conv2D(64, 3, activation='relu', padding="same")) model.add(layers.MaxPooling2D(pool_size=(2, 2))) model.add(layers.Conv2D(128, 3, activation='relu', padding="same")) model.add(layers.Conv2D(128, 3, activation='relu', padding="same")) model.add(layers.MaxPooling2D(pool_size=(2, 2))) model.add(layers.Conv2D(256, 3, activation='relu', padding="same")) model.add(layers.Conv2D(256, 3, activation='relu', padding="same")) model.add(layers.MaxPooling2D(pool_size=2)) model.add(layers.Flatten()) model.add(layers.Dense(256, activation='relu')) model.add(layers.Dropout(0.5)) model.add(layers.Dense(256, activation='relu')) model.add(layers.Dropout(0.5)) model.add(layers.Dense(1, activation='sigmoid')) # model.compile(optimizer=optimizers.RMSprop(lr=1e-4), # loss='binary_crossentropy', # metrics=['acc']) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) history = model.fit_generator(train_generator, steps_per_epoch=100, # 训练集分成100批送进去,相当于每批送20个 epochs=100, # 循环100遍 validation_data=validation_generator, validation_steps=50, # 验证集分50批送进去,每批20个 verbose=0) ``` ### 3.4 画出表现 ``` acc = history.history['acc'] val_acc = history.history['val_acc'] loss = 
history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() val_loss_min = val_loss.index(min(val_loss)) val_acc_max = val_acc.index(max(val_acc)) print('validation set min loss: ', val_loss_min) print('validation set max accuracy: ', val_acc_max) # train_datagen = ImageDataGenerator(rotation_range=40, # width_shift_range=0.2, # height_shift_range=0.2, # shear_range=0.2, # zoom_range=0.2, # horizontal_flip=True, # fill_mode='nearest') # train_datagen.fit(train_X) # train_generator = train_datagen.flow(train_X, train_y, # batch_size = 64) # history = model_vgg16.fit_generator(train_generator, # validation_data = (test_X, test_y), # steps_per_epoch = train_X.shape[0] / 100, # epochs = 10) ``` ## 4. 
使用预训练的VGG-16 ![swapping FC classifiers](https://s3.amazonaws.com/book.keras.io/img/ch5/swapping_fc_classifier.png) ``` from keras.applications import VGG16 conv_base = VGG16(weights='imagenet', include_top=False, # 不要分类层 input_shape=(150, 150, 3)) conv_base.summary() from keras import models from keras import layers model = models.Sequential() model.add(conv_base) model.add(layers.Flatten()) model.add(layers.Dense(256, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) # model = models.Sequential() # model.add(conv_base) # model.add(layers.Dense(256, activation='relu')) # model.add(layers.Dropout(0.5)) # model.add(layers.Dense(256, activation='relu')) # model.add(layers.Dropout(0.5)) # model.add(layers.Dense(1, activation='sigmoid')) print('This is the number of trainable weights ' 'before freezing the conv base:', len(model.trainable_weights)) conv_base.trainable = False print('This is the number of trainable weights ' 'after freezing the conv base:', len(model.trainable_weights)) from keras.preprocessing.image import ImageDataGenerator from keras import optimizers train_datagen = ImageDataGenerator( rescale=1./255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest') # Note that the validation data should not be augmented! 
test_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow_from_directory( # This is the target directory train_dir, # All images will be resized to 150x150 target_size=(150, 150), batch_size=20, # Since we use binary_crossentropy loss, we need binary labels class_mode='binary') validation_generator = test_datagen.flow_from_directory( validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary') model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=2e-5), metrics=['acc']) history = model.fit_generator( train_generator, steps_per_epoch=100, epochs=30, validation_data=validation_generator, validation_steps=50, verbose=2) acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() val_loss_min = val_loss.index(min(val_loss)) val_acc_max = val_acc.index(max(val_acc)) print('validation set min loss: ', val_loss_min) print('validation set max accuracy: ', val_acc_max) ``` ## Fine-tuning ![fine-tuning VGG16](https://s3.amazonaws.com/book.keras.io/img/ch5/vgg16_fine_tuning.png) ``` conv_base.summary() conv_base.trainable = True set_trainable = False for layer in conv_base.layers: if layer.name == 'block5_conv1': set_trainable = True if set_trainable: layer.trainable = True else: layer.trainable = False model.summary() model.compile(optimizer=optimizers.RMSprop(lr=1e-5), loss='binary_crossentropy', metrics=['acc']) history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=100, validation_data=validation_generator, validation_steps=50, verbose=0) acc = 
history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() def smooth_curve(points, factor=0.8): smoothed_points = [] for point in points: if smoothed_points: previous = smoothed_points[-1] smoothed_points.append(previous * factor + point * (1 - factor)) else: smoothed_points.append(point) return smoothed_points plt.plot(epochs, smooth_curve(acc), 'bo', label='Smoothed training acc') plt.plot(epochs, smooth_curve(val_acc), 'b', label='Smoothed validation acc') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, smooth_curve(loss), 'bo', label='Smoothed training loss') plt.plot(epochs, smooth_curve(val_loss), 'b', label='Smoothed validation loss') plt.title('Training and validation loss') plt.legend() plt.show() smooth_val_loss = smooth_curve(val_loss) smooth_val_loss.index(min(smooth_val_loss)) test_generator = test_datagen.flow_from_directory(test_dir, target_size=(150, 150), batch_size=20, class_mode='binary') test_loss, test_acc = model.evaluate_generator(test_generator, steps=50) print('test acc:', test_acc) # plt.plot(history.history['loss']) # plt.plot(history.history['val_loss']) # plt.title('model loss') # plt.ylabel('loss') # plt.xlabel('epoch') # plt.legend(['train', 'test'], loc='upper right') # plt.show() # plt.plot(history.history['acc']) # plt.plot(history.history['val_acc']) # plt.title('model accuracy') # plt.ylabel('accuracy') # plt.xlabel('epoch') # plt.legend(['train', 'test'], loc='upper right') # plt.show() ```
github_jupyter
# Titanic: Machine Learning from Disaster ## [Kaggle Challenge](https://www.kaggle.com/c/titanic#tutorials) **In this challenge, we ask you to complete the analysis of what sorts of people were likely to survive. In particular, we ask you to apply the tools of machine learning to predict which passengers survived the tragedy.** Inspired by [Titanic Data Science Solutions](https://www.kaggle.com/startupsci/titanic-data-science-solutions). ## Workflow 1. Question or problem definition. 2. Acquire training and testing data. 3. Wrangle, prepare, cleanse the data. 4. Analyze, identify patterns, and explore the data. 5. Model, predict and solve the problem. 6. Visualize, report, and present the problem solving steps and final solution. 7. Supply or submit the results. ## 1. Question or problem definition **Given from Kaggle:** Knowing from a training set of samples listing passengers who survived or did not survive the Titanic disaster, can our model determine based on a given test dataset not containing the survival information, if these passengers in the test dataset survived or not. **Info about the case:** * On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. Translated 32% survival rate. * One of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew. * Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class. ### Workflow goals The data science solutions workflow solves for seven major goals. **Classifying:** We may want to classify or categorize our samples. We may also want to understand the implications or correlation of different classes with our solution goal. **Correlating:** One can approach the problem based on available features within the training dataset. 
Which features within the dataset contribute significantly to our solution goal? Statistically speaking is there a correlation among a feature and solution goal? As the feature values change does the solution state change as well, and visa-versa? This can be tested both for numerical and categorical features in the given dataset. We may also want to determine correlation among features other than survival for subsequent goals and workflow stages. Correlating certain features may help in creating, completing, or correcting features. **Converting:** For modeling stage, one needs to prepare the data. Depending on the choice of model algorithm one may require all features to be converted to numerical equivalent values. So for instance converting text categorical values to numeric values. **Completing.** Data preparation may also require us to estimate any missing values within a feature. Model algorithms may work best when there are no missing values. **Correcting:** We may also analyze the given training dataset for errors or possibly innacurate values within features and try to corrent these values or exclude the samples containing the errors. One way to do this is to detect any outliers among our samples or features. We may also completely discard a feature if it is not contribting to the analysis or may significantly skew the results. **Creating:** Can we create new features based on an existing feature or a set of features, such that the new feature follows the correlation, conversion, completeness goals. **Charting:** How to select the right visualization plots and charts depending on nature of the data and the solution goals. ## 2. 
Acquire training and testing data ``` # data analysis and wrangling import pandas as pd import numpy as np import random as rnd # visualization import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline # machine learning from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import Perceptron from sklearn.linear_model import SGDClassifier from sklearn.tree import DecisionTreeClassifier ``` ### Acquire data ``` df_train = pd.read_csv('titanic/train.csv') df_test = pd.read_csv('titanic/test.csv') combine = [df_train, df_test] ``` ### Column description: **Survival** = Survival **Pclass** = Ticket class (1 = 1st, 2 = 2nd, 3 = 3rd) **Sex** = Sex **Age** = Age in years **SibSp** = # of siblings / spouses aboard the Titanic **Parch** = # of parents / children aboard the Titanic **Ticket** = Ticket number **Fare** = Passenger fare **Cabin** = Cabin number **Embarked** = Port of Embarkation (C = Cherbourg, Q = Queenstown, S = Southampton) ``` df_train.head() df_train.tail() ``` ## 3. Wrangle, prepare, cleanse the data. **What features are available?** ``` list(df_train) ``` ### What features are categorical? These values classify the samples into sets of similar samples. Within categorical features are the values nominal, ordinal, ratio, or interval based? Among other things this helps us select the appropriate plots for visualization. **Categorical:** Survived, Sex, Embarked **Ordinal:** Pclass ### What features are numerical? Which features are numerical? These values change from sample to sample. Within numerical features are the values discrete, continuous, or timeseries based? Among other things this helps us select the appropriate plots for visualization. **Continuous:** Age, Fare **Discrete:** SibSp, Parch ### Which features have mixed data types? 
Numerical, alphanumeric data within same feature. These are candidates for correcting goal. * Ticket is a mix of numeric and alphanumeric data types. Cabin is alphanumeric. ### Which features may contain errors or typos? This is harder to review for a large dataset, however reviewing a few samples from a smaller dataset may just tell us outright, which features may require correcting. * Name feature may contain errors or typos as there are several ways used to describe a name including titles, round brackets, and quotes used for alternative or short names. ### Which features contain blank, null or empty values? These will require correcting. * Cabin & Age & Embarked features contain a number of null values in that order for the training dataset. * Cabin & Age are incomplete in case of test dataset ### What are the data types for various features? Helping us during converting goal. * Seven features are integer or floats. Six in case of test dataset * Six features are strings (object). Five for test dataset. ``` df_train.info() print('-'*40) df_test.info() ``` ### What is the distribution of numerical feature values across the samples? This helps determine how representative the training dataset is of the actual problem domain. * Total samples are 891 or 40% of the actual number of passengers on board the Titanic (2,224). * Survived is a categorical feature with 0 or 1 values. * Around 38% samples survived representative of the actual survival rate at 32%. * Most passengers (> 75%) did not travel with parents or children. * Nearly 30% of the passengers had siblings and/or spouse aboard. * Fares varied significantly with few passengers (<1%) paying as high as $512. * Few elderly passengers (<1%) within age range 65-80. ``` df_train.describe() ``` ### What is the distribution of categorical features? 
* Names are unique across the dataset (count=unique=891) * Sex variable as two possible values with 65% male (top=male, freq=577/891) * Cabin values have several duplicates across samples. Alternatively several passengers shared a cabin. * Embarked takes three possible values. S port used by most passengers (top=S) * Ticket feature has high ratio (22%) of duplicate values (unique=681) ``` df_train.describe(include=['O']) ``` ## 4. Analyze, identify patterns, and explore the data. ### Assumptions based on data analysis We arrive at following assumptions based on data analysis done so far. We may validate these assumptions further before taking appropriate actions. **Correlating:** We want to know how well each feature correlates with Survival. We want to do this early in our project and match these quick correlations with modelled correlations later in the project. **Completing:** 1. We may want to complete Age feature as it is definitely correlated to survival 2. We may want to complete the Embarked feature as it may also correlate with survival or another important feature. **Correcting:** 1. Ticket feature may be dropped from our analysis as it contains high ratio of duplicated (22%) and there may not be a correlation between Ticket and survival. 2. Cabin feature may be dropped as it is highly incomplete or contains many null values both in training and test dataset. 3. PassengerId may be dropped from training dataset as it does not contribute to survival. 4. Name feature is relatively non-standard, may not contribute directly to survival, so maybe dropped. **Creating:** 1. We may want to create a new feature called Family based on Parch and SibSp to get total count of family members on board. 2. We may want to engineer the Name feature to extract Title as a new feature. 3. We may want to create new feature for Age bands. This turns a continous numerical feature into an ordinal categorical feature. 4. 
We may also want to create a Fare range feature if it helps our analysis. **Classifying:** We may also add to our assumptions based on the problem description noted earlier. 1. Women (Sex=female) were more likely to have survived. 2. Children (Age<?) were more likely to have survived. 3. The upper-class passengers (Pclass=1) were more likely to have survived. ### Analyze by pivoting features To confirm some of our observations and assumptions, we can quickly analyze our feature correlations by pivoting features against each other. We can only do so at this stage for features which do not have any empty values. It also makes sense doing so only for features which are categorical (Sex), ordinal (Pclass) or discrete (SibSp, Parch) type. * **Pclass**: We observe significant correlation (>0.5) among Pclass=1 and Survived (classifying #3). We decide to inlcude this feature to our model. * **Sex:** We confirm the observation during problem definition that Sex=female had very high survival rate at 74% (classifying #1) * **SibSp and Parch:** These features have zero correlation for certain values. It may be best to derive a feature or a set of features from these individual features (creating #1) ``` df_train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False) df_train[["Sex", "Survived"]].groupby(["Sex"], as_index=False).mean().sort_values(by='Survived', ascending=False) df_train[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False) df_train[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False) ``` ### Analyze by visualizing data Now we can continue confirming some of our assumptions using visualizations for analyzing the data. **Correlating numerical features** Start by understanding correlations between numerical features and our solution goal (Survived). 
A histogram chart is useful for analyzing continuous numerical variables like Age where banding or ranges will help identify useful patterns. The histogram can indicate distribution of samples using automatically defined bins or equally ranged bands. This helps us answer questions relating to specific bands (Did infants have better survival rate?) Note that x-axis in histogram visualizations represents the count of samples or passengers. **Observations:** * Infants (Age <= 4) had high survival rate * Oldest passengers (Age=80) survived * Large number of 15-25 year olds did not survive * Most passengers are in 15-35 range **Decisions:** This simple analysis confirms our assumptions as decisions for subsequent workflow stages. * We should consider Age (out assumption classifying #2) in our model training. * Complete the Age feature for null values (completing #1). * We should band age groups (creating #3) ``` g = sns.FacetGrid(df_train, col='Survived') g.map(plt.hist, 'Age', bins=20); ``` **Correlating numerical and ordinal features** We can combine multiple features for identifying correlations using a single plot. This can be done with numerical and categorical features which have numerical values. **Observations:** * Pclass=3 had most passengers, however most did not survive. Confirms our classifying assumption #2. * Infant passengers in Pclass=2 and Pclass=3 mostly survived. Further qualifies our classifying assumption #2. * Most passengers in Pclass=1 survived. Confirms our classifying assumption #3. * Pclass varies in terms of Age distribution of passengers. **Decisions:** * Consider Pclass for model training. ``` grid = sns.FacetGrid(df_train, col='Survived', row='Pclass', size=2.2, aspect=1.6) grid.map(plt.hist, 'Age', alpha=.5, bins=20) grid.add_legend(); ``` **Correlating categorical features** Now we can correlate categorical features with our solution goal. **Observations:** * Female passengers had much better survival rates than males. 
Confirms classifying (#1) * Exception in Embarked=C where males had higher survival rate. This could be a correlation between Pclass and Embarked and in turn Pclass and Survived, not necessarily direct correlation between Embarked and Survived. * Males had better survival rate in Pclass=3 when compared with Pclass=2 for C and Q ports. Completing (#2). * Ports of embarkation have varying survival rates for Pclass=3 and among male passengers. Correlating (#1). **Decisions:** * Add Sex feature to model training * Complete and add Embarked feature to model training ``` grid = sns.FacetGrid(df_train, row='Embarked', size=2.2, aspect=1.6) grid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep') grid.add_legend(); ``` **Correlating categorical and numerical features** We may also want to correlate categorical features (with non-numeric values) and numeric features. We can consider correlating Embarked (Categorical non-numeric), Sex (Categorical non-numeric), Fare (Numeric continuous), with Survived (Categorical numeric). **Observations:** * Higher fare paying passengers had better survival. Confirms our assumption for creating (#4) fare ranges. * Port of embarkation correlates with survival rates. Confirms correlating (#1) and completing (#2) **Decisions:** * Consider banding Fare feature ``` grid = sns.FacetGrid(df_train, row='Embarked', col='Survived', size=2.2, aspect=1.6) grid.map(sns.barplot, 'Sex', 'Fare', alpha=.5, ci=None) grid.add_legend(); ``` ### Wrangle data We have collected several assumptions and decisions regarding our datasets and solution requirements. So far we did not have to change a single feature or value to arrive at these. Let us now execute our decisions and assumptions for correcting, creating and completing goals. **Correcting by dropping features:** This is a good starting goal to execute. By dropping features we are dealing with fewer data points. Speeds up our notebook and eases the analysis. 
Based on our assumptions and decisions we want to drop the Cabin (correcting #2) and Ticket (correcting #1) features. Note that where applicable we perform operations on both training and testing datasets together to stay consistent. ``` print("Before", df_train.shape, df_test.shape, combine[0].shape, combine[1].shape) df_train = df_train.drop(['Ticket', 'Cabin'], axis=1) df_test = df_test.drop(['Ticket', 'Cabin'], axis=1) combine = [df_train, df_test] print("After", df_train.shape, df_test.shape, combine[0].shape, combine[1].shape) ``` **Creating new feature extracting from existing** We want to analyze if Name feature can be engineered to extract titles and test correlation between titles and survival, before dropping Name and PassengerId features. In the following code we extract Title using regular expressions. The RegEx pattern (\w+\.) matches the first word which ends with a dot character within Name feature. The `expand=False` flag return a DataFrame. **Observations:** When we plot Title, Age, and Survived, we note the following observations. * Most titles band Age groups accurately. For example: Master title has Age mean of 5 years. * Survival among Title Age bands varies slightly * Certain titles mostly survived (Mme, Lady, Sir) or did not (Don, Rev, Jonkheer) **Decision:** * We decide to retain the new Title feature for model training. ``` for dataset in combine: dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\.', expand=False) pd.crosstab(df_train['Title'], df_train['Sex']) ``` We can replace many titles with a more common name or classify them as `Rare`. 
``` for dataset in combine: dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col',\ 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') dataset['Title'] = dataset['Title'].replace('Ms', 'Miss') dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') df_train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean() ``` We can convert the categorical titles to ordinal. ``` title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5} for dataset in combine: dataset['Title'] = dataset['Title'].map(title_mapping) dataset['Title'] = dataset['Title'].fillna(0) df_train['Title'] = df_train['Title'].apply(pd.to_numeric) df_train.head() ``` Now we can safely drop the 'Name' feature from training and testing datasets. We also do not need the PassengerId feature in the training set. ``` df_train = df_train.drop(['Name', 'PassengerId'], axis=1) df_test = df_test.drop(['Name'], axis=1) combine = [df_train, df_test] df_train.shape, df_test.shape ``` **Converting a categorical feature** Now we can convert features which contain string to numerical values. This is required by most model algorithms. Doing so will also help us in achieving the feature completing goal. Let us start by converting Sex feature to a new feature called Gender where female=1 and male=0. ``` for dataset in combine: dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int) df_train.head() ``` **Completing a numerical continuous feature** Now we should start estimating and completing features with missing or null values. We will first do this for the Age feature. We can consider three methods to complete a numerical continuous feature. 1. A simple way is to generate random numbers between mean and standard deviation. 2. More accurate way of guessing missing values is to use other correlated features. In our case we note correlation among Age, Gender and Pclass. 
Guess Age values using median values for Age across sets of Pclass and Gender feature combinations. So, median Age for Pclass=1 and Gender=0, Pclass=1 and Gender=1, and so on... 3. Combine methods 1 and 2. So instead of guessing age values based on median, use random numbers between mean and standard deviation, based on sets of Pclass and Gender combinations. Method 1 and 3 will introduce random noise into our models. The results from multiple executions might vary. We will prefer method 2. ``` grid = sns.FacetGrid(df_train, row='Pclass', col='Sex', size=2.2, aspect=1.6) grid.map(plt.hist, 'Age', alpha=.5, bins=20) grid.add_legend() ``` Let us start by preparing an empty array to contain guessed Age values based on Pclass x Gender ocmbinations. ``` guess_ages = np.zeros((2,3)) guess_ages for dataset in combine: for i in range(0, 2): for j in range(0, 3): df_guess = dataset[(dataset['Sex'] == i) & \ (dataset['Pclass'] == j+1)]['Age'].dropna() # age_mean = df_guess # age_std = df_guess.std() # age_guess = rnd.uniform(age_mean - age_std, age_mean + age_std) age_guess = df_guess.median() # Convert random age float to nearest .5 age guess_ages[i,j] = int( age_guess/0.5 + 0.5) * 0.5 for i in range(0,2): for j in range(0,3): dataset.loc[ (dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j+1), 'Age'] = guess_ages[i,j] dataset['Age'] = dataset['Age'].astype(int) df_train.head() ``` Let us create Age bands and determine correlations with Survived. ``` df_train['AgeBand'] = pd.cut(df_train['Age'], 5) df_train[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending=True) ``` Let us replace Age with ordinals based on these bands. 
```
# Replace Age with the ordinal index of its band.
for dataset in combine:
    dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    # BUG FIX: the original line had no assignment, so ages above 64 were
    # never mapped onto their band ordinal.
    dataset.loc[ dataset['Age'] > 64, 'Age'] = 4

df_train.head()
```

We can now remove the AgeBand feature.

```
df_train = df_train.drop(['AgeBand'], axis=1)
combine = [df_train, df_test]
df_train.head()
```

**Create new feature combining existing features**

We can create a new feature for FamilySize which combines Parch and SibSp. This will enable us to drop Parch and SibSp from our datasets.

```
for dataset in combine:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1

df_train[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
df_train.head()
```

We can create another feature called IsAlone.

```
for dataset in combine:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1

df_train[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()
```

Let us drop Parch, SibSp and FamilySize features in favor of IsAlone.

```
df_train = df_train.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
df_test = df_test.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [df_train, df_test]
df_train.head()
```

We can also create an artificial feature combining Pclass and Age.

```
for dataset in combine:
    dataset['Age*Class'] = dataset.Age * dataset.Pclass

df_train.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10)
```

**Completing a categorical feature**

Embarked feature takes S, Q, C values based on port of embarkation. Our training dataset has two missing values. We simply fill these with the most common occurrence.
```
# Fill the two missing Embarked values with the most frequent port.
freq_port = df_train.Embarked.dropna().mode()[0]
freq_port

for dataset in combine:
    dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)

df_train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
```

**Converting categorical feature to numeric**

We can now convert the EmbarkedFill feature by creating a new numeric Port feature.

```
port_mapping = {'S': 0, 'C': 1, 'Q': 2}
for dataset in combine:
    dataset['Embarked'] = dataset['Embarked'].map(port_mapping).astype(int)

df_train.head()
```

**Quick completing and converting a numeric feature**

We can now complete the Fare feature for the single missing value in the test dataset using the median of this feature. We do this in a single line of code.

Note that we are not creating an intermediate new feature or doing any further analysis for correlation to guess the missing feature as we are replacing only a single value. The completion goal achieves the desired requirement for the model algorithm to operate on non-null values.

We may also want to round off the fare to two decimals as it represents currency.

```
df_test['Fare'].fillna(df_test['Fare'].dropna().median(), inplace=True)
df_test.head()
```

We can now create FareBand.

```
df_train['FareBand'] = pd.qcut(df_train['Fare'], 4)
df_train[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
```

Convert the Fare feature to ordinal values based on the FareBand.

```
for dataset in combine:
    dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)

df_train = df_train.drop(['FareBand'], axis=1)
combine = [df_train, df_test]
df_train.head(10)
```

And the test dataset.

```
df_test.head(10)
```

## 5.
Model, predict and solve

Now we are ready to train a model and predict the required solution. There are 60+ predictive modelling algorithms to choose from. We must understand the type of problem and solution requirement to narrow down to a select few models which we can evaluate. Our problem is a classification and regression problem. We want to identify the relationship between the output (Survived or not) and the other variables or features (Gender, Age, Port...). We are also performing a category of machine learning which is called supervised learning as we are training our model with a given dataset. With these two criteria - Supervised Learning plus Classification and Regression, we can narrow down our choice of models to a few. These include:

* Logistic Regression
* KNN or k-Nearest Neighbors
* Support Vector Machines
* Naive Bayes classifier
* Decision Tree
* Random Forest
* Perceptron
* Artificial Neural Network
* RVM or Relevance Vector Machine

```
# Split the engineered training data into features and target, and keep
# the test features (minus the id column) for prediction.
X_train = df_train.drop("Survived", axis=1)
Y_train = df_train["Survived"]
X_test = df_test.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
X_test.head(1)
```

**Logistic Regression**

Logistic Regression is a useful model to run early in the workflow. Logistic regression measures the relationship between the categorical dependent variable (feature) and one or more independent variables (features) by estimating probabilities using a logistic function, which is the cumulative logistic distribution. Reference [Wikipedia](https://en.wikipedia.org/wiki/Logistic_regression).

Note the confidence score generated by the model based on our training dataset.

```
logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
acc_log = round(logreg.score(X_train, Y_train) * 100, 2)
acc_log
```

We can use Logistic Regression to validate our assumptions and decisions for feature creating and completing goals.
This can be done by calculating the coefficient of the features in the decision function. Positive coefficients increase the log-odds of the response (and thus increase the probability), and negative coefficients decrease the log-odds of the response (and thus decrease the probability).

* Sex is the highest positive coefficient, implying as the Sex value increases (male: 0 to female: 1), the probability of Survived increases the most
* Inversely as Pclass increases, probability of Survived=1 decreases the most
* This way Age*Class is a good artificial feature to model as it has second highest negative correlation with Survived
* So is the Title as second highest correlation

```
# Pair each feature name with its logistic-regression coefficient and
# rank the features by coefficient value.
coeff_df = pd.DataFrame(df_train.columns.delete(0))
coeff_df.columns = ['Feature']
coeff_df["Correlation"] = pd.Series(logreg.coef_[0])

coeff_df.sort_values(by='Correlation', ascending=False)
```

**Support Vector Machines**

Next we model using Support Vector Machines which are supervised learning models with associated learning algorithms that analyze data used for classification and regression analysis. Given a set of training samples, each marked as belonging to one or the other of **two categories**, an SVM training algorithm builds a model that assigns new test samples to one category or the other, making it a non-probabilistic binary linear classifier. Reference [Wikipedia](https://en.wikipedia.org/wiki/Support-vector_machine).

Note that the model generates a confidence score which is higher than the Logistic Regression model.

```
svc = SVC()
svc.fit(X_train, Y_train)
Y_pred = svc.predict(X_test)
acc_svc = round(svc.score(X_train, Y_train) * 100, 2)
acc_svc
```

**k-Nearest Neighbours**

In pattern recognition, the k-Nearest Neighbors algorithm (or k-NN for short) is a non-parametric method used for classification and regression.
A sample is classified by a majority vote of its neighbors, with the sample being assigned to the class most common among its k nearest neighbors (k is a positive integer, typically small). If k = 1, then the object is simply assigned to the class of that single nearest neighbor. Reference [Wikipedia](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm).

KNN confidence score is better than both Logistic Regression and SVM.

```
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_test)
acc_knn = round(knn.score(X_train, Y_train) * 100, 2)
acc_knn
```

**Naive Bayes**

In machine learning, naive Bayes classifiers are a family of simple probabilistic classifiers based on applying Bayes' theorem with strong (naive) independence assumptions between the features. Naive Bayes classifiers are highly scalable, requiring a number of parameters linear in the number of variables (features) in a learning problem. Reference [Wikipedia](https://en.wikipedia.org/wiki/Naive_Bayes_classifier).

The model generated confidence score is the lowest among the models evaluated so far.

```
# Gaussian Naive Bayes
gaussian = GaussianNB()
gaussian.fit(X_train, Y_train)
Y_pred = gaussian.predict(X_test)
acc_gaussian = round(gaussian.score(X_train, Y_train) * 100, 2)
acc_gaussian
```

**Perceptron**

The perceptron is an algorithm for supervised learning of binary classifiers (functions that can decide whether an input, represented by a vector of numbers, belongs to some specific class or not). It is a type of linear classifier, i.e. a classification algorithm that makes its predictions based on a linear predictor function combining a set of weights with the feature vector. The algorithm allows for online learning, in that it processes elements in the training set one at a time. Reference [Wikipedia](https://en.wikipedia.org/wiki/Perceptron).
```
# Perceptron
perceptron = Perceptron()
perceptron.fit(X_train, Y_train)
Y_pred = perceptron.predict(X_test)
acc_perceptron = round(perceptron.score(X_train, Y_train) * 100, 2)
acc_perceptron
```

**Linear SVC**

```
linear_svc = LinearSVC()
linear_svc.fit(X_train, Y_train)
Y_pred = linear_svc.predict(X_test)
acc_linear_svc = round(linear_svc.score(X_train, Y_train) * 100, 2)
acc_linear_svc
```

**Stochastic Gradient Descent**

```
sgd = SGDClassifier()
sgd.fit(X_train, Y_train)
Y_pred = sgd.predict(X_test)
acc_sgd = round(sgd.score(X_train, Y_train) * 100, 2)
acc_sgd
```

**Decision Tree**

This model uses a decision tree as a predictive model which maps features (tree branches) to conclusions about the target value (tree leaves). Tree models where the target variable can take a finite set of values are called classification trees; in these tree structures, leaves represent class labels and branches represent conjunctions of features that lead to those class labels. Decision trees where the target variable can take continuous values (typically real numbers) are called regression trees. Reference [Wikipedia](https://en.wikipedia.org/wiki/Decision_tree_learning).

The model confidence score is the highest among models evaluated so far.

```
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, Y_train)
Y_pred = decision_tree.predict(X_test)
acc_decision_tree = round(decision_tree.score(X_train, Y_train) * 100, 2)
acc_decision_tree
```

**Random Forests**

The next model Random Forests is one of the most popular. Random forests or random decision forests are an ensemble learning method for classification, regression and other tasks, that operate by constructing a multitude of decision trees (n_estimators=100) at training time and outputting the class that is the mode of the classes (classification) or mean prediction (regression) of the individual trees. Reference [Wikipedia](https://en.wikipedia.org/wiki/Random_forest).
The model confidence score is the highest among models evaluated so far. We decide to use this model's output (Y_pred) for creating our competition submission of results.

```
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
random_forest.score(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
acc_random_forest
```

## 6. Visualize, report, and present the problem solving steps and final solution.

```
# Rank all evaluated models by their training-set score.
models = pd.DataFrame({
    'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression',
              'Random Forest', 'Naive Bayes', 'Perceptron',
              'Stochastic Gradient Decent', 'Linear SVC',
              'Decision Tree'],
    'Score': [acc_svc, acc_knn, acc_log,
              acc_random_forest, acc_gaussian, acc_perceptron,
              acc_sgd, acc_linear_svc, acc_decision_tree]})
models.sort_values(by='Score', ascending=False)
```

## 7. Supply or submit the results.

```
submission = pd.DataFrame({
    "PassengerId": df_test["PassengerId"],
    "Survived": Y_pred
})
submission.to_csv('submission.csv', index=False)
```
github_jupyter
# CNN for Classification

---

In this notebook, we define **and train** a CNN to classify images from the [Fashion-MNIST database](https://github.com/zalandoresearch/fashion-mnist).

### Load the [data](http://pytorch.org/docs/master/torchvision/datasets.html)

In this cell, we load in both **training and test** datasets from the FashionMNIST class.

```
# our basic libraries
import torch
import torchvision

# data loading and transforming
from torchvision.datasets import FashionMNIST
from torch.utils.data import DataLoader
from torchvision import transforms

# The output of torchvision datasets are PILImage images of range [0, 1].
# We transform them to Tensors for input into a CNN

## Define a transform to read the data in as a tensor
data_transform = transforms.ToTensor()

# choose the training and test datasets
train_data = FashionMNIST(root='./data', train=True,
                          download=True, transform=data_transform)
test_data = FashionMNIST(root='./data', train=False,
                         download=True, transform=data_transform)

# Print out some stats about the training and test data
print('Train data, number of images: ', len(train_data))
print('Test data, number of images: ', len(test_data))

# prepare data loaders, set the batch_size
## TODO: you can try changing the batch_size to be larger or smaller
## when you get to training your network, see how batch_size affects the loss
batch_size = 20

train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)

# specify the image classes
classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
           'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
```

### Visualize some training data

This cell iterates over the training dataset, loading a random batch of image/label data, using `next(dataiter)`. It then plots the batch of images and labels in a `2 x batch_size/2` grid.
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

# obtain one batch of training images
dataiter = iter(train_loader)
# FIX: the `.next()` method on DataLoader iterators was removed in modern
# PyTorch; use the builtin next().
images, labels = next(dataiter)
images = images.numpy()

# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(batch_size):
    # FIX: subplot grid counts must be integers; batch_size/2 is a float
    # in Python 3 and is rejected by current matplotlib.
    ax = fig.add_subplot(2, batch_size // 2, idx + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(images[idx]), cmap='gray')
    ax.set_title(classes[labels[idx]])
```

### Define the network architecture

The various layers that make up any neural network are documented, [here](http://pytorch.org/docs/master/nn.html). For a convolutional neural network, we'll use a simple series of layers:
* Convolutional layers
* Maxpooling layers
* Fully-connected (linear) layers

You are also encouraged to look at adding [dropout layers](http://pytorch.org/docs/stable/nn.html#dropout) to avoid overfitting this data.

---

### TODO: Define the Net

Define the layers of your **best, saved model from the classification exercise** in the function `__init__` and define the feedforward behavior of that Net in the function `forward`.

Defining the architecture here, will allow you to instantiate and load your best Net.

```
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    """Starter CNN: a single conv layer; extend per the TODOs below."""

    def __init__(self):
        super(Net, self).__init__()

        # 1 input image channel (grayscale), 10 output channels/feature maps
        # 3x3 square convolution kernel
        self.conv1 = nn.Conv2d(1, 10, 3)

        ## TODO: Define the rest of the layers:
        # include another conv layer, maxpooling layers, and linear layers
        # also consider adding a dropout layer to avoid overfitting

    ## TODO: define the feedforward behavior
    def forward(self, x):
        # one activated conv layer
        x = F.relu(self.conv1(x))

        # final output
        return x
```

### Load a Trained, Saved Model

To instantiate a trained model, you'll first instantiate a new `Net()` and then initialize it with a saved dictionary of parameters.
This notebook needs to know the network architecture, as defined above, and once it knows what the "Net" class looks like, we can instantiate a model and load in an already trained network.

You should have a trained net in `saved_models/`.

```
# instantiate your Net
net = Net()

# load the net parameters by name, uncomment the line below to load your model
# net.load_state_dict(torch.load('saved_models/model_1.pt'))

print(net)
```

## Feature Visualization

To see what your network has learned, make a plot of the learned image filter weights and the activation maps (for a given image) at each convolutional layer.

### TODO: Visualize the learned filter weights and activation maps of the convolutional layers in your trained Net

Choose a sample input image and apply the filters in every convolutional layer to that image to see the activation map.

```
# As a reminder, here is how we got the weights in the first conv layer (conv1), before
weights = net.conv1.weight.data
w = weights.numpy()
```

### Question: Choose a filter from one of your trained convolutional layers; looking at these activations, what purpose do you think it plays? What kind of feature do you think it detects?
github_jupyter
```
import os
os.environ['KERAS_BACKEND'] = 'theano'
from keras.models import Sequential
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D, Dropout, Flatten
from keras import optimizers
from keras.models import Model
from keras import backend as K
import numpy


# train
def classifier(n):
    """Train and return a binary CNN classifier for one-hot label column `n`.

    Loads the full training arrays from disk, reshapes the flat feature rows
    into 64x64x8 images, and fits a small conv + dense network with class
    weighting to compensate for label imbalance.
    """
    # load pima indians dataset
    dataset = numpy.load("train.npy")         # 18643 * 32768
    label = numpy.load("train_one_hot.npy")   # 18643 * 28

    # split into input (X) and output (Y) variables
    X = numpy.reshape(dataset, (dataset.shape[0], 64, 64, 8))
    Y = label[:, n]

    # create model
    input_img = Input(shape=(64, 64, 8))
    # NOTE(review): every conv layer below reads from `input_img` instead of
    # the previous `x`, so only the LAST conv/pool pair actually feeds the
    # classifier and the first three pairs are dead code. Preserved as-is to
    # keep the trained model unchanged — confirm whether chaining through `x`
    # was intended.
    x = Convolution2D(4, (1, 1), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Convolution2D(4, (1, 1), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Convolution2D(2, (1, 1), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Convolution2D(2, (1, 1), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)

    # Dense head: shrinking fully-connected stack with dropout after each layer.
    fc = Flatten()(x)
    for units in (64, 32, 16, 8, 4):
        fc = Dense(units, activation='relu')(fc)
        fc = Dropout(0.15)(fc)
    fc = Dense(2, activation='relu')(fc)
    output = Dense(1, activation='sigmoid')(fc)
    model = Model(input_img, output)

    # Compile model
    sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
    # Up-weight the positive class proportionally to the label counts.
    class_weight = {0: 0.4 * float((Y == 0).sum()), 1: float((Y == 1).sum())}
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])

    # Fit the model
    model.fit(X, Y, epochs=5, batch_size=128, class_weight=class_weight)
    return model


# Train and persist one binary classifier per label column.
for n in range(28):
    classifier(n).save('model_' + str(n) + '.h5')


import torch
import pandas as pd
import numpy as np
import os
import torch.autograd as ag
import torch.nn as nn
import torch.nn.functional as F
import prepare
import ResNet18

path = os.getcwd()  # we can change the path to fit our need
data_path = os.path.join(path, 'human-protein/train/')  # this path is temporary

# order of training set
df = pd.read_csv(path + '/data/train_idx.txt', sep='\t', header=None)
df.columns = ['order']
order = list(df['order'])

# labels of one hot
label = np.load(path + '/data/train_one_hot.npy')

# id of pictures
trainset = pd.read_csv(path + '/data/train.csv')
ls = trainset['Id']
num = trainset['Target']

# hyper-parameters
N = label.shape[0]       # Training set size
B = 28                   # Minibatch size
NB = int(N / B) - 1      # Number of minibatches
T = 2                    # Number of epochs
NUM_CLASSES = 28         # One output head (and one loss term) per class
criterion = nn.CrossEntropyLoss()

# training preparation — this script requires a CUDA device; net/ltrain and
# the optimizer that reads net.parameters() are only defined when one exists.
if torch.cuda.is_available():
    net = ResNet18.ResNet().cuda()
    ltrain = ag.Variable(torch.from_numpy(label).cuda(), requires_grad=False)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# start training resnet
for epoch in range(T):
    running_loss = 0.0
    for k in range(NB):
        idxsmp = k * B  # indices of samples for k-th minibatch
        xt = prepare.load_batch(ls, order, data_path, B, k)
        xt = prepare.normalize(xt)
        xtrain = np.moveaxis(xt, [1, 2], [2, 3])
        inputs = ag.Variable(torch.from_numpy(xtrain).cuda(), requires_grad=True)

        # Initialize the gradients to zero
        optimizer.zero_grad()

        # Forward propagation: the net returns one output tensor per class.
        outputs = net(inputs)

        # Error evaluation: sum the per-class losses. This replaces the
        # original 28 copy-pasted labelsN/lossN blocks with an equivalent loop.
        loss = sum(criterion(out, ltrain[idxsmp:idxsmp + B, c])
                   for c, out in enumerate(outputs))

        # Back propagation
        loss.backward()

        # Parameter update
        optimizer.step()

        # Print averaged loss per minibatch every 100 mini-batches.
        # FIX: `loss.cpu().data[0]` was removed in modern PyTorch; use .item().
        running_loss += loss.item()
        if k % 100 == 99:
            print('[%d, %5d] loss : %.3f' % (epoch + 1, k + 1, running_loss / 100))
            running_loss = 0.0

    torch.save(net, 'ResNet18_28outputs_epoch%d.pkl' % (epoch + 1))

print('Finished Training ')
```
github_jupyter
# Titanic: Machine Learning from Disaster

---

```
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.model_selection import train_test_split, learning_curve, GridSearchCV, cross_validate
from sklearn.metrics import f1_score, accuracy_score, make_scorer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import time
%matplotlib inline

# Load the dataset
data = pd.read_csv("./data/train.csv")
```

## Exploring the data

```
display(data.head())

# Class balance of the Survived target.
train_size = data.shape[0]
percent_positive = data[data['Survived'] == 1].shape[0] / train_size
percent_negative = data[data['Survived'] == 0].shape[0] / train_size

print("Number of training examples: {}".format(train_size))
print("Positive examples: {:2.0f}%".format(percent_positive * 100))
print("Negative examples: {:2.0f}%".format(percent_negative * 100))
```

**Note**: The data is skewed towards negative samples, so accuracy might be misleading as a metric. We'll use the F1 score instead.

---

## Dealing with NaNs

```
display(data.isnull().sum())
```

* `Age`: it makes sense to use the mean as a replacement for missing values, to represent the "expected" age of the passengers.
* `Cabin`: in this dataset, missing cabin values indicate that a passenger was not in a cabin. This could be useful information, so we'll turn missing values into their own feature called "U" for "Unknown".
* `Embarked`: similar to cabin numbers, we're simply going to assume that the port passengers were picked up from was random. This isn't necessarily true, since richer passengers (for example) might have been picked up more frequently from one port rather than another.
```
# Replace all Age NaNs with the mean age
data["Age"].fillna(np.around(data["Age"].mean(), decimals=1), inplace=True)

# Replace missing values with "Unknown"
data["Cabin"].fillna("Unknown", inplace=True)

# Forward/back fill Embarked NaNs.
# FIX: `fillna(method=...)` is deprecated and removed in pandas 3.0;
# use the dedicated ffill()/bfill() methods instead.
data["Embarked"] = data["Embarked"].ffill().bfill()

# Confirm NaNs are gone
print("Age NaN count: {}".format(data["Age"].isnull().sum()))
print("Cabin NaN count: {}".format(data["Cabin"].isnull().sum()))
print("Embarked NaN count: {}".format(data["Embarked"].isnull().sum()))
```

---

## One-hot encoding

The categorical features `Pclass`, `Sex`, `Cabin`, and `Embarked` need one-hot encoding to be useful.

If we one-hot encoded `Cabin` as-is, we would have too many features as a result, since we would be including every single cabin passengers were in. Instead, we can discard the cabin number and only focus on the deck they were on, denoted by the letter. E.g. cabin C123 is cabin 123 on deck C. This should give us the relevant information about the passengers' location on the ship, without filling the dataset with a ton of unimportant features.

```
# Prepare Cabin for one-hot encoding: keep only the deck letter.
data["Cabin"] = data["Cabin"].apply(lambda s: s[0])

# Check cabin column
display(data["Cabin"].head(10))

# Perform one-hot encoding
data = pd.get_dummies(data, columns=["Pclass", "Sex", "Cabin", "Embarked"])

# Check one-hot encoded features
print(data.columns)
```

Now we can compute the correlation of the cabins labeled "Unknown" with the target to see if it's indeed a useful feature.

```
print("Correlation between 'Cabin_U' and 'Survived': {}".format(data.corr()["Survived"]["Cabin_U"]))
```

Indeed, the correlation is high enough for `Cabin_U` to be considered a useful feature. This justifies our earlier decision to keep those missing values as part of the dataset.
---

## Removing unimportant features

We are going to assume that the names of the passengers, as well as their ID and ticket number, are irrelevant to their chance of survival (i.e. they're random) and we'll remove them from the dataset. Additionally, we can see below that `Embarked_Q` has a very low correlation with the target (less than 0.01). So, we're going to remove that feature as well.

```
display(data.corr()["Survived"])

# Drop uninteresting features
data = data.drop(columns=["Name", "Ticket", "PassengerId", "Embarked_Q"])
display(data.head())
```

## Removing highly-correlated features

From the heatmap below, we can spot which features are highly correlated with each other. In this case, we notice that the 3 `Pclass` features (particularly classes 1 and 3) are highly correlated with the fare. This makes sense, since `Pclass` represents the ticket class the passengers bought, which gets more expensive the higher it is. Since `Fare` can be predicted using `Pclass`, we're going to remove `Fare` from the dataset, to eliminate duplicate information.

```
# Draw heatmap from the correlation table
sns.heatmap(data.corr())
plt.show()

# Display a table with the correlations between the 3 classes and the fare
corr = data.corr()
df = pd.DataFrame(data=[[corr["Pclass_1"]["Fare"],
                         corr["Pclass_2"]["Fare"],
                         corr["Pclass_3"]["Fare"]]],
                  columns=["Pclass_1", "Pclass_2", "Pclass_3"],
                  index=["Fare"])
display(df)

# Drop the Fare from the dataset
data = data.drop(columns=["Fare"])
```

---

## Feature Scaling

The only continuous feature left in the dataset is `Age`. We are going to normalise it so that its values are between 0 and 1.

```
# Normalise the age feature
data["Age"] = (data["Age"] - data["Age"].mean()) / (data["Age"].max() - data["Age"].min())
```

---

## Model selection

Normally, we would do a train/test split and leave the test set aside for the final model evaluation. However, we can use Kaggle's test set for that, so we don't have to sacrifice any training data.
Now, let's split the data into features and labels.

```
# Split data into X_train and y_train
X_train = data.iloc[:, 1:]
y_train = data["Survived"]
```

### Train-Predict Pipeline

The following function will serve as a train-predict pipeline.

```
def train_predict(model, score="f1"):
    """
    Computes the cross-validation score of a given model using a given metric.

    :model: model to evaluate
    :score: scoring metric to use for cross-validation ("f1" or "accuracy")
    :return: tuple with training and validation scores
    :raises ValueError: if `score` is not a supported metric name
    """
    # Create scorer object
    if score == "accuracy":
        scorer = make_scorer(accuracy_score)
    elif score == "f1":
        scorer = make_scorer(f1_score)
    else:
        # FIX: the original silently fell through for unknown metrics and
        # crashed later with a NameError on `scorer`; fail fast instead.
        raise ValueError("Unsupported score: {}".format(score))

    # 5-fold cross-validation
    cv_results = cross_validate(model, X_train, y_train, scoring=scorer,
                                cv=5, return_train_score=True)

    # Get the average scores over all validation cuts
    train_score = np.mean(cv_results["train_score"])
    val_score = np.mean(cv_results["test_score"])

    return train_score, val_score
```

### Model Evaluation

Now let's use the train-predict pipeline to get the CV scores for each of our models.

```
# Compute the score of every model
models = [LogisticRegression(random_state=42),
          SVC(random_state=42),
          AdaBoostClassifier(random_state=42)]
scores = [train_predict(model) for model in models]

# Display a table with the scores
df_scores = pd.DataFrame(data=[[score[0] for score in scores],
                               [score[1] for score in scores]],
                         index=["Train", "Validation"],
                         columns=[type(model).__name__ for model in models])
display(df_scores)
```

From the table above we can see that AdaBoost has the highest cross-validation scores, so we'll choose that as our model.

```
chosen_model = AdaBoostClassifier(random_state=42)
```

Let's plot a learning curve for our model to assess whether it's overfitting or underfitting.
```
def plot_learning_curve(model):
    """Plot an F1-score learning curve for `model` on the training data."""
    start = time.time()
    train_sizes, train_scores, test_scores = learning_curve(
        model, X_train, y_train, scoring=make_scorer(f1_score), random_state=42)
    end = time.time()
    print("Learning curve time: {:0.2f}s".format(end - start))

    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    # Plot learning curve
    plt.title("Learning Curve")
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    plt.grid()
    # BUG FIX: fill_between takes (x, y1, y2); the original passed the raw
    # std array as y2 and mean+std as the `where` mask, so the shaded bands
    # were wrong. Shade mean +/- one standard deviation instead.
    plt.fill_between(train_sizes,
                     train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std,
                     alpha=0.1, color="r")
    plt.fill_between(train_sizes,
                     test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std,
                     alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    plt.show()

plot_learning_curve(chosen_model)
```

From the learning curve we can see that the training and validation scores converge just below 0.8. This indicates the default model is underfitting the data a bit. Let's perform hyperparameter tuning to see if we can improve the performance.

---

## Hyperparameter Tuning

Since we know the model is underfitting, we'll use high numbers of estimators and a small learning rate as our parameters for tuning.
``` parameters = {"n_estimators": [500, 1000, 2000], "learning_rate": [0.01, 0.05, 0.1, 0.5]} grid_search = GridSearchCV(chosen_model, param_grid=parameters, scoring=make_scorer(f1_score)) run = True if run: start = time.time() grid_fit = grid_search.fit(X_train, y_train) end = time.time() print("Grid search time: {:2.2f}s".format(end-start)) # Print the best model best_clf = grid_fit.best_estimator_ print(best_clf) # Save tuned model to file from joblib import dump, load dump(best_clf, "./tuned_model.joblib") ``` --- ## Final Model Evaluation Let's plot the learning curve and measure the cross-validation score again, to see whether our model improved. ``` # Learning curve plot_learning_curve(best_clf) f1_train, f1_val = train_predict(best_clf) print("F1 score (train): {:0.2f}".format(f1_train)) print("F1 score (validation): {:0.2f}".format(f1_test)) ``` The learning curve looks the same, but we can see that both the training and validation f1 scores have increased a little bit. --- ## Final Training and Submission ``` # Load the test set test = pd.read_csv("./data/test.csv") # Remove missing values test["Age"].fillna(np.around(test["Age"].mean(), decimals=1), inplace=True) test["Cabin"].fillna("Unknown", inplace=True) test["Embarked"].fillna(method="ffill", inplace=True) test["Embarked"].fillna(method="backfill", inplace=True) # One-hot encoding test["Cabin"] = test["Cabin"].apply(lambda s: s[0]) test = pd.get_dummies(test, columns=["Pclass", "Sex", "Cabin", "Embarked"]) # Drop unimportant features test = test.drop(columns=["Name", "Ticket", "PassengerId", "Embarked_Q"]) test = test.drop(columns=["Fare"]) # Feature scaling test["Age"] = (test["Age"] - test["Age"].mean()) / (test["Age"].max() - test["Age"].min()) display(test.head()) # Add missing column to test set test["Cabin_T"] = np.zeros(test.shape[0]) # Display test to confirm display(test.head()) # Train the tuned model best_clf = best_clf.fit(X_train, y_train) # Predict test labels test_labels = 
pd.Series(best_clf.predict(test)) # Load passenger IDs (to be used as labels in the submission) test_orig = pd.read_csv("./data/test.csv") ids = test_orig["PassengerId"] # Create DataFrame for submission test_submission = pd.DataFrame(data={"PassengerId": ids, "Survived": test_labels}) display(test_submission) # Produce submission file test_submission.to_csv("./data/submission.csv", index=False) ```
github_jupyter
# Widget Events In this lecture we will discuss widget events, such as button clicks! ## Special events The `Button` is not used to represent a data type. Instead the button widget is used to handle mouse clicks. The `on_click` method of the `Button` can be used to register a function to be called when the button is clicked. The docstring of the `on_click` can be seen below. ``` import ipywidgets as widgets print(widgets.Button.on_click.__doc__) ``` ### Example #1 - on_click Since button clicks are stateless, they are transmitted from the front-end to the back-end using custom messages. By using the `on_click` method, a button that prints a message when it has been clicked is shown below. ``` from IPython.display import display button = widgets.Button(description="Click Me!") display(button) def on_button_clicked(b): print("Button clicked.") button.on_click(on_button_clicked) ``` ### Example #2 - on_submit The `Text` widget also has a special `on_submit` event. The `on_submit` event fires when the user hits <kbd>enter</kbd>. ``` text = widgets.Text() display(text) def handle_submit(sender): print(text.value) text.on_submit(handle_submit) ``` ## Traitlet events Widget properties are IPython traitlets and traitlets are eventful. To handle changes, the `observe` method of the widget can be used to register a callback. The docstring for `observe` can be seen below. ``` print(widgets.Widget.observe.__doc__) ``` ### Signatures Mentioned in the docstring, the callback registered must have the signature `handler(change)` where `change` is a dictionary holding the information about the change. Using this method, an example of how to output an `IntSlider`’s value as it is changed can be seen below. ``` int_range = widgets.IntSlider() display(int_range) def on_value_change(change): print(change['new']) int_range.observe(on_value_change, names='value') ``` # Linking Widgets Often, you may want to simply link widget attributes together. 
Synchronization of attributes can be done in a simpler way than by using bare traitlets events. ## Linking traitlets attributes in the kernel¶ The first method is to use the `link` and `dlink` functions from the `traitlets` module. This only works if we are interacting with a live kernel. ``` import traitlets # Create Caption caption = widgets.Label(value = 'The values of slider1 and slider2 are synchronized') # Create IntSliders slider1 = widgets.IntSlider(description='Slider 1') slider2 = widgets.IntSlider(description='Slider 2') # Use trailets to link l = traitlets.link((slider1, 'value'), (slider2, 'value')) # Display! display(caption, slider1, slider2) # Create Caption caption = widgets.Label(value='Changes in source values are reflected in target1') # Create Sliders source = widgets.IntSlider(description='Source') target1 = widgets.IntSlider(description='Target 1') # Use dlink dl = traitlets.dlink((source, 'value'), (target1, 'value')) display(caption, source, target1) ``` Function `traitlets.link` and `traitlets.dlink` return a `Link` or `DLink` object. The link can be broken by calling the `unlink` method. ``` # May get an error depending on order of cells being run! l.unlink() dl.unlink() ``` ### Registering callbacks to trait changes in the kernel Since attributes of widgets on the Python side are traitlets, you can register handlers to the change events whenever the model gets updates from the front-end. The handler passed to observe will be called with one change argument. The change object holds at least a `type` key and a `name` key, corresponding respectively to the type of notification and the name of the attribute that triggered the notification. Other keys may be passed depending on the value of `type`. 
In the case where type is `change`, we also have the following keys: * `owner` : the HasTraits instance * `old` : the old value of the modified trait attribute * `new` : the new value of the modified trait attribute * `name` : the name of the modified trait attribute. ``` caption = widgets.Label(value='The values of range1 and range2 are synchronized') slider = widgets.IntSlider(min=-5, max=5, value=1, description='Slider') def handle_slider_change(change): caption.value = 'The slider value is ' + ( 'negative' if change.new < 0 else 'nonnegative' ) slider.observe(handle_slider_change, names='value') display(caption, slider) ``` ## Linking widgets attributes from the client side When synchronizing traitlets attributes, you may experience a lag because of the latency due to the roundtrip to the server side. You can also directly link widget attributes in the browser using the link widgets, in either a unidirectional or a bidirectional fashion. Javascript links persist when embedding widgets in html web pages without a kernel. ``` # NO LAG VERSION caption = widgets.Label(value = 'The values of range1 and range2 are synchronized') range1 = widgets.IntSlider(description='Range 1') range2 = widgets.IntSlider(description='Range 2') l = widgets.jslink((range1, 'value'), (range2, 'value')) display(caption, range1, range2) # NO LAG VERSION caption = widgets.Label(value = 'Changes in source_range values are reflected in target_range') source_range = widgets.IntSlider(description='Source range') target_range = widgets.IntSlider(description='Target range') dl = widgets.jsdlink((source_range, 'value'), (target_range, 'value')) display(caption, source_range, target_range) ``` Function `widgets.jslink` returns a `Link` widget. The link can be broken by calling the `unlink` method. ``` l.unlink() dl.unlink() ``` ### The difference between linking in the kernel and linking in the client Linking in the kernel means linking via python. 
If two sliders are linked in the kernel, when one slider is changed the browser sends a message to the kernel (python in this case) updating the changed slider, the link widget in the kernel then propagates the change to the other slider object in the kernel, and then the other slider’s kernel object sends a message to the browser to update the other slider’s views in the browser. If the kernel is not running (as in a static web page), then the controls will not be linked. Linking using jslink (i.e., on the browser side) means constructing the link in Javascript. When one slider is changed, Javascript running in the browser changes the value of the other slider in the browser, without needing to communicate with the kernel at all. If the sliders are attached to kernel objects, each slider will update their kernel-side objects independently. To see the difference between the two, go to the [ipywidgets documentation](http://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Events.html) and try out the sliders near the bottom. The ones linked in the kernel with `link` and `dlink` are no longer linked, but the ones linked in the browser with `jslink` and `jsdlink` are still linked. ## Continuous updates Some widgets offer a choice with their `continuous_update` attribute between continually updating values or only updating values when a user submits the value (for example, by pressing Enter or navigating away from the control). In the next example, we see the “Delayed” controls only transmit their value after the user finishes dragging the slider or submitting the textbox. The “Continuous” controls continually transmit their values as they are changed. Try typing a two-digit number into each of the text boxes, or dragging each of the sliders, to see the difference. 
``` import traitlets a = widgets.IntSlider(description="Delayed", continuous_update=False) b = widgets.IntText(description="Delayed", continuous_update=False) c = widgets.IntSlider(description="Continuous", continuous_update=True) d = widgets.IntText(description="Continuous", continuous_update=True) traitlets.link((a, 'value'), (b, 'value')) traitlets.link((a, 'value'), (c, 'value')) traitlets.link((a, 'value'), (d, 'value')) widgets.VBox([a,b,c,d]) ``` Sliders, `Text`, and `Textarea` controls default to `continuous_update=True`. `IntText` and other text boxes for entering integer or float numbers default to `continuous_update=False` (since often you’ll want to type an entire number before submitting the value by pressing enter or navigating out of the box). # Conclusion You should now feel comfortable linking Widget events!
github_jupyter
# Broadcast Variables We already saw so called *broadcast joins* which is a specific implementation of a join suitable for small lookup tables. The term *broadcast* is also used in a different context in Spark, there are also *broadcast variables*. ### Origin of Broadcast Variables Broadcast variables were introduced fairly early with Spark and were mainly targeted at the RDD API. Nonetheless they still have their place with the high level DataFrames API in conjunction with user defined functions (UDFs). ### Weather Example As usual, we'll use the weather data example. This time we'll manually implement a join using a UDF (actually this would be again a manual broadcast join). # 1 Load Data First we load the weather data, which consists of the measurement data and some station metadata. ``` storageLocation = "s3://dimajix-training/data/weather" ``` ## 1.1 Load Measurements Measurements are stored in multiple directories (one per year). But we will limit ourselves to a single year in the analysis to improve readability of execution plans. ``` from pyspark.sql.functions import * from functools import reduce # Read in all years, store them in an Python array raw_weather_per_year = [spark.read.text(storageLocation + "/" + str(i)).withColumn("year", lit(i)) for i in range(2003,2015)] # Union all years together raw_weather = reduce(lambda l,r: l.union(r), raw_weather_per_year) ``` Use a single year to keep execution plans small ``` raw_weather = spark.read.text(storageLocation + "/2003").withColumn("year", lit(2003)) ``` ### Extract Measurements Measurements were stored in a proprietary text based format, with some values at fixed positions. We need to extract these values with a simple SELECT statement. 
``` weather = raw_weather.select( col("year"), substring(col("value"),5,6).alias("usaf"), substring(col("value"),11,5).alias("wban"), substring(col("value"),16,8).alias("date"), substring(col("value"),24,4).alias("time"), substring(col("value"),42,5).alias("report_type"), substring(col("value"),61,3).alias("wind_direction"), substring(col("value"),64,1).alias("wind_direction_qual"), substring(col("value"),65,1).alias("wind_observation"), (substring(col("value"),66,4).cast("float") / lit(10.0)).alias("wind_speed"), substring(col("value"),70,1).alias("wind_speed_qual"), (substring(col("value"),88,5).cast("float") / lit(10.0)).alias("air_temperature"), substring(col("value"),93,1).alias("air_temperature_qual") ) ``` ## 1.2 Load Station Metadata We also need to load the weather station meta data containing information about the geo location, country etc of individual weather stations. ``` stations = spark.read \ .option("header", True) \ .csv(storageLocation + "/isd-history") ``` ### Convert Station Metadata We convert the stations DataFrame to a normal Python map, since we want to discuss broadcast variables. This means that the variable `py_stations` contains a normal Python object which only lives on the driver. It has no connection to Spark any more. The resulting map converts a given station id (usaf and wban) to a country. ``` py_stations = stations.select(concat(stations["usaf"], stations["wban"]).alias("key"), stations["ctry"]).collect() py_stations = {key:value for (key,value) in py_stations} # Inspect result list(py_stations.items())[0:10] ``` # 2 Using Broadcast Variables In the following section, we want to use a Spark broadcast variable inside a UDF. Technically this is not required, as Spark also has other mechanisms of distributing data, so we'll start with a simple implementation *without* using a broadcast variable. 
## 2.1 Create a UDF For the initial implementation, we create a simple Python UDF which looks up the country for a given station id, which consists of the usaf and wban code. This way we will replace the `JOIN` of our original solution with a UDF implemented in Python. ``` def lookup_country(usaf, wban): return py_stations.get(usaf + wban) # Test lookup with an existing station print(lookup_country("007026", "99999")) # Test lookup with a non-existing station (better should not throw an exception) print(lookup_country("123", "456")) ``` ## 2.2 Not using a broadcast variable Now that we have a simple Python function providing the required functionality, we convert it to a PySpark UDF using a Python decorator. ``` @udf('string') def lookup_country(usaf, wban): return py_stations.get(usaf + wban) ``` ### Replace JOIN by UDF Now we can perform the lookup by using the UDF instead of the original `JOIN`. ``` result = weather.withColumn('country', lookup_country(weather["usaf"], weather["wban"])) result.limit(10).toPandas() ``` ### Remarks Since the code is actually executed not on the driver, but distributed on the executors, the executors also require access to the Python map. PySpark automatically serializes the map and sends it to the executors on the fly. ### Inspect Plan We can also inspect the execution plan, which is different from the original implementation. Instead of the broadcast join, it now contains a `BatchEvalPython` step which looks up the stations country from the station id. ``` result.explain() ``` ## 2.2 Using a Broadcast Variable Now let us change the implementation to use a so called *broadcast variable*. While the original implementation implicitly sent the Python map to all executors, a broadcast variable makes the process of sending (*broadcasting*) a Python variable to all executors more explicit. 
A Python variable can be broadcast using the `broadcast` method of the underlying Spark context (the Spark session does not export this functionality). Once the data is encapsulated in the broadcast variable, all executors can access the original data via the `value` member variable. ``` # First create a broadcast variable from the original Python map bc_stations = spark.sparkContext.broadcast(py_stations) @udf('string') def lookup_country(usaf, wban): # Access the broadcast variables value and perform lookup return bc_stations.value.get(usaf + wban) ``` ### Replace JOIN by UDF Again we replace the original `JOIN` by the UDF we just defined above ``` result = weather.withColumn('country', lookup_country(weather["usaf"], weather["wban"])) result.limit(10).toPandas() ``` ### Remarks Actually there is no big difference to the original implementation. But Spark handles a broadcast variable slightly more efficiently, especially if the variable is used in multiple UDFs. In this case the data will be broadcast only a single time, while not using a broadcast variable would imply sending the data around for every UDF. ### Execution Plan The execution plan does not differ at all, since it does not provide information on broadcast variables. ``` result.explain() ``` ## 2.3 Pandas UDFs Since we already learnt that Pandas UDFs are executed more efficiently than normal UDFs, we want to provide a better implementation using Pandas. Of course Pandas UDFs can also access broadcast variables. ``` from pyspark.sql.functions import pandas_udf, PandasUDFType @pandas_udf('string', PandasUDFType.SCALAR) def lookup_country(usaf, wban): # Create helper function def lookup(key): # Perform lookup by accessing the Python map return bc_stations.value.get(key) # Create key from both incoming Pandas series usaf_wban = usaf + wban # Perform lookup return usaf_wban.apply(lookup) ``` ### Replace JOIN by Pandas UDF Again, we replace the original `JOIN` by the Pandas UDF. 
``` result = weather.withColumn('country', lookup_country(weather["usaf"], weather["wban"])) result.limit(10).toPandas() ``` ### Execution Plan Again, let's inspect the execution plan. ``` result.explain(True) ```
github_jupyter
# Sudoku This tutorial includes everything you need to set up decision optimization engines, build constraint programming models. When you finish this tutorial, you'll have a foundational knowledge of _Prescriptive Analytics_. >This notebook is part of the **[Prescriptive Analytics for Python](https://rawgit.com/IBMDecisionOptimization/docplex-doc/master/docs/index.html)** >It requires a **local installation of CPLEX Optimizers**. Table of contents: - [Describe the business problem](#Describe-the-business-problem) * [How decision optimization (prescriptive analytics) can help](#How--decision-optimization-can-help) * [Use decision optimization](#Use-decision-optimization) * [Step 1: Download the library](#Step-1:-Download-the-library) * [Step 2: Model the Data](#Step-2:-Model-the-data) * [Step 3: Set up the prescriptive model](#Step-3:-Set-up-the-prescriptive-model) * [Define the decision variables](#Define-the-decision-variables) * [Express the business constraints](#Express-the-business-constraints) * [Express the objective](#Express-the-objective) * [Solve with Decision Optimization solve service](#Solve-with-Decision-Optimization-solve-service) * [Step 4: Investigate the solution and run an example analysis](#Step-4:-Investigate-the-solution-and-then-run-an-example-analysis) * [Summary](#Summary) **** ### Describe the business problem * Sudoku is a logic-based, combinatorial number-placement puzzle. * The objective is to fill a 9x9 grid with digits so that each column, each row, and each of the nine 3x3 sub-grids that compose the grid contains all of the digits from 1 to 9. * The puzzle setter provides a partially completed grid, which for a well-posed puzzle has a unique solution. 
#### References * See https://en.wikipedia.org/wiki/Sudoku for details ***** ## How decision optimization can help * Prescriptive analytics technology recommends actions based on desired outcomes, taking into account specific scenarios, resources, and knowledge of past and current events. This insight can help your organization make better decisions and have greater control of business outcomes. * Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes. * Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage. <br/> + For example: + Automate complex decisions and trade-offs to better manage limited resources. + Take advantage of a future opportunity or mitigate a future risk. + Proactively update recommendations based on changing events. + Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes. ## Use decision optimization ### Step 1: Download the library Run the following code to install Decision Optimization CPLEX Modeling library. The *DOcplex* library contains the two modeling packages, Mathematical Programming and Constraint Programming, referred to earlier. ``` import sys try: import docplex.cp except: if hasattr(sys, 'real_prefix'): #we are in a virtual env. !pip install docplex else: !pip install --user docplex ``` Note that the more global package <i>docplex</i> contains another subpackage <i>docplex.mp</i> that is dedicated to Mathematical Programming, another branch of optimization. 
``` from docplex.cp.model import * from sys import stdout ``` ### Step 2: Model the data #### Grid range ``` GRNG = range(9) ``` #### Different problems _zero means cell to be filled with appropriate value_ ``` SUDOKU_PROBLEM_1 = ( (0, 0, 0, 0, 9, 0, 1, 0, 0), (2, 8, 0, 0, 0, 5, 0, 0, 0), (7, 0, 0, 0, 0, 6, 4, 0, 0), (8, 0, 5, 0, 0, 3, 0, 0, 6), (0, 0, 1, 0, 0, 4, 0, 0, 0), (0, 7, 0, 2, 0, 0, 0, 0, 0), (3, 0, 0, 0, 0, 1, 0, 8, 0), (0, 0, 0, 0, 0, 0, 0, 5, 0), (0, 9, 0, 0, 0, 0, 0, 7, 0), ) SUDOKU_PROBLEM_2 = ( (0, 7, 0, 0, 0, 0, 0, 4, 9), (0, 0, 0, 4, 0, 0, 0, 0, 0), (4, 0, 3, 5, 0, 7, 0, 0, 8), (0, 0, 7, 2, 5, 0, 4, 0, 0), (0, 0, 0, 0, 0, 0, 8, 0, 0), (0, 0, 4, 0, 3, 0, 5, 9, 2), (6, 1, 8, 0, 0, 0, 0, 0, 5), (0, 9, 0, 1, 0, 0, 0, 3, 0), (0, 0, 5, 0, 0, 0, 0, 0, 7), ) SUDOKU_PROBLEM_3 = ( (0, 0, 0, 0, 0, 6, 0, 0, 0), (0, 5, 9, 0, 0, 0, 0, 0, 8), (2, 0, 0, 0, 0, 8, 0, 0, 0), (0, 4, 5, 0, 0, 0, 0, 0, 0), (0, 0, 3, 0, 0, 0, 0, 0, 0), (0, 0, 6, 0, 0, 3, 0, 5, 4), (0, 0, 0, 3, 2, 5, 0, 0, 6), (0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0) ) try: import numpy as np import matplotlib.pyplot as plt VISU_ENABLED = True except ImportError: VISU_ENABLED = False def print_grid(grid): """ Print Sudoku grid """ for l in GRNG: if (l > 0) and (l % 3 == 0): stdout.write('\n') for c in GRNG: v = grid[l][c] stdout.write(' ' if (c % 3 == 0) else ' ') stdout.write(str(v) if v > 0 else '.') stdout.write('\n') def draw_grid(values): %matplotlib inline fig, ax = plt.subplots(figsize =(4,4)) min_val, max_val = 0, 9 R = range(0,9) for l in R: for c in R: v = values[c][l] s = " " if v > 0: s = str(v) ax.text(l+0.5,8.5-c, s, va='center', ha='center') ax.set_xlim(min_val, max_val) ax.set_ylim(min_val, max_val) ax.set_xticks(np.arange(max_val)) ax.set_yticks(np.arange(max_val)) ax.grid() plt.show() def display_grid(grid, name): stdout.write(name) stdout.write(":\n") if VISU_ENABLED: draw_grid(grid) else: print_grid(grid) display_grid(SUDOKU_PROBLEM_1, "PROBLEM 1") 
display_grid(SUDOKU_PROBLEM_2, "PROBLEM 2") display_grid(SUDOKU_PROBLEM_3, "PROBLEM 3") ``` #### Choose your preferred problem (SUDOKU_PROBLEM_1 or SUDOKU_PROBLEM_2 or SUDOKU_PROBLEM_3) If you change the problem, ensure to re-run all cells below this one. ``` problem = SUDOKU_PROBLEM_3 ``` ### Step 3: Set up the prescriptive model ``` mdl = CpoModel(name="Sudoku") ``` #### Define the decision variables ``` grid = [[integer_var(min=1, max=9, name="C" + str(l) + str(c)) for l in GRNG] for c in GRNG] ``` #### Express the business constraints Add alldiff constraints for lines ``` for l in GRNG: mdl.add(all_diff([grid[l][c] for c in GRNG])) ``` Add alldiff constraints for columns ``` for c in GRNG: mdl.add(all_diff([grid[l][c] for l in GRNG])) ``` Add alldiff constraints for sub-squares ``` ssrng = range(0, 9, 3) for sl in ssrng: for sc in ssrng: mdl.add(all_diff([grid[l][c] for l in range(sl, sl + 3) for c in range(sc, sc + 3)])) ``` Initialize known cells ``` for l in GRNG: for c in GRNG: v = problem[l][c] if v > 0: grid[l][c].set_domain((v, v)) ``` #### Solve with Decision Optimization solve service ``` print("\nSolving model....") msol = mdl.solve(TimeLimit=10) ``` ### Step 4: Investigate the solution and then run an example analysis ``` display_grid(problem, "Initial problem") if msol: sol = [[msol[grid[l][c]] for c in GRNG] for l in GRNG] stdout.write("Solve time: " + str(msol.get_solve_time()) + "\n") display_grid(sol, "Solution") else: stdout.write("No solution found\n") ``` ## Summary You learned how to set up and use the IBM Decision Optimization CPLEX Modeling for Python to formulate and solve a Constraint Programming model. #### References * [CPLEX Modeling for Python documentation](https://rawgit.com/IBMDecisionOptimization/docplex-doc/master/docs/index.html) * [Decision Optimization on Cloud](https://developer.ibm.com/docloud/) * Need help with DOcplex or to report a bug? 
Please go [here](https://developer.ibm.com/answers/smartspace/docloud) * Contact us at dofeedback@wwpdl.vnet.ibm.com Copyright © 2017, 2018 IBM. IPLA licensed Sample Materials.
github_jupyter
# RadarCOVID-Report ## Data Extraction ``` import datetime import json import logging import os import shutil import tempfile import textwrap import uuid import matplotlib.pyplot as plt import matplotlib.ticker import numpy as np import pandas as pd import pycountry import retry import seaborn as sns %matplotlib inline current_working_directory = os.environ.get("PWD") if current_working_directory: os.chdir(current_working_directory) sns.set() matplotlib.rcParams["figure.figsize"] = (15, 6) extraction_datetime = datetime.datetime.utcnow() extraction_date = extraction_datetime.strftime("%Y-%m-%d") extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1) extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d") extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H") current_hour = datetime.datetime.utcnow().hour are_today_results_partial = current_hour != 23 ``` ### Constants ``` from Modules.ExposureNotification import exposure_notification_io spain_region_country_code = "ES" germany_region_country_code = "DE" default_backend_identifier = spain_region_country_code backend_generation_days = 7 * 2 daily_summary_days = 7 * 4 * 3 daily_plot_days = 7 * 4 tek_dumps_load_limit = daily_summary_days + 1 ``` ### Parameters ``` environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER") if environment_backend_identifier: report_backend_identifier = environment_backend_identifier else: report_backend_identifier = default_backend_identifier report_backend_identifier environment_enable_multi_backend_download = \ os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD") if environment_enable_multi_backend_download: report_backend_identifiers = None else: report_backend_identifiers = [report_backend_identifier] report_backend_identifiers environment_invalid_shared_diagnoses_dates = \ os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES") if 
environment_invalid_shared_diagnoses_dates: invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",") else: invalid_shared_diagnoses_dates = [] invalid_shared_diagnoses_dates ``` ### COVID-19 Cases ``` report_backend_client = \ exposure_notification_io.get_backend_client_with_identifier( backend_identifier=report_backend_identifier) @retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10)) def download_cases_dataframe(): return pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv") confirmed_df_ = download_cases_dataframe() confirmed_df_.iloc[0] confirmed_df = confirmed_df_.copy() confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]] confirmed_df.rename( columns={ "date": "sample_date", "iso_code": "country_code", }, inplace=True) def convert_iso_alpha_3_to_alpha_2(x): try: return pycountry.countries.get(alpha_3=x).alpha_2 except Exception as e: logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}") return None confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2) confirmed_df.dropna(inplace=True) confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True) confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_df.sort_values("sample_date", inplace=True) confirmed_df.tail() confirmed_days = pd.date_range( start=confirmed_df.iloc[0].sample_date, end=extraction_datetime) confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"]) confirmed_days_df["sample_date_string"] = \ confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_days_df.tail() def sort_source_regions_for_display(source_regions: list) -> list: if report_backend_identifier in source_regions: source_regions = [report_backend_identifier] + \ list(sorted(set(source_regions).difference([report_backend_identifier]))) else: source_regions = list(sorted(source_regions)) 
return source_regions report_source_regions = report_backend_client.source_regions_for_date( date=extraction_datetime.date()) report_source_regions = sort_source_regions_for_display( source_regions=report_source_regions) report_source_regions def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None): source_regions_at_date_df = confirmed_days_df.copy() source_regions_at_date_df["source_regions_at_date"] = \ source_regions_at_date_df.sample_date.apply( lambda x: source_regions_for_date_function(date=x)) source_regions_at_date_df.sort_values("sample_date", inplace=True) source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \ source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x))) source_regions_at_date_df.tail() #%% source_regions_for_summary_df_ = \ source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy() source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True) source_regions_for_summary_df_.tail() #%% confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"] confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns) for source_regions_group, source_regions_group_series in \ source_regions_at_date_df.groupby("_source_regions_group"): source_regions_set = set(source_regions_group.split(",")) confirmed_source_regions_set_df = \ confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy() confirmed_source_regions_group_df = \ confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \ .reset_index().sort_values("sample_date") confirmed_source_regions_group_df = \ confirmed_source_regions_group_df.merge( confirmed_days_df[["sample_date_string"]].rename( columns={"sample_date_string": "sample_date"}), how="right") confirmed_source_regions_group_df["new_cases"] = \ confirmed_source_regions_group_df["new_cases"].clip(lower=0) confirmed_source_regions_group_df["covid_cases"] = \ 
confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round() confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[confirmed_output_columns] confirmed_source_regions_group_df = confirmed_source_regions_group_df.replace(0, np.nan) confirmed_source_regions_group_df.fillna(method="ffill", inplace=True) confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[ confirmed_source_regions_group_df.sample_date.isin( source_regions_group_series.sample_date_string)] confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df) result_df = confirmed_output_df.copy() result_df.tail() #%% result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True) result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left") result_df.sort_values("sample_date_string", inplace=True) result_df.fillna(method="ffill", inplace=True) result_df.tail() #%% result_df[["new_cases", "covid_cases"]].plot() if columns_suffix: result_df.rename( columns={ "new_cases": "new_cases_" + columns_suffix, "covid_cases": "covid_cases_" + columns_suffix}, inplace=True) return result_df, source_regions_for_summary_df_ confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe( report_backend_client.source_regions_for_date) confirmed_es_df, _ = get_cases_dataframe( lambda date: [spain_region_country_code], columns_suffix=spain_region_country_code.lower()) ``` ### Extract API TEKs ``` raw_zip_path_prefix = "Data/TEKs/Raw/" base_backend_identifiers = [report_backend_identifier] multi_backend_exposure_keys_df = \ exposure_notification_io.download_exposure_keys_from_backends( backend_identifiers=report_backend_identifiers, generation_days=backend_generation_days, fail_on_error_backend_identifiers=base_backend_identifiers, save_raw_zip_path_prefix=raw_zip_path_prefix) multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"] multi_backend_exposure_keys_df.rename( 
def compute_keys_cross_sharing(x):
    """Compute TEK overlap between two backends for one region pair.

    Expects a one-row group with ``key_data_x`` / ``key_data_y`` columns,
    each holding the collection of TEKs published by backend A and B.

    Returns a Series with:
      - common_teks: set of TEKs present in both backends.
      - common_teks_fraction: share of backend A's TEKs also found in B.
    """
    teks_x = x.key_data_x.item()
    teks_y = x.key_data_y.item()
    common_teks = set(teks_x).intersection(teks_y)
    # Guard against an empty TEK list for backend A: the original
    # expression would raise ZeroDivisionError; report 0.0 instead.
    if len(teks_x):
        common_teks_fraction = len(common_teks) / len(teks_x)
    else:
        common_teks_fraction = 0.0
    return pd.Series(dict(
        common_teks=common_teks,
        common_teks_fraction=common_teks_fraction,
    ))
multi_backend_exposure_keys_by_region_df.merge( multi_backend_exposure_keys_by_region_df, on="_merge") multi_backend_exposure_keys_by_region_combination_df.drop( columns=["_merge"], inplace=True) if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1: multi_backend_exposure_keys_by_region_combination_df = \ multi_backend_exposure_keys_by_region_combination_df[ multi_backend_exposure_keys_by_region_combination_df.region_x != multi_backend_exposure_keys_by_region_combination_df.region_y] multi_backend_exposure_keys_cross_sharing_df = \ multi_backend_exposure_keys_by_region_combination_df \ .groupby(["region_x", "region_y"]) \ .apply(compute_keys_cross_sharing) \ .reset_index() multi_backend_cross_sharing_summary_df = \ multi_backend_exposure_keys_cross_sharing_df.pivot_table( values=["common_teks_fraction"], columns="region_x", index="region_y", aggfunc=lambda x: x.item()) multi_backend_cross_sharing_summary_df multi_backend_without_active_region_exposure_keys_df = \ multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier] multi_backend_without_active_region = \ multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() multi_backend_without_active_region exposure_keys_summary_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.region == report_backend_identifier] exposure_keys_summary_df.drop(columns=["region"], inplace=True) exposure_keys_summary_df = \ exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame() exposure_keys_summary_df = \ exposure_keys_summary_df.reset_index().set_index("sample_date_string") exposure_keys_summary_df.sort_index(ascending=False, inplace=True) exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) exposure_keys_summary_df.head() ``` ### Dump API TEKs ``` tek_list_df = multi_backend_exposure_keys_df[ 
def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame:
    """Load previously dumped TEK JSON files back into one dataframe.

    Parameters:
        mode: dump granularity subdirectory, e.g. "Daily" or "Hourly".
        region: if given, keep only rows for that backend region.
        limit: if given, read at most this many files (newest first).

    Returns a dataframe with at least a ``region`` column; rows missing a
    region are attributed to the Spanish backend.
    """
    extracted_teks_df = pd.DataFrame(columns=["region"])
    # Newest dumps first, so `limit` keeps the most recent extractions.
    file_paths = list(reversed(sorted(glob.glob(
        tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json"))))
    if limit:
        file_paths = file_paths[:limit]
    for file_path in file_paths:
        logging.info(f"Loading TEKs from '{file_path}'...")
        iteration_extracted_teks_df = pd.read_json(file_path, lines=True)
        # pd.concat replaces DataFrame.append, which was deprecated in
        # pandas 1.4 and removed in pandas 2.0.
        extracted_teks_df = pd.concat(
            [extracted_teks_df, iteration_extracted_teks_df], sort=False)
    # Older dumps predate the multi-region format and carry no region.
    extracted_teks_df["region"] = \
        extracted_teks_df.region.fillna(spain_region_country_code).copy()
    if region:
        extracted_teks_df = \
            extracted_teks_df[extracted_teks_df.region == region]
    return extracted_teks_df
def compute_teks_by_generation_and_upload_date(date):
    """Break down the TEKs first seen on `date` by their generation date.

    Reads the module-level `tek_list_df` (TEK sets per extraction date) and
    `daily_extracted_teks_df` (raw daily dumps). Returns a dataframe with
    columns upload_date, generation_date, shared_teks and
    generation_to_upload_days, keeping only rows with shared_teks > 0.
    """
    # Set difference per extraction date: TEKs that appeared on each date
    # but were absent the day before.
    day_new_teks_set_df = tek_list_df.copy().diff()
    try:
        # .item() raises ValueError when `date` matches zero or >1 rows.
        day_new_teks_set = day_new_teks_set_df[
            day_new_teks_set_df.index == date].tek_list.item()
    except ValueError:
        day_new_teks_set = None
    # First row of diff() is NaN; treat both that and "no row" as empty.
    if pd.isna(day_new_teks_set):
        day_new_teks_set = set()
    day_new_teks_df = daily_extracted_teks_df[
        daily_extracted_teks_df.extraction_date == date].copy()
    # Per sample date: which of that date's TEKs are newly uploaded today.
    day_new_teks_df["shared_teks"] = \
        day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set))
    # Collapse the TEK sets to their counts.
    day_new_teks_df["shared_teks"] = \
        day_new_teks_df.shared_teks.apply(len)
    day_new_teks_df["upload_date"] = date
    day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True)
    day_new_teks_df = day_new_teks_df[
        ["upload_date", "generation_date", "shared_teks"]]
    # Lag (in whole days) between when a TEK was generated and uploaded.
    day_new_teks_df["generation_to_upload_days"] = \
        (pd.to_datetime(day_new_teks_df.upload_date) -
         pd.to_datetime(day_new_teks_df.generation_date)).dt.days
    # Drop generation dates that contributed no new TEKs on this date.
    day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0]
    return day_new_teks_df
"shared_diagnoses", }) invalid_shared_diagnoses_dates_mask = \ estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates) estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0 estimated_shared_diagnoses_df.head() ``` ### Hourly New TEKs ``` hourly_extracted_teks_df = load_extracted_teks( mode="Hourly", region=report_backend_identifier, limit=25) hourly_extracted_teks_df.head() hourly_new_tek_count_df = hourly_extracted_teks_df \ .groupby("extraction_date_with_hour").tek_list. \ apply(lambda x: set(sum(x, []))).reset_index().copy() hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \ .sort_index(ascending=True) hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff() hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply( lambda x: len(x) if not pd.isna(x) else 0) hourly_new_tek_count_df.rename(columns={ "new_tek_count": "shared_teks_by_upload_date"}, inplace=True) hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[ "extraction_date_with_hour", "shared_teks_by_upload_date"]] hourly_new_tek_count_df.head() hourly_summary_df = hourly_new_tek_count_df.copy() hourly_summary_df.set_index("extraction_date_with_hour", inplace=True) hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index() hourly_summary_df["datetime_utc"] = pd.to_datetime( hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H") hourly_summary_df.set_index("datetime_utc", inplace=True) hourly_summary_df = hourly_summary_df.tail(-1) hourly_summary_df.head() ``` ### Official Statistics ``` import requests import pandas.io.json official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics") official_stats_response.raise_for_status() official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json()) official_stats_df = official_stats_df_.copy() official_stats_df["date"] = 
pd.to_datetime(official_stats_df["date"], dayfirst=True) official_stats_df.head() official_stats_column_map = { "date": "sample_date", "applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated", "communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated", } accumulated_suffix = "_accumulated" accumulated_values_columns = \ list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values())) interpolated_values_columns = \ list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns)) official_stats_df = \ official_stats_df[official_stats_column_map.keys()] \ .rename(columns=official_stats_column_map) official_stats_df["extraction_date"] = extraction_date official_stats_df.head() official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json" previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True) previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True) official_stats_df = official_stats_df.append(previous_official_stats_df) official_stats_df.head() official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)] official_stats_df.sort_values("extraction_date", ascending=False, inplace=True) official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", inplace=True) official_stats_df.head() official_stats_stored_df = official_stats_df.copy() official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d") official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True) official_stats_df.drop(columns=["extraction_date"], inplace=True) official_stats_df = confirmed_days_df.merge(official_stats_df, how="left") official_stats_df.sort_values("sample_date", ascending=False, inplace=True) official_stats_df.head() official_stats_df[accumulated_values_columns] = \ 
official_stats_df[accumulated_values_columns] \ .astype(float).interpolate(limit_area="inside") official_stats_df[interpolated_values_columns] = \ official_stats_df[accumulated_values_columns].diff(periods=-1) official_stats_df.drop(columns="sample_date", inplace=True) official_stats_df.head() ``` ### Data Merge ``` result_summary_df = exposure_keys_summary_df.merge( new_tek_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( official_stats_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df = confirmed_es_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string) result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left") result_summary_df.set_index(["sample_date", "source_regions"], inplace=True) result_summary_df.drop(columns=["sample_date_string"], inplace=True) result_summary_df.sort_index(ascending=False, inplace=True) result_summary_df.head() with pd.option_context("mode.use_inf_as_na", True): result_summary_df = result_summary_df.fillna(0).astype(int) result_summary_df["teks_per_shared_diagnosis"] = \ (result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0) result_summary_df["shared_diagnoses_per_covid_case"] = \ (result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0) 
def compute_aggregated_results_summary(days) -> pd.DataFrame:
    """Aggregate the module-level `result_summary_df` over a rolling window.

    Parameters:
        days: rolling window length in days (e.g. 7 or 14).

    Returns a dataframe of rolling sums plus ratio columns; rows are sorted
    with the most recent window first.
    """
    aggregated_result_summary_df = result_summary_df.copy()
    # Zero-out case counts on days with no shared diagnoses so that the
    # ratio denominators only include days that actually contributed.
    aggregated_result_summary_df["covid_cases_for_ratio"] = \
        aggregated_result_summary_df.covid_cases.mask(
            aggregated_result_summary_df.shared_diagnoses == 0, 0)
    aggregated_result_summary_df["covid_cases_for_ratio_es"] = \
        aggregated_result_summary_df.covid_cases_es.mask(
            aggregated_result_summary_df.shared_diagnoses_es == 0, 0)
    # Rolling-window sums (ascending order so the window looks backwards).
    aggregated_result_summary_df = aggregated_result_summary_df \
        .sort_index(ascending=True).fillna(0).rolling(days).agg({
            "covid_cases": "sum",
            "covid_cases_es": "sum",
            "covid_cases_for_ratio": "sum",
            "covid_cases_for_ratio_es": "sum",
            "shared_teks_by_generation_date": "sum",
            "shared_teks_by_upload_date": "sum",
            "shared_diagnoses": "sum",
            "shared_diagnoses_es": "sum",
        }).sort_index(ascending=False)
    # Treat inf (division fallout upstream) like NaN before the int cast.
    with pd.option_context("mode.use_inf_as_na", True):
        aggregated_result_summary_df = aggregated_result_summary_df.fillna(0).astype(int)
    # NOTE(review): the non-aggregated summary divides uploaded TEKs by
    # shared_diagnoses, but here the denominator is covid_cases_for_ratio —
    # confirm this difference is intentional.
    aggregated_result_summary_df["teks_per_shared_diagnosis"] = \
        (aggregated_result_summary_df.shared_teks_by_upload_date /
         aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
    aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \
        (aggregated_result_summary_df.shared_diagnoses /
         aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
    aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \
        (aggregated_result_summary_df.shared_diagnoses_es /
         aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0)
    return aggregated_result_summary_df
last_7_days_summary aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13) last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1] last_14_days_summary ``` ## Report Results ``` display_column_name_mapping = { "sample_date": "Sample\u00A0Date\u00A0(UTC)", "source_regions": "Source Countries", "datetime_utc": "Timestamp (UTC)", "upload_date": "Upload Date (UTC)", "generation_to_upload_days": "Generation to Upload Period in Days", "region": "Backend", "region_x": "Backend\u00A0(A)", "region_y": "Backend\u00A0(B)", "common_teks": "Common TEKs Shared Between Backends", "common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)", "covid_cases": "COVID-19 Cases (Source Countries)", "shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)", "shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)", "shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)", "shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)", "teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)", "shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)", "covid_cases_es": "COVID-19 Cases (Spain)", "app_downloads_es": "App Downloads (Spain – Official)", "shared_diagnoses_es": "Shared Diagnoses (Spain – Official)", "shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)", } summary_columns = [ "covid_cases", "shared_teks_by_generation_date", "shared_teks_by_upload_date", "shared_teks_uploaded_on_generation_date", "shared_diagnoses", "teks_per_shared_diagnosis", "shared_diagnoses_per_covid_case", "covid_cases_es", "app_downloads_es", "shared_diagnoses_es", "shared_diagnoses_per_covid_case_es", ] summary_percentage_columns= [ "shared_diagnoses_per_covid_case_es", "shared_diagnoses_per_covid_case", ] ``` ### Daily Summary Table ``` result_summary_df_ 
= result_summary_df.copy() result_summary_df = result_summary_df[summary_columns] result_summary_with_display_names_df = result_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) result_summary_with_display_names_df ``` ### Daily Summary Plots ``` result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \ .droplevel(level=["source_regions"]) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar( title=f"Daily Summary", rot=45, subplots=True, figsize=(15, 30), legend=False) ax_ = summary_ax_list[0] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.95) _ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist())) for percentage_column in summary_percentage_columns: percentage_column_index = summary_columns.index(percentage_column) summary_ax_list[percentage_column_index].yaxis \ .set_major_formatter(matplotlib.ticker.PercentFormatter(1.0)) ``` ### Daily Generation to Upload Period Table ``` display_generation_to_upload_period_pivot_df = \ generation_to_upload_period_pivot_df \ .head(backend_generation_days) display_generation_to_upload_period_pivot_df \ .head(backend_generation_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) fig, generation_to_upload_period_pivot_table_ax = plt.subplots( figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df))) generation_to_upload_period_pivot_table_ax.set_title( "Shared TEKs Generation to Upload Period Table") sns.heatmap( data=display_generation_to_upload_period_pivot_df .rename_axis(columns=display_column_name_mapping) .rename_axis(index=display_column_name_mapping), fmt=".0f", annot=True, ax=generation_to_upload_period_pivot_table_ax) generation_to_upload_period_pivot_table_ax.get_figure().tight_layout() 
def format_multi_backend_cross_sharing_fraction(x):
    """Render one cross-sharing fraction cell for the HTML summary table.

    Missing values (no data for the backend pair) render as "-", values
    that round to 0.0% render as an empty cell, and anything else as a
    one-decimal percentage.
    """
    if pd.isna(x):
        return "-"
    percentage = round(x * 100, 1)
    return "" if percentage == 0 else f"{x:.1%}"
def get_temporary_image_path() -> str:
    """Return a fresh, collision-free PNG path in the system temp directory."""
    file_name = f"{uuid.uuid4()}.png"
    return os.path.join(tempfile.gettempdir(), file_name)


def save_temporary_plot_image(ax):
    """Save the figure behind *ax* to a temporary PNG and return its path.

    Accepts either a single axes object or an array of axes (as returned by
    `subplots=True` plots), in which case the shared figure is taken from
    the first axes.
    """
    if isinstance(ax, np.ndarray):
        ax = ax[0]
    media_path = get_temporary_image_path()
    ax.get_figure().savefig(media_path)
    return media_path


def save_temporary_dataframe_image(df):
    """Render *df* as a styled PNG table and return the image path."""
    # Imported lazily: dataframe_image is only needed when exporting images.
    import dataframe_image as dfi
    df = df.copy()
    df_styler = df.style.format(display_formatters)
    media_path = get_temporary_image_path()
    dfi.export(df_styler, media_path)
    return media_path
def generate_summary_api_results(df: pd.DataFrame) -> list:
    """Convert a summary dataframe into JSON-serializable record dicts.

    The (sample_date, source_regions) index is flattened into columns,
    dates are rendered as "YYYY-MM-DD" strings and the comma-separated
    source-regions string becomes a list of country codes.
    """
    records_df = df.reset_index().copy()
    records_df["sample_date_string"] = \
        records_df["sample_date"].dt.strftime("%Y-%m-%d")
    records_df["source_regions"] = records_df["source_regions"].apply(
        lambda regions: regions.split(","))
    return records_df.to_dict(orient="records")
def format_shared_diagnoses_per_covid_case(value) -> str:
    """Format a usage ratio for the tweet body.

    Zero (no data) renders as an en dash; any other value renders as an
    upper-bound percentage with two decimals, e.g. "≤12.34%".
    """
    return "–" if value == 0 else f"≤{value:.2%}"
github_jupyter
# Karatsuba Multiplication Order is O(n^1.59) as opposed to O(n^2) asin the case of normal multiplication.<br> This is a recursive technique performed with the divide and conquer technique. ## By retaining the numbers in the same base ``` def addition(m, n, r) : res = 0 a = m b = n po = 0 c = 0 while(a>0 or b>0 or c==1) : if a==0 : s = b%10 + c elif b == 0 : s = a%10 + c else : s = a%10 + b%10 + c if s<r : c=0 else : c=1 s = s-r res = ((10**po)*s) + res po += 1 a = a//10 b = b//10 return(res) def subtraction(m, n, r) : res = 0 a = m b = n po = 0 c = 0 while(a>0 or b>0 or c==1) : if a==0 : s = b%10 - c elif b == 0 : s = a%10 - c else : s = a%10 - b%10 - c if s>=0 : c=0 else : c=1 s = s+r res = ((10**po)*s) + res po += 1 a = a//10 b = b//10 return(res) def karatsuba_multiplication(m, n, r) : if len(str(m)) == 1 or len(str(n)) == 1: return m*n dig = max(len(str(m)), len(str(n))) // 2 a = m//(10**dig) b = m%(10**dig) c = n//(10**dig) d = n%(10**dig) ac = karatsuba_multiplication(a,c,r) bd = karatsuba_multiplication(b,d,r) a_plus_b = addition(a,b,r) c_plus_d = addition(c,d,r) aplusd_cplusd = karatsuba_multiplication(a_plus_b,c_plus_d,r) sub1 = subtraction(aplusd_cplusd,ac,r) ad_plus_bc = subtraction(sub1, bd, r) t1 = addition(ac * (10**(2*dig)), (ad_plus_bc * 10**dig), r) res = addition(t1, bd, r) #print(res) return(res) def convert_to_decimal(n, r) : a = 1 b = n c = 0 res = 0 while(b>0) : c = b%10 b = b//10 res = res + (a*c) a = a*r return(res) r = int(input('Enter the base of the numbers entered :- ')) a = int(input('Enter the first number :- ')) b = int(input('Enter the second number :- ')) res = karatsuba_multiplication(a, b, r) print('The product is :- ', res) if r!=10 : print('The product in decimal form is :- ', convert_to_decimal(res, r)) ``` ## By converting numbers of any base to decimal and then calculating the product ``` def karatsuba(x,y): if len(str(x)) == 1 or len(str(y)) == 1: return x*y else: n = max(len(str(x)),len(str(y))) dig = n // 2 a = x // 
def karatsuba(x, y):
    """Multiply two non-negative integers with Karatsuba's O(n^1.59) scheme.

    Splits each operand around half the larger decimal length and applies
    (a*B + b)(c*B + d) = ac*B^2 + ((a+b)(c+d) - ac - bd)*B + bd.
    """
    if len(str(x)) == 1 or len(str(y)) == 1:
        return x * y
    dig = max(len(str(x)), len(str(y))) // 2
    # divmod splits into the high and low halves in one step.
    a, b = divmod(x, 10 ** dig)
    c, d = divmod(y, 10 ** dig)
    ac = karatsuba(a, c)
    bd = karatsuba(b, d)
    # ad + bc recovered from a single recursive multiplication.
    ad_plus_bc = karatsuba(a + b, c + d) - ac - bd
    return ac * 10 ** (2 * dig) + ad_plus_bc * 10 ** dig + bd


def convert_to_decimal(n, r):
    """Interpret the decimal digit string of *n* as a base-*r* number."""
    value = 0
    place = 1
    while n > 0:
        value += place * (n % 10)
        place *= r
        n //= 10
    return value


def _main():
    # Interactive driver; guarded so importing this module has no side
    # effects (the original ran input() at import time).
    r = int(input('Enter the base of the numbers entered :- '))
    a = convert_to_decimal(int(input('Enter the first number :- ')), r)
    b = convert_to_decimal(int(input('Enter the second number :- ')), r)
    res = karatsuba(a, b)
    print('The product is :- ', res)


if __name__ == "__main__":
    _main()
github_jupyter
#1. Install Dependencies First install the libraries needed to execute recipes, this only needs to be done once, then click play. ``` !pip install git+https://github.com/google/starthinker ``` #2. Get Cloud Project ID To run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play. ``` CLOUD_PROJECT = 'PASTE PROJECT ID HERE' print("Cloud Project Set To: %s" % CLOUD_PROJECT) ``` #3. Get Client Credentials To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play. ``` CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE' print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS) ``` #4. Enter Conversion Upload BigQuery Parameters Move from BigQuery to CM. 1. Specify a CM Account ID, Floodligh Activity ID and Conversion Type. 1. Include BigQuery dataset and table. 1. Columns: Ordinal, timestampMicros, encryptedUserId | encryptedUserIdCandidates | gclid | mobileDeviceId 1. Include encryption information if using encryptedUserId or encryptedUserIdCandidates. Modify the values below for your use case, can be done multiple times, then click play. ``` FIELDS = { 'account': '', 'floodlight_activity_id': '', 'auth_read': 'user', # Credentials used for reading data. 'floodlight_conversion_type': 'encryptedUserId', 'encryption_entity_id': '', 'encryption_entity_type': 'DCM_ACCOUNT', 'encryption_entity_source': 'DATA_TRANSFER', 'bigquery_dataset': '', 'bigquery_table': '', 'bigquery_legacy': True, } print("Parameters Set To: %s" % FIELDS) ``` #5. Execute Conversion Upload BigQuery This does NOT need to be modified unles you are changing the recipe, click play. 
``` from starthinker.util.project import project from starthinker.script.parse import json_set_fields USER_CREDENTIALS = '/content/user.json' TASKS = [ { 'conversion_upload': { 'auth': 'user', 'encryptionInfo': { 'encryptionEntityId': {'field': {'kind': 'integer','name': 'encryption_entity_id','order': 3,'default': ''}}, 'encryptionEntityType': {'field': {'kind': 'choice','choices': ['ADWORDS_CUSTOMER','DBM_ADVERTISER','DBM_PARTNER','DCM_ACCOUNT','DCM_ADVERTISER','ENCRYPTION_ENTITY_TYPE_UNKNOWN'],'name': 'encryption_entity_type','order': 4,'default': 'DCM_ACCOUNT'}}, 'encryptionSource': {'field': {'kind': 'choice','choices': ['AD_SERVING','DATA_TRANSFER','ENCRYPTION_SCOPE_UNKNOWN'],'name': 'encryption_entity_source','order': 5,'default': 'DATA_TRANSFER'}} }, 'conversion_type': {'field': {'kind': 'choice','choices': ['encryptedUserId','encryptedUserIdCandidates','gclid','mobileDeviceId'],'name': 'floodlight_conversion_type','order': 2,'default': 'encryptedUserId'}}, 'bigquery': { 'table': {'field': {'kind': 'string','name': 'bigquery_table','order': 7,'default': ''}}, 'dataset': {'field': {'kind': 'string','name': 'bigquery_dataset','order': 6,'default': ''}}, 'legacy': {'field': {'kind': 'boolean','name': 'bigquery_legacy','order': 8,'default': True}} }, 'activity_id': {'field': {'kind': 'integer','name': 'floodlight_activity_id','order': 1,'default': ''}}, 'account_id': {'field': {'kind': 'string','name': 'account','order': 0,'default': ''}} } } ] json_set_fields(TASKS, FIELDS) project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True, _force=True) project.execute(_force=True) ```
github_jupyter
## Dragon Real Estate - Price Predictor ``` import pandas as pd housing = pd.read_csv("data.csv") housing.head() housing.info() housing['CHAS'].value_counts() housing.describe() %matplotlib inline # # For plotting histogram # import matplotlib.pyplot as plt # housing.hist(bins=50, figsize=(20, 15)) ``` ## Train-Test Splitting ``` # For learning purpose import numpy as np def split_train_test(data, test_ratio): np.random.seed(42) shuffled = np.random.permutation(len(data)) print(shuffled) test_set_size = int(len(data) * test_ratio) test_indices = shuffled[:test_set_size] train_indices = shuffled[test_set_size:] return data.iloc[train_indices], data.iloc[test_indices] # train_set, test_set = split_train_test(housing, 0.2) # print(f"Rows in train set: {len(train_set)}\nRows in test set: {len(test_set)}\n") from sklearn.model_selection import train_test_split train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42) print(f"Rows in train set: {len(train_set)}\nRows in test set: {len(test_set)}\n") from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing['CHAS']): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] strat_test_set['CHAS'].value_counts() strat_train_set['CHAS'].value_counts() # 95/7 # 376/28 housing = strat_train_set.copy() ``` ## Looking for Correlations ``` corr_matrix = housing.corr() corr_matrix['MEDV'].sort_values(ascending=False) # from pandas.plotting import scatter_matrix # attributes = ["MEDV", "RM", "ZN", "LSTAT"] # scatter_matrix(housing[attributes], figsize = (12,8)) housing.plot(kind="scatter", x="RM", y="MEDV", alpha=0.8) ``` ## Trying out Attribute combinations ``` housing["TAXRM"] = housing['TAX']/housing['RM'] housing.head() corr_matrix = housing.corr() corr_matrix['MEDV'].sort_values(ascending=False) housing.plot(kind="scatter", x="TAXRM", y="MEDV", 
alpha=0.8) housing = strat_train_set.drop("MEDV", axis=1) housing_labels = strat_train_set["MEDV"].copy() ``` ## Missing Attributes ``` # To take care of missing attributes, you have three options: # 1. Get rid of the missing data points # 2. Get rid of the whole attribute # 3. Set the value to some value(0, mean or median) a = housing.dropna(subset=["RM"]) #Option 1 a.shape # Note that the original housing dataframe will remain unchanged housing.drop("RM", axis=1).shape # Option 2 # Note that there is no RM column and also note that the original housing dataframe will remain unchanged median = housing["RM"].median() # Compute median for Option 3 housing["RM"].fillna(median) # Option 3 # Note that the original housing dataframe will remain unchanged housing.shape housing.describe() # before we started filling missing attributes from sklearn.impute import SimpleImputer imputer = SimpleImputer(strategy="median") imputer.fit(housing) imputer.statistics_ X = imputer.transform(housing) housing_tr = pd.DataFrame(X, columns=housing.columns) housing_tr.describe() ``` ## Scikit-learn Design Primarily, three types of objects 1. Estimators - It estimates some parameter based on a dataset. Eg. imputer. It has a fit method and transform method. Fit method - Fits the dataset and calculates internal parameters 2. Transformers - transform method takes input and returns output based on the learnings from fit(). It also has a convenience function called fit_transform() which fits and then transforms. 3. Predictors - LinearRegression model is an example of predictor. fit() and predict() are two common functions. It also gives score() function which will evaluate the predictions. ## Feature Scaling Primarily, two types of feature scaling methods: 1. Min-max scaling (Normalization) (value - min)/(max - min) Sklearn provides a class called MinMaxScaler for this 2. 
Standardization (value - mean)/std Sklearn provides a class called StandardScaler for this ## Creating a Pipeline ``` from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler my_pipeline = Pipeline([ ('imputer', SimpleImputer(strategy="median")), # ..... add as many as you want in your pipeline ('std_scaler', StandardScaler()), ]) housing_num_tr = my_pipeline.fit_transform(housing) housing_num_tr.shape ``` ## Selecting a desired model for Dragon Real Estates ``` from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor # model = LinearRegression() # model = DecisionTreeRegressor() model = RandomForestRegressor() model.fit(housing_num_tr, housing_labels) some_data = housing.iloc[:5] some_labels = housing_labels.iloc[:5] prepared_data = my_pipeline.transform(some_data) model.predict(prepared_data) list(some_labels) ``` ## Evaluating the model ``` from sklearn.metrics import mean_squared_error housing_predictions = model.predict(housing_num_tr) mse = mean_squared_error(housing_labels, housing_predictions) rmse = np.sqrt(mse) rmse ``` ## Using better evaluation technique - Cross Validation ``` # 1 2 3 4 5 6 7 8 9 10 from sklearn.model_selection import cross_val_score scores = cross_val_score(model, housing_num_tr, housing_labels, scoring="neg_mean_squared_error", cv=10) rmse_scores = np.sqrt(-scores) rmse_scores def print_scores(scores): print("Scores:", scores) print("Mean: ", scores.mean()) print("Standard deviation: ", scores.std()) print_scores(rmse_scores) ``` Quiz: Convert this notebook into a python file and run the pipeline using Visual Studio Code ## Saving the model ``` from joblib import dump, load dump(model, 'Dragon.joblib') ``` ## Testing the model on test data ``` X_test = strat_test_set.drop("MEDV", axis=1) Y_test = strat_test_set["MEDV"].copy() X_test_prepared = my_pipeline.transform(X_test) final_predictions = 
model.predict(X_test_prepared) final_mse = mean_squared_error(Y_test, final_predictions) final_rmse = np.sqrt(final_mse) # print(final_predictions, list(Y_test)) final_rmse prepared_data[0] ``` ## Using the model ``` from joblib import dump, load import numpy as np model = load('Dragon.joblib') features = np.array([[-5.43942006, 4.12628155, -1.6165014, -0.67288841, -1.42262747, -11.44443979304, -49.31238772, 7.61111401, -26.0016879 , -0.5778192 , -0.97491834, 0.41164221, -66.86091034]]) model.predict(features) ```
github_jupyter
``` import numpy as np from sklearn import datasets from scipy.optimize import minimize from matplotlib import pyplot as plt from sklearn.metrics.pairwise import euclidean_distances import warnings warnings.simplefilter("ignore") n = 200 np.random.seed(1111) X, y = datasets.make_blobs(n_samples=n, shuffle=True, random_state=None, centers = 2, cluster_std = 2.0) plt.scatter(X[:,0], X[:,1]) from sklearn.cluster import KMeans km = KMeans(n_clusters = 2) km.fit(X) plt.scatter(X[:,0], X[:,1], c = km.predict(X)) np.random.seed(1234) n = 200 X, y = datasets.make_moons(n_samples=n, shuffle=True, noise=0.05, random_state=None) plt.scatter(X[:,0], X[:,1]) km = KMeans(n_clusters = 2) km.fit(X) plt.scatter(X[:,0], X[:,1], c = km.predict(X)) ``` # Part A ``` epsilon = 0.4 dist = euclidean_distances(X, X) A = [[1 if dist[i, j] < epsilon else 0 for j in range(n)] for i in range(n)] A = np.array(A) np.fill_diagonal(A, 0) A ``` # Part B ``` degree = [sum(A[i, :]) for i in range(A.shape[0])] ``` ### B.1. Cut term ``` def cut(A, y): l = [] for i in range(len(A[:, 1])): for j in range(int(len(A[1, :]))): if A[i, j] != 0 and y[i] != y[j]: l.append(A[i, j]) return(len(l)/2) cut_of_y = cut(A, y) cut_of_y num = 0 for i in range(20): random_array = np.random.randint(0, 2, size = 200) cut_of_random_array = cut(A, random_array) if cut_of_random_array <= cut_of_y: print("Oops, cut of random array was smaller") num = 1 break if num == 0: print("Cut of y was always smaller") ``` ### B.2. 
Volume term ``` def vols(A, y): v0 = sum([degree[i] for i in range(len(y)) if y[i] == 0]) v1 = sum([degree[i] for i in range(len(y)) if y[i] == 1]) return (v0, v1) def normcut(A, y): v0, v1 = vols(A, y) cut_y = cut(A, y) return (cut_y * ((1/v0) + (1/v1))) normcut_of_y = round(normcut(A, y), 3) normcut_of_y for i in range(10): random_array = np.random.randint(0, 2, size = 200) cut_of_random_array = cut(A, random_array) print("Cut of y :", normcut_of_y, ", Cut of random array :", round(normcut(A, random_array), 3)) ``` # Part C ``` def transform(A, y): v0, v1 = vols(A, y) z = [1/v0 if y[i] == 0 else -1/v1 for i in range(len(y))] return np.array(z) norm_1 = normcut(A, y) z = transform(A, y) D = np.diag(degree) norm_2 = (z @ (D - A) @ z)/(z @ D @ z) np.isclose(norm_1, norm_2) z @ D @ np.ones(n) ``` # Part D ``` def orth(u, v): return (u @ v) / (v @ v) * v e = np.ones(n) d = D @ e def orth_obj(z): z_o = z - orth(z, d) return (z_o @ (D - A) @ z_o)/(z_o @ D @ z_o) output = minimize(fun = orth_obj, x0 = z, method = 'Nelder-Mead') z_min = output.x ``` # Part E ``` def set_color(): colors = [] for i in range(len(z_min)): if z_min[i] >= 0: colors.append("red") if z_min[i] < 0: colors.append("blue") return colors plt.scatter(X[:, 0], X[:, 1], c = set_color()) plt.savefig(fname = "/Users/arshmacbook/Desktop/PIC 16B/arshg285.github.io/images/blog-post-4-e", bbox_inches = 'tight') plt.show() ``` # Part F ``` L = np.linalg.inv(D) @ (D - A) def second_smallest_eigenvector(L): Lam, U = np.linalg.eig(L) ix = Lam.argsort() Lam, U = Lam[ix], U[:, ix] z_eig = U[:, 1] return z_eig z_eig = second_smallest_eigenvector(L) def set_color(z_eig): colors = [] for i in range(len(z_eig)): if z_eig[i] >= 0: colors.append("red") if z_eig[i] < 0: colors.append("blue") return colors plt.scatter(X[:, 0], X[:, 1], c = set_color(z_eig)) plt.savefig(fname = "/Users/arshmacbook/Desktop/PIC 16B/arshg285.github.io/images/blog-post-4-f", bbox_inches = 'tight') plt.show() ``` # Part G ``` def 
spectral_clustering(X, epsilon): # Constructing the similarity matrix dist = euclidean_distances(X, X) A = np.array([[1 if dist[i, j] < epsilon else 0 for j in range(n)] for i in range(n)]) np.fill_diagonal(A, 0) # Constructing the laplacian matrix degree = [sum(A[i, :]) for i in range(A.shape[0])] L = np.linalg.inv(np.diag(degree)) @ (np.diag(degree) - A) # Compute the eigenvector with second-smallest eigenvalue of the Laplacian matrix z_eig = second_smallest_eigenvector(L) y = [1 if z_eig[i] > 0 else 0 for i in range(len(z_eig))] # Return labels based on this eigenvector return y y = spectral_clustering(X, epsilon) ``` # Part H ``` np.random.seed(1234) n = 1000 noise_values = np.linspace(0, 0.2, 5) num = 0 for elem in noise_values[1:]: X, y = datasets.make_moons(n_samples=n, shuffle=True, noise=elem, random_state=None) y = spectral_clustering(X, epsilon = 0.4) def set_color(vector): colors = [] for i in range(len(vector)): if vector[i] == 0: colors.append("red") if vector[i] == 1: colors.append("blue") return colors num += 1 print("For epsilon = ", elem) plt.scatter(X[:, 0], X[:, 1], c = set_color(y)) plt.savefig(fname = f"/Users/arshmacbook/Desktop/PIC 16B/arshg285.github.io/images/blog-post-4-h{num}", bbox_inches = 'tight') plt.show() ``` # Part I ``` n = 2000 noise_values = np.linspace(0, 1, 11) num = 0 for elem in noise_values[1:]: X, y = datasets.make_circles(n_samples=n, shuffle=True, noise=elem, random_state=None, factor = 0.4) def set_color(vector): colors = [] for i in range(len(vector)): if vector[i] == 0: colors.append("red") if vector[i] == 1: colors.append("blue") return colors num += 1 print("For epsilon = ", elem) plt.scatter(X[:, 0], X[:, 1], c = set_color(y)) plt.savefig(fname = f"/Users/arshmacbook/Desktop/PIC 16B/arshg285.github.io/images/blog-post-4-i{num}", bbox_inches = 'tight') plt.show() ```
github_jupyter
# Replacing scalar values I In this exercise, we will replace a list of values in our dataset by using the .replace() method with another list of desired values. We will apply the functions in the poker_hands DataFrame. Remember that in the poker_hands DataFrame, each row of columns R1 to R5 represents the rank of each card from a player's poker hand spanning from 1 (Ace) to 13 (King). The Class feature classifies each hand as a category, and the Explanation feature briefly explains each hand. The poker_hands DataFrame is already loaded for you, and you can explore the features Class and Explanation. Remember you can always explore the dataset and see how it changes in the IPython Shell, and refer to the slides in the Slides tab. ``` import pandas as pd poker_hands = pd.read_csv('../datasets/poker_hand.csv') poker_hands # Replace Class 1 to -2 poker_hands['Class'].replace(1, -2, inplace=True) # Replace Class 2 to -3 poker_hands['Class'].replace(2, -3, inplace=True) print(poker_hands[['Class']]) ``` # Replace scalar values II As discussed in the video, in a pandas DataFrame, it is possible to replace values in a very intuitive way: we locate the position (row and column) in the Dataframe and assign in the new value you want to replace with. In a more pandas-ian way, the .replace() function is available that performs the same task. You will be using the names DataFrame which includes, among others, the most popular names in the US by year, gender and ethnicity. 
Your task is to replace all the babies that are classified as FEMALE to GIRL using the following methods: - intuitive scalar replacement - using the .replace() function ``` names = pd.read_csv('../datasets/Popular_Baby_Names.csv') names.head() import time start_time = time.time() # Replace all the entries that has 'FEMALE' as a gender with 'GIRL' names['Gender'].loc[names['Gender'] == 'FEMALE'] = 'GIRL' print("Time using .loc[]: {} sec".format(time.time() - start_time)) start_time = time.time() # Replace all the entries that has 'FEMALE' as a gender with 'GIRL' names['Gender'].replace('FEMALE', 'GIRL', inplace=True) print("Time using .replace(): {} sec".format(time.time() - start_time)) ``` # Replace multiple values I In this exercise, you will apply the .replace() function for the task of replacing multiple values with one or more values. You will again use the names dataset which contains, among others, the most popular names in the US by year, gender and Ethnicity. Thus you want to replace all ethnicities classified as black or white non-hispanics to non-hispanic. 
Remember, the ethnicities are stated in the dataset as follows: ```['BLACK NON HISP', 'BLACK NON HISPANIC', 'WHITE NON HISP' , 'WHITE NON HISPANIC']``` and should be replaced to 'NON HISPANIC' ``` start_time = time.time() # Replace all non-Hispanic ethnicities with 'NON HISPANIC' names['Ethnicity'].loc[(names["Ethnicity"] == 'BLACK NON HISP') | (names["Ethnicity"] == 'BLACK NON HISPANIC') | (names["Ethnicity"] == 'WHITE NON HISP') | (names["Ethnicity"] == 'WHITE NON HISPANIC')] = 'NON HISPANIC' print("Time using .loc[]: {0} sec".format(time.time() - start_time)) start_time = time.time() # Replace all non-Hispanic ethnicities with 'NON HISPANIC' names['Ethnicity'].replace(['BLACK NON HISP', 'BLACK NON HISPANIC', 'WHITE NON HISP' , 'WHITE NON HISPANIC'], 'NON HISPANIC', inplace=True) print("Time using .replace(): {} sec".format(time.time() - start_time)) ``` # Replace multiple values II As discussed in the video, instead of using the .replace() function multiple times to replace multiple values, you can use lists to map the elements you want to replace one to one with those you want to replace them with. As you have seen in our popular names dataset, there are two names for the same ethnicity. We want to standardize the naming of each ethnicity by replacing - 'ASIAN AND PACI' to 'ASIAN AND PACIFIC ISLANDER' - 'BLACK NON HISP' to 'BLACK NON HISPANIC' - 'WHITE NON HISP' to 'WHITE NON HISPANIC' In the DataFrame names, you are going to replace all the values on the left by the values on the right. ``` start_time = time.time() # Replace ethnicities as instructed names['Ethnicity'].replace(['ASIAN AND PACI','BLACK NON HISP', 'WHITE NON HISP'], ['ASIAN AND PACIFIC ISLANDER','BLACK NON HISPANIC','WHITE NON HISPANIC'], inplace=True) print("Time using .replace(): {} sec".format(time.time() - start_time)) ``` # Replace single values I In this exercise, we will apply the following replacing technique of replacing multiple values using dictionaries on a different dataset. 
We will apply the functions in the data DataFrame. Each row represents the rank of 5 cards from a playing card deck, spanning from 1 (Ace) to 13 (King) (features R1, R2, R3, R4, R5). The feature 'Class' classifies each row to a category (from 0 to 9) and the feature 'Explanation' gives a brief explanation of what each class represents. The purpose of this exercise is to categorize the two types of flush in the game ('Royal flush' and 'Straight flush') under the 'Flush' name. ``` # Replace Royal flush or Straight flush to Flush poker_hands.replace({'Royal flush':'Flush', 'Straight flush':'Flush'}, inplace=True) print(poker_hands['Explanation'].head()) ``` # Replace single values II For this exercise, we will be using the names DataFrame. In this dataset, the column 'Rank' shows the ranking of each name by year. For this exercise, you will use dictionaries to replace the first ranked name of every year as 'FIRST', the second name as 'SECOND' and the third name as 'THIRD'. You will use dictionaries to replace one single value per key. You can already see the first 5 names of the data, which correspond to the 5 most popular names for all the females belonging to the 'ASIAN AND PACIFIC ISLANDER' ethnicity in 2011. ``` # Replace the number rank by a string names['Rank'].replace({1:'FIRST', 2:'SECOND', 3:'THIRD'}, inplace=True) print(names.head()) ``` # Replace multiple values III As you saw in the video, you can use dictionaries to replace multiple values with just one value, even from multiple columns. To show the usefulness of replacing with dictionaries, you will use the names dataset one more time. In this dataset, the column 'Rank' shows which rank each name reached every year. You will change the rank of the first three ranked names of every year to 'MEDAL' and those from 4th and 5th place to 'ALMOST MEDAL'. 
You can already see the first 5 names of the data, which correspond to the 5 most popular names for all the females belonging to the 'ASIAN AND PACIFIC ISLANDER' ethnicity in 2011. ``` # Replace the rank of the first three ranked names to 'MEDAL' names.replace({'Rank': {1:'MEDAL', 2:'MEDAL', 3:'MEDAL'}}, inplace=True) # Replace the rank of the 4th and 5th ranked names to 'ALMOST MEDAL' names.replace({'Rank': {4:'ALMOST MEDAL', 5:'ALMOST MEDAL'}}, inplace=True) print(names.head()) ``` # Most efficient method for scalar replacement If you want to replace a scalar value with another scalar value, which technique is the most efficient?? Replace using dictionaries.
github_jupyter
``` import tensorflow as tf import numpy as np import matplotlib.pyplot as plt def plot_series(time, series, format="-", start=0, end=None): plt.plot(time[start:end], series[start:end], format) plt.xlabel("Time") plt.ylabel("Value") plt.grid(True) !wget --no-check-certificate \ https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-min-temperatures.csv \ -O /tmp/daily-min-temperatures.csv import csv time_step = [] temps = [] with open('/tmp/daily-min-temperatures.csv') as csvfile: reader = csv.reader(csvfile, delimiter=',') next(reader) step=0 for row in reader: temps.append(float(row[1])) time_step.append(step) step = step + 1 series = np.array(temps) time = np.array(time_step) plt.figure(figsize=(10, 6)) plot_series(time, series) split_time = 2500 time_train = time[:split_time] x_train = series[:split_time] time_valid = time[split_time:] x_valid = series[split_time:] window_size = 30 batch_size = 32 shuffle_buffer_size = 1000 def windowed_dataset(series, window_size, batch_size, shuffle_buffer): series = tf.expand_dims(series, axis=-1) ds = tf.data.Dataset.from_tensor_slices(series) ds = ds.window(window_size + 1, shift=1, drop_remainder=True) ds = ds.flat_map(lambda w: w.batch(window_size + 1)) ds = ds.shuffle(shuffle_buffer) ds = ds.map(lambda w: (w[:-1], w[1:])) return ds.batch(batch_size).prefetch(1) def model_forecast(model, series, window_size): ds = tf.data.Dataset.from_tensor_slices(series) ds = ds.window(window_size, shift=1, drop_remainder=True) ds = ds.flat_map(lambda w: w.batch(window_size)) ds = ds.batch(32).prefetch(1) forecast = model.predict(ds) return forecast tf.keras.backend.clear_session() tf.random.set_seed(51) np.random.seed(51) window_size = 64 batch_size = 256 train_set = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size) print(train_set) print(x_train.shape) model = tf.keras.models.Sequential([ tf.keras.layers.Conv1D(filters=32, kernel_size=5, strides=1, padding="causal", activation="relu", 
input_shape=[None, 1]), tf.keras.layers.LSTM(64, return_sequences=True), tf.keras.layers.LSTM(64, return_sequences=True), tf.keras.layers.Dense(30, activation="relu"), tf.keras.layers.Dense(10, activation="relu"), tf.keras.layers.Dense(1), tf.keras.layers.Lambda(lambda x: x * 400) ]) lr_schedule = tf.keras.callbacks.LearningRateScheduler( lambda epoch: 1e-8 * 10**(epoch / 20)) optimizer = tf.keras.optimizers.SGD(lr=1e-8, momentum=0.9) model.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) history = model.fit(train_set, epochs=100, callbacks=[lr_schedule]) plt.semilogx(history.history["lr"], history.history["loss"]) plt.axis([1e-8, 1e-4, 0, 60]) tf.keras.backend.clear_session() tf.random.set_seed(51) np.random.seed(51) train_set = windowed_dataset(x_train, window_size=60, batch_size=100, shuffle_buffer=shuffle_buffer_size) model = tf.keras.models.Sequential([ tf.keras.layers.Conv1D(filters=32, kernel_size=5, strides=1, padding="causal", activation="relu", input_shape=[None, 1]), tf.keras.layers.LSTM(64, return_sequences=True), tf.keras.layers.LSTM(64, return_sequences=True), tf.keras.layers.Dense(30, activation="relu"), tf.keras.layers.Dense(10, activation="relu"), tf.keras.layers.Dense(1), tf.keras.layers.Lambda(lambda x: x * 400) ]) optimizer = tf.keras.optimizers.SGD(lr=5e-5, momentum=0.9) model.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) history = model.fit(train_set,epochs=150) # EXPECTED OUTPUT SHOULD SEE AN MAE OF <2 WITHIN ABOUT 30 EPOCHS rnn_forecast = model_forecast(model, series[..., np.newaxis], window_size) rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0] plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid) plot_series(time_valid, rnn_forecast) # EXPECTED OUTPUT. 
PLOT SHOULD SHOW PROJECTIONS FOLLOWING ORIGINAL DATA CLOSELY tf.keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy() # EXPECTED OUTPUT MAE < 2 -- I GOT 1.789626 print(rnn_forecast) # EXPECTED OUTPUT -- ARRAY OF VALUES IN THE LOW TEENS ```
github_jupyter
### Exercícios do capítulo 24 ``` import math import matplotlib.pyplot as plt import numpy as np import pandas as pd %matplotlib inline def trap(f,a,b): return (b-a)*(f[0]+f[1])/2 def simpson13(f,a,b): s = f[0] + 4*f[1] + f[2] s *= (b-a)/6 #print("pontos usados:") #print("a") #print(a) #print("ponto médio") #print(((b-a)/2)) #print("b") #print(b) return s def simpson38(f,a,b): s = f[0] + 3*f[1] + 3*f[2] + f[3] s *= (b-a)/8 #print("pontos usados:") #print("a") #print(a) #print("ponto médio 1") #print(a + (b-a)/3 ) #print("ponto médio 2") #print(a + (2*(b-a)/3)) #print("b") #print(b) return s ``` <img src='241.JPG'> <img src='242.JPG'> Alterando para escala de 24h apenas para plotar o gráfico, para o gráfico fazer mais sentido: ``` h = [0,2,4,5,6,7,8,9,10.5,11.5,12.5,14,16,17,18,19,20,21,22,23,24] f = [2,2,0,2,6,7,23,11,4,11,12,8,7,26,20,10,8,10,8,7,3] plt.figure(figsize=(7,7)) plt.plot(h,f, color='blue') plt.grid() plt.show() ``` Retornando à escala original: h = [0,2,4,5,6,7,8,9,10.5,11.5,12.5,2,4,5,6,7,8,9,10,11,12] f = [2,2,0,2,6,7,23,11,4,11,12,8,7,26,20,10,8,10,8,7,3] ``` taxas = [] f = [2,2,0] res = simpson13(f, 0, 4) print(res) taxas.append(res) f = [0,2,6] res = simpson13(f, 4, 6) print(res) taxas.append(res) f = [6,7,23,11] res = simpson38(f, 6, 9) print(res) taxas.append(res) f = [11,4] res = trap(f, 9, 10.5) print(res) taxas.append(res) f = [4,11,12] res = simpson13(f, 10.5, 12.5) print(res) taxas.append(res) f = [12,8] res = trap(f, 12.5, 14) print(res) taxas.append(res) f = [8,7] res = trap(f, 14, 16) print(res) taxas.append(res) f = [7,26,20] res = simpson13(f, 16, 18) print(res) taxas.append(res) f = [20,10,8] res = simpson13(f, 18, 20) print(res) taxas.append(res) f = [8,10,8] res = simpson13(f, 20, 22) print(res) taxas.append(res) f = [8,7,3] res = simpson13(f, 22, 24) print(res) taxas.append(res) print(sum(taxas)) ``` ### Portanto, o resultado da área da curva fica igual a 210.7083. 
(Unidade: horas*carros/(minuto)) ``` print(sum(taxas)/24) ``` ## Portanto, dividindo o valor encontrado por 24 horas, o resultado da taxa fica igual a 8.77951 carros/minuto. ### Uma vez que o dia tem 24*60 minutos, o total de carros que passa na intersecção por dia é de: ``` print(8.77951*24*60) ``` ### 12642.4944 ### Uma vez que não existe 0,49 carro, aproxima-se para 12643 carros <img src='243.JPG'> <img src='244.JPG'> 1481 <img src='245.JPG'> ### n = 5
github_jupyter
``` import sys,tweepy,csv,re from textblob import TextBlob import matplotlib.pyplot as plt class SentimentAnalysis: def __init__(self): self.tweets = [] self.tweetText = [] def DownloadData(self): # authenticating consumerKey = '59oDfXxmBBm22p2j3Gowy4lEE' consumerSecret = "bZufUMPivqtX94xG4Bt3QmsmqyL7TsDbkW8Kuo3cGYeFfKoysY" accessToken = "3060838521-u5eXreDFHOqaxUcvTYMFyuEXImu5RlpdiY436h8" accessTokenSecret = "Q55FxITLmzlJWW4xpNbwnsW2UPXQZL4KiOWf9QdsDlYKt" auth = tweepy.OAuthHandler(consumerKey, consumerSecret) auth.set_access_token(accessToken, accessTokenSecret) api = tweepy.API(auth) # input for term to be searched and how many tweets to search searchTerm = input("Enter Keyword/Tag to search about: ") NoOfTerms = int(input("Enter how many tweets to search: ")) # searching for tweets self.tweets = tweepy.Cursor(api.search, q=searchTerm, lang = "en").items(NoOfTerms) # Open/create a file to append data to csvFile = open('result.csv', 'a') # Use csv writer csvWriter = csv.writer(csvFile) # creating some variables to store info polarity = 0 positive = 0 wpositive = 0 spositive = 0 negative = 0 wnegative = 0 snegative = 0 neutral = 0 # iterating through tweets fetched for tweet in self.tweets: #Append to temp so that we can store in csv later. 
I use encode UTF-8 self.tweetText.append(self.cleanTweet(tweet.text).encode('utf-8')) # print (tweet.text.translate(non_bmp_map)) #print tweet's text analysis = TextBlob(tweet.text) # print(analysis.sentiment) # print tweet's polarity polarity += analysis.sentiment.polarity # adding up polarities to find the average later if (analysis.sentiment.polarity == 0): # adding reaction of how people are reacting to find average later neutral += 1 elif (analysis.sentiment.polarity > 0 and analysis.sentiment.polarity <= 0.3): wpositive += 1 elif (analysis.sentiment.polarity > 0.3 and analysis.sentiment.polarity <= 0.6): positive += 1 elif (analysis.sentiment.polarity > 0.6 and analysis.sentiment.polarity <= 1): spositive += 1 elif (analysis.sentiment.polarity > -0.3 and analysis.sentiment.polarity <= 0): wnegative += 1 elif (analysis.sentiment.polarity > -0.6 and analysis.sentiment.polarity <= -0.3): negative += 1 elif (analysis.sentiment.polarity > -1 and analysis.sentiment.polarity <= -0.6): snegative += 1 # Write to csv and close csv file csvWriter.writerow(self.tweetText) csvFile.close() # finding average of how people are reacting positive = self.percentage(positive, NoOfTerms) wpositive = self.percentage(wpositive, NoOfTerms) spositive = self.percentage(spositive, NoOfTerms) negative = self.percentage(negative, NoOfTerms) wnegative = self.percentage(wnegative, NoOfTerms) snegative = self.percentage(snegative, NoOfTerms) neutral = self.percentage(neutral, NoOfTerms) # finding average reaction polarity = polarity / NoOfTerms # printing out data print("How people are reacting on " + searchTerm + " by analyzing " + str(NoOfTerms) + " tweets.") print() print("General Report: ") if (polarity == 0): print("Neutral") elif (polarity > 0 and polarity <= 0.3): print("Weakly Positive") elif (polarity > 0.3 and polarity <= 0.6): print("Positive") elif (polarity > 0.6 and polarity <= 1): print("Strongly Positive") elif (polarity > -0.3 and polarity <= 0): print("Weakly Negative") 
elif (polarity > -0.6 and polarity <= -0.3): print("Negative") elif (polarity > -1 and polarity <= -0.6): print("Strongly Negative") print() print("Detailed Report: ") print(str(positive) + "% people thought it was positive") print(str(wpositive) + "% people thought it was weakly positive") print(str(spositive) + "% people thought it was strongly positive") print(str(negative) + "% people thought it was negative") print(str(wnegative) + "% people thought it was weakly negative") print(str(snegative) + "% people thought it was strongly negative") print(str(neutral) + "% people thought it was neutral") self.plotPieChart(positive, wpositive, spositive, negative, wnegative, snegative, neutral, searchTerm, NoOfTerms) def cleanTweet(self, tweet): # Remove Links, Special Characters etc from tweet return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t]) | (\w +:\ / \ / \S +)", " ", tweet).split()) # function to calculate percentage def percentage(self, part, whole): temp = 100 * float(part) / float(whole) return format(temp, '.2f') def plotPieChart(self, positive, wpositive, spositive, negative, wnegative, snegative, neutral, searchTerm, noOfSearchTerms): labels = ['Positive [' + str(positive) + '%]', 'Weakly Positive [' + str(wpositive) + '%]','Strongly Positive [' + str(spositive) + '%]', 'Neutral [' + str(neutral) + '%]', 'Negative [' + str(negative) + '%]', 'Weakly Negative [' + str(wnegative) + '%]', 'Strongly Negative [' + str(snegative) + '%]'] sizes = [positive, wpositive, spositive, neutral, negative, wnegative, snegative] colors = ['yellowgreen','lightgreen','darkgreen', 'gold', 'red','lightsalmon','darkred'] patches, texts = plt.pie(sizes, colors=colors, startangle=90) plt.legend(patches, labels, loc="best") plt.title('How people are reacting on ' + searchTerm + ' by analyzing ' + str(noOfSearchTerms) + ' Tweets.') plt.axis('equal') plt.tight_layout() plt.show() if __name__== "__main__": sa = SentimentAnalysis() sa.DownloadData() ```
github_jupyter
## <font color='blue'>数据清洗案例</font>
str(df.loc[df.目的地.isnull(),'路线名'].values)[5:7] df.loc[df.目的地.isnull(),'目的地'] = str(df.loc[df.目的地.isnull(),'路线名'].values)[5:7] round(df['价格'].mean(),0) #处理价格缺失值 df['价格'].fillna(round(df['价格'].mean(),0),inplace=True) #处理节省缺失值 df['节省'].fillna(round(df['节省'].mean(),0),inplace=True) df.isnull().sum() ``` ### 8. 处理文本型数据 ``` # 如果我们想要在一系列文本提取信息,可以使用正则表达式 # 正则表达式通常被用来检索某个规则的文本 df.head(10) df.酒店[:10] df.酒店.str.extract('(\d\.\d)分/5分',expand=True)[:10] #提取酒店评分 df['酒店评分'] = df.酒店.str.extract('(\d\.\d)分/5分',expand=False) #expand=False (return Index/Series) #expand=True (return DataFrame) df.head(2) df.酒店[:10] df.酒店.str.extract(' (.+) ',expand=False)[:5] #提取酒店等级 df['酒店等级'] = df.酒店.str.extract(' (.+) ',expand=False)#+号表示的是贪婪模式,也就是所有的数据都要提取出来 #提取天数信息 df['天数']=df.路线名.str.extract('(\d+)天\d晚',expand=False) df.head() ```
github_jupyter
<a href="https://colab.research.google.com/github/christianadriano/PCA_AquacultureSystem/blob/master/PCA_KMeans_All_Piscicultura.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import pandas as pd #tables for data wrangling import numpy as np #basic statistical methods import io #for uploading data #Manual option from google.colab import files uploaded = files.upload() #choose file dados_relativizados_centralizados_piscicultura.csv #Upload data from cvs file df = pd.read_csv(io.StringIO(uploaded['dados_relativizados_centralizados_piscicultura.csv'].decode('utf-8'))) #print(df) column_names = df.columns #Select fatores Ambientais feature_names = [name for name in column_names if name.startswith("E")] #feature_names = list(df.columns["A2_DA":"A4_EUC"]) #print(feature_names) list_names = ['fazenda'] + feature_names df_cultivo = df[list_names] df_cultivo.head() #Look at correlations import pandas as pd import matplotlib.pyplot as plt import seaborn as sns corr = df_cultivo.corr() # using a styled panda's dataframe from https://stackoverflow.com/a/42323184/1215012 cmap = 'coolwarm' corr.style.background_gradient(cmap, axis=1)\ .set_properties(**{'max-width': '80px', 'font-size': '10pt'})\ .set_precision(2)\ #smaller chart sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns, cmap='coolwarm') #check which ones are statiscally significant from scipy.stats import pearsonr import pandas as pd def calculate_pvalues(df): df = df.dropna()._get_numeric_data() dfcols = pd.DataFrame(columns=df.columns) pvalues = dfcols.transpose().join(dfcols, how='outer') for r in df.columns: for c in df.columns: pvalues[r][c] = round(pearsonr(df[r], df[c])[1], 4) return pvalues p_values = calculate_pvalues(df_cultivo) #Plot p-values def highlight_significant(val): ''' highlight in blue only the statistically significant cells ''' color = 'blue' if val < 0.05 else 'grey' return 'color: %s' % color 
p_values.style.applymap(highlight_significant) #Smaller plot of p-values import matplotlib.pyplot as plt from matplotlib import colors import numpy as np np.random.seed(101) zvals = np.random.rand(100, 100) * 10 # make a color map of fixed colors cmap_discrete = colors.ListedColormap(['lightblue', 'white']) bounds=[0,0.05,1] norm_binary = colors.BoundaryNorm(bounds, cmap_discrete.N) # tell imshow about color map so that only set colors are used img = plt.imshow(zvals, interpolation='nearest', origin='lower', cmap=cmap_discrete, norm=norm_binary) sns.heatmap(p_values, xticklabels=p_values.columns, yticklabels=p_values.columns, cmap=cmap_discrete, norm=norm_binary) ``` **PCA** Now we do the PCA ``` #Normalize the data to have MEAN==0 from sklearn.preprocessing import StandardScaler x = df_cultivo.iloc[:,1:].values x = StandardScaler().fit_transform(x) # normalizing the features #print(x) #Run PCA from sklearn.decomposition import PCA pca = PCA(n_components=2) principalComponents = pca.fit_transform(x) principalDf = pd.DataFrame(data = principalComponents , columns = ['principal component 1', 'principal component 2']) finalDf = pd.concat([principalDf, df_cultivo[['fazenda']]], axis = 1) #Visualize results of PCA in Two Dimensions import matplotlib.pyplot as plt fig = plt.figure(figsize = (12,12)) ax = fig.add_subplot(1,1,1) ax.set_xlabel('Principal Component 1', fontsize = 15) ax.set_ylabel('Principal Component 2', fontsize = 15) ax.set_title('2 component PCA', fontsize = 20) targets = df_cultivo['fazenda'].to_numpy() print(targets) for target in targets: indicesToKeep = finalDf['fazenda'] == target x = finalDf.loc[indicesToKeep, 'principal component 1'] y = finalDf.loc[indicesToKeep, 'principal component 2'] ax.scatter(x,y,s = 100) ax.annotate(target, (x+0.1,y)) #for name in targets: ax.legend(targets, loc='top right') ax.grid() variance_list =pca.explained_variance_ratio_ print("variance explained by each component:", variance_list) print("total variance 
explained:", sum(variance_list)) #principal components for each indicador #print(principalComponents) #print(targets) df_clustering = pd.DataFrame({'fazenda': targets, 'pc1':list(principalComponents[:,0]), 'pc2': list(principalComponents[:,1])}, columns=['fazenda', 'pc1','pc2']) #df_clustering #Find clusters from sklearn.cluster import KMeans #4 clusters model = KMeans(4) model.fit(df_clustering.iloc[:,1:3]) #print(model.cluster_centers_) #Plot clusters plt.scatter(df_clustering.iloc[:,1],df_clustering.iloc[:,2], c=model.labels_.astype(float)); plt.scatter(model.cluster_centers_[:,0], model.cluster_centers_[:,1], s=50, marker="x", color="grey"); # Show the #To which cluster each point belongs? df1= df_clustering.assign(cluster=pd.Series(model.labels_).values) df1.sort_values(by='cluster') #5 clusters model = KMeans(5) model.fit(df_clustering.iloc[:,1:3]) #print(model.cluster_centers_) #Plot clusters plt.scatter(df_clustering.iloc[:,1],df_clustering.iloc[:,2], c=model.labels_.astype(float)); plt.scatter(model.cluster_centers_[:,0], model.cluster_centers_[:,1], s=50, marker="x", color="grey"); # Show the ``` In my view, we have two large clusters and three outliers, as the graph above shows. ``` #To which cluster each point belongs? df1= df_clustering.assign(cluster=pd.Series(model.labels_).values) df1.sort_values(by='cluster') #6 clusters model = KMeans(6) model.fit(df_clustering.iloc[:,1:3]) #print(model.cluster_centers_) #Plot clusters plt.scatter(df_clustering.iloc[:,1],df_clustering.iloc[:,2], c=model.labels_.astype(float)); plt.scatter(model.cluster_centers_[:,0], model.cluster_centers_[:,1], s=50, marker="x", color="grey"); # Show the #To which cluster each point belongs? 
df1= df_clustering.assign(cluster=pd.Series(model.labels_).values) df1.sort_values(by='cluster') ``` Now we analyze 3 Principal Components ``` #Normalize the data to have MEAN==0 from sklearn.preprocessing import StandardScaler x = df_cultivo.iloc[:,1:].values x = StandardScaler().fit_transform(x) # normalizing the features #print(x) #Run PCA from sklearn.decomposition import PCA pca = PCA(n_components=3) principalComponents = pca.fit_transform(x) principalDf = pd.DataFrame(data = principalComponents , columns = ['principal component 1', 'principal component 2','principal component 3']) finalDf = pd.concat([principalDf, df_cultivo[['fazenda']]], axis = 1) variance_list =pca.explained_variance_ratio_ print("variance explained by each component:", variance_list) print("total variance explained:", sum(variance_list)) ``` Now we search for clusters for 3 principal components ``` #Find clusters from sklearn.cluster import KMeans #4 clusters model = KMeans(4) model.fit(df_clustering.iloc[:,1:4]) #print(model.cluster_centers_) #principal components for each indicador #print(principalComponents) #print(targets) df_clustering = pd.DataFrame({'fazenda': targets, 'pc1':list(principalComponents[:,0]), 'pc2': list(principalComponents[:,1]),'pc3': list(principalComponents[:,2])}, columns=['fazenda', 'pc1','pc2','pc3']) #df_clustering #4 clusters from sklearn.cluster import KMeans model = KMeans(4) model.fit(df_clustering.iloc[:,1:4]) #print(model.cluster_centers_) #Plot clusters fig = plt.figure(figsize = (12,12)) ax = fig.add_subplot(111, projection='3d') ax.set_xlabel('Principal Component 1', fontsize = 15) ax.set_ylabel('Principal Component 2', fontsize = 15) ax.set_zlabel('Principal Component 3', fontsize = 15) ax.set_title('3-components PCA', fontsize = 20) targets = df_cultivo['fazenda'].to_numpy() for target in targets: indicesToKeep = finalDf['fazenda'] == target x = finalDf.loc[indicesToKeep, 'principal component 1'] y = finalDf.loc[indicesToKeep, 'principal component 
2'] z = finalDf.loc[indicesToKeep, 'principal component 3'] ax.scatter(x,y,z,s = 100) ax.scatter(model.cluster_centers_[:,0], model.cluster_centers_[:,1],model.cluster_centers_[:,2], s=150, marker='x', color="black"); # Show the ax.legend(targets) ax.grid() ``` Now we search for clusters for the 3 principal components ``` #To which cluster each point belongs? df1= df_clustering.assign(cluster=pd.Series(model.labels_).values) df1.sort_values(by='cluster') ``` Comparing k-means of PC12 with PC123, we see that the cluster membership changes completely. ``` #5 clusters from sklearn.cluster import KMeans model = KMeans(5) model.fit(df_clustering.iloc[:,1:4]) #print(model.cluster_centers_) #Plot clusters #plt.scatter(df_clustering.iloc[:,1],df_clustering.iloc[:,2], c=model.labels_.astype(float)); #plt.scatter(model.cluster_centers_[:,0], model.cluster_centers_[:,1], s=50, color="red"); # Show the fig = plt.figure(figsize = (12,12)) ax = fig.add_subplot(111, projection='3d') ax.set_xlabel('Principal Component 1', fontsize = 15) ax.set_ylabel('Principal Component 2', fontsize = 15) ax.set_zlabel('Principal Component 3', fontsize = 15) ax.set_title('3-components PCA', fontsize = 20) targets = df_cultivo['fazenda'].to_numpy() for target in targets: indicesToKeep = finalDf['fazenda'] == target x = finalDf.loc[indicesToKeep, 'principal component 1'] y = finalDf.loc[indicesToKeep, 'principal component 2'] z = finalDf.loc[indicesToKeep, 'principal component 3'] ax.scatter(x,y,z,s = 100) #ax.annotate(target, (x,y)) ax.scatter(model.cluster_centers_[:,0], model.cluster_centers_[:,1],model.cluster_centers_[:,2], s=150, marker='x', color="black"); # Show the #for name in targets: ax.legend(targets) ax.grid() #To which cluster each point belongs? 
df1= df_clustering.assign(cluster=pd.Series(model.labels_).values) df1.sort_values(by='cluster') #6 clusters from sklearn.cluster import KMeans model = KMeans(6) model.fit(df_clustering.iloc[:,1:4]) #print(model.cluster_centers_) #Plot clusters #plt.scatter(df_clustering.iloc[:,1],df_clustering.iloc[:,2], c=model.labels_.astype(float)); #plt.scatter(model.cluster_centers_[:,0], model.cluster_centers_[:,1], s=50, color="red"); # Show the fig = plt.figure(figsize = (12,12)) ax = fig.add_subplot(111, projection='3d') ax.set_xlabel('Principal Component 1', fontsize = 15) ax.set_ylabel('Principal Component 2', fontsize = 15) ax.set_zlabel('Principal Component 3', fontsize = 15) ax.set_title('3-components PCA', fontsize = 20) targets = df_cultivo['fazenda'].to_numpy() for target in targets: indicesToKeep = finalDf['fazenda'] == target x = finalDf.loc[indicesToKeep, 'principal component 1'] y = finalDf.loc[indicesToKeep, 'principal component 2'] z = finalDf.loc[indicesToKeep, 'principal component 3'] ax.scatter(x,y,z,s = 100) #ax.annotate(target, (x,y)) ax.scatter(model.cluster_centers_[:,0], model.cluster_centers_[:,1],model.cluster_centers_[:,2], s=150, marker='x', color="black"); # Show the #for name in targets: ax.legend(targets) ax.grid() #To which cluster each point belongs? df1= df_clustering.assign(cluster=pd.Series(model.labels_).values) df1.sort_values(by='cluster') ```
github_jupyter
# TensorFlowOnSpark with InputMode.TENSORFLOW This notebook demonstrates TensorFlowOnSpark using `InputMode.TENSORFLOW`, which launches a distributed TensorFlow cluster on the Spark executors, where each TensorFlow process reads directly from disk. ### Start a Spark Standalone Cluster First, in a terminal/shell window, start a single-machine Spark Standalone Cluster with three workers: ``` export MASTER=spark://$(hostname):7077 export SPARK_WORKER_INSTANCES=3 export CORES_PER_WORKER=1 export TOTAL_CORES=$((${CORES_PER_WORKER}*${SPARK_WORKER_INSTANCES})) ${SPARK_HOME}/sbin/start-master.sh; ${SPARK_HOME}/sbin/start-slave.sh -c $CORES_PER_WORKER -m 3G ${MASTER} ``` ### Convert the MNIST zip files using Spark This notebook assumes that you have already [downloaded the MNIST dataset](https://github.com/yahoo/TensorFlowOnSpark/wiki/GetStarted_Standalone#download-mnist-data). If so, you can convert it to TFRecord format as follows: ``` export TFoS_HOME=</path/to/TensorFlowOnSpark> cd ${TFoS_HOME} # rm -rf examples/mnist/tfr ${SPARK_HOME}/bin/spark-submit \ --master ${MASTER} \ ${TFoS_HOME}/examples/mnist/mnist_data_setup.py \ --output examples/mnist/tfr \ --format tfr ls -lR examples/mnist/tfr ``` ### Launch the Spark Jupyter Notebook Now, in the same terminal window, launch a Pyspark Jupyter notebook: ``` # export TFoS_HOME=</path/to/TensorFlowOnSpark> cd ${TFoS_HOME}/examples/mnist PYSPARK_DRIVER_PYTHON="jupyter" \ PYSPARK_DRIVER_PYTHON_OPTS="notebook" \ pyspark --master ${MASTER} \ --conf spark.cores.max=${TOTAL_CORES} \ --conf spark.task.cpus=${CORES_PER_WORKER} \ --py-files ${TFoS_HOME}/examples/mnist/tf/mnist_dist.py \ --conf spark.executorEnv.JAVA_HOME="$JAVA_HOME" ``` This should open a Jupyter browser pointing to the directory where this notebook is hosted. Click on this notebook and begin executing the steps of the notebook. NOTE: the `SparkContext` should be available as the `sc` variable. 
You can use it to navigate to the Spark UI's "Executors" tab, where you will find the logs for each Spark executor. For TensorFlowOnSpark, each executor will correspond to a specific TensorFlow instance in the cluster, and the TensorFlow logs will be reported in each executor's `stderr` logs. ``` sc from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import subprocess from tensorflowonspark import TFCluster # main TensorFlow code for this example import mnist_dist parser = argparse.ArgumentParser() parser.add_argument("--batch_size", help="number of records per batch", type=int, default=100) parser.add_argument("--epochs", help="number of epochs", type=int, default=1) parser.add_argument("--export", help="HDFS path to export model", type=str, default="mnist_export") parser.add_argument("--format", help="example format: (csv2|tfr)", choices=["csv2", "tfr"], default="tfr") parser.add_argument("--images_labels", help="HDFS path to MNIST image_label files in parallelized format") parser.add_argument("--mode", help="train|inference", default="train") parser.add_argument("--model", help="HDFS path to save/load model during train/test", default="mnist_model") parser.add_argument("--output", help="HDFS path to save test/inference output", default="predictions") parser.add_argument("--rdma", help="use rdma connection", default=False) parser.add_argument("--readers", help="number of reader/enqueue threads per worker", type=int, default=10) parser.add_argument("--shuffle_size", help="size of shuffle buffer", type=int, default=1000) parser.add_argument("--steps", help="maximum number of steps", type=int, default=1000) parser.add_argument("--tensorboard", help="launch tensorboard process", action="store_true") num_executors = sc.defaultParallelism num_executors ``` ### Run Distributed Training ``` # verify training images and labels train_images_files = "tfr/train" print(subprocess.check_output(["ls", 
"-l", train_images_files]).decode("utf-8")) # parse arguments for training args = parser.parse_args(['--mode', 'train', '--steps', '600', '--epochs', '1', '--images_labels', train_images_files]) args # remove any existing models subprocess.call(["rm", "-rf", args.model, args.export]) # start the cluster for training cluster = TFCluster.run(sc, mnist_dist.map_fun, args, num_executors, 1, args.tensorboard, TFCluster.InputMode.TENSORFLOW) # shutdown the cluster. # NOTE: this will block until all TensorFlow nodes have completed cluster.shutdown() print(subprocess.check_output(["ls", "-l", args.model]).decode("utf-8")) print(subprocess.check_output(["ls", "-lR", args.export]).decode("utf-8")) ``` ### Run Distributed Inference ``` test_images_files = "tfr/test" print(subprocess.check_output(["ls", "-l", test_images_files]).decode("utf-8")) #Parse arguments for inference args = parser.parse_args(['--mode', 'inference', '--images_labels', test_images_files]) args #remove existing output if any subprocess.call(["rm", "-rf", args.output]) #Start the cluster for inference cluster = TFCluster.run(sc, mnist_dist.map_fun, args, num_executors, 1, False, TFCluster.InputMode.SPARK) cluster.shutdown() print(subprocess.check_output(["ls", "-l", args.output]).decode("utf-8")) ``` ### Shutdown In your terminal/shell window, you can type `<ctrl-C>` to exit the Notebook server. Then, stop the Standalone Cluster via: ``` ${SPARK_HOME}/sbin/stop-slave.sh; ${SPARK_HOME}/sbin/stop-master.sh ```
github_jupyter
# Determine the 2D Basis of a Plane The first step in simulating interfaces is the determination of the two-dimensional periodicity (i.e. the basis vectors) of the plane. The interfaces are two-dimensional sections of the underlying three-dimensional lattice, and hence, the interface will exhibit the periodicity of the corresponding 2D lattice of the plane. The technique to determine the basis vectors is outlined in the following article: [**An efficient algorithm for computing the primitive bases of a general lattice plane.**](https://scripts.iucr.org/cgi-bin/paper?rg5087) Journal of Applied Crystallography Banadaki, A. D., & Patala, S. (2015). , 48(2), 585-588. In this tutorial, we will discuss the steps involved in determining the basis vectors of the 2D plane using the **byxtal** package. Please follow the installation steps (link needed!!) to acquire the byxtal package and import all the required packages that we need for completing this tutorial. ## Miller Indices and Conventions: 1. Miller Indices are often used to refer to a given crystallographic plane in crystals. 2. However, various conventions are commonly used in determining the Miller Indices that can change the indices of the plane. For example, in the FCC lattice, one could either use the cubic unit-cell or the primitive cell to index planes and directions, resulting in completely different indices for the same plane. Therefore, we would like to declare our conventions in defining the Miller Indices to avoid potential confusion. 3. By definition, the Miller indices of a plane, denoted by $(h \, k \, l)$, refer to the indices of the lattice vector perpendicular to the plane expressed in the reciprocal lattice. Therefore, the indices will depend on the reference lattice used (e.g. the cubic unit-cell or the primitive cell). 4. In the **byxtal** package, we perform the calculations in the primitve cell. 
The reason is simply that, in the primitive lattice, all the lattice points are expressed using integers. This helps with some of the algebraic manipulations that are performed in the package.
In practice such conversions are not necessary as long as the user is consistent with the definitions of GBpy, and can be used for verifying the answer.) Let's start with importing the **byxtal** package and other modules that we will use in this tutorial. ``` import byxtal as bxt import numpy as np from sympy.matrices import Matrix, eye, zeros; ``` ## Problem Definition: In the tutorial, we will determine the planar basis of a crystallographic plane in the FCC lattice. Consider the plane whose normal vector is along the direction $[2 3 1]$ expressed in the cubic unit-cell reference frame. Since the normal vector to the plane is provided in the orthogonal unit-cell basis, we will first determine the Miller Indices of the plane (using the primitive cell bases). ### Finding Miller Indices: A vector in the space can be expressed in any basis, of course with varying components. Vector $\vec{v}$ in basis A can be expressed as: \begin{equation} \vec{v} = \mathcal{B}_A v_A \end{equation} Similarly we can define the plane normal $\vec{n}$ in any basis. For instance we can define $\vec{n}$ in unit-cell basis ($\mathcal{B}_{PO}$) or in primitive-cell basis $\mathcal{B}_P$; we can write: \begin{equation} \vec{n} = \mathcal{B}_{PO} n_{PO} = \mathcal{B}_{P} n_{P} \end{equation} The conversion from one basis to the other can be determined by using the components of the basis vectors of one of the frames (e.g. $P$) in the other frame (e.g. $PO$): \begin{equation} \mathcal{B}_{P} = \mathcal{B}_{PO} \Lambda_{P}^{PO} \end{equation} where, $\Lambda_P^{PO}$ is a $3 \times 3$ matrix with its columns representing the components of basis vectors of $P$ frame in the $PO$ basis. For example, for an FCC lattice, $\Lambda_P^{PO}$ is given below. 
``` l_p_po = 1.0 * Matrix([[0.,0.5,0.5],[0.5,0.,0.5],[0.5,0.5,0.]]) l_p_po ``` We can now determine the components of the vector $\vec{n}$ in the $P$ reference frame as follows: \begin{align} \mathcal{B}_{P} n_{P} &= \mathcal{B}_{PO} n_{P0} \\ \nonumber \mathcal{B}_{PO} \Lambda_{P}^{PO} n_{P} &= \mathcal{B}_{PO} n_{P0} \\ \nonumber \Lambda_{P}^{PO} n_{P} &= \mathcal{B}_{PO} n_{P0} \\ \nonumber n_{P} &= \Lambda_{PO}^{P} n_{P0} \end{align} where $\Lambda_{P}^{PO} = \left( \Lambda_{PO}^{P} \right)^{-1}$. 1. **To determine the Miller indices**, we have to express the components of the normal vector $\vec{n}$ in the reference frame of the reciprocal lattice (the reciprocal of the primitive cell). 2. The basis vectors of the reciprocal of the primitve lattice are denoted using the symbol $\mathcal{B}^*_{P}$, and are given in the $PO$ reference frame as: \begin{equation} \mathcal{B}^*_{P} = \mathcal{B}_{PO} \Lambda_{P*}^{PO} \end{equation} 3. $\Lambda_{P*}^{PO}$ can be computed using the byxtal package using the function `bxt.find_csl_dsc.reciprocal_mat()`. For the sake of convenience we abbreviate the imported module `bxt.find_csl_dsc` as `fcd`. The code is shown below. ``` import byxtal.find_csl_dsc as fcd l_rp_po = fcd.reciprocal_mat(l_p_po) l_rp_po ``` where we use the variable `l_rp_po` to represent $\Lambda_{P*}^{PO}$. Now, we can determine the indices of $\vec{n}$ in the $P^*$ reference frame, using equation (4) as: \begin{equation} n_{P^*} = \Lambda_{PO}^{P*} n_{P0} \end{equation} Use the following code to determine the components $n_{P^*}$: ``` l_po_rp = (l_rp_po).inv() n_po = Matrix([[2], [3], [1]]) n_rp = l_po_rp*n_po n_rp ``` Remember, that the Miller Indices ought to be integers (without common factors). We have to find a common scaling factor for all the components such that the result is going to be scalar. We have implemented a function named `int_finder` that performs this task for a variety of input types (e.g. rows and columns of matrices). 
For irrational numbers int_finder accepts a tolerance and performs the same operation on the closest rational number within the specified tolerance. You can find this function in the package as: `byxtal.integer_manipulations.int_finder()`. Therefore, we repeat the previous steps and pass the results to the `int_finder` function to obtain the integer Miller indices. ``` import byxtal.integer_manipulations as iman ni_rp = iman.int_finder(n_rp) ni_rp ``` ## Finding the Planar Basis: 1. From the previous section, we found the Miller Indices of an FCC plane with the normal along $n_{PO} = [2 3 1]$ to be $(4 3 5)$. 2. Now all we have to do is to pass the obtained indices to `bp_basis`, which is a function that gets the Miller Indices (expressed using the primitive cell) as the input and returns a $3 \times 2$ matrix, where the columns represent the components of the basis vectors in the primitive $(P)$ reference frame. 3. Also the obtained vectors are in the [reduced form](https://en.wikipedia.org/wiki/Lenstra%E2%80%93Lenstra%E2%80%93Lov%C3%A1sz_lattice_basis_reduction_algorithm). You can find the bp_basis function in the following path: `byxtal.bp_basis.bp_basis()`. To find the basis vector of a plane with the Miller Indices of $(4 3 5)$ use the following syntax: ``` import byxtal.bp_basis as bpb l_2D_p = Matrix(bpb.bp_basis(ni_rp)) l_2D_p ``` To express the obtained basis in the orthogonal basis (i.e. supercell f.c.c) one needs to perform the following conversion of bases: \begin{equation} \Lambda_{2D}^{PO} = \Lambda_{P}^{PO} \times \Lambda_{2D}^{P} \end{equation} ``` l_2D_po = l_p_po*l_2D_p l_2D_po ``` ## Summary 1. At the interface of a bicrystal, the $\Lambda_{2D}^{PO}$ provides a basis for the interface. 2. If the two crystals are related to each other by a $\Sigma$-rotation, the obtained $\Lambda_{2D}^{po}$ is the two-dimensional basis for the two-dimensional coincidence site lattice at the interface. 
Therefore, the bicrystal conserves its periodicity in the obtained 2D-basis. 3. In other words the obtained basis is in fact the basis for the unit cell of the bicrystal and since it is in the reduced form, it is going to have the least skewness, hence ideal for constructing a periodic simulation box. The above process is frquently repeated for simulation of grain boundaries. Therefore, we have developed a set of functions that make the conversion of indices more convenient and will accept various conventions for the Miller Indices. Please refer to the grain boundary 2D-CSL tutorial for how to use these functions.
github_jupyter
# Large scale text analysis with deep learning (3 points) Today we're gonna apply the newly learned tools for the task of predicting job salary. <img src="https://storage.googleapis.com/kaggle-competitions/kaggle/3342/media/salary%20prediction%20engine%20v2.png" width=400px> _Special thanks to [Oleg Vasilev](https://github.com/Omrigan/) for the core assignment idea._ ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline ``` ### About the challenge For starters, let's download and unpack the data from [here]. You can also get it from [yadisk url](https://yadi.sk/d/vVEOWPFY3NruT7) the competition [page](https://www.kaggle.com/c/job-salary-prediction/data) (pick `Train_rev1.*`). ``` # !wget https://ysda-seminars.s3.eu-central-1.amazonaws.com/Train_rev1.zip # !unzip Train_rev1.zip data = pd.read_csv("./Train_rev1.csv", index_col=None) data.shape data.head() ``` One problem with salary prediction is that it's oddly distributed: there are many people who are paid standard salaries and a few that get tons o money. The distribution is fat-tailed on the right side, which is inconvenient for MSE minimization. There are several techniques to combat this: using a different loss function, predicting log-target instead of raw target or even replacing targets with their percentiles among all salaries in the training set. We gonna use logarithm for now. _You can read more [in the official description](https://www.kaggle.com/c/job-salary-prediction#description)._ ``` data['Log1pSalary'] = np.log1p(data['SalaryNormalized']).astype('float32') plt.figure(figsize=[8, 4]) plt.subplot(1, 2, 1) plt.hist(data["SalaryNormalized"], bins=20); plt.subplot(1, 2, 2) plt.hist(data['Log1pSalary'], bins=20); ``` Our task is to predict one number, __Log1pSalary__. 
To do so, our model can access a number of features: * Free text: __`Title`__ and __`FullDescription`__ * Categorical: __`Category`__, __`Company`__, __`LocationNormalized`__, __`ContractType`__, and __`ContractTime`__. ``` text_columns = ["Title", "FullDescription"] categorical_columns = ["Category", "Company", "LocationNormalized", "ContractType", "ContractTime"] TARGET_COLUMN = "Log1pSalary" data[categorical_columns] = data[categorical_columns].fillna('NaN') # cast missing values to string "NaN" data.sample(3) ``` ### Preprocessing text data Just like last week, applying NLP to a problem begins from tokenization: splitting raw text into sequences of tokens (words, punctuation, etc). __Your task__ is to lowercase and tokenize all texts under `Title` and `FullDescription` columns. Store the tokenized data as a __space-separated__ string of tokens for performance reasons. It's okay to use nltk tokenizers. Assertions were designed for WordPunctTokenizer, slight deviations are okay. ``` print("Raw text:") print(data["FullDescription"][2::100000]) import nltk #TODO YOUR CODE HERE tokenizer = nltk.tokenize.WordPunctTokenizer() def tokenize(text): tokens = tokenizer.tokenize(str(text).lower()) return " ".join(tokens) data[text_columns] = data[text_columns].applymap(tokenize) data["FullDescription"][2][:50] ``` Now we can assume that our text is a space-separated list of tokens: ``` print("Tokenized:") print(data["FullDescription"][2::100000]) assert data["FullDescription"][2][:50] == 'mathematical modeller / simulation analyst / opera' assert data["Title"][54321] == 'international digital account manager ( german )' from itertools import chain # example of iterator chaining descs= pd.Series(['desc1', 'desc2', 'desc3']) titles= pd.Series(['title1', 'title2', 'titl3']) desc_title_iter = chain(descs, titles) for text in desc_title_iter: print(text) ``` Not all words are equally useful. Some of them are typos or rare words that are only present a few times. 
Let's count how many times is each word present in the data so that we can build a "white list" of known words. ``` from collections import Counter sentenses = chain(data["FullDescription"], data["Title"]) token_counts = Counter(" ".join(sentenses).split(" ")) # Count how many times does each token occur in both "Title" and "FullDescription" in total #TODO <YOUR CODE> %%time sentenses = chain(data["FullDescription"], data["Title"]) token_counts = Counter(" ".join(sentenses).split(" ")) %%time token_counts2 = Counter() for row in data[text_columns].values.flatten(): token_counts2.update(row.split()) assert token_counts == token_counts2 print("Total unique tokens :", len(token_counts)) print('\n'.join(map(str, token_counts.most_common(n=5)))) print('...') print('\n'.join(map(str, token_counts.most_common()[-3:]))) assert token_counts.most_common(1)[0][1] in range(2600000, 2700000) assert len(token_counts) in range(200000, 210000) print('Correct!') # Let's see how many words are there for each count plt.hist(list(token_counts.values()), range=[0, 10**4], bins=50, log=True) plt.xlabel("Word counts"); ``` __Task 1.1__ Get a list of all tokens that occur at least 10 times. 
``` min_count = 10 # tokens from token_counts keys that had at least min_count occurrences throughout the dataset tokens = sorted(t for t, c in token_counts.items() if c >= min_count)#TODO<YOUR CODE HERE> # Add special tokens for unknown and empty words UNK, PAD = "UNK", "PAD" tokens = [UNK, PAD] + tokens print("Vocabulary size:", len(tokens)) assert type(tokens) == list assert len(tokens) in range(32000, 35000) assert 'me' in tokens assert UNK in tokens print("Correct!") for i, t in enumerate(tokens[:5]): print(i) print(t) ``` __Task 1.2__ Build an inverse token index: a dictionary from token(string) to its index in `tokens` (int) ``` token_to_id = {t: i for i, t in enumerate(tokens)} assert isinstance(token_to_id, dict) assert len(token_to_id) == len(tokens) for tok in tokens: assert tokens[token_to_id[tok]] == tok print("Correct!") ``` And finally, let's use the vocabulary you've built to map text lines into neural network-digestible matrices. ``` UNK_IX, PAD_IX = map(token_to_id.get, [UNK, PAD]) def as_matrix(sequences, max_len=None): """ Convert a list of tokens into a matrix with padding """ if isinstance(sequences[0], str): sequences = list(map(str.split, sequences)) max_len = min(max(map(len, sequences)), max_len or float('inf')) matrix = np.full((len(sequences), max_len), np.int32(PAD_IX)) for i,seq in enumerate(sequences): row_ix = [token_to_id.get(word, UNK_IX) for word in seq[:max_len]] matrix[i, :len(row_ix)] = row_ix return matrix print("Lines:") print('\n'.join(data["Title"][::100000].values), end='\n\n') print("Matrix:") print(as_matrix(data["Title"][::100000])) ``` Now let's encode the categorical data we have. As usual, we shall use one-hot encoding for simplicity. Kudos if you implement more advanced encodings: tf-idf, pseudo-time-series, etc.
``` from sklearn.feature_extraction import DictVectorizer # we only consider top-1k most frequent companies to minimize memory usage top_companies, top_counts = zip(*Counter(data['Company']).most_common(1000)) recognized_companies = set(top_companies) data["Company"] = data["Company"].apply(lambda comp: comp if comp in recognized_companies else "Other") categorical_vectorizer = DictVectorizer(dtype=np.float32, sparse=False) categorical_vectorizer.fit(data[categorical_columns].apply(dict, axis=1)) ``` ### The deep learning part Once we've learned to tokenize the data, let's design a machine learning experiment. As before, we won't focus too much on validation, opting for a simple train-test split. __To be completely rigorous,__ we've committed a small crime here: we used the whole data for tokenization and vocabulary building. A more strict way would be to do that part on training set only. You may want to do that and measure the magnitude of changes. ``` from sklearn.model_selection import train_test_split data_train, data_val = train_test_split(data, test_size=0.2, random_state=42) data_train.index = range(len(data_train)) data_val.index = range(len(data_val)) print("Train size = ", len(data_train)) print("Validation size = ", len(data_val)) import torch print(torch.cuda.is_available()) print(torch.cuda.get_device_name(0)) def to_tensors(batch, device): batch_tensors = dict() for key, arr in batch.items(): if key in ["FullDescription", "Title"]: batch_tensors[key] = torch.tensor(arr, device=device, dtype=torch.int64) else: batch_tensors[key] = torch.tensor(arr, device=device) return batch_tensors def make_batch(data, max_len=None, word_dropout=0, device=torch.device('cpu')): """ Creates a keras-friendly dict from the batch data. 
:param word_dropout: replaces token index with UNK_IX with this probability :returns: a dict with {'title' : int64[batch, title_max_len] """ batch = {} batch["Title"] = as_matrix(data["Title"].values, max_len) batch["FullDescription"] = as_matrix(data["FullDescription"].values, max_len) batch['Categorical'] = categorical_vectorizer.transform(data[categorical_columns].apply(dict, axis=1)) if word_dropout != 0: batch["FullDescription"] = apply_word_dropout(batch["FullDescription"], 1. - word_dropout) if TARGET_COLUMN in data.columns: batch[TARGET_COLUMN] = data[TARGET_COLUMN].values return to_tensors(batch, device) def apply_word_dropout(matrix, keep_prop, replace_with=UNK_IX, pad_ix=PAD_IX,): dropout_mask = np.random.choice(2, np.shape(matrix), p=[keep_prop, 1 - keep_prop]) dropout_mask &= matrix != pad_ix return np.choose(dropout_mask, [matrix, np.full_like(matrix, replace_with)]) batch_example = make_batch(data_train[:3], max_len=10) batch_example['Title'].shape batch_example['FullDescription'].shape batch_example['Categorical'].shape batch_example[TARGET_COLUMN].shape target = batch_example[TARGET_COLUMN] ``` #### Architecture Our basic model consists of three branches: * Title encoder * Description encoder * Categorical features encoder We will then feed all 3 branches into one common network that predicts salary. ![scheme](https://github.com/yandexdataschool/nlp_course/raw/master/resources/w2_conv_arch.png) This clearly doesn't fit into keras' __Sequential__ interface. To build such a network, one will have to use PyTorch. 
``` import torch import torch.nn as nn import torch.functional as F ``` ### Simple NN on description ``` class Reorder(nn.Module): # helper to reorder with Conv1d specs (num_batches, n_channels (emb_size in our case), input_length) def forward(self, input): return input.permute((0, 2, 1)) def iterate_minibatches(data, batch_size=256, shuffle=True, cycle=False, device=torch.device('cpu'), **kwargs): """ iterates minibatches of data in random order """ while True: indices = np.arange(len(data)) if shuffle: indices = np.random.permutation(indices) for start in range(0, len(indices), batch_size): batch = make_batch(data.iloc[indices[start : start + batch_size]], **kwargs) yield batch if not cycle: break ``` #### example of iterator usage and network layers ``` iterator = iterate_minibatches(data_train, 3) batch = next(iterator) print(batch['FullDescription'].shape) example_emb = nn.Embedding(num_embeddings=len(tokens), embedding_dim=64) emb_batch = example_emb(torch.tensor(batch['FullDescription']).type(torch.LongTensor)) emb_batch.shape emb_batch = Reorder()(emb_batch) emb_batch.shape conv_l = nn.Conv1d( in_channels=64, out_channels=128, kernel_size=5 ) emb_batch = conv_l(emb_batch) emb_batch.shape apool_l = nn.AdaptiveMaxPool1d(2) emb_batch = apool_l(emb_batch) emb_batch.shape ``` #### Create simple model as nn.Sequantial ``` N_TOKENS = len(tokens) N_CAT_FEATURES = len(categorical_vectorizer.vocabulary_) HID_SIZE = 64 N_MAXIMUS = 2 simple_model = nn.Sequential() simple_model.add_module('emb', nn.Embedding( num_embeddings=N_TOKENS, embedding_dim=HID_SIZE)) simple_model.add_module('reorder', Reorder()) simple_model.add_module('conv1', nn.Conv1d( in_channels=HID_SIZE, out_channels=HID_SIZE * 2, kernel_size=3 )) simple_model.add_module('relu1', nn.ReLU()) simple_model.add_module('conv2', nn.Conv1d( in_channels=HID_SIZE * 2, out_channels=HID_SIZE * 2, kernel_size=3 )) simple_model.add_module('relu2', nn.ReLU()) simple_model.add_module('bn1', nn.BatchNorm1d(HID_SIZE*2)) 
simple_model.add_module('adaptive_pool', nn.AdaptiveMaxPool1d(N_MAXIMUS)) simple_model.add_module('flatten', nn.Flatten()) simple_model.add_module('linear_out', nn.Linear(HID_SIZE * 2 * N_MAXIMUS, 1)) b = simple_model(torch.tensor(batch['FullDescription']).type(torch.LongTensor)) b.shape data_train.columns len(data_train) ``` #### Training simple model ``` from IPython.display import clear_output from random import sample N_EPOCHS = 1 device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu') model = simple_model model.to(device) opt = torch.optim.Adam(model.parameters()) loss_func = nn.MSELoss() history = [] for rpoch_num in range(N_EPOCHS): for idx, batch in enumerate(iterate_minibatches(data_train, device=device)): _batch = batch['FullDescription'].to(device) target = batch[TARGET_COLUMN].to(device) predictions = model(_batch) predictions = predictions.view(predictions.size(0)) loss = loss_func(target, predictions) loss.backward() opt.step() opt.zero_grad() history.append(loss.item()) if (idx+1)%10 == 0: clear_output(True) plt.plot(history, label='loss') plt.legend() plt.show() ``` #### Simple model evaluation ``` model.eval() from tqdm import tqdm, tqdm_notebook def print_metrics(model, data, batch_size=256, name="", **kw): squared_error = abs_error = num_samples = 0.0 for batch in tqdm( iterate_minibatches(data, batch_size=256, shuffle=False, **kw) ): _batch = batch["FullDescription"].to(device) targets = batch[TARGET_COLUMN].to(device) batch_pred = model(_batch)[:, 0] squared_error += loss_func(batch_pred, targets).item() abs_error += np.sum(np.abs(batch_pred.cpu().data.numpy() - targets.cpu().data.numpy())) num_samples += len(targets) print("%s results:" % (name or "")) print("Mean square error: %.5f" % (squared_error / num_samples)) print("Mean absolute error: %.5f" % (abs_error / num_samples)) return squared_error, abs_error print_metrics(model, data_train, 256, name="Train", device=device) print_metrics(model, data_val, 256, 
name="Val", device=device) data[TARGET_COLUMN].var() data[TARGET_COLUMN].std() class SalaryPredictor(nn.Module): def __init__(self, n_tokens=len(tokens), n_cat_features=len(categorical_vectorizer.vocabulary_), hid_size=64): super().__init__() # YOUR CODE HERE def forward(self, batch): # YOUR CODE HERE model = SalaryPredictor() model = SalaryPredictor() batch = make_batch(data_train[:100]) criterion = nn.MSELoss() dummy_pred = model(batch) dummy_loss = criterion(dummy_pred, batch[TARGET_COLUMN]) assert dummy_pred.shape == torch.Size([100]) assert len(torch.unique(dummy_pred)) > 20, "model returns suspiciously few unique outputs. Check your initialization" assert dummy_loss.ndim == 0 and 0. <= dummy_loss <= 250., "make sure you minimize MSE" ``` #### Training and evaluation As usual, we gonna feed our monster with random minibatches of data. As we train, we want to monitor not only loss function, which is computed in log-space, but also the actual error measured in dollars. ### Model training We can now fit our model the usual minibatch way. The interesting part is that we train on an infinite stream of minibatches, produced by `iterate_minibatches` function. 
``` import tqdm BATCH_SIZE = 16 EPOCHS = 5 DEVICE = torch.device('cpu') def print_metrics(model, data, batch_size=BATCH_SIZE, name="", **kw): squared_error = abs_error = num_samples = 0.0 model.eval() with torch.no_grad(): for batch in iterate_minibatches(data, batch_size=batch_size, shuffle=False, **kw): batch_pred = model(batch) squared_error += torch.sum(torch.square(batch_pred - batch[TARGET_COLUMN])) abs_error += torch.sum(torch.abs(batch_pred - batch[TARGET_COLUMN])) num_samples += len(batch_pred) mse = squared_error.detach().cpu().numpy() / num_samples mae = abs_error.detach().cpu().numpy() / num_samples print("%s results:" % (name or "")) print("Mean square error: %.5f" % mse) print("Mean absolute error: %.5f" % mae) return mse, mae model = SalaryPredictor().to(DEVICE) criterion = nn.MSELoss(reduction='sum') optimizer = torch.optim.SGD(model.parameters(), lr=1e-4) for epoch in range(EPOCHS): print(f"epoch: {epoch}") model.train() for i, batch in tqdm.tqdm_notebook(enumerate( iterate_minibatches(data_train, batch_size=BATCH_SIZE, device=DEVICE)), total=len(data_train) // BATCH_SIZE ): pred = model(batch) loss = criterion(pred, batch[TARGET_COLUMN]) optimizer.zero_grad() loss.backward() optimizer.step() print_metrics(model, data_val) ``` ### Bonus part: explaining model predictions It's usually a good idea to understand how your model works before you let it make actual decisions. It's simple for linear models: just see which words learned positive or negative weights. However, its much harder for neural networks that learn complex nonlinear dependencies. 
There are, however, some ways to look inside the black box: * Seeing how model responds to input perturbations * Finding inputs that maximize/minimize activation of some chosen neurons (_read more [on distill.pub](https://distill.pub/2018/building-blocks/)_) * Building local linear approximations to your neural network: [article](https://arxiv.org/abs/1602.04938), [eli5 library](https://github.com/TeamHG-Memex/eli5/tree/master/eli5/formatters) Today we gonna try the first method just because it's the simplest one. ``` def explain(model, sample, col_name='Title'): """ Computes the effect each word had on model predictions """ sample = dict(sample) sample_col_tokens = [tokens[token_to_id.get(tok, 0)] for tok in sample[col_name].split()] data_drop_one_token = pd.DataFrame([sample] * (len(sample_col_tokens) + 1)) for drop_i in range(len(sample_col_tokens)): data_drop_one_token.loc[drop_i, col_name] = ' '.join(UNK if i == drop_i else tok for i, tok in enumerate(sample_col_tokens)) *predictions_drop_one_token, baseline_pred = model.predict(make_batch(data_drop_one_token))[:, 0] diffs = baseline_pred - predictions_drop_one_token return list(zip(sample_col_tokens, diffs)) from IPython.display import HTML, display_html def draw_html(tokens_and_weights, cmap=plt.get_cmap("bwr"), display=True, token_template="""<span style="background-color: {color_hex}">{token}</span>""", font_style="font-size:14px;" ): def get_color_hex(weight): rgba = cmap(1. 
/ (1 + np.exp(weight)), bytes=True) return '#%02X%02X%02X' % rgba[:3] tokens_html = [ token_template.format(token=token, color_hex=get_color_hex(weight)) for token, weight in tokens_and_weights ] raw_html = """<p style="{}">{}</p>""".format(font_style, ' '.join(tokens_html)) if display: display_html(HTML(raw_html)) return raw_html i = 36605 tokens_and_weights = explain(model, data.loc[i], "Title") draw_html([(tok, weight * 5) for tok, weight in tokens_and_weights], font_style='font-size:20px;'); tokens_and_weights = explain(model, data.loc[i], "FullDescription") draw_html([(tok, weight * 10) for tok, weight in tokens_and_weights]); i = 12077 tokens_and_weights = explain(model, data.loc[i], "Title") draw_html([(tok, weight * 5) for tok, weight in tokens_and_weights], font_style='font-size:20px;'); tokens_and_weights = explain(model, data.loc[i], "FullDescription") draw_html([(tok, weight * 10) for tok, weight in tokens_and_weights]); i = np.random.randint(len(data)) print("Index:", i) print("Salary (gbp):", np.expm1(model.predict(make_batch(data.iloc[i: i+1]))[0, 0])) tokens_and_weights = explain(model, data.loc[i], "Title") draw_html([(tok, weight * 5) for tok, weight in tokens_and_weights], font_style='font-size:20px;'); tokens_and_weights = explain(model, data.loc[i], "FullDescription") draw_html([(tok, weight * 10) for tok, weight in tokens_and_weights]); from cnn import Vocab import pathlib import os import sys pathlib.Path(__name__) import os os.os.getcwd() project_path_os = os.path.dirname(os.getcwd()) project_path_os project_path = os.path.dirname(os.getcwd()) if project_path not in sys.path: sys.path.append(project_path) sys.path from app.vocab import Vocab project_path.as_uri() ``` __Terrible start-up idea #1962:__ make a tool that automaticaly rephrases your job description (or CV) to meet salary expectations :)
github_jupyter
``` from osgeo import gdal import numpy as np import cv2 import matplotlib.pyplot as plt import os import datetime import random import xlwt import tensorflow as tf from tensorflow.keras.models import Model from tensorflow.keras.layers import Input, BatchNormalization, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D from tensorflow.keras.optimizers import Adam from tensorflow.keras.callbacks import ModelCheckpoint from keras.layers import merge import keras print(gdal.__version__) print(tf.__version__) print(keras.__version__) ``` #### Asign GPU memory ``` ''' GPU == 1080ti CUDA version == 11.3.55 cudnn-11.3-windows-x64-v8.2.0.53 ''' ''' Necessary, other wise will have error in train_generator() (index out of range) ''' os.environ["CUDA_VISIBLE_DEVICES"] = '0' #The first GPU config=tf.compat.v1.ConfigProto() # The program can only occupy up to 90% of the specified GPU memory config.gpu_options.per_process_gpu_memory_fraction = 0.9 #The program allocates memory on demand config.gpu_options.allow_growth = True sess=tf.compat.v1.Session(config=config) ``` #### Test GPU ``` os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' print('GPU', tf.test.is_gpu_available()) a = tf.constant(2.0) b = tf.constant(4.0) print(a + b) ``` #### Define def ``` def normalize(img): #min of each channel minlist = [414, 457, 408, 325, 321, 352, 273, 262, 246, 258, 151, 124, 114, 119, 108, 135, 107, 99, 117, 93, 121, 127, 125, 138, 109, 107, 97, 100, 101, 68, 62, 57] #max - min (channel) diflist = [599, 566, 615, 698, 702, 671, 750, 761, 777, 765, 872, 899, 909, 904, 915, 888, 876, 848, 906, 703, 900, 896, 872, 762, 656, 682, 672, 739, 635, 510, 445, 198] img = img.astype(np.float32) #make sure all data in 0~1 #(i-min)/(max-min) for i in range(32): img[i][np.where(img[i]==0)]=minlist[i] img[i] = (img[i]-minlist[i])/diflist[i] img[i] = img[i]*255 return img def load_img(path): dataset = gdal.Open(path) im_width = dataset.RasterXSize im_height = dataset.RasterYSize im_data = 
dataset.ReadAsArray(0,0,im_width,im_height) im_data = im_data.transpose((1,2,0)) return im_data def dataPreprocess(img, label, classNum, colorDict_GRAY): # normalize img = img / 255.0 for i in range(colorDict_GRAY.shape[0]): label[label == colorDict_GRAY[i][0]] = i # Extend the data thickness to the classNum (including background) layer new_label = np.zeros(label.shape + (classNum,)) # Turn each type of flat label into a separate layer for i in range(classNum): new_label[label == i,i] = 1 label = new_label return (img, label) # read .tif image def readTif(fileName): dataset = gdal.Open(fileName) im_width = dataset.RasterXSize im_height = dataset.RasterYSize if dataset == None: print(fileName + "file can not open") return dataset # save .tif image def writeTiff(im_data, im_geotrans, im_proj, path): if 'int8' in im_data.dtype.name: datatype = gdal.GDT_Byte elif 'int16' in im_data.dtype.name: datatype = gdal.GDT_UInt16 else: datatype = gdal.GDT_Float32 if len(im_data.shape) == 3: im_bands, im_height, im_width = im_data.shape elif len(im_data.shape) == 2: im_data = np.array([im_data]) im_bands, im_height, im_width = im_data.shape #创建文件 driver = gdal.GetDriverByName("GTiff") dataset = driver.Create(path, int(im_width), int(im_height), int(im_bands), datatype) if(dataset!= None): dataset.SetGeoTransform(im_geotrans) #Write affine transformation parameters dataset.SetProjection(im_proj) #Write projection for i in range(im_bands): dataset.GetRasterBand(i + 1).WriteArray(im_data[i]) del dataset def color_dict(labelFolder, classNum): colorDict = [] # Get the file name in the folder ImageNameList = os.listdir(labelFolder) for i in range(len(ImageNameList)): ImagePath = labelFolder + "/" + ImageNameList[i] img = cv2.imread(ImagePath).astype(np.uint32) img[img==255]=0 # If it is grayscale, convert to RGB if(len(img.shape) == 2): img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB).astype(np.uint32) # In order to extract the unique value, convert RGB into a number img_new = img[:,:,0] * 
1000000 + img[:,:,1] * 1000 + img[:,:,2] unique = np.unique(img_new) # Add the unique value of the i-th pixel matrix to the colorDict for j in range(unique.shape[0]): colorDict.append(unique[j]) # Take the unique value for the unique value in the current i pixel matrix colorDict = sorted(set(colorDict)) # If the number of unique values is equal to the total number of classes (including background) ClassNum, #stop traversing the remaining images if(len(colorDict) == classNum): break # RGB dictionary storing colors, used for rendering results during prediction colorDict_RGB = [] for k in range(len(colorDict)): # Add zeros to the left of the result that does not reach nine digits color = str(colorDict[k]).rjust(9, '0') # The first 3 digits are R, the middle 3 digits are G, and the last 3 digits are B color_RGB = [int(color[0 : 3]), int(color[3 : 6]), int(color[6 : 9])] colorDict_RGB.append(color_RGB) colorDict_RGB = np.array(colorDict_RGB) # GRAY dictionary to store colors colorDict_GRAY = colorDict_RGB.reshape((colorDict_RGB.shape[0], 1 ,colorDict_RGB.shape[1])).astype(np.uint8) colorDict_GRAY = cv2.cvtColor(colorDict_GRAY, cv2.COLOR_BGR2GRAY) return colorDict_RGB, colorDict_GRAY def trainGenerator(batch_size, train_image_path, train_label_path, classNum, colorDict_GRAY, resize_shape = None): imageList = os.listdir(train_image_path) labelList = os.listdir(train_label_path) img = load_img(train_image_path + "/" + imageList[0]) # generate data while(True): img_generator = np.zeros((batch_size, img.shape[0], img.shape[1], img.shape[2])) label_generator = np.zeros((batch_size, img.shape[0], img.shape[1]), np.uint8) if(resize_shape != None): img_generator = np.zeros((batch_size, resize_shape[0], resize_shape[1], resize_shape[2])) label_generator = np.zeros((batch_size, resize_shape[0], resize_shape[1]), np.uint8) # randomly select a starting point for batch rand = random.randint(0, len(imageList) - batch_size) for j in range(batch_size): img = load_img(train_image_path + 
"/" + imageList[rand + j]) #normalize img = img.swapaxes(1, 2) img = img.swapaxes(1, 0) img = normalize(img) img = img.swapaxes(1, 0) img = img.swapaxes(1, 2) # change shape if(resize_shape != None): img = np.resize(img, (resize_shape[0], resize_shape[1], resize_shape[2])) img_generator[j] = img label = cv2.imread(train_label_path + "/" + labelList[rand + j]) label[label==255]=0 # color to grayscale if(len(label.shape) == 3): label = cv2.cvtColor(label, cv2.COLOR_RGB2GRAY) if(resize_shape != None): label = cv2.resize(label, (resize_shape[0], resize_shape[1])) label_generator[j] = label img_generator, label_generator = dataPreprocess(img_generator, label_generator, classNum, colorDict_GRAY) yield (img_generator,label_generator) ``` #### Define unet ``` def unet(pretrained_weights = None, input_size = (256, 256, 32), classNum = 5, learning_rate = 1e-4): inputs = Input(input_size) # 2D-Dimensional Convolution Layer conv1 = BatchNormalization()(Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)) conv1 = BatchNormalization()(Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)) # Max Pooling to the data pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) conv2 = BatchNormalization()(Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)) conv2 = BatchNormalization()(Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)) pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) conv3 = BatchNormalization()(Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)) conv3 = BatchNormalization()(Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)) pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) conv4 = BatchNormalization()(Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)) conv4 = 
BatchNormalization()(Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)) # Dropout regularization and avoid overfitting drop4 = Dropout(0.5)(conv4) pool4 = MaxPooling2D(pool_size=(2, 2))(drop4) conv5 = BatchNormalization()(Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)) conv5 = BatchNormalization()(Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)) drop5 = Dropout(0.5)(conv5) # transposed convolution up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5)) try: merge6 = concatenate([drop4,up6],axis = 3) except: merge6 = merge([drop4,up6], mode = 'concat', concat_axis = 3) conv6 = BatchNormalization()(Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)) conv6 = BatchNormalization()(Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)) up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6)) try: merge7 = concatenate([conv3,up7],axis = 3) except: merge7 = merge([conv3,up7], mode = 'concat', concat_axis = 3) conv7 = BatchNormalization()(Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)) conv7 = BatchNormalization()(Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)) up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7)) try: merge8 = concatenate([conv2,up8],axis = 3) except: merge8 = merge([conv2,up8],mode = 'concat', concat_axis = 3) conv8 = BatchNormalization()(Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)) conv8 = BatchNormalization()(Conv2D(128, 3, activation = 'relu', padding = 'same', 
kernel_initializer = 'he_normal')(conv8)) up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8)) try: merge9 = concatenate([conv1,up9],axis = 3) except: merge9 = merge([conv1,up9],mode = 'concat', concat_axis = 3) conv9 = BatchNormalization()(Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)) conv9 = BatchNormalization()(Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)) conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9) conv10 = Conv2D(classNum, 1, activation = 'softmax')(conv9) model = Model(inputs = inputs, outputs = conv10) # Used to configure the training model (optimizer, objective function, model evaluation criteria) model.compile(optimizer = Adam(lr = learning_rate), loss = 'categorical_crossentropy', metrics = ['accuracy']) if(pretrained_weights): model.load_weights(pretrained_weights) return model ``` #### All the parameter ``` ''' PATH parameter ''' # training image PATH train_image_path = "./comp/train/imagescut" # training label PATH train_label_path = "./comp/train/labelscut" # validation image PATH validation_image_path = "./comp/train/it" # validation label PATH validation_label_path = "./comp/train/lt" ''' Model parameter ''' # batch size batch_size = 2 # number of label classNum = 5 # shape of input images (2**n) input_size = (256, 256, 32) # epochs epochs = 10 # learning rate learning_rate = 1e-2 # pretrained model PATH # premodel_path = "./Model/unet_model.hdf5" premodel_path = None # save model PATH model_path = "./Model/unet_model.hdf5" # number of training set train_num = len(os.listdir(train_image_path)) # number of validation set validation_num = len(os.listdir(validation_image_path)) # how much batch size in each epoch for training set steps_per_epoch = train_num / batch_size # how much batch size in each epoch for validation set 
validation_steps = validation_num / batch_size # color directory for label colorDict_RGB, colorDict_GRAY = color_dict(train_label_path, classNum) ``` #### Model training ``` # get a generator to generate training data at the rate of batch_size train_Generator = trainGenerator(batch_size, train_image_path, train_label_path, classNum , colorDict_GRAY, input_size) # get a generator to generate validation data at the rate of batch_size validation_data = trainGenerator(batch_size, validation_image_path, validation_label_path, classNum, colorDict_GRAY, input_size) # define the model model = unet(pretrained_weights = premodel_path, input_size = input_size, classNum = classNum, learning_rate = learning_rate) # print model structure model.summary() # Callback model_checkpoint = ModelCheckpoint(model_path, monitor = 'loss', verbose = 1, save_best_only = True) # get time start_time = datetime.datetime.now() # model training history = model.fit(train_Generator, steps_per_epoch = steps_per_epoch, epochs = epochs, callbacks = [model_checkpoint], validation_data = validation_data, validation_steps = validation_steps) # total training time end_time = datetime.datetime.now() log_time = "Total training time: " + str((end_time - start_time).seconds / 60) + "m" print(log_time) with open('TrainTime.txt','w') as f: f.write(log_time) ``` #### Accuracy and loss ``` # plot loss and accuracy acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] book = xlwt.Workbook(encoding='utf-8', style_compression=0) sheet = book.add_sheet('test', cell_overwrite_ok=True) for i in range(len(acc)): sheet.write(i, 0, acc[i]) sheet.write(i, 1, val_acc[i]) sheet.write(i, 2, loss[i]) sheet.write(i, 3, val_loss[i]) book.save(r'AccAndLoss.xls') epochs = range(1, len(acc) + 1) plt.plot(epochs, acc, 'r', label = 'Training acc') plt.plot(epochs, val_acc, 'b', label = 'Validation acc') plt.title('Training and validation 
accuracy') plt.legend() plt.savefig("accuracy.png",dpi = 300) plt.figure() plt.plot(epochs, loss, 'r', label = 'Training loss') plt.plot(epochs, val_loss, 'b', label = 'Validation loss') plt.title('Training and validation loss') plt.legend() plt.savefig("loss.png", dpi = 300) plt.show() ```
github_jupyter
```
try:
    import openmdao.api as om
except ImportError:
    !python -m pip install openmdao[notebooks]
    import openmdao.api as om
```

# Warning Control

OpenMDAO has several classes of warnings that may be raised during operation. In general, these warnings are useful and the user should pay attention to them. Sometimes these warnings can be unnecessarily noisy. Filtering out noisy "low-priority" warnings can make other more important ones more obvious.

(om_specific_warning_categories)=
## OpenMDAO-Specific Warning Categories

Class **OpenMDAOWarning** serves as the base-class for all OpenMDAO-specific warnings. All OpenMDAO-specific warnings default to a filter of 'always'. The following table shows all OpenMDAOWarning-derived classes.

| Warning Class         | Description                                                                         |
|-----------------------|-------------------------------------------------------------------------------------|
| CacheWarning          | Issued when cache is invalid and must be discarded.                                 |
| CaseRecorderWarning   | Issued when a problem is encountered by a case recorder or case reader.             |
| DerivativesWarning    | Issued when the approximated partials or coloring cannot be evaluated as expected.  |
| DriverWarning         | Issued when a problem is encountered during driver execution.                       |
| OMDeprecationWarning  | Issued when a deprecated OpenMDAO feature is used.                                  |
| SetupWarning          | Issued when a problem is encountered during setup.                                  |
| SolverWarning         | Issued when a problem is encountered during solver execution.                       |
| UnusedOptionWarning   | Issued when a given option or argument has no effect.                               |

Note that the OpenMDAO-Specific **OMDeprecationWarning** behaves a bit differently than the default Python DeprecationWarning. **OMDeprecationWarning** is always displayed by default, but can be silenced by the user.

For finer control over which warnings are displayed during setup, the following warning classes derive from **SetupWarning**. Using a filter to silence SetupWarning will silence **all** of the following.
| Warning Class | Description | |-----------------------------|----------------------------------------------------------------------| | DistributedComponentWarning | Issued when problems arise with a distributed component. | | MPIWarning | Issued when MPI is not available or cannot be used. | | PromotionWarning | Issued when there is ambiguity due to variable promotion. | | UnitsWarning | Issued when unitless variable is connected to a variable with units. | (filtering_warnings)= ## Filtering Warnings Python's built-in warning filtering system can be used to control which warnings are displayed when using OpenMDAO. The following script generates an OpenMDAO model which will generate UnitsWarning due to connecting unitless outputs to inputs with units. In the following code, the UnitsWarning will be displayed as expected: ``` """ Test nominal UnitsWarning. """ import warnings class AComp(om.ExplicitComponent): def initialize(self): pass def setup(self): self.add_input('a', shape=(10,), units='m') self.add_input('x', shape=(10,), units='1/s') self.add_input('b', shape=(10,), units='m/s') self.add_output('y', shape=(10,), units='m') self.add_output('z', shape=(10,), units='m/s') self.declare_coloring(wrt='*', form='cs') def compute(self, inputs, outputs): outputs['y'] = inputs['a'] * inputs['x'] + inputs['b'] outputs['z'] = inputs['b'] * inputs['x'] p = om.Problem() p.model.add_subsystem('a_comp', AComp()) p.model.add_subsystem('exec_comp', om.ExecComp('foo = y + z', y={'shape': (10,)}, z={'shape': (10,)}, foo={'shape': (10,)})) p.model.connect('a_comp.y', 'exec_comp.y') p.model.connect('a_comp.z', 'exec_comp.z') p.driver.declare_coloring() p.setup() with warnings.catch_warnings(record=True) as w: p.setup() unit_warnings = [wm for wm in w if wm.category is om.UnitsWarning] assert(len(unit_warnings) == 2) ``` The warnings can be completely turned off by filtering them using Python’s filterwarnings function: ``` """ Test the ability to ignore UnitsWarning """ import 
warnings import openmdao.api as om class AComp(om.ExplicitComponent): def initialize(self): pass def setup(self): self.add_input('a', shape=(10,), units='m') self.add_input('x', shape=(10,), units='1/s') self.add_input('b', shape=(10,), units='m/s') self.add_output('y', shape=(10,), units='m') self.add_output('z', shape=(10,), units='m/s') self.declare_coloring(wrt='*', form='cs') def compute(self, inputs, outputs): outputs['y'] = inputs['a'] * inputs['x'] + inputs['b'] outputs['z'] = inputs['b'] * inputs['x'] p = om.Problem() p.model.add_subsystem('a_comp', AComp()) p.model.add_subsystem('exec_comp', om.ExecComp('foo = y + z', y={'shape': (10,)}, z={'shape': (10,)}, foo={'shape': (10,)})) p.model.connect('a_comp.y', 'exec_comp.y') p.model.connect('a_comp.z', 'exec_comp.z') p.driver.declare_coloring() warnings.filterwarnings('ignore', category=om.UnitsWarning) with warnings.catch_warnings(record=True) as w: p.setup() unit_warnings = [wm for wm in w if wm.category is om.UnitsWarning] assert (len(unit_warnings) == 0) ``` If you want to clean your code and remove warnings, it can be useful to promote them to errors so that they cannot be ignored. The following code filters **all** OpenMDAO associated warnings to Errors: ``` """ Test the ability to raise a UnitWarning to an error. 
""" import warnings import openmdao.api as om class AComp(om.ExplicitComponent): def initialize(self): pass def setup(self): self.add_input('a', shape=(10,), units='m') self.add_input('x', shape=(10,), units='1/s') self.add_input('b', shape=(10,), units='m/s') self.add_output('y', shape=(10,), units='m') self.add_output('z', shape=(10,), units='m/s') self.declare_coloring(wrt='*', form='cs') def compute(self, inputs, outputs): outputs['y'] = inputs['a'] * inputs['x'] + inputs['b'] outputs['z'] = inputs['b'] * inputs['x'] p = om.Problem() p.model.add_subsystem('a_comp', AComp()) p.model.add_subsystem('exec_comp', om.ExecComp('foo = y + z', y={'shape': (10,)}, z={'shape': (10,)}, foo={'shape': (10,)})) p.model.connect('a_comp.y', 'exec_comp.y') p.model.connect('a_comp.z', 'exec_comp.z') p.driver.declare_coloring() warnings.filterwarnings('error', category=om.OpenMDAOWarning) expected = "<model> <class Group>: Output 'a_comp.y' with units of 'm' is connected to " \ "input 'exec_comp.y' which has no units." try: p.setup() except om.UnitsWarning as e: if str(e) != expected: raise RuntimeError(f"{str(e.exception)} != {expected}") else: print(str(e)) else: raise RuntimeError(f"Exception '{expected}' not raised") ``` (notes_for_developers)= ## Notes for Developers Python's treatment of warnings inside UnitTest tests can be somewhat confusing. If you wish to test that certain warnings are filtered during testing, we recommend using the `om.reset_warnings()` method in the `setUp` method that is run before each test in a `TestCase`. ```python import unittest import openmdao.api as om class MyTestCase(unittest.TestCase): def setUp(self): """ Ensure that OpenMDAO warnings are using their default filter action. """ om.reset_warnings() def test_a(self): ... ```
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Linear-Algebra" data-toc-modified-id="Linear-Algebra-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Linear Algebra</a></span><ul class="toc-item"><li><span><a href="#Dot-Products" data-toc-modified-id="Dot-Products-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Dot Products</a></span><ul class="toc-item"><li><span><a href="#What-does-a-dot-product-conceptually-mean?" data-toc-modified-id="What-does-a-dot-product-conceptually-mean?-1.1.1"><span class="toc-item-num">1.1.1&nbsp;&nbsp;</span>What does a dot product conceptually mean?</a></span></li></ul></li><li><span><a href="#Exercises" data-toc-modified-id="Exercises-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Exercises</a></span></li><li><span><a href="#Using-Scikit-Learn" data-toc-modified-id="Using-Scikit-Learn-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Using Scikit-Learn</a></span></li><li><span><a href="#Bag-of-Words-Models" data-toc-modified-id="Bag-of-Words-Models-1.4"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>Bag of Words Models</a></span></li></ul></li><li><span><a href="#Distance-Measures" data-toc-modified-id="Distance-Measures-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Distance Measures</a></span><ul class="toc-item"><li><span><a href="#Euclidean-Distance" data-toc-modified-id="Euclidean-Distance-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Euclidean Distance</a></span><ul class="toc-item"><li><span><a href="#Scikit-Learn" data-toc-modified-id="Scikit-Learn-2.1.1"><span class="toc-item-num">2.1.1&nbsp;&nbsp;</span>Scikit Learn</a></span></li></ul></li></ul></li><li><span><a href="#Similarity-Measures" data-toc-modified-id="Similarity-Measures-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Similarity Measures</a></span><ul class="toc-item"><li><span><a href="#Cosine-Similarity" data-toc-modified-id="Cosine-Similarity-3.1"><span 
class="toc-item-num">3.1&nbsp;&nbsp;</span>Cosine Similarity</a></span><ul class="toc-item"><li><span><a href="#Shift-Invariance" data-toc-modified-id="Shift-Invariance-3.1.1"><span class="toc-item-num">3.1.1&nbsp;&nbsp;</span>Shift Invariance</a></span></li></ul></li></ul></li><li><span><a href="#Exercise-(20-minutes):" data-toc-modified-id="Exercise-(20-minutes):-4"><span class="toc-item-num">4&nbsp;&nbsp;</span><span style="background-color: #ffff00">Exercise (20 minutes):</span></a></span><ul class="toc-item"><li><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#3.-Define-your-cosine-similarity-functions" data-toc-modified-id="3.-Define-your-cosine-similarity-functions-4.0.0.1"><span class="toc-item-num">4.0.0.1&nbsp;&nbsp;</span>3. Define your cosine similarity functions</a></span></li><li><span><a href="#4.-Get-the-two-documents-from-the-BoW-feature-space-and-calculate-cosine-similarity" data-toc-modified-id="4.-Get-the-two-documents-from-the-BoW-feature-space-and-calculate-cosine-similarity-4.0.0.2"><span class="toc-item-num">4.0.0.2&nbsp;&nbsp;</span>4. 
Get the two documents from the BoW feature space and calculate cosine similarity</a></span></li></ul></li></ul></li></ul></li><li><span><a href="#Challenge:-Use-the-Example-Below-to-Create-Your-Own-Cosine-Similarity-Function" data-toc-modified-id="Challenge:-Use-the-Example-Below-to-Create-Your-Own-Cosine-Similarity-Function-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Challenge: Use the Example Below to Create Your Own Cosine Similarity Function</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Create-a-list-of-all-the-vocabulary-$V$" data-toc-modified-id="Create-a-list-of-all-the-vocabulary-$V$-5.0.1"><span class="toc-item-num">5.0.1&nbsp;&nbsp;</span>Create a list of all the <strong>vocabulary $V$</strong></a></span><ul class="toc-item"><li><span><a href="#Native-Implementation:" data-toc-modified-id="Native-Implementation:-5.0.1.1"><span class="toc-item-num">5.0.1.1&nbsp;&nbsp;</span>Native Implementation:</a></span></li></ul></li><li><span><a href="#Create-your-Bag-of-Words-model" data-toc-modified-id="Create-your-Bag-of-Words-model-5.0.2"><span class="toc-item-num">5.0.2&nbsp;&nbsp;</span>Create your Bag of Words model</a></span></li></ul></li></ul></li></ul></div> # Linear Algebra In the natural language processing, each document is a vector of numbers. ## Dot Products A dot product is defined as $ a \cdot b = \sum_{i}^{n} a_{i}b_{i} = a_{1}b_{1} + a_{2}b_{2} + a_{3}b_{3} + \dots + a_{n}b_{n}$ The geometric definition of a dot product is $ a \cdot b = $\|\|b\|\|\|\|a\|\| ### What does a dot product conceptually mean? A dot product is a representation of the **similarity between two components**, because it is calculated based upon shared elements. It tells you how much one vector goes in the direction of another vector. 
The actual value of a dot product reflects the direction of change: * **Zero**: we don't have any growth in the original direction * **Positive** number: we have some growth in the original direction * **Negative** number: we have negative (reverse) growth in the original direction ``` A = [0,2] B = [0,1] def dot_product(x,y): return sum(a*b for a,b in zip(x,y)) dot_product(A,B) # What will the dot product of A and B be? ``` ![Correlations](images/dot_product.png "Visualization of various r values for Pearson correlation coefficient") ## Exercises What will the dot product of `A` and `B` be? ``` A = [1,2] B = [2,4] dot_product(A,B) ``` What will the dot product of `document_1` and `document_2` be? ``` document_1 = [0, 0, 1] document_2 = [1, 0, 2] ``` ## Using Scikit-Learn ``` from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer() data_corpus = ["John likes to watch movies. Mary likes movies too.", "John also likes to watch football games. Mary does not like football much."] X = vectorizer.fit_transform(data_corpus) print(vectorizer.get_feature_names()) ``` ## Bag of Words Models ``` corpus = [ "Some analysts think demand could drop this year because a large number of homeowners take on remodeling projectsafter buying a new property. With fewer homes selling, home values easing, and mortgage rates rising, they predict home renovations could fall to their lowest levels in three years.", "Most home improvement stocks are expected to report fourth-quarter earnings next month.", "The conversation boils down to how much leverage management can get out of its wide-ranging efforts to re-energize operations, branding, digital capabilities, and the menu–and, for investors, how much to pay for that.", "RMD’s software acquisitions, efficiency, and mix overcame pricing and its gross margin improved by 90 bps Y/Y while its operating margin (including amortization) improved by 80 bps Y/Y. 
Since RMD expects the slower international flow generator growth to continue for the next few quarters, we have lowered our organic growth estimates to the mid-single digits. " ] X = vectorizer.fit_transform(corpus).toarray() import numpy as np from sys import getsizeof zeroes = np.where(X.flatten() == 0)[0].size percent_sparse = zeroes / X.size print(f"The bag of words feature space is {round(percent_sparse * 100,2)}% sparse. \n\ That's approximately {round(getsizeof(X) * percent_sparse,2)} bytes of wasted memory. This is why sklearn uses CSR (compressed sparse rows) instead of normal matrices!") ``` # Distance Measures ## Euclidean Distance Euclidean distances can range from 0 (completely identically) to $\infty$ (extremely dissimilar). The distance between two points, $x$ and $y$, can be defined as $d(x,y)$: $$ d(x,y) = \sqrt{\sum_{i=1}^{n}(x_{i}-y_{i})^2} $$ Compared to the other dominant distance measure (cosine similarity), **magnitude** plays an extremely important role. ``` from math import sqrt def euclidean_distance_1(x,y): distance = sum((a-b)**2 for a, b in zip(x, y)) return sqrt(distance) ``` There's typically an easier way to write this function that takes advantage of Numpy's vectorization capabilities: ``` import numpy as np def euclidean_distance_2(x,y): x = np.array(x) y = np.array(y) return np.linalg.norm(x-y) ``` ### Scikit Learn ``` from sklearn.metrics.pairwise import euclidean_distances X = [document_1, document_2] euclidean_distances(X) ``` # Similarity Measures Similarity measures will always range between -1 and 1. A similarity of -1 means the two objects are complete opposites, while a similarity of 1 indicates the objects are identical. ## Cosine Similarity The cosine similarity of two vectors (each vector will usually represent one document) is a measure that calculates $ cos(\theta)$, where $\theta$ is the angle between the two vectors. Therefore, if the vectors are **orthogonal** to each other (90 degrees), $cos(90) = 0$. 
If the vectors are in exactly the same direction, $\theta = 0$ and $cos(0) = 1$.

Cosine similarity **does not care about the magnitude of the vector, only the direction** in which it points. This can help normalize when comparing across documents that are different in terms of word count.

![Cosine Similarity](images/cos-equation.png)

### Shift Invariance

* The Pearson correlation coefficient between X and Y does not change when you transform $X \rightarrow a + bX$ and $Y \rightarrow c + dY$, assuming $a$, $b$, $c$, and $d$ are constants and $b$ and $d$ are positive.
* Cosine similarity does, however, change when transformed in this way.

<h1><span style="background-color: #FFFF00">Exercise (20 minutes):</span></h1>

>In Python, find the **cosine similarity** and the **Pearson correlation coefficient** of the two following sentences, assuming a **one-hot encoded binary bag of words** model. You may use a library to create the BoW feature space, but do not use libraries other than `numpy` or `scipy` to compute Pearson and cosine similarity:

>`A = "John likes to watch movies. Mary likes movies too"`

>`B = "John also likes to watch football games, but he likes to watch movies on occasion as well"`

#### 3. Define your cosine similarity functions

```python
from scipy.spatial.distance import cosine  # we are importing this library to check that our own cosine similarity func works
from numpy import dot  # to calculate dot product
from numpy.linalg import norm  # to calculate the norm


def cosine_similarity(A, B):
    """Return the cosine similarity of vectors A and B."""
    numerator = dot(A, B)
    denominator = norm(A) * norm(B)
    return numerator / denominator


def cosine_distance(A, B):
    # distance = 1 - similarity; note that the function must be *called* here --
    # `1 - cosine_similarity` would subtract the function object itself and raise TypeError
    return 1 - cosine_similarity(A, B)


A = [0,2,3,4,1,2]
B = [1,3,4,0,0,2]

# check that your native implementation and the 3rd party library function produce the same values
# (scipy's `cosine` is a *distance*, so compare against 1 - cosine(A, B))
assert round(cosine_similarity(A,B),4) == round(1 - cosine(A,B),4)
```

#### 4. Get the two documents from the BoW feature space and calculate cosine similarity

```python
cosine_similarity(X[0], X[1])
```

>0.5241424183609592

```
from scipy.spatial.distance import cosine
from numpy import dot
import numpy as np
from numpy.linalg import norm


def cosine_similarity(A, B):
    numerator = dot(A, B)
    denominator = norm(A) * norm(B)
    return numerator / denominator


# remember: scipy's `cosine` is a distance, so you take 1 - the distance to get
# the similarity, and 1 - the similarity to get the distance
def cosine_distance(A, B):
    return 1 - cosine_similarity(A, B)


A = [0,2,3,4,1,2]
B = [1,3,4,0,0,2]

# check that your native implementation and 3rd party library function produce the same values
assert round(cosine_similarity(A,B),4) == round(1 - cosine(A,B),4)

# check for shift invariance
cosine(np.array(A), B)

from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()

# take two very similar sentences, should have high similarity
# edit these sentences to become less similar, and the similarity score should decrease
data_corpus = ["John likes to watch movies. Mary likes movies too.",
               "John also likes to watch football games"]
X = vectorizer.fit_transform(data_corpus)
X = X.toarray()
print(vectorizer.get_feature_names())
cosine_similarity(X[0], X[1])
```

# Challenge: Use the Example Below to Create Your Own Cosine Similarity Function

### Create a list of all the **vocabulary $V$**

Using **`sklearn`**'s **`CountVectorizer`**:

```python
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
data_corpus = ["John likes to watch movies. Mary likes movies too",
               "John also likes to watch football games, but he likes to watch movies on occasion as well"]
X = vectorizer.fit_transform(data_corpus)
V = vectorizer.get_feature_names()
```

#### Native Implementation:

```python
def get_vocabulary(sentences):
    vocabulary = set()  # create an empty set - question: Why not a list?
                        # (note: `{}` would create a dict, which has no .add() method)
    for sentence in sentences:
        # this is a very crude form of "tokenization", would not actually use in production
        for word in sentence.split(" "):
            if word not in vocabulary:
                vocabulary.add(word)
    return vocabulary
```

### Create your Bag of Words model

```python
X = X.toarray()
print(X)
```

Your console output:

```python
[[0 0 0 1 2 1 2 1 1 1]
 [1 1 1 1 1 0 0 1 0 1]]
```

```
vectors = [[0,0,0,1,2,1,2,1,1,1],
           [1,1,1,1,1,0,0,1,0,1]]

import math


def find_norm(vector):
    total = 0
    for element in vector:
        total += element ** 2
    return math.sqrt(total)


norm(vectors[0])  # Numpy
find_norm(vectors[0])  # your own

dot_product(vectors[0], vectors[1]) / (find_norm(vectors[0]) * find_norm(vectors[1]))

from sklearn.metrics.pairwise import cosine_distances, cosine_similarity
cosine_similarity(vectors)
```
github_jupyter
<h2>What is the purpose of Data Wrangling?</h2> Data Wrangling is the process of converting data from the initial format to a format that may be better for analysis. <h3>What is the fuel consumption (L/100k) rate for the diesel car?</h3> <h3>Import data</h3> <p> You can find the "Automobile Data Set" from the following link: <a href="https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data">https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data</a>. We will be using this data set throughout this course. </p> <h4>Import pandas</h4> ``` import pandas as pd import matplotlib.pylab as plt ``` URL of the dataset ``` filename = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/auto.csv" ``` Python list <b>headers</b> containing name of headers ``` headers = ["symboling","normalized-losses","make","fuel-type","aspiration", "num-of-doors","body-style", "drive-wheels","engine-location","wheel-base", "length","width","height","curb-weight","engine-type", "num-of-cylinders", "engine-size","fuel-system","bore","stroke","compression-ratio","horsepower", "peak-rpm","city-mpg","highway-mpg","price"] ``` Use the Pandas method <b>read_csv()</b> to load the data from the web address. Set the parameter "names" equal to the Python list "headers". ``` df = pd.read_csv(filename, names = headers) ``` Use the method <b>head()</b> to display the first five rows of the dataframe. ``` # To see what the data set looks like, we'll use the head() method. df.head() ``` As we can see, several question marks appeared in the dataframe; those are missing values which may hinder our further analysis. 
<div>So, how do we identify all those missing values and deal with them?</div>

<b>How to work with missing data?</b>

Steps for working with missing data:
<ol>
    <li>identify missing data</li>
    <li>deal with missing data</li>
    <li>correct data format</li>
</ol>

<h2 id="identify_handle_missing_values">Identify and handle missing values</h2>

<h3 id="identify_missing_values">Identify missing values</h3>
<h4>Convert "?" to NaN</h4>
In the car dataset, missing data comes with the question mark "?". We replace "?" with NaN (Not a Number), which is Python's default missing value marker, for reasons of computational speed and convenience. Here we use the function:
<pre>.replace(A, B, inplace = True) </pre>
to replace A by B

```
import numpy as np

# replace "?" to NaN
df.replace("?", np.nan, inplace = True)
df.head(5)
```

<h4>Evaluating for Missing Data</h4>

The missing values are converted to Python's default. We use Python's built-in functions to identify these missing values. There are two methods to detect missing data:
<ol>
    <li><b>.isnull()</b></li>
    <li><b>.notnull()</b></li>
</ol>
The output is a boolean value indicating whether the value that is passed into the argument is in fact missing data.

```
missing_data = df.isnull()
missing_data.head(5)
```

"True" stands for missing value, while "False" stands for not missing value.

<h4>Count missing values in each column</h4>
<p>
Using a for loop in Python, we can quickly figure out the number of missing values in each column. As mentioned above, "True" represents a missing value, "False" means the value is present in the dataset. In the body of the for loop the method ".value_counts()" counts the number of "True" values.
</p> ``` for column in missing_data.columns.values.tolist(): print(column) print (missing_data[column].value_counts()) print("") ``` Based on the summary above, each column has 205 rows of data, seven columns containing missing data: <ol> <li>"normalized-losses": 41 missing data</li> <li>"num-of-doors": 2 missing data</li> <li>"bore": 4 missing data</li> <li>"stroke" : 4 missing data</li> <li>"horsepower": 2 missing data</li> <li>"peak-rpm": 2 missing data</li> <li>"price": 4 missing data</li> </ol> <h3 id="deal_missing_values">Deal with missing data</h3> <b>How to deal with missing data?</b> <ol> <li>drop data<br> a. drop the whole row<br> b. drop the whole column </li> <li>replace data<br> a. replace it by mean<br> b. replace it by frequency<br> c. replace it based on other functions </li> </ol> Whole columns should be dropped only if most entries in the column are empty. In our dataset, none of the columns are empty enough to drop entirely. We have some freedom in choosing which method to replace data; however, some methods may seem more reasonable than others. We will apply each method to many different columns: <b>Replace by mean:</b> <ul> <li>"normalized-losses": 41 missing data, replace them with mean</li> <li>"stroke": 4 missing data, replace them with mean</li> <li>"bore": 4 missing data, replace them with mean</li> <li>"horsepower": 2 missing data, replace them with mean</li> <li>"peak-rpm": 2 missing data, replace them with mean</li> </ul> <b>Replace by frequency:</b> <ul> <li>"num-of-doors": 2 missing data, replace them with "four". <ul> <li>Reason: 84% sedans is four doors. Since four doors is most frequent, it is most likely to occur</li> </ul> </li> </ul> <b>Drop the whole row:</b> <ul> <li>"price": 4 missing data, simply delete the whole row <ul> <li>Reason: price is what we want to predict. 
Any data entry without price data cannot be used for prediction; therefore any row now without price data is not useful to us</li> </ul> </li> </ul> <h4>Calculate the average of the column </h4> ``` avg_norm_loss = df["normalized-losses"].astype("float").mean(axis=0) print("Average of normalized-losses:", avg_norm_loss) ``` <h4>Replace "NaN" by mean value in "normalized-losses" column</h4> ``` df["normalized-losses"].replace(np.nan, avg_norm_loss, inplace=True) ``` <h4>Calculate the mean value for 'bore' column</h4> ``` avg_bore=df['bore'].astype('float').mean(axis=0) print("Average of bore:", avg_bore) ``` <h4>Replace NaN by mean value</h4> ``` df["bore"].replace(np.nan, avg_bore, inplace=True) ``` <div class="alert alert-danger alertdanger" style="margin-top: 20px"> <h1> Question #1: </h1> <b>According to the example above, replace NaN in "stroke" column by mean.</b> </div> ``` # Write your code below and press Shift+Enter to execute avg_stroke = df['stroke'].astype('float').mean(axis = 0) print("Average of stroke:", avg_stroke) df['stroke'].replace(np.nan,avg_stroke,inplace = True) ``` Double-click <b>here</b> for the solution. 
<!-- The answer is below: # calculate the mean vaule for "stroke" column avg_stroke = df["stroke"].astype("float").mean(axis = 0) print("Average of stroke:", avg_stroke) # replace NaN by mean value in "stroke" column df["stroke"].replace(np.nan, avg_stroke, inplace = True) --> <h4>Calculate the mean value for the 'horsepower' column:</h4> ``` avg_horsepower = df['horsepower'].astype('float').mean(axis=0) print("Average horsepower:", avg_horsepower) ``` <h4>Replace "NaN" by mean value:</h4> ``` df['horsepower'].replace(np.nan, avg_horsepower, inplace=True) ``` <h4>Calculate the mean value for 'peak-rpm' column:</h4> ``` avg_peakrpm=df['peak-rpm'].astype('float').mean(axis=0) print("Average peak rpm:", avg_peakrpm) ``` <h4>Replace NaN by mean value:</h4> ``` df['peak-rpm'].replace(np.nan, avg_peakrpm, inplace=True) ``` To see which values are present in a particular column, we can use the ".value_counts()" method: ``` df['num-of-doors'].value_counts() ``` We can see that four doors are the most common type. We can also use the ".idxmax()" method to calculate for us the most common type automatically: ``` df['num-of-doors'].value_counts().idxmax() ``` The replacement procedure is very similar to what we have seen previously ``` #replace the missing 'num-of-doors' values by the most frequent df["num-of-doors"].replace(np.nan, "four", inplace=True) ``` Finally, let's drop all rows that do not have price data: ``` # simply drop whole row with NaN in "price" column df.dropna(subset=["price"], axis=0, inplace=True) # reset index, because we droped two rows df.reset_index(drop=True, inplace=True) df.head() ``` <b>Good!</b> Now, we obtain the dataset with no missing values. 
<h3 id="correct_data_format">Correct data format</h3> <b>We are almost there!</b> <p>The last step in data cleaning is checking and making sure that all data is in the correct format (int, float, text or other).</p> In Pandas, we use <p><b>.dtype()</b> to check the data type</p> <p><b>.astype()</b> to change the data type</p> <h4>Lets list the data types for each column</h4> ``` df.dtypes ``` <p>As we can see above, some columns are not of the correct data type. Numerical variables should have type 'float' or 'int', and variables with strings such as categories should have type 'object'. For example, 'bore' and 'stroke' variables are numerical values that describe the engines, so we should expect them to be of the type 'float' or 'int'; however, they are shown as type 'object'. We have to convert data types into a proper format for each column using the "astype()" method.</p> <h4>Convert data types to proper format</h4> ``` df[["bore", "stroke"]] = df[["bore", "stroke"]].astype("float") df[["normalized-losses"]] = df[["normalized-losses"]].astype("int") df[["price"]] = df[["price"]].astype("float") df[["peak-rpm"]] = df[["peak-rpm"]].astype("float") ``` <h4>Let us list the columns after the conversion</h4> ``` df.dtypes ``` <b>Wonderful!</b> Now, we finally obtain the cleaned dataset with no missing values and all data in its proper format. <h2 id="data_standardization">Data Standardization</h2> <p> Data is usually collected from different agencies with different formats. (Data Standardization is also a term for a particular type of data normalization, where we subtract the mean and divide by the standard deviation) </p> <b>What is Standardization?</b> <p>Standardization is the process of transforming data into a common format which allows the researcher to make the meaningful comparison. </p> <b>Example</b> <p>Transform mpg to L/100km:</p> <p>In our dataset, the fuel consumption columns "city-mpg" and "highway-mpg" are represented by mpg (miles per gallon) unit. 
Assume we are developing an application in a country that accept the fuel consumption with L/100km standard</p> <p>We will need to apply <b>data transformation</b> to transform mpg into L/100km?</p> <p>The formula for unit conversion is<p> L/100km = 235 / mpg <p>We can do many mathematical operations directly in Pandas.</p> ``` df.head() # Convert mpg to L/100km by mathematical operation (235 divided by mpg) df['city-L/100km'] = 235/df["city-mpg"] # check your transformed data df.head() ``` <div class="alert alert-danger alertdanger" style="margin-top: 20px"> <h1> Question #2: </h1> <b>According to the example above, transform mpg to L/100km in the column of "highway-mpg", and change the name of column to "highway-L/100km".</b> </div> ``` # Write your code below and press Shift+Enter to execute df['highway-mpg'] = 235/df['highway-mpg'] df.rename(columns={'highway-mpg':'highway-L/100km'}, inplace=True) df.head() ``` Double-click <b>here</b> for the solution. <!-- The answer is below: # transform mpg to L/100km by mathematical operation (235 divided by mpg) df["highway-mpg"] = 235/df["highway-mpg"] # rename column name from "highway-mpg" to "highway-L/100km" df.rename(columns={'"highway-mpg"':'highway-L/100km'}, inplace=True) # check your transformed data df.head() --> <h2 id="data_normalization">Data Normalization</h2> <b>Why normalization?</b> <p>Normalization is the process of transforming values of several variables into a similar range. 
Typical normalizations include scaling the variable so the variable average is 0, scaling the variable so the variance is 1, or scaling the variable so the variable values range from 0 to 1 </p> <b>Example</b> <p>To demonstrate normalization, let's say we want to scale the columns "length", "width" and "height" </p> <p><b>Target:</b> we would like to normalize those variables so their value ranges from 0 to 1.</p> <p><b>Approach:</b> replace original value by (original value)/(maximum value)</p> ``` # replace (original value) by (original value)/(maximum value) df['length'] = df['length']/df['length'].max() df['width'] = df['width']/df['width'].max() ``` <div class="alert alert-danger alertdanger" style="margin-top: 20px"> <h1> Question #3: </h1> <b>According to the example above, normalize the column "height".</b> </div> ``` # Write your code below and press Shift+Enter to execute df['height'] = df['height']/df['height'].max() df[["length","width","height"]].head() ``` Double-click <b>here</b> for the solution. <!-- The answer is below: df['height'] = df['height']/df['height'].max() # show the scaled columns df[["length","width","height"]].head() --> Here we can see, we've normalized "length", "width" and "height" in the range of [0,1]. <h2 id="binning">Binning</h2> <b>Why binning?</b> <p> Binning is a process of transforming continuous numerical variables into discrete categorical 'bins', for grouped analysis. </p> <b>Example: </b> <p>In our dataset, "horsepower" is a real valued variable ranging from 48 to 288, it has 57 unique values. What if we only care about the price difference between cars with high horsepower, medium horsepower, and little horsepower (3 types)? Can we rearrange them into three 'bins' to simplify analysis? 
</p> <p>We will use the Pandas method 'cut' to segment the 'horsepower' column into 3 bins </p> <h3>Example of Binning Data In Pandas</h3> Convert data to correct format ``` df["horsepower"]=df["horsepower"].astype(int, copy=True) ``` Let's plot the histogram of horsepower, to see what the distribution of horsepower looks like. ``` %matplotlib inline import matplotlib as plt from matplotlib import pyplot plt.pyplot.hist(df["horsepower"]) # set x/y labels and plot title plt.pyplot.xlabel("horsepower") plt.pyplot.ylabel("count") plt.pyplot.title("horsepower bins") ``` <p>We would like 3 bins of equal size bandwidth so we use numpy's <code>linspace(start_value, end_value, numbers_generated)</code> function.</p> <p>Since we want to include the minimum value of horsepower we want to set start_value=min(df["horsepower"]).</p> <p>Since we want to include the maximum value of horsepower we want to set end_value=max(df["horsepower"]).</p> <p>Since we are building 3 bins of equal length, there should be 4 dividers, so numbers_generated=4.</p> We build a bin array, with a minimum value to a maximum value, with bandwidth calculated above. The bins will be values used to determine when one bin ends and another begins. ``` bins = np.linspace(min(df["horsepower"]), max(df["horsepower"]), 4) bins ``` We set group names: ``` group_names = ['Low', 'Medium', 'High'] ``` We apply the function "cut" to determine what each value of "df['horsepower']" belongs to. ``` df['horsepower-binned'] = pd.cut(df['horsepower'], bins, labels=group_names, include_lowest=True ) df[['horsepower','horsepower-binned']].head(20) ``` Let's see the number of vehicles in each bin. ``` df["horsepower-binned"].value_counts() ``` Let's plot the distribution of each bin. 
``` %matplotlib inline import matplotlib as plt from matplotlib import pyplot pyplot.bar(group_names, df["horsepower-binned"].value_counts()) # set x/y labels and plot title plt.pyplot.xlabel("horsepower") plt.pyplot.ylabel("count") plt.pyplot.title("horsepower bins") ``` <p> Check the dataframe above carefully, you will find the last column provides the bins for "horsepower" with 3 categories ("Low","Medium" and "High"). </p> <p> We successfully narrow the intervals from 57 to 3! </p> <h3>Bins visualization</h3> Normally, a histogram is used to visualize the distribution of bins we created above. ``` %matplotlib inline import matplotlib as plt from matplotlib import pyplot a = (0,1,2) # draw historgram of attribute "horsepower" with bins = 3 plt.pyplot.hist(df["horsepower"], bins = 3) # set x/y labels and plot title plt.pyplot.xlabel("horsepower") plt.pyplot.ylabel("count") plt.pyplot.title("horsepower bins") ``` The plot above shows the binning result for attribute "horsepower". <h2 id="indicator">Indicator variable (or dummy variable)</h2> <b>What is an indicator variable?</b> <p> An indicator variable (or dummy variable) is a numerical variable used to label categories. They are called 'dummies' because the numbers themselves don't have inherent meaning. </p> <b>Why we use indicator variables?</b> <p> So we can use categorical variables for regression analysis in the later modules. </p> <b>Example</b> <p> We see the column "fuel-type" has two unique values, "gas" or "diesel". Regression doesn't understand words, only numbers. To use this attribute in regression analysis, we convert "fuel-type" into indicator variables. </p> <p> We will use the panda's method 'get_dummies' to assign numerical values to different categories of fuel type. 
</p> ``` df.columns ``` get indicator variables and assign it to data frame "dummy_variable_1" ``` dummy_variable_1 = pd.get_dummies(df["fuel-type"]) dummy_variable_1.head() ``` change column names for clarity ``` dummy_variable_1.rename(columns={'fuel-type-diesel':'gas', 'fuel-type-diesel':'diesel'}, inplace=True) dummy_variable_1.head() ``` We now have the value 0 to represent "gas" and 1 to represent "diesel" in the column "fuel-type". We will now insert this column back into our original dataset. ``` # merge data frame "df" and "dummy_variable_1" df = pd.concat([df, dummy_variable_1], axis=1) # drop original column "fuel-type" from "df" df.drop("fuel-type", axis = 1, inplace=True) df.head() ``` The last two columns are now the indicator variable representation of the fuel-type variable. It's all 0s and 1s now. <div class="alert alert-danger alertdanger" style="margin-top: 20px"> <h1> Question #4: </h1> <b>As above, create indicator variable to the column of "aspiration": "std" to 0, while "turbo" to 1.</b> </div> ``` # Write your code below and press Shift+Enter to execute dummy_variable_2 = pd.get_dummies(df['aspiration']) dummy_variable_2.rename(columns = {'std':'aspiration-std','turbo':'aspiration-turbo'},inplace = True) dummy_variable_2.head() ``` Double-click <b>here</b> for the solution. 
<!-- The answer is below: # get indicator variables of aspiration and assign it to data frame "dummy_variable_2" dummy_variable_2 = pd.get_dummies(df['aspiration']) # change column names for clarity dummy_variable_2.rename(columns={'std':'aspiration-std', 'turbo': 'aspiration-turbo'}, inplace=True) # show first 5 instances of data frame "dummy_variable_1" dummy_variable_2.head() --> <div class="alert alert-danger alertdanger" style="margin-top: 20px"> <h1> Question #5: </h1> <b>Merge the new dataframe to the original dataframe then drop the column 'aspiration'</b> </div> ``` # Write your code below and press Shift+Enter to execute df = pd.concat([df,dummy_variable_2],axis=1) df.drop('aspiration',axis=1,inplace=True) df.head() ``` Double-click <b>here</b> for the solution. <!-- The answer is below: #merge the new dataframe to the original datafram df = pd.concat([df, dummy_variable_2], axis=1) # drop original column "aspiration" from "df" df.drop('aspiration', axis = 1, inplace=True) --> save the new csv ``` df.to_csv('clean_df.csv') ``` <h1>Thank you for completing this notebook</h1> <div class="alert alert-block alert-info" style="margin-top: 20px"> <p><a href="https://cocl.us/DA0101EN_NotbookLink_Top_bottom"><img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/BottomAd.png" width="750" align="center"></a></p> </div> <h3>About the Authors:</h3> This notebook was written by <a href="https://www.linkedin.com/in/mahdi-noorian-58219234/" target="_blank">Mahdi Noorian PhD</a>, <a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a>, Bahare Talayian, Eric Xiao, Steven Dong, Parizad, Hima Vsudevan and <a href="https://www.linkedin.com/in/fiorellawever/" target="_blank">Fiorella Wenver</a> and <a href=" https://www.linkedin.com/in/yi-leng-yao-84451275/ " target="_blank" >Yi Yao</a>. 
<p><a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p> <hr> <p>Copyright &copy; 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
github_jupyter
``` import numpy as np import pickle import gzip import glob import json import csv import sys import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import os import imageio import cv2 sns.set() %matplotlib inline ``` Specify the experiment directory: pass this in as a command line argument. ``` # Specify the experiment directory experiment_dir = '/home/justinvyu/ray_results/gym/DClaw/TurnFreeValve3ResetFreeSwapGoal-v0/2019-08-16T02-38-24-state_estimation_scaled_goal_condition' ``` What needs to be saved? 1. Plots of whatever the user passes in ("observation_keys") - TODO: Split by whatever the experiment is being tuned over (like in viskit) 2. (# of goals, resets/reset-free, domain/task, VICE/gtr, etc.) 3. Gifs of the run 4. Important parameters ``` def log_experiment(experiment_dir, observation_keys): # Search for the seed directories for seed in glob.iglob(os.path.join(experiment_dir, '*')): if not os.path.isdir(seed): continue test = '/home/justinvyu/ray_results/gym/DClaw/TurnFreeValve3ResetFreeSwapGoal-v0/2019-08-16T02-38-24-state_estimation_scaled_goal_condition/id=9867fc30-seed=2007_2019-08-16_02-38-25c0jt87k7/progress.csv' with open(test, newline='') as f: df = pd.read_csv(f) df.columns observation_keys = [ 'object_to_target_circle_distance-last-mean', 'object_to_target_position_distance-last-mean', ] # evaluation_obs_path = 'evaluation/env_infos/obs/' # training_obs_path = 'training/env_infos/obs/' def contains_str_from_list(str_to_check, str_list): return any(s in str_to_check for s in str_list) all_obs_keys_to_record = [ col_name for col_name in df.columns if contains_str_from_list(col_name, observation_keys)] # all_obs_keys_to_record = np.concatenate([ # [path + observation_key for observation_key in observation_keys] # for path in (evaluation_obs_path, training_obs_path) # ]) all_obs_keys_to_record record_data = df[all_obs_keys_to_record] num_keys = len(all_obs_keys_to_record) if num_keys % 2 != 0: num_keys += 1 num_rows = num_keys 
// 2 num_cols = 2 curr_row, curr_col = 0, 0 fig, ax = plt.subplots(2, 2, figsize=(18, 9)) for i, col in enumerate(record_data): num_data_points = len(record_data[col]) data = record_data[col] # ax[i].subplot(num_rows, num_cols, i + 1) row_index, col_index = i // num_rows, i % num_cols ax[row_index, col_index].set_title(col) ax[row_index, col_index].plot(data) # plt.show() def generate_plots(seed_dir, save_dir, observation_keys, fig=None, axes=None): data_fn = os.path.join(seed_dir, 'progress.csv') with open(data_fn, newline='') as f: df = pd.read_csv(f) def contains_str_from_list(str_to_check, str_list): return any(s in str_to_check for s in str_list) all_obs_keys_to_record = [ col_name for col_name in df.columns if contains_str_from_list(col_name, observation_keys) ] record_data = df[all_obs_keys_to_record] num_keys = len(all_obs_keys_to_record) # Set up the figure if num_keys % 2 != 0: num_keys += 1 num_rows = num_keys // 2 num_cols = 2 if fig is None and axes is None: fig, axes = plt.subplots(num_cols, num_rows, figsize=(18, 9)) for i, col in enumerate(record_data): num_data_points = len(record_data[col]) data = record_data[col] row_index, col_index = i // num_rows, i % num_cols axes[row_index, col_index].set_title(col) axes[row_index, col_index].plot(data, alpha=0.9) return fig, axes video_save_frequency = 100 video_path = '/home/justinvyu/ray_results/gym/DClaw/TurnFreeValve3ResetFreeSwapGoal-v0/2019-08-16T02-38-24-state_estimation_scaled_goal_condition/id=9867fc30-seed=2007_2019-08-16_02-38-25c0jt87k7/videos' for video_path in glob.iglob(os.path.join(video_path, '*00_0.mp4')): print(video_path) test_video = '/home/justinvyu/ray_results/gym/DClaw/TurnFreeValve3ResetFreeSwapGoal-v0/2019-08-16T15-46-37-two_policies_debug/id=b529c39e-seed=2542_2019-08-16_15-46-38m9pcum43/videos/training_path_0_0.mp4' def extract_video_frames(video_path, img_size): vidcap = cv2.VideoCapture(video_path) success, image = vidcap.read() images = [] while success: image = 
cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image = cv2.resize(image, img_size) images.append(image) success, image = vidcap.read() return images def convert_images_to_gif(images, save_path): imageio.mimsave(save_path, images) def video_to_gif(video_path, output_path, img_size=(100, 100)): images = extract_video_frames(test_video, img_size) convert_images_to_gif(images, output_path) def save_gifs(seed_dir, save_dir, save_frequency=100): video_path = os.path.join(seed_dir, 'videos') # TODO: Find the videos to save w.r.t save_frequency. for path in glob.iglob(os.path.join(video_path, '*00_0.mp4')): seed_name = seed_dir.split('seed=')[-1].split('_')[0] output_fn = 'seed=' + seed_name + '_' + path.split('/')[-1].replace('mp4', 'gif') output_path = os.path.join(save_dir, output_fn) video_to_gif(path, output_path) def log_experiment(experiment_dir, observation_keys): if not os.path.exists(os.path.join(experiment_dir, 'log')): os.mkdir(os.path.join(experiment_dir, 'log')) save_dir = os.path.join(experiment_dir, 'log') # Search for the seed directories fig, axes = None, None for seed_dir in glob.iglob(os.path.join(experiment_dir, '*')): if not os.path.isdir(seed_dir) or seed_dir == save_dir: continue fig, axes = generate_plots(seed_dir, save_dir, observation_keys, fig=fig, axes=axes) save_gifs(seed_dir, save_dir) output_fn = os.path.join(save_dir, 'plots.png') plt.savefig(output_fn) plt.show() log_experiment('/home/justinvyu/ray_results/gym/DClaw/TurnFreeValve3ResetFreeSwapGoal-v0/2019-08-16T02-38-24-state_estimation_scaled_goal_condition/', observation_keys) ```
github_jupyter
# Operations on word vectors Welcome to your first assignment of this week! Because word embeddings are very computationally expensive to train, most ML practitioners will load a pre-trained set of embeddings. **After this assignment you will be able to:** - Load pre-trained word vectors, and measure similarity using cosine similarity - Use word embeddings to solve word analogy problems such as Man is to Woman as King is to ______. - Modify word embeddings to reduce their gender bias ## <font color='darkblue'>Updates</font> #### If you were working on the notebook before this update... * The current notebook is version "2a". * You can find your original work saved in the notebook with the previous version name ("v2") * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory. #### List of updates * cosine_similarity * Additional hints. * complete_analogy * Replaces the list of input words with a set, and sets it outside the for loop (to follow best practices in coding). * Spelling, grammar and wording corrections. Let's get started! Run the following cell to load the packages you will need. ``` import numpy as np from w2v_utils import * ``` #### Load the word vectors * For this assignment, we will use 50-dimensional GloVe vectors to represent words. * Run the following cell to load the `word_to_vec_map`. ``` words, word_to_vec_map = read_glove_vecs('../../readonly/glove.6B.50d.txt') ``` You've loaded: - `words`: set of words in the vocabulary. - `word_to_vec_map`: dictionary mapping words to their GloVe vector representation. #### Embedding vectors versus one-hot vectors * Recall from the lesson videos that one-hot vectors do not do a good job of capturing the level of similarity between words (every one-hot vector has the same Euclidean distance from any other one-hot vector). * Embedding vectors such as GloVe vectors provide much more useful information about the meaning of individual words. 
* Lets now see how you can use GloVe vectors to measure the similarity between two words. # 1 - Cosine similarity To measure the similarity between two words, we need a way to measure the degree of similarity between two embedding vectors for the two words. Given two vectors $u$ and $v$, cosine similarity is defined as follows: $$\text{CosineSimilarity(u, v)} = \frac {u \cdot v} {||u||_2 ||v||_2} = cos(\theta) \tag{1}$$ * $u \cdot v$ is the dot product (or inner product) of two vectors * $||u||_2$ is the norm (or length) of the vector $u$ * $\theta$ is the angle between $u$ and $v$. * The cosine similarity depends on the angle between $u$ and $v$. * If $u$ and $v$ are very similar, their cosine similarity will be close to 1. * If they are dissimilar, the cosine similarity will take a smaller value. <img src="images/cosine_sim.png" style="width:800px;height:250px;"> <caption><center> **Figure 1**: The cosine of the angle between two vectors is a measure their similarity</center></caption> **Exercise**: Implement the function `cosine_similarity()` to evaluate the similarity between word vectors. **Reminder**: The norm of $u$ is defined as $ ||u||_2 = \sqrt{\sum_{i=1}^{n} u_i^2}$ #### Additional Hints * You may find `np.dot`, `np.sum`, or `np.sqrt` useful depending upon the implementation that you choose. ``` # GRADED FUNCTION: cosine_similarity def cosine_similarity(u, v): """ Cosine similarity reflects the degree of similarity between u and v Arguments: u -- a word vector of shape (n,) v -- a word vector of shape (n,) Returns: cosine_similarity -- the cosine similarity between u and v defined by the formula above. 
""" distance = 0.0 ### START CODE HERE ### # Compute the dot product between u and v (≈1 line) dot = np.dot(u, v) # Compute the L2 norm of u (≈1 line) norm_u = np.linalg.norm(u) # Compute the L2 norm of v (≈1 line) norm_v = np.linalg.norm(v) # Compute the cosine similarity defined by formula (1) (≈1 line) cosine_similarity = dot / (norm_u * norm_v) ### END CODE HERE ### return cosine_similarity father = word_to_vec_map["father"] mother = word_to_vec_map["mother"] ball = word_to_vec_map["ball"] crocodile = word_to_vec_map["crocodile"] france = word_to_vec_map["france"] italy = word_to_vec_map["italy"] paris = word_to_vec_map["paris"] rome = word_to_vec_map["rome"] print("cosine_similarity(father, mother) = ", cosine_similarity(father, mother)) print("cosine_similarity(ball, crocodile) = ",cosine_similarity(ball, crocodile)) print("cosine_similarity(france - paris, rome - italy) = ",cosine_similarity(france - paris, rome - italy)) ``` **Expected Output**: <table> <tr> <td> **cosine_similarity(father, mother)** = </td> <td> 0.890903844289 </td> </tr> <tr> <td> **cosine_similarity(ball, crocodile)** = </td> <td> 0.274392462614 </td> </tr> <tr> <td> **cosine_similarity(france - paris, rome - italy)** = </td> <td> -0.675147930817 </td> </tr> </table> #### Try different words! * After you get the correct expected output, please feel free to modify the inputs and measure the cosine similarity between other pairs of words! * Playing around with the cosine similarity of other inputs will give you a better sense of how word vectors behave. ## 2 - Word analogy task * In the word analogy task, we complete the sentence: <font color='brown'>"*a* is to *b* as *c* is to **____**"</font>. * An example is: <font color='brown'> '*man* is to *woman* as *king* is to *queen*' </font>. 
* We are trying to find a word *d*, such that the associated word vectors $e_a, e_b, e_c, e_d$ are related in the following manner: $e_b - e_a \approx e_d - e_c$ * We will measure the similarity between $e_b - e_a$ and $e_d - e_c$ using cosine similarity. **Exercise**: Complete the code below to be able to perform word analogies! ``` # GRADED FUNCTION: complete_analogy def complete_analogy(word_a, word_b, word_c, word_to_vec_map): """ Performs the word analogy task as explained above: a is to b as c is to ____. Arguments: word_a -- a word, string word_b -- a word, string word_c -- a word, string word_to_vec_map -- dictionary that maps words to their corresponding vectors. Returns: best_word -- the word such that v_b - v_a is close to v_best_word - v_c, as measured by cosine similarity """ # convert words to lowercase word_a, word_b, word_c = word_a.lower(), word_b.lower(), word_c.lower() ### START CODE HERE ### # Get the word embeddings e_a, e_b and e_c (≈1-3 lines) e_a, e_b, e_c = word_to_vec_map[word_a], word_to_vec_map[word_b], word_to_vec_map[word_c] ### END CODE HERE ### words = word_to_vec_map.keys() max_cosine_sim = -100 # Initialize max_cosine_sim to a large negative number best_word = None # Initialize best_word with None, it will help keep track of the word to output # to avoid best_word being one of the input words, skip the input words # place the input words in a set for faster searching than a list # We will re-use this set of input words inside the for-loop input_words_set = set([word_a, word_b, word_c]) # loop over the whole word vector set for w in words: # to avoid best_word being one of the input words, skip the input words if w in input_words_set: continue ### START CODE HERE ### # Compute cosine similarity between the vector (e_b - e_a) and the vector ((w's vector representation) - e_c) (≈1 line) cosine_sim = cosine_similarity(e_b - e_a, word_to_vec_map[w] - e_c) # If the cosine_sim is more than the max_cosine_sim seen so far, # then: set the 
new max_cosine_sim to the current cosine_sim and the best_word to the current word (≈3 lines) if cosine_sim > max_cosine_sim: max_cosine_sim = cosine_sim best_word = w ### END CODE HERE ### return best_word ``` Run the cell below to test your code, this may take 1-2 minutes. ``` triads_to_try = [('italy', 'italian', 'spain'), ('india', 'delhi', 'japan'), ('man', 'woman', 'boy'), ('small', 'smaller', 'large')] for triad in triads_to_try: print ('{} -> {} :: {} -> {}'.format( *triad, complete_analogy(*triad,word_to_vec_map))) ``` **Expected Output**: <table> <tr> <td> **italy -> italian** :: </td> <td> spain -> spanish </td> </tr> <tr> <td> **india -> delhi** :: </td> <td> japan -> tokyo </td> </tr> <tr> <td> **man -> woman ** :: </td> <td> boy -> girl </td> </tr> <tr> <td> **small -> smaller ** :: </td> <td> large -> larger </td> </tr> </table> * Once you get the correct expected output, please feel free to modify the input cells above to test your own analogies. * Try to find some other analogy pairs that do work, but also find some where the algorithm doesn't give the right answer: * For example, you can try small->smaller as big->?. ### Congratulations! You've come to the end of the graded portion of the assignment. Here are the main points you should remember: - Cosine similarity is a good way to compare the similarity between pairs of word vectors. - Note that L2 (Euclidean) distance also works. - For NLP applications, using a pre-trained set of word vectors is often a good way to get started. - Even though you have finished the graded portions, we recommend you take a look at the rest of this notebook to learn about debiasing word vectors. Congratulations on finishing the graded portions of this notebook! ## 3 - Debiasing word vectors (OPTIONAL/UNGRADED) In the following exercise, you will examine gender biases that can be reflected in a word embedding, and explore algorithms for reducing the bias. 
In addition to learning about the topic of debiasing, this exercise will also help hone your intuition about what word vectors are doing. This section involves a bit of linear algebra, though you can probably complete it even without being an expert in linear algebra, and we encourage you to give it a shot. This portion of the notebook is optional and is not graded. Lets first see how the GloVe word embeddings relate to gender. You will first compute a vector $g = e_{woman}-e_{man}$, where $e_{woman}$ represents the word vector corresponding to the word *woman*, and $e_{man}$ corresponds to the word vector corresponding to the word *man*. The resulting vector $g$ roughly encodes the concept of "gender". (You might get a more accurate representation if you compute $g_1 = e_{mother}-e_{father}$, $g_2 = e_{girl}-e_{boy}$, etc. and average over them. But just using $e_{woman}-e_{man}$ will give good enough results for now.) ``` g = word_to_vec_map['woman'] - word_to_vec_map['man'] print(g) ``` Now, you will consider the cosine similarity of different words with $g$. Consider what a positive value of similarity means vs a negative cosine similarity. ``` print ('List of names and their similarities with constructed vector:') # girls and boys name name_list = ['john', 'marie', 'sophie', 'ronaldo', 'priya', 'rahul', 'danielle', 'reza', 'katy', 'yasmin'] for w in name_list: print (w, cosine_similarity(word_to_vec_map[w], g)) ``` As you can see, female first names tend to have a positive cosine similarity with our constructed vector $g$, while male first names tend to have a negative cosine similarity. This is not surprising, and the result seems acceptable. But let's try with some other words. 
``` print('Other words and their similarities:') word_list = ['lipstick', 'guns', 'science', 'arts', 'literature', 'warrior','doctor', 'tree', 'receptionist', 'technology', 'fashion', 'teacher', 'engineer', 'pilot', 'computer', 'singer'] for w in word_list: print (w, cosine_similarity(word_to_vec_map[w], g)) ``` Do you notice anything surprising? It is astonishing how these results reflect certain unhealthy gender stereotypes. For example, "computer" is closer to "man" while "literature" is closer to "woman". Ouch! We'll see below how to reduce the bias of these vectors, using an algorithm due to [Boliukbasi et al., 2016](https://arxiv.org/abs/1607.06520). Note that some word pairs such as "actor"/"actress" or "grandmother"/"grandfather" should remain gender specific, while other words such as "receptionist" or "technology" should be neutralized, i.e. not be gender-related. You will have to treat these two types of words differently when debiasing. ### 3.1 - Neutralize bias for non-gender specific words The figure below should help you visualize what neutralizing does. If you're using a 50-dimensional word embedding, the 50 dimensional space can be split into two parts: The bias-direction $g$, and the remaining 49 dimensions, which we'll call $g_{\perp}$. In linear algebra, we say that the 49 dimensional $g_{\perp}$ is perpendicular (or "orthogonal") to $g$, meaning it is at 90 degrees to $g$. The neutralization step takes a vector such as $e_{receptionist}$ and zeros out the component in the direction of $g$, giving us $e_{receptionist}^{debiased}$. Even though $g_{\perp}$ is 49 dimensional, given the limitations of what we can draw on a 2D screen, we illustrate it using a 1 dimensional axis below. <img src="images/neutral.png" style="width:800px;height:300px;"> <caption><center> **Figure 2**: The word vector for "receptionist" represented before and after applying the neutralize operation. 
</center></caption> **Exercise**: Implement `neutralize()` to remove the bias of words such as "receptionist" or "scientist". Given an input embedding $e$, you can use the following formulas to compute $e^{debiased}$: $$e^{bias\_component} = \frac{e \cdot g}{||g||_2^2} * g\tag{2}$$ $$e^{debiased} = e - e^{bias\_component}\tag{3}$$ If you are an expert in linear algebra, you may recognize $e^{bias\_component}$ as the projection of $e$ onto the direction $g$. If you're not an expert in linear algebra, don't worry about this. <!-- **Reminder**: a vector $u$ can be split into two parts: its projection over a vector-axis $v_B$ and its projection over the axis orthogonal to $v$: $$u = u_B + u_{\perp}$$ where : $u_B = $ and $ u_{\perp} = u - u_B $ !--> ``` def neutralize(word, g, word_to_vec_map): """ Removes the bias of "word" by projecting it on the space orthogonal to the bias axis. This function ensures that gender neutral words are zero in the gender subspace. Arguments: word -- string indicating the word to debias g -- numpy-array of shape (50,), corresponding to the bias axis (such as gender) word_to_vec_map -- dictionary mapping words to their corresponding vectors. Returns: e_debiased -- neutralized word vector representation of the input "word" """ ### START CODE HERE ### # Select word vector representation of "word". Use word_to_vec_map. (≈ 1 line) e = word_to_vec_map[word] # Compute e_biascomponent using the formula give above. (≈ 1 line) e_biascomponent = (np.dot(e,g)/np.linalg.norm(g)**2)*g # Neutralize e by substracting e_biascomponent from it # e_debiased should be equal to its orthogonal projection. 
(≈ 1 line) e_debiased = e-e_biascomponent ### END CODE HERE ### return e_debiased e = "receptionist" print("cosine similarity between " + e + " and g, before neutralizing: ", cosine_similarity(word_to_vec_map["receptionist"], g)) e_debiased = neutralize("receptionist", g, word_to_vec_map) print("cosine similarity between " + e + " and g, after neutralizing: ", cosine_similarity(e_debiased, g)) ``` **Expected Output**: The second result is essentially 0, up to numerical rounding (on the order of $10^{-17}$). <table> <tr> <td> **cosine similarity between receptionist and g, before neutralizing:** : </td> <td> 0.330779417506 </td> </tr> <tr> <td> **cosine similarity between receptionist and g, after neutralizing:** : </td> <td> -3.26732746085e-17 </tr> </table> ### 3.2 - Equalization algorithm for gender-specific words Next, lets see how debiasing can also be applied to word pairs such as "actress" and "actor." Equalization is applied to pairs of words that you might want to have differ only through the gender property. As a concrete example, suppose that "actress" is closer to "babysit" than "actor." By applying neutralizing to "babysit" we can reduce the gender-stereotype associated with babysitting. But this still does not guarantee that "actor" and "actress" are equidistant from "babysit." The equalization algorithm takes care of this. The key idea behind equalization is to make sure that a particular pair of words are equi-distant from the 49-dimensional $g_\perp$. The equalization step also ensures that the two equalized steps are now the same distance from $e_{receptionist}^{debiased}$, or from any other work that has been neutralized. In pictures, this is how equalization works: <img src="images/equalize10.png" style="width:800px;height:400px;"> The derivation of the linear algebra to do this is a bit more complex. (See Bolukbasi et al., 2016 for details.) 
But the key equations are: $$ \mu = \frac{e_{w1} + e_{w2}}{2}\tag{4}$$ $$ \mu_{B} = \frac {\mu \cdot \text{bias_axis}}{||\text{bias_axis}||_2^2} *\text{bias_axis} \tag{5}$$ $$\mu_{\perp} = \mu - \mu_{B} \tag{6}$$ $$ e_{w1B} = \frac {e_{w1} \cdot \text{bias_axis}}{||\text{bias_axis}||_2^2} *\text{bias_axis} \tag{7}$$ $$ e_{w2B} = \frac {e_{w2} \cdot \text{bias_axis}}{||\text{bias_axis}||_2^2} *\text{bias_axis} \tag{8}$$ $$e_{w1B}^{corrected} = \sqrt{ |{1 - ||\mu_{\perp} ||^2_2} |} * \frac{e_{\text{w1B}} - \mu_B} {||(e_{w1} - \mu_{\perp}) - \mu_B||} \tag{9}$$ $$e_{w2B}^{corrected} = \sqrt{ |{1 - ||\mu_{\perp} ||^2_2} |} * \frac{e_{\text{w2B}} - \mu_B} {||(e_{w2} - \mu_{\perp}) - \mu_B||} \tag{10}$$ $$e_1 = e_{w1B}^{corrected} + \mu_{\perp} \tag{11}$$ $$e_2 = e_{w2B}^{corrected} + \mu_{\perp} \tag{12}$$ **Exercise**: Implement the function below. Use the equations above to get the final equalized version of the pair of words. Good luck! ``` def equalize(pair, bias_axis, word_to_vec_map): """ Debias gender specific words by following the equalize method described in the figure above. Arguments: pair -- pair of strings of gender specific words to debias, e.g. ("actress", "actor") bias_axis -- numpy-array of shape (50,), vector corresponding to the bias axis, e.g. gender word_to_vec_map -- dictionary mapping words to their corresponding vectors Returns e_1 -- word vector corresponding to the first word e_2 -- word vector corresponding to the second word """ ### START CODE HERE ### # Step 1: Select word vector representation of "word". Use word_to_vec_map. 
def equalize(pair, bias_axis, word_to_vec_map):
    """
    Debias gender specific words by following the equalize method described in the figure above.

    Arguments:
        pair -- pair of strings of gender specific words to debias, e.g. ("actress", "actor")
        bias_axis -- numpy-array of shape (50,), vector corresponding to the bias axis, e.g. gender
        word_to_vec_map -- dictionary mapping words to their corresponding vectors

    Returns
        e_1 -- word vector corresponding to the first word
        e_2 -- word vector corresponding to the second word
    """
    # Step 1: Select the word vector representations of the pair.
    w1, w2 = pair[0], pair[1]
    e_w1, e_w2 = word_to_vec_map[w1], word_to_vec_map[w2]

    # Step 2: Mean of the two embeddings (eq. 4).
    mu = (e_w1 + e_w2) / 2

    # Step 3: Projection of mu onto the bias axis and its orthogonal complement (eqs. 5-6).
    mu_B = (np.dot(mu, bias_axis) / np.linalg.norm(bias_axis)**2) * bias_axis
    mu_orth = mu - mu_B

    # Step 4: Bias component of each word vector (eqs. 7-8).
    e_w1B = (np.dot(e_w1, bias_axis) / np.linalg.norm(bias_axis)**2) * bias_axis
    e_w2B = (np.dot(e_w2, bias_axis) / np.linalg.norm(bias_axis)**2) * bias_axis

    # Step 5: Rescale the bias components (eqs. 9-10).
    # BUG FIX: the denominator in eqs. (9)/(10) is the L2 *norm*
    # ||(e_w - mu_orth) - mu_B||, not the elementwise absolute value.
    # The original np.abs(...) divided componentwise and produced
    # divide-by-zero / NaN wherever a component of the difference was 0.
    scale = np.sqrt(np.abs(1 - np.linalg.norm(mu_orth)**2))
    corrected_e_w1B = scale * (e_w1B - mu_B) / np.linalg.norm((e_w1 - mu_orth) - mu_B)
    corrected_e_w2B = scale * (e_w2B - mu_B) / np.linalg.norm((e_w2 - mu_orth) - mu_B)

    # Step 6: Re-add the shared orthogonal component (eqs. 11-12), so both
    # words end up equidistant from the bias axis.
    e1 = corrected_e_w1B + mu_orth
    e2 = corrected_e_w2B + mu_orth

    return e1, e2
-0.700436428931 </td> </tr> <tr> <td> **cosine_similarity(u2, gender)** = </td> <td> 0.700436428931 </td> </tr> </table> Please feel free to play with the input words in the cell above, to apply equalization to other pairs of words. These debiasing algorithms are very helpful for reducing bias, but are not perfect and do not eliminate all traces of bias. For example, one weakness of this implementation was that the bias direction $g$ was defined using only the pair of words _woman_ and _man_. As discussed earlier, if $g$ were defined by computing $g_1 = e_{woman} - e_{man}$; $g_2 = e_{mother} - e_{father}$; $g_3 = e_{girl} - e_{boy}$; and so on and averaging over them, you would obtain a better estimate of the "gender" dimension in the 50 dimensional word embedding space. Feel free to play with such variants as well. ### Congratulations You have come to the end of this notebook, and have seen a lot of the ways that word vectors can be used as well as modified. Congratulations on finishing this notebook! **References**: - The debiasing algorithm is from Bolukbasi et al., 2016, [Man is to Computer Programmer as Woman is to Homemaker? Debiasing Word Embeddings](https://papers.nips.cc/paper/6228-man-is-to-computer-programmer-as-woman-is-to-homemaker-debiasing-word-embeddings.pdf) - The GloVe word embeddings were due to Jeffrey Pennington, Richard Socher, and Christopher D. Manning. (https://nlp.stanford.edu/projects/glove/)
github_jupyter
## 15 Used cars dataset A short tour through some used car data * https://data.world/data-society/used-cars-data This is a real-world data set with couple of flaws: wrong or missing data, outliers. In the process we: * inspect the data * dig into the strange (ugly) parts and clean bad rows * run a few aggregations and visualisation ``` import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt %matplotlib inline import pandas as pd df = pd.read_csv("data/autos.csv", encoding="latin-1") df.shape ``` ## Inspection * unique * groupby * size * sort_values Unique Brands ``` df.brand.unique() df.brand.unique().size ``` Most popular brands ``` df.groupby('brand').size().sort_values(ascending=False) df.groupby('brand').size().sort_values().plot(kind='barh', figsize=(8, 6), grid=True) df.head() ``` ## Null checks? * isnull * any * head * sum ``` df.isnull().values.any() ``` Ok, but where? ``` df[df.isnull().any(axis=1)].head() # 1/3 of rows contain 0 values df[df.isnull().any(axis=1)].shape # about 1/3 of rows contain 0 values ``` Which columns? ``` void = df[pd.isnull(df).any(axis=1)].loc[:, df.isna().any()] void.shape ``` We created a new data frame, see checks. ``` void.values.base is df.values void._is_view ``` Counting null values. ``` void.isnull() void.isnull().sum() ``` Question: Is there a group brands, that do not have a vehicle type? ``` s = df[df.vehicleType.isnull()].groupby('brand').size().sort_values(ascending=False) s.head() ``` What is the ratio of undefined types for each brand in this dataset? ``` (s / df.groupby('brand').size()).describe() (s / df.groupby('brand').size()).sort_values(ascending=False).head(10) ``` Oh right, the GDR built cars, too. Ok, look at the prices. ``` df.price.describe() # Suppress scientific notation. pd.set_option('display.float_format', lambda x: '%.3f' % x) df.price.describe() ``` Simple outlier detection. 
``` len(df[df.price > 3 * df.price.mean()]) df[df.price > 3 * df.price.mean()].shape df[df.price > 3 * df.price.mean()].describe() ``` Question: Which used car brands cost 50000 to 60000? ``` df[(df.price > 50000) & (df.price < 60000)].groupby('brand').size().sort_values(ascending=False) ``` And which ones cost over 100k? ``` df[(df.price > 100000)].groupby('brand').size().sort_values(ascending=False) ``` Getting closer to the outlier? ``` df[(df.price > 500000)].groupby('brand').size().sort_values(ascending=False) ``` Let's look at these gems. ``` df[(df.price > 400000)][["name", "seller", "price", "yearOfRegistration"]] ``` Let's ignore all rows, where price is silly. Int and string match? ``` df[(df.price > 400000)].price.dtype df[(df.price > 400000)].price.astype(str).dtype ``` Exclude some, but maybe all. ``` df[(df.price > 400000)].price.astype(str).str.match("999|111|12345|911911").sum() ``` * https://stackoverflow.com/questions/13851535/how-to-delete-rows-from-a-pandas-dataframe-based-on-a-conditional-expression ``` df = df.drop(df[df.score < 50].index) ``` Let's drop these things above 500000. ``` df = df.drop(df[df.price > 500000].index) df = df.drop(df[df.price < 1].index) df.shape df.price.dtype df = df.drop(df[df.yearOfRegistration > 2018].index) df = df.drop(df[df.yearOfRegistration < 1886].index) df.shape df.plot(kind="scatter", x="yearOfRegistration", y="price") df.plot(kind="scatter", x="yearOfRegistration", y="price", alpha=0.5) df.groupby('brand').price.mean().sort_values(ascending=True).plot(kind='barh', figsize=(10, 8), grid=True) ``` What else? ``` df.columns ``` Did PS increase over the years? ``` df.plot(kind="scatter", x="yearOfRegistration", y="powerPS") ``` Ok, let's drop some more. 7500 PS? ``` df[df.powerPS > 500].head() df[df.powerPS > 500].shape df = df.drop(df[df.powerPS > 500].index) df.shape df.plot(kind="scatter", x="yearOfRegistration", y="powerPS", alpha=0.4) ``` There seems to be some spike in the 1970? Some breakthrough? 
What else? ``` df.columns ``` Price categories for vehicle types? ``` df.groupby('vehicleType').price.mean().sort_values().plot(kind='barh') df.groupby('vehicleType').price.median().sort_values().plot(kind='barh') ``` What else? ``` df.columns ``` Last question: Fuel type. ``` df.fuelType.unique() df.groupby('fuelType').price.mean().sort_values().plot(kind='bar') ```
github_jupyter
class DNN(t.nn.Module):
    """CNN classifier for FashionMNIST: two conv blocks followed by a 2-layer MLP head.

    Note: the datasets, data loaders, loss function and optimizer are all
    built inside __init__, so merely instantiating this class downloads
    FashionMNIST (when DOWNLOAD_MNIST is True) and prints the test dataset.
    """

    def __init__(self):
        super(DNN, self).__init__()

        train_data = tv.datasets.FashionMNIST(
            root="./fashionmnist/",
            train=True,
            transform=tv.transforms.ToTensor(),
            download=DOWNLOAD_MNIST  # set DOWNLOAD_MNIST=False once the data is on disk
        )

        test_data = tv.datasets.FashionMNIST(
            root="./fashionmnist/",
            train=False,
            transform=tv.transforms.ToTensor(),
            download=DOWNLOAD_MNIST
        )
        print(test_data)

        # Data loaders for easy mini-batch return in training; a training
        # image batch has shape (BATCH_SIZE, 1, 28, 28).
        self.train_loader = t.utils.data.DataLoader(
            dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

        self.test_loader = t.utils.data.DataLoader(
            dataset=test_data, batch_size=1000, shuffle=True)

        self.cnn = t.nn.Sequential(
            t.nn.Conv2d(
                in_channels=1,    # grayscale input
                out_channels=32,  # n_filters
                kernel_size=5,    # filter size
                stride=1,         # filter movement/step
                padding=2,        # keeps H/W unchanged: padding=(kernel_size-1)/2 when stride=1
            ),                    # output shape (32, 28, 28)
            t.nn.ELU(),           # activation
            t.nn.MaxPool2d(kernel_size=2),  # output shape (32, 14, 14)
            t.nn.Conv2d(
                in_channels=32,   # input channels = previous out_channels
                out_channels=64,  # n_filters
                kernel_size=3,    # filter size
                stride=1,         # filter movement/step
                padding=1,        # keeps H/W unchanged: padding=(kernel_size-1)/2 when stride=1
            ),                    # output shape (64, 14, 14)
            t.nn.ELU(),           # activation
            t.nn.MaxPool2d(kernel_size=2)  # output shape (64, 7, 7)
        )
        self.dnn = t.nn.Sequential(
            t.nn.Linear(7*7*64,256),
            t.nn.Dropout(0.5),
            t.nn.ELU(),
            t.nn.Linear(256,10),  # 10 FashionMNIST classes (logits; CrossEntropyLoss applies softmax)
        )
        self.lr = 0.001
        self.loss = t.nn.CrossEntropyLoss()
        self.opt = t.optim.Adam(self.parameters(), lr = self.lr)

    def forward(self,x):
        """Return class logits of shape (batch, 10) for input x of shape (batch, 1, 28, 28)."""
        cnn1 = self.cnn(x)
        #print(cnn1.shape)
        cnn1 = cnn1.view(-1,7*7*64)  # flatten conv features (64*7*7) for the MLP head
        #print(cnn1.shape)
        out = self.dnn(cnn1)
        #print(out.shape)
        return(out)
model.loss opt = model.opt dataloader = model.train_loader testloader = model.test_loader for e in range(EPOCH): step = 0 ts = time.time() for (x, y) in (dataloader): model.train()# train model dropout used step += 1 b_x = x.view(-1,1,28,28) # batch x, shape (batch, 28*28) #print(b_x.shape) b_y = y if(use_gpu): b_x = b_x.cuda() b_y = b_y.cuda() out = model(b_x) losses = loss(out,b_y) opt.zero_grad() losses.backward() opt.step() if(step%100 == 0): if(use_gpu): print(e,step,losses.data.cpu().numpy()) else: print(e,step,losses.data.numpy()) model.eval() # train model dropout not use for (tx,ty) in testloader: t_x = tx.view(-1,1, 28,28) # batch x, shape (batch, 28*28) t_y = ty if(use_gpu): t_x = t_x.cuda() t_y = t_y.cuda() t_out = model(t_x) if(use_gpu): acc = (np.argmax(t_out.data.cpu().numpy(),axis=1) == t_y.data.cpu().numpy()) else: acc = (np.argmax(t_out.data.numpy(),axis=1) == t_y.data.numpy()) print(time.time() - ts ,np.sum(acc)/1000) ts = time.time() break#只测试前1000个 t.save(model, './model.pkl') # 保存整个网络 t.save(model.state_dict(), './model_params.pkl') # 只保存网络中的参数 (速度快, 占内存少) #加载参数的方式 """net = DNN() net.load_state_dict(t.load('./model_params.pkl')) net.eval()""" #加载整个模型的方式 net = t.load('./model.pkl') net.cpu() net.eval() for (tx,ty) in testloader: t_x = tx.view(-1, 1,28,28) # batch x, shape (batch, 28*28) t_y = ty t_out = net(t_x) #acc = (np.argmax(t_out.data.CPU().numpy(),axis=1) == t_y.data.CPU().numpy()) acc = (np.argmax(t_out.data.numpy(),axis=1) == t_y.data.numpy()) print(np.sum(acc)/1000) train() ```
github_jupyter
``` import numpy as np import time import pickle ## I import sys to kill the program if an option is not correct. import sys import os import csv from RhoAndBeta import CalcRhoAndBetaVectors from UtilitiesOptimization import SubgrAlgSavPrimDualObjInd, \ SubgrAlgSavPrimDualObjFn_L2Ind from SimulationCode import ExpPareto from Utilities import CreateTableParetoInd, CreateTableParetoL2_L2Ind_Gr ## Read the data created in CreateDataJupNot suffix='InstanceInfo/Ins1LongRun/' num_impressions=pickle.load(open(suffix+'num_impressions'+'.p',"rb")) numCampaigns=pickle.load(open(suffix+'numCampaigns'+'.p',"rb")) num_edges=pickle.load(open(suffix+'num_edges'+'.p',"rb")) index_Imps=pickle.load(open(suffix+'index_Imps'+'.p',"rb")) index_sizeCamps=pickle.load(open(suffix+'index_sizeCamps'+'.p',"rb")) index_startCamp=pickle.load(open(suffix+'index_startCamp'+'.p',"rb")) vector_maxbid=pickle.load(open(suffix+'vector_maxbid'+'.p',"rb")) vector_r=pickle.load(open(suffix+'vector_r'+'.p',"rb")) vector_s=(pickle.load(open(suffix+'vector_s'+'.p',"rb"))).astype(int) ext_s=pickle.load(open(suffix+'ext_s'+'.p',"rb")) adverPerImp=(pickle.load(open(suffix+'adverPerImp'+'.p',"rb"))).astype(int) UB_bidsPerImp = pickle.load(open(suffix+'UB_bids'+'.p',"rb")) vector_ctr=pickle.load(open(suffix+'vector_ctr'+'.p',"rb")) vector_rctr=pickle.load(open(suffix+'vector_rctr'+'.p',"rb")) probImp=pickle.load(open(suffix+'probImp'+'.p',"rb")) ## In this Experiment we fix a budget to use. In this case we choose 100 vector_m = np.ones(numCampaigns)*100 ## If this parameter is true then first price auctions (\beta_i(b) =b) are used, otherwise second price. 
firstPrice = False ## If this parameter is true a parameterSearch will be performd and .csv will be made, parameterSearch = False ## We use $\tau_k = 1/m_k$ expForTau=-1.0 tau = np.power(vector_m,expForTau) UB_bids = UB_bidsPerImp[index_Imps] suffix='ResultsPareto/' current_directory = os.getcwd() results_directory = os.path.join(current_directory, suffix) if not os.path.exists(results_directory): os.makedirs(results_directory) ``` # Parameter Search This parameter search looks for a constant $C$, such that the dual method shows empirical convergence. Given that here we only look for training convergence, we don't need to cross-validate or similar. ``` if parameterSearch: input_var = input("This will execute a simple parameter search.\ If you just wanted to run just a full long run do parameterSearch= False \ and kill this execution. To continue press enter.") c=np.array([1.0,0.1,0.01,0.001,0.0001,0.00001,0.000001,0.0000001]) p_grad_Type = 0 num_it = 1000 it_per_cal = 250 init_lam=np.zeros((numCampaigns)) alphas_pure=np.fromfunction(lambda i, j: (1/(np.sqrt(i + 1))), \ (num_it, 1), dtype=int)[:,0] # dualObjFn, primalObjFn, dualObjFnAvg, primalObjFnAvg, budget_used, \ # budget_LamAvgUse, dual_vars, dual_varsAvg = [], [], [], [], [], [], [], [] nameResults='ParameterSearch'+'It_'+str(num_it) f = open(suffix+nameResults+'.csv', 'wt') writer = csv.writer(f, lineterminator='\n') writer.writerow( ('Auction Type', 'Problem Type', 'Ite', 'Cte', 'DualFnValue',\ 'PrimalValue','DualFnValueAvg','PrimalFnValueAvg') ) print('Using First Price Auctions') firstPrice = True for c_val in c: print('c_val: ',c_val, end =', Methods: ') print('Indicator Case', end =' ,') p_grad_Type = 0 alphas=c_val*alphas_pure [dual_FnValues,primal_GivenMu,budget_used,dual_vars,dual_AvgLamFnValues,\ primal_AvgLamGivenMu,budget_LamAvgUse,dual_varsAvg]= SubgrAlgSavPrimDualObjInd(\ init_lam, num_it, alphas, vector_r, vector_ctr, vector_rctr, vector_s, ext_s, \ vector_m, num_impressions, numCampaigns, 
num_edges, index_sizeCamps, index_Imps,\ UB_bids, firstPrice, adverPerImp, it_per_cal, p_grad_Type) numSaved=len(budget_used) for t in range(numSaved): writer.writerow(('First price', 'Indicator', (t+1)*it_per_cal,c_val,dual_FnValues[t],\ primal_GivenMu[t],dual_AvgLamFnValues[t],primal_AvgLamGivenMu[t])) print('L2 penalization wout indicator', end =' ,') p_grad_Type = 1 alphas=c_val*alphas_pure [dual_FnValues,primal_GivenMu,budget_used,dual_vars,dual_AvgLamFnValues,\ primal_AvgLamGivenMu,budget_LamAvgUse,dual_varsAvg]= SubgrAlgSavPrimDualObjFn_L2Ind(\ init_lam, num_it, alphas, vector_r, vector_ctr, vector_rctr, vector_s, ext_s, vector_m,\ num_impressions, numCampaigns, num_edges, index_sizeCamps, index_Imps, UB_bids, firstPrice,\ adverPerImp, it_per_cal, p_grad_Type, tau, False) numSaved=len(budget_used) for t in range(numSaved): writer.writerow(('First price', 'L2 Wout Ind', (t+1)*it_per_cal,c_val,dual_FnValues[t],\ primal_GivenMu[t],dual_AvgLamFnValues[t],primal_AvgLamGivenMu[t])) print('L2 with indicator') p_grad_Type = 2 alphas=c_val*alphas_pure [dual_FnValues,primal_GivenMu,budget_used,dual_vars,dual_AvgLamFnValues,\ primal_AvgLamGivenMu,budget_LamAvgUse,dual_varsAvg]= SubgrAlgSavPrimDualObjFn_L2Ind(\ init_lam, num_it, alphas, vector_r, vector_ctr, vector_rctr, vector_s, ext_s, vector_m,\ num_impressions, numCampaigns, num_edges, index_sizeCamps, index_Imps, UB_bids, firstPrice,\ adverPerImp, it_per_cal, p_grad_Type, tau, True) numSaved=len(budget_used) for t in range(numSaved): writer.writerow(('First price', 'L2 + Indicator', (t+1)*it_per_cal,c_val,dual_FnValues[t],\ primal_GivenMu[t],dual_AvgLamFnValues[t],primal_AvgLamGivenMu[t])) print('Using Second Price Auctions') firstPrice = False for c_val in c: print('c_val: ',c_val, end =', Methods: ') print('Indicator Case', end =' ,') p_grad_Type = 0 alphas=c_val*alphas_pure [dual_FnValues,primal_GivenMu,budget_used,dual_vars,dual_AvgLamFnValues,\ primal_AvgLamGivenMu,budget_LamAvgUse,dual_varsAvg]= 
SubgrAlgSavPrimDualObjInd(\ init_lam, num_it, alphas, vector_r, vector_ctr, vector_rctr, vector_s, ext_s, \ vector_m, num_impressions, numCampaigns, num_edges, index_sizeCamps, index_Imps,\ UB_bids, firstPrice, adverPerImp, it_per_cal, p_grad_Type) numSaved=len(budget_used) for t in range(numSaved): writer.writerow(('Second price', 'Indicator', (t+1)*it_per_cal,c_val,dual_FnValues[t],\ primal_GivenMu[t],dual_AvgLamFnValues[t],primal_AvgLamGivenMu[t])) print('L2 penalization wout indicator', end =' ,') p_grad_Type = 1 alphas=c_val*alphas_pure [dual_FnValues,primal_GivenMu,budget_used,dual_vars,dual_AvgLamFnValues,\ primal_AvgLamGivenMu,budget_LamAvgUse,dual_varsAvg]= SubgrAlgSavPrimDualObjFn_L2Ind(\ init_lam, num_it, alphas, vector_r, vector_ctr, vector_rctr, vector_s, ext_s, vector_m,\ num_impressions, numCampaigns, num_edges, index_sizeCamps, index_Imps, UB_bids, firstPrice,\ adverPerImp, it_per_cal, p_grad_Type, tau, False) numSaved=len(budget_used) for t in range(numSaved): writer.writerow(('Second price', 'L2 Wout Ind', (t+1)*it_per_cal,c_val,dual_FnValues[t],\ primal_GivenMu[t],dual_AvgLamFnValues[t],primal_AvgLamGivenMu[t])) print('L2 with indicator') p_grad_Type = 2 alphas=c_val*alphas_pure [dual_FnValues,primal_GivenMu,budget_used,dual_vars,dual_AvgLamFnValues,\ primal_AvgLamGivenMu,budget_LamAvgUse,dual_varsAvg]= SubgrAlgSavPrimDualObjFn_L2Ind(\ init_lam, num_it, alphas, vector_r, vector_ctr, vector_rctr, vector_s, ext_s, vector_m,\ num_impressions, numCampaigns, num_edges, index_sizeCamps, index_Imps, UB_bids, firstPrice,\ adverPerImp, it_per_cal, p_grad_Type, tau, True) numSaved=len(budget_used) for t in range(numSaved): writer.writerow(('Second price', 'L2 + Indicator', (t+1)*it_per_cal,c_val,dual_FnValues[t],\ primal_GivenMu[t],dual_AvgLamFnValues[t],primal_AvgLamGivenMu[t])) f.close() ``` # The following value are obtained just by Looking at The Parameter Search .csv ``` ## Best constants consBestFP = [0.0001, 0.5, 0.5] consBestSP = [0.0001, 0.5, 0.5] 
``` ### Run Pareto ``` num_itInd, num_itL2, num_itL2Ind = 10000, 10000, 10000 init_lam = np.zeros(numCampaigns) shuffle = False sim = 100 np.random.seed(12345) vecOfSeeds = np.random.randint(100000, size=sim) multTP = [0.018, 0.020, 0.022, 0.025, 0.027, 0.030, 0.033, 0.037, 0.041, \ 0.045, 0.050, 0.055, 0.061, 0.067, 0.074, 0.082, 0.091, 0.100, 0.111, \ 0.122, 0.135, 0.150, 0.165, 0.183, 0.202, 0.223, 0.247, 0.273, 0.301, \ 0.333, 0.368, 0.407, 0.449, 0.497, 0.549, 0.607, 0.670, 0.741, 0.819, \ 0.905, 1.000, 1.105, 1.221, 1.350, 1.492, 1.649, 1.822, 2.014, 2.226, \ 2.460] multGr = [np.round(0.4+0.02*y, decimals = 2) for y in range(50)] p_grad_TypeInd, p_grad_TypeL2, p_grad_TypeL2Ind = 0, 1, 2 alphasInd, alphasL2, alphasL2Ind = 0, 0, 0 if firstPrice: alphasInd = consBestFP[0] * np.array([np.sqrt(1.0/(i + 1)) for i in range(num_itInd)]) alphasL2 = consBestFP[1] * np.array([np.sqrt(1.0/(i + 1)) for i in range(num_itL2)]) alphasL2Ind = consBestFP[2] * np.array([np.sqrt(1.0/(i + 1)) for i in range(num_itL2Ind)]) else: alphasInd = consBestSP[0] * np.array([np.sqrt(1.0/(i + 1)) for i in range(num_itInd)]) alphasL2 = consBestSP[1] * np.array([np.sqrt(1.0/(i + 1)) for i in range(num_itL2)]) alphasL2Ind = consBestSP[2] * np.array([np.sqrt(1.0/(i + 1)) for i in range(num_itL2Ind)]) listToRetInd, dictToRetL2, dictToRetL2Ind, dictToRetGr = ExpPareto(numCampaigns, num_impressions,\ num_edges, index_Imps, index_sizeCamps, vector_s, vector_r, vector_m, vector_ctr, vector_rctr, \ UB_bidsPerImp, adverPerImp, alphasInd, num_itInd, alphasL2, num_itL2, alphasL2Ind, num_itL2Ind, \ p_grad_TypeInd, p_grad_TypeL2, p_grad_TypeL2Ind, multGr, multTP, init_lam, sim, firstPrice, \ vecOfSeeds = vecOfSeeds, shuffle = shuffle) from Utilities import CreateTableSensitivity nameToSaveSP = 'TableParetoLogSP.csv' nameToSaveIndSP = 'TableParetoIndLogSP.csv' TableParetoSP = CreateTableParetoL2_L2Ind_Gr(dictToRetL2, dictToRetL2Ind, dictToRetGr,\ vector_m, multTP, multGr, sim) TableJustIndSP = 
CreateTableParetoInd(listToRetInd, vector_m, sim, name = "FP") with open(suffix+nameToSaveSP, 'w') as csvfile: writer = csv.writer(csvfile) writer.writerow(['multL2', 'multGr', 'sim',\ 'L2-Profit', 'L2-Revenue', 'L2-Cost', 'L2-BidsMade',\ 'L2-BidsWon', 'L2-ClicksWon', 'L2-%BudgetUsed',\ 'L2Ind-Profit', 'L2Ind-Revenue', 'L2Ind-Cost', 'L2Ind-BidsMade',\ 'L2Ind-BidsWon', 'L2Ind-ClicksWon', 'L2Ind-%BudgetUsed',\ 'Gr-Profit', 'Gr-Revenue', 'Gr-Cost', 'Gr-BidsMade',\ 'Gr-BidsWon', 'Gr-ClicksWon', 'Gr-%BudgetUsed']) [writer.writerow(r) for r in TableParetoSP] with open(suffix+nameToSaveIndSP, 'w') as csvfile: writer = csv.writer(csvfile) writer.writerow(['sim', 'Ind-Profit', 'Ind-Revenue', 'Ind-Cost',\ 'Ind-BidsMade', 'Ind-BidsWon', 'Ind-ClicksWon', 'Ind-%BudgetUsed']) [writer.writerow(r) for r in TableJustIndSP] ```
github_jupyter
# Meanshift and Camshift _You can view [IPython Nootebook](README.ipynb) report._ ---- ## Contents - [GOAL](#GOAL) - [Meanshift](#Meanshift) - [Meanshift in OpenCV](#Meanshift-in-OpenCV) - [Camshift](#Camshift) - [Camshift in OpenCV](#Camshift-in-OpenCV) - [Additional Resources](#Additional-Resources) - [Exercises](#Exercises) ## GOAL In this chapter: - We will learn about Meanshift and Camshift algorithms to find and track objects in videos. ## Meanshift The intuition behind the meanshift is simple. Consider you have a set of points. (It can be a pixel distribution like histogram backprojection). You are given a small window ( may be a circle) and you have to move that window to the area of maximum pixel density (or maximum number of points). It is illustrated in the simple image given below: ![meanshift-basics](../../data/meanshift-basics.jpg) The initial window is shown in blue circle with the name "C1". Its original center is marked in blue rectangle, named "C1_o". But if you find the centroid of the points inside that window, you will get the point "C1_r" (marked in small blue circle) which is the real centroid of window. Surely they don't match. So move your window such that circle of the new window matches with previous centroid. Again find the new centroid. Most probably, it won't match. So move it again, and continue the iterations such that center of window and its centroid falls on the same location (or with a small desired error). So finally what you obtain is a window with maximum pixel distribution. It is marked with green circle, named "C2". As you can see in image, it has maximum number of points. The whole process is demonstrated on a static image below: ![meanshift-face](../../data/meanshift-face.gif) So we normally pass the histogram backprojected image and initial target location. When the object moves, obviously the movement is reflected in histogram backprojected image. 
As a result, meanshift algorithm moves our window to the new location with maximum density. ### Meanshift in OpenCV To use meanshift in OpenCV, first we need to setup the target, find its histogram so that we can backproject the target on each frame for calculation of meanshift. We also need to provide initial location of window. For histogram, only Hue is considered here. Also, to avoid false values due to low light, low light values are discarded using [cv.inRange()](https://docs.opencv.org/3.4.1/d2/de8/group__core__array.html#ga48af0ab51e36436c5d04340e036ce981) function. ```python import numpy as np import cv2 as cv cap = cv.VideoCapture("../../data/slow.mp4") # Take first frame of the video ret, frame = cap.read() # Setup initial location of window r, h, c, w = 190, 20, 330, 60 # simply hardcoded the values track_window = (c, r, w, h) # Set up the ROI for tracking roi = frame[r:r+h, c:c+w] hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV) mask = cv.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.))) roi_hist = cv.calcHist([hsv_roi], [0], mask, [180], [0, 180]) cv.normalize(roi_hist, roi_hist, 0, 255, cv.NORM_MINMAX) # Setup the termination criteria, either 10 iteration or move by at least 1 pt term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1) # Define the codec and create VideoWriter object fourcc = cv.VideoWriter_fourcc(*'XVID') width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) outVideo = cv.VideoWriter("output-files/meanshift-res.avi", fourcc, 25.0, (width, height), True) # Saved frame number frame_number = 0 while True: ret, frame = cap.read() if ret is True: hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV) dst = cv.calcBackProject([hsv], [0], roi_hist, [0, 180], 1) # Apply meanshift to get the new location ret, track_window = cv.meanShift(dst, track_window, term_crit) # Draw it on image x, y, w, h = track_window img2 = cv.rectangle(frame, (x, y), (x+w, y+h), 255, 2) # Save the image and 
show it outVideo.write(img2) cv.imshow("img2", img2) k = cv.waitKey(60) & 0xFF if k == 27: # Press "esc" to exit break elif k == 0x73: # Press "s" to save the current frame cv.imwrite("output-files/" + "meanshift-res-" + str(frame_number) + ".png", img2) frame_number += 1 else: break cap.release() outVideo.release() cv.destroyAllWindows() ``` Three frames in a video I used is given below: ![meanshift-result](output-files/meanshift-result.png) ## Camshift Did you closely watch the last result? There is a problem. Our window always has the same size when car is farther away and it is very close to camera. That is not good. We need to adapt the window size with size and rotation of the target. Once again, the solution came from "OpenCV Labs" and it is called CAMshift (Continuously Adaptive Meanshift) published by Gary Bradsky in his paper "Computer Vision Face Tracking for Use in a Perceptual User Interface" in 1988. It applies meanshift first. Once meanshift converges, it updates the size of the window as, $ s = 2 \times \sqrt{\frac{M_{00}}{256}} $. It also calculates the orientation of best fitting ellipse to it. Again it applies the meanshift with new scaled search window and previous window location. The process is continued until required accuracy is met. ![camshift-face](../../data/camshift-face.gif) ### Camshift in OpenCV It is almost same as meanshift, but it returns a rotated rectangle (that is our result) and box parameters (used to be passed as search window in next iteration). 
See the code below: ```python import numpy as np import cv2 as cv cap = cv.VideoCapture("../../data/slow.mp4") # Take first frame of the video ret, frame = cap.read() # Setup initial location of window r, h, c, w = 190, 20, 330, 60 # simply hardcoded the values track_window = (c, r, w, h) # Set up the ROI for tracking roi = frame[r:r+h, c:c+w] hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV) mask = cv.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.))) roi_hist = cv.calcHist([hsv_roi], [0], mask, [180], [0, 180]) cv.normalize(roi_hist, roi_hist, 0, 255, cv.NORM_MINMAX) # Setup the termination criteria, either 10 iteration or move by at least 1 pt term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1) # Define the codec and create VideoWriter object fourcc = cv.VideoWriter_fourcc(*'XVID') width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) outVideo = cv.VideoWriter("output-files/camshift-res.avi", fourcc, 25.0, (width, height), True) # Saved frame number frame_number = 0 while True: ret, frame = cap.read() if ret is True: hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV) dst = cv.calcBackProject([hsv], [0], roi_hist, [0, 180], 1) # Apply camshift to get the new location ret, track_window = cv.CamShift(dst, track_window, term_crit) # Draw it on image pts = cv.boxPoints(ret) pts = np.int0(pts) img2 = cv.polylines(frame, [pts], True, (0, 0, 255), 2) # Save the image and show it outVideo.write(img2) cv.imshow('img2', img2) k = cv.waitKey(60) & 0xFF if k == 27: # Press "esc" to exit break elif k == 0x73: # Press "s" to save the current frame cv.imwrite("output-files/" + "camshift-res-" + str(frame_number) + ".png", img2) frame_number += 1 else: break cap.release() outVideo.release() cv.destroyAllWindows() ``` Three frames of the result is shown below: ![camshift-result](output-files/camshift-result.png) ## Additional Resources 1. French Wikipedia page on Camshift. (The two animations are taken from here) 2. 
Bradski, G.R., "Real time face and object tracking as a component of a perceptual user interface," Applications of Computer Vision, 1998. WACV '98. Proceedings., Fourth IEEE Workshop on , vol., no., pp.214,219, 19-21 Oct 1998 ## Exercises 1. OpenCV comes with a Python sample on interactive demo of camshift. Use it, hack it, understand it.
github_jupyter
    def train_step(self):
        """Run one pass over the training set and return the mean batch loss.

        Overrides autorch's PartBulider.train_step to add an extra penalty:
        the squared error of the Benzene sidedraw split factor (output
        column ``bz_idx``, a notebook-level global) is weighted 5x on top
        of the base loss, forcing the model to fit that component tightly.
        """
        self.net.train()
        total_loss = 0
        for t,(x,y) in enumerate(self.train_iter):
            y_hat = self.net(x)
            # Per-sample squared error on the Benzene split-factor output only.
            bz_loss = (y_hat[:,bz_idx] - y[:,bz_idx])**2
            # NOTE(review): assumes self.loss_fn returns per-element losses
            # (reduction='none') so the +5*bz_loss broadcast is meaningful --
            # confirm against autorch.PartBulider's loss_fn.
            loss = self.loss_fn(y_hat,y) + 5*bz_loss
            loss = loss.mean()
            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()
            total_loss += loss.item()
        # NOTE(review): raises NameError (t unbound) if train_iter is empty.
        return total_loss/(t+1)
def c620_wt_post_processing(wt_pred):
    """Post-process C620 wt% predictions so the sidedraw Benzene tracks its spec.

    Mutates ``wt_pred`` in place (and also returns it):
      1. Rescales every non-Benzene sidedraw component (the third 41-wide
         column block, indices 82..122) so that together they sum to
         100 minus the *predicted* Benzene wt% of each row.
      2. Overwrites the predicted Benzene sidedraw wt% with column index 2
         of the notebook-global ``c620.data['X_test']`` -- the user-supplied
         Benzene specification the output must not deviate from.

    Because step 1 uses the predicted Benzene and step 2 then substitutes
    the spec, rows sum to exactly 100 only when prediction equals spec;
    otherwise they are off by that gap.

    NOTE(review): depends on the notebook global ``c620`` and assumes
    wt_pred rows are index-aligned with c620.data['X_test'] -- confirm
    before reusing outside this notebook.
    """
    def normalize(x):
        # Row-normalize so each row of x sums to 1.
        return x / x.sum(axis=1).reshape(-1,1)
    bz_idx = wt_pred.columns.tolist().index('Tatoray Stripper C620 Operation_Sidedraw Production Rate and Composition_Benzene_wt%')
    # All sidedraw columns (third 41-wide block) except the Benzene one.
    other_idx = [i for i in range(41*2,41*3,1) if i != bz_idx]
    # Mass left over for the non-Benzene components, per row.
    other_total = (100 - wt_pred.iloc[:,bz_idx].values).reshape(-1,1)
    wt_pred.iloc[:,other_idx] = normalize(wt_pred.iloc[:,other_idx].values)*other_total
    wt_pred.iloc[:,bz_idx] = c620.data['X_test'].iloc[:,[2]].values
    return wt_pred
github_jupyter
# Denoising Autoencoder Sticking with the MNIST dataset, let's add noise to our data and see if we can define and train an autoencoder to _de_-noise the images. <img src='notebook_ims/autoencoder_denoise.png' width=70%/> Let's get started by importing our libraries and getting the dataset. ``` import torch import numpy as np from torchvision import datasets import torchvision.transforms as transforms # convert data to torch.FloatTensor transform = transforms.ToTensor() # load the training and test datasets train_data = datasets.MNIST(root='data', train=True, download=True, transform=transform) test_data = datasets.MNIST(root='data', train=False, download=True, transform=transform) # Create training and test dataloaders num_workers = 0 # how many samples per batch to load batch_size = 20 # prepare data loaders train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers) ``` ### Visualize the Data ``` import matplotlib.pyplot as plt %matplotlib inline # obtain one batch of training images dataiter = iter(train_loader) images, labels = dataiter.next() images = images.numpy() # get one image from the batch img = np.squeeze(images[0]) fig = plt.figure(figsize = (5,5)) ax = fig.add_subplot(111) ax.imshow(img, cmap='gray') ``` --- # Denoising As I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practive. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. >**We'll use noisy images as input and the original, clean images as targets.** Below is an example of some of the noisy images I generated and the associated, denoised images. 
# define the NN architecture
class ConvDenoiser(nn.Module):
    """Convolutional denoising autoencoder for 28x28 MNIST images.

    Encoder: three conv+pool stages, spatial dims 28 -> 14 -> 7 -> 3
    with channel depths 1 -> 32 -> 16 -> 8.
    Decoder: three transposed convolutions 3 -> 7 -> 14 -> 28 back up
    to depth 32, then a final 1-channel conv + sigmoid producing pixel
    values in [0, 1].
    """

    def __init__(self):
        super(ConvDenoiser, self).__init__()
        ## encoder layers ##
        self.encoder1 = nn.Conv2d(1, 32, 3, padding=1)
        self.encoder2 = nn.Conv2d(32, 16, 3, padding=1)
        self.encoder3 = nn.Conv2d(16, 8, 3, padding=1)
        self.maxpool = nn.MaxPool2d(2, 2)  # reduce x-y dims by 2
        ## decoder layers ##
        # kernel 3 / stride 2 maps 3x3 -> 7x7; the 2/2 layers double dims
        self.decoder1 = nn.ConvTranspose2d(8, 8, 3, stride=2)
        self.decoder2 = nn.ConvTranspose2d(8, 16, 2, stride=2)
        self.decoder3 = nn.ConvTranspose2d(16, 32, 2, stride=2)
        # final normal conv layer to decrease depth back to 1 channel
        self.final = nn.Conv2d(32, 1, 3, padding=1)

    def forward(self, x):
        ## encode ##
        x = self.maxpool(F.relu(self.encoder1(x)))
        x = self.maxpool(F.relu(self.encoder2(x)))
        x = self.maxpool(F.relu(self.encoder3(x)))
        ## decode ##
        x = F.relu(self.decoder1(x))
        x = F.relu(self.decoder2(x))
        x = F.relu(self.decoder3(x))
        # Fix: F.sigmoid is deprecated in modern PyTorch — use torch.sigmoid.
        x = torch.sigmoid(self.final(x))
        return x
# for adding noise to images
noise_factor = 0.5

def train_model(trainloader, model, criterion, optimizer):
    """Train `model` to denoise: feed noisy images, compare output to clean ones.

    Bug fixes vs. the original:
    - iterates the `trainloader` argument instead of silently using the
      global `train_loader`;
    - the per-epoch loss is a sample-weighted sum, so it is averaged over
      the number of samples (`len(trainloader.dataset)`), not the number
      of batches.
    """
    # number of epochs to train the model
    n_epochs = 20

    for epoch in range(1, n_epochs + 1):
        # monitor training loss
        train_loss = 0.0

        ###################
        # train the model #
        ###################
        # _ stands in for labels: targets for a denoiser are the images themselves
        for images, _ in trainloader:
            ## add random noise to the input images
            noisy_imgs = images + noise_factor * torch.randn(*images.shape)
            # Clip the noisy images back into the valid pixel range [0, 1]
            noisy_imgs = torch.clamp(noisy_imgs, 0., 1.)
            if train_on_gpu:
                images = images.cuda()
                noisy_imgs = noisy_imgs.cuda()
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            ## forward pass: compute predicted outputs from the *noisy* images
            outputs = model(noisy_imgs)
            # the "target" is still the original, not-noisy images
            loss = criterion(outputs, images)
            # backward pass + parameter update
            loss.backward()
            optimizer.step()
            # accumulate sample-weighted loss
            train_loss += loss.item() * images.size(0)

        # average over the number of training samples
        train_loss = train_loss / len(trainloader.dataset)
        print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch, train_loss))
if train_on_gpu: images = images.cuda() noisy_imgs = noisy_imgs.cuda() # get sample outputs output = model(noisy_imgs) # prep images for display noisy_imgs = noisy_imgs.numpy() if not train_on_gpu else noisy_imgs.cpu().numpy() # output is resized into a batch of iages output = output.view(batch_size, 1, 28, 28) # use detach when it's an output that requires_grad output = output.detach().numpy() if not train_on_gpu else output.detach().cpu().numpy() # plot the first ten input images and then reconstructed images fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(25,4)) # input images on top row, reconstructions on bottom for noisy_imgs, row in zip([noisy_imgs, output], axes): for img, ax in zip(noisy_imgs, row): ax.imshow(np.squeeze(img), cmap='gray') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # obtain one batch of test images dataiter = iter(test_loader) images, labels = dataiter.next() test(model, images) ```
github_jupyter
``` import os import time import datetime as dt import xarray as xr from datetime import datetime import pandas import matplotlib.pyplot as plt import numpy as np import math import geopy.distance from math import sin, pi from scipy import interpolate from scipy import stats #functions for running storm data import sys ####################you will need to change some paths here!##################### #list of input directories dir_storm_info='f:/data/tc_wakes/database/info/' dir_out='f:/data/tc_wakes/database/sst/' #for iyr_storm in range(2002,2018): init = 0 for iyr_storm in range(2002,2018): for inum_storm in range(0,110): filename = dir_out + str(iyr_storm) + '/' + str(inum_storm).zfill(3) + '_interpolated_track.nc' exists = os.path.isfile(filename) if not exists: continue print(filename) ds_storm_info=xr.open_dataset(filename) ds_storm_info = ds_storm_info.sel(j2=0) ds_storm_info.close() ds_storm_info['sindex']=ds_storm_info.pres ds_storm_info['sindex']=iyr_storm+inum_storm/120 # if abs(ds_storm_info.lon[-1]-ds_storm_info.lon[0])>180: # ds_storm_info['lon'] = np.mod(ds_storm_info['lon'], 360) if init==0: ds_all = ds_storm_info init=1 ds_all = xr.concat([ds_all, ds_storm_info], dim='i2') import cartopy.crs as ccrs import matplotlib.pyplot as plt from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter import matplotlib as mpl dir_figs = 'f:/data/tc_wakes/database/figs/hist/' fig = plt.figure(figsize=(14,6)) ax = plt.axes(projection=ccrs.PlateCarree()) ax.coastlines() #ax.scatter(ds_all.lon,ds_all.lat,c=ds_all.sindex,s=.01) ax.scatter(ds_all.lon,ds_all.lat,cmap=mpl.cm.jet,c=ds_all.wind,s=.01,vmin=0,vmax=100) ax.set_xticks([-180, -120, -60, 0, 60, 120, 180], crs=ccrs.PlateCarree()) ax.set_yticks([-90, -60, -30, 0, 30, 60, 90], crs=ccrs.PlateCarree()) lon_formatter = LongitudeFormatter(zero_direction_label=True) lat_formatter = LatitudeFormatter() ax.xaxis.set_major_formatter(lon_formatter) ax.yaxis.set_major_formatter(lat_formatter) sm = 
plt.cm.ScalarMappable(cmap=mpl.cm.jet,norm=plt.Normalize(0,100)) sm._A = [] cb = plt.colorbar(sm,ax=ax) cb.ax.set_ylabel('wind speed (ms$^{-1}$)') plt.savefig(dir_figs+'map_storms.png') subset = ds_all.where(ds_all.wind>1) plt.hist(subset.wind,bins=np.arange(0,150,10)) plt.xlabel('wind speed (ms$^{-1}$)') plt.ylabel('number of observations') plt.savefig(dir_figs+'hist_database_windspeed.png') subset = ds_all.where(ds_all.wind>1) plt.scatter(subset.storm_speed_kmhr,subset.wind) plt.xlabel('wind speed (m s$^{-1}$)') plt.ylabel('translation speed (km hr$^{-1}$)') plt.savefig(dir_figs+'hist_database_translation.png') ds_all.wind ```
github_jupyter
def seed_everything(seed=0):
    """Seed python, hashing, numpy and tensorflow RNGs for reproducibility.

    Bug fix: the original called ``set_random_seed(0)``, ignoring the
    ``seed`` argument — so the TF graph seed never followed the requested
    seed.
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    set_random_seed(seed)
def crop_image(img, tol=7):
    """Crop away near-black borders (pixels <= `tol`) around a fundus image.

    Handles both grayscale (2-D) and RGB (3-D) arrays.  If the entire
    image is darker than `tol`, cropping would remove everything, so the
    original image is returned untouched.
    """
    if img.ndim == 2:
        keep = img > tol
        return img[np.ix_(keep.any(1), keep.any(0))]
    elif img.ndim == 3:
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        keep = gray > tol
        rows, cols = keep.any(1), keep.any(0)
        # too dark: the crop would be empty, return the input unchanged
        if img[:, :, 0][np.ix_(rows, cols)].shape[0] == 0:
            return img
        channels = [img[:, :, c][np.ix_(rows, cols)] for c in range(3)]
        return np.stack(channels, axis=-1)
def cosine_decay_with_warmup(global_step,
                             learning_rate_base,
                             total_steps,
                             warmup_learning_rate=0.0,
                             warmup_steps=0,
                             hold_base_rate_steps=0):
    """Cosine decay schedule with a linear warm-up period.

    The learning rate grows linearly from `warmup_learning_rate` to
    `learning_rate_base` over `warmup_steps`, optionally holds the base
    rate for `hold_base_rate_steps`, then follows a cosine decay down to
    zero at `total_steps` (and stays 0 afterwards).

    :param global_step {int}: current training step.
    :param learning_rate_base {float}: base learning rate.
    :param total_steps {int}: total number of training steps.
    :param warmup_learning_rate {float}: initial warm-up rate (default 0.0).
    :param warmup_steps {int}: number of warm-up steps (default 0).
    :param hold_base_rate_steps {int}: steps to hold the base rate before
        decaying (default 0).
    :returns: a float representing the learning rate.
    :raises ValueError: if warmup_learning_rate exceeds learning_rate_base,
        or warmup_steps exceeds total_steps.
    """
    if total_steps < warmup_steps:
        raise ValueError('total_steps must be larger or equal to warmup_steps.')
    decay_span = float(total_steps - warmup_steps - hold_base_rate_steps)
    progress = (global_step - warmup_steps - hold_base_rate_steps) / decay_span
    learning_rate = 0.5 * learning_rate_base * (1 + np.cos(np.pi * progress))
    if hold_base_rate_steps > 0:
        # hold the base rate until warm-up + hold has elapsed
        learning_rate = np.where(global_step > warmup_steps + hold_base_rate_steps,
                                 learning_rate, learning_rate_base)
    if warmup_steps > 0:
        if learning_rate_base < warmup_learning_rate:
            raise ValueError('learning_rate_base must be larger or equal to '
                             'warmup_learning_rate.')
        slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
        warmup_rate = slope * global_step + warmup_learning_rate
        learning_rate = np.where(global_step < warmup_steps,
                                 warmup_rate, learning_rate)
    return np.where(global_step > total_steps, 0.0, learning_rate)
from previous checkpoint. :param warmup_learning_rate {float}: initial learning rate for warm up. (default: {0.0}). :param warmup_steps {int}: number of warmup steps. (default: {0}). :param hold_base_rate_steps {int}: Optional number of steps to hold base learning rate before decaying. (default: {0}). :param verbose {int}: quiet, 1: update messages. (default: {0}). """ super(WarmUpCosineDecayScheduler, self).__init__() self.learning_rate_base = learning_rate_base self.total_steps = total_steps self.global_step = global_step_init self.warmup_learning_rate = warmup_learning_rate self.warmup_steps = warmup_steps self.hold_base_rate_steps = hold_base_rate_steps self.verbose = verbose self.learning_rates = [] def on_batch_end(self, batch, logs=None): self.global_step = self.global_step + 1 lr = K.get_value(self.model.optimizer.lr) self.learning_rates.append(lr) def on_batch_begin(self, batch, logs=None): lr = cosine_decay_with_warmup(global_step=self.global_step, learning_rate_base=self.learning_rate_base, total_steps=self.total_steps, warmup_learning_rate=self.warmup_learning_rate, warmup_steps=self.warmup_steps, hold_base_rate_steps=self.hold_base_rate_steps) K.set_value(self.model.optimizer.lr, lr) if self.verbose > 0: print('\nBatch %02d: setting learning rate to %s.' 
% (self.global_step + 1, lr)) ``` # Model ``` def create_model(input_shape): input_tensor = Input(shape=input_shape) base_model = EfficientNetB5(weights=None, include_top=False, input_tensor=input_tensor) # base_model.load_weights('../input/efficientnet-keras-weights-b0b5/efficientnet-b5_imagenet_1000_notop.h5') x = GlobalAveragePooling2D()(base_model.output) final_output = Dense(1, activation='linear', name='final_output')(x) model = Model(input_tensor, final_output) model.load_weights('../input/aptos-pretrain-olddata-effnetb5/effNetB5_img224_oldData.h5') return model ``` # Train top layers ``` model = create_model(input_shape=(HEIGHT, WIDTH, CHANNELS)) for layer in model.layers: layer.trainable = False for i in range(-2, 0): model.layers[i].trainable = True cosine_lr_1st = WarmUpCosineDecayScheduler(learning_rate_base=WARMUP_LEARNING_RATE, total_steps=TOTAL_STEPS_1st, warmup_learning_rate=0.0, warmup_steps=WARMUP_STEPS_1st, hold_base_rate_steps=(2 * STEP_SIZE)) metric_list = ["accuracy"] callback_list = [cosine_lr_1st] optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE) model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list) model.summary() STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size history_warmup = model.fit_generator(generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN, validation_data=valid_generator, validation_steps=STEP_SIZE_VALID, epochs=WARMUP_EPOCHS, callbacks=callback_list, verbose=2).history ``` # Fine-tune the complete model ``` for layer in model.layers: layer.trainable = True es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1) cosine_lr_2nd = WarmUpCosineDecayScheduler(learning_rate_base=LEARNING_RATE, total_steps=TOTAL_STEPS_2nd, warmup_learning_rate=0.0, warmup_steps=WARMUP_STEPS_2nd, hold_base_rate_steps=(2 * STEP_SIZE)) callback_list = [es, cosine_lr_2nd] optimizer = 
def classify(x):
    """Round a continuous DR-severity regression score into one of the
    five diagnosis grades 0..4 using midpoint thresholds."""
    for grade, upper in enumerate((0.5, 1.5, 2.5, 3.5)):
        if x < upper:
            return grade
    return 4
labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']

def plot_confusion_matrix(train, validation, labels=labels):
    """Plot row-normalised train/validation confusion matrices side by side.

    Each of `train` and `validation` is a (true_labels, predicted_labels)
    pair; each matrix row is normalised by its class support before
    rendering as a heatmap.
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
    panels = ((train, ax1, "Blues", 'Train'),
              (validation, ax2, sns.cubehelix_palette(8), 'Validation'))
    for (y_true, y_pred), ax, cmap, title in panels:
        cnf = confusion_matrix(y_true, y_pred)
        # normalise each row by its total so cells read as per-class rates
        cnf_norm = cnf.astype('float') / cnf.sum(axis=1)[:, np.newaxis]
        df_cm = pd.DataFrame(cnf_norm, index=labels, columns=labels)
        sns.heatmap(df_cm, annot=True, fmt='.2f', cmap=cmap, ax=ax).set_title(title)
    plt.show()
def apply_tta(model, generator, steps=10):
    """Test-time augmentation: average `steps` full prediction passes.

    The generator applies random augmentations, so each pass yields a
    different prediction for the same samples; resetting before each
    pass keeps the sample order aligned so the passes can be averaged
    element-wise.
    """
    n_batches = generator.n // generator.batch_size
    all_passes = []
    for _ in range(steps):
        generator.reset()
        all_passes.append(model.predict_generator(generator, steps=n_batches))
    return np.mean(all_passes, axis=0)
github_jupyter
``` import numpy as np import pandas as pd from matplotlib import pyplot as plt from tqdm import tqdm %matplotlib inline from torch.utils.data import Dataset, DataLoader import torch import torchvision import torch.nn as nn import torch.optim as optim from torch.nn import functional as F device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(device) ``` # Generate dataset ``` y = np.random.randint(0,3,500) idx= [] for i in range(3): print(i,sum(y==i)) idx.append(y==i) x = np.zeros((500,)) np.random.seed(12) x[idx[0]] = np.random.uniform(low =-1,high =0,size= sum(idx[0])) x[idx[1]] = np.random.uniform(low =0,high =1,size= sum(idx[1])) x[idx[2]] = np.random.uniform(low =2,high =3,size= sum(idx[2])) x[idx[0]][0], x[idx[2]][5] print(x.shape,y.shape) idx= [] for i in range(3): idx.append(y==i) for i in range(3): y= np.zeros(x[idx[i]].shape[0]) plt.scatter(x[idx[i]],y,label="class_"+str(i)) plt.legend() bg_idx = [ np.where(idx[2] == True)[0]] bg_idx = np.concatenate(bg_idx, axis = 0) bg_idx.shape np.unique(bg_idx).shape x = x - np.mean(x[bg_idx], axis = 0, keepdims = True) np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True) x = x/np.std(x[bg_idx], axis = 0, keepdims = True) np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True) for i in range(3): y= np.zeros(x[idx[i]].shape[0]) plt.scatter(x[idx[i]],y,label="class_"+str(i)) plt.legend() foreground_classes = {'class_0','class_1' } background_classes = {'class_2'} fg_class = np.random.randint(0,2) fg_idx = np.random.randint(0,9) a = [] for i in range(9): if i == fg_idx: b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1) a.append(x[b]) print("foreground "+str(fg_class)+" present at " + str(fg_idx)) else: bg_class = np.random.randint(2,3) b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1) a.append(x[b]) print("background "+str(bg_class)+" present at " + str(i)) a = np.concatenate(a,axis=0) print(a.shape) print(fg_class , 
fg_idx) a.shape np.reshape(a,(9,1)) a=np.reshape(a,(3,3)) plt.imshow(a) desired_num = 2000 mosaic_list_of_images =[] mosaic_label = [] fore_idx=[] for j in range(desired_num): np.random.seed(j) fg_class = np.random.randint(0,2) fg_idx = 0 a = [] for i in range(9): if i == fg_idx: b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1) a.append(x[b]) # print("foreground "+str(fg_class)+" present at " + str(fg_idx)) else: bg_class = np.random.randint(2,3) b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1) a.append(x[b]) # print("background "+str(bg_class)+" present at " + str(i)) a = np.concatenate(a,axis=0) mosaic_list_of_images.append(np.reshape(a,(9,1))) mosaic_label.append(fg_class) fore_idx.append(fg_idx) mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T mosaic_list_of_images.shape, mosaic_list_of_images[0] for j in range(9): print(mosaic_list_of_images[0][j]) class MosaicDataset(Dataset): """MosaicDataset dataset.""" def __init__(self, mosaic_list_of_images, mosaic_label, fore_idx): """ Args: csv_file (string): Path to the csv file with annotations. root_dir (string): Directory with all the images. transform (callable, optional): Optional transform to be applied on a sample. 
class Focus(nn.Module):
    """Attention module over a mosaic of 9 scalar patches.

    Scores each patch with a shared 1-unit linear layer, softmaxes the
    9 scores into attention weights, and returns the weights together
    with the attention-weighted average patch.

    Fix vs. original: batch size, dtype and device are derived from the
    input tensor instead of being hard-coded (global `batch` / "cuda"),
    so the module works on CPU and with any batch size.
    """

    def __init__(self):
        super(Focus, self).__init__()
        self.fc1 = nn.Linear(1, 1)

    def forward(self, z):
        # z: (batch, 9) — nine scalar patch values per mosaic
        b = z.size(0)
        scores = torch.zeros(b, 9, dtype=z.dtype, device=z.device)
        for i in range(9):
            scores[:, i] = self.helper(z[:, i])[:, 0]
        alphas = F.softmax(scores, dim=1)
        y = torch.zeros(b, dtype=z.dtype, device=z.device)
        for i in range(9):
            # weighted average of the patches under the attention weights
            y = y + alphas[:, i] * z[:, i]
        return alphas, y

    def helper(self, x):
        """Score a single patch value with the shared linear layer."""
        return self.fc1(x.view(-1, 1))


class Classification(nn.Module):
    """Linear classifier mapping the focused scalar to 2 class logits."""

    def __init__(self):
        super(Classification, self).__init__()
        self.fc1 = nn.Linear(1, 2)

    def forward(self, x):
        return self.fc1(x.view(-1, 1))
focus_net.fc1.bias classify.fc1.weight = torch.nn.Parameter(torch.tensor(np.array([[0.0],[0.0]]))) classify.fc1.bias = torch.nn.Parameter(torch.tensor(np.array([0.0, 0.0]))) classify.fc1.weight, classify.fc1.bias focus_net = focus_net.to("cuda") classify = classify.to("cuda") focus_net.fc1.weight, focus_net.fc1.bias classify.fc1.weight, classify.fc1.bias import torch.optim as optim criterion = nn.CrossEntropyLoss() optimizer_classify = optim.SGD(classify.parameters(), lr=0.01, momentum=0.9) optimizer_focus = optim.SGD(focus_net.parameters(), lr=0.01, momentum=0.9) # optimizer_classify = optim.Adam(classify.parameters(), lr=0.01) # optimizer_focus = optim.Adam(focus_net.parameters(), lr=0.01) col1=[] col2=[] col3=[] col4=[] col5=[] col6=[] col7=[] col8=[] col9=[] col10=[] col11=[] col12=[] col13=[] correct = 0 total = 0 count = 0 flag = 1 focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 with torch.no_grad(): for data in train_loader: inputs, labels , fore_idx = data inputs = inputs.double() inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda") alphas, avg_images = focus_net(inputs) outputs = classify(avg_images) # print(outputs.shape) _, predicted = torch.max(outputs.data, 1) # print(predicted.shape) for j in range(labels.size(0)): count += 1 focus = torch.argmax(alphas[j]) if alphas[j][focus] >= 0.5 : argmax_more_than_half += 1 else: argmax_less_than_half += 1 # print(focus, fore_idx[j], predicted[j]) if(focus == fore_idx[j] and predicted[j] == labels[j]): focus_true_pred_true += 1 elif(focus != fore_idx[j] and predicted[j] == labels[j]): focus_false_pred_true += 1 elif(focus == fore_idx[j] and predicted[j] != labels[j]): focus_true_pred_false += 1 elif(focus != fore_idx[j] and predicted[j] != labels[j]): focus_false_pred_false += 1 total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on 
the 1000 train images: %d %%' % ( 100 * correct / total)) print("total correct", correct) print("total train set images", total) print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) ) print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) ) print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) ) print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) ) print("argmax_more_than_half ==================> ",argmax_more_than_half) print("argmax_less_than_half ==================> ",argmax_less_than_half) print(count) print("="*100) col1.append(0) col2.append(argmax_more_than_half) col3.append(argmax_less_than_half) col4.append(focus_true_pred_true) col5.append(focus_false_pred_true) col6.append(focus_true_pred_false) col7.append(focus_false_pred_false) correct = 0 total = 0 count = 0 flag = 1 focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 with torch.no_grad(): for data in test_loader: inputs, labels , fore_idx = data inputs = inputs.double() inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda") alphas, avg_images = focus_net(inputs) outputs = classify(avg_images) _, predicted = torch.max(outputs.data, 1) for j in range(labels.size(0)): focus = torch.argmax(alphas[j]) if alphas[j][focus] >= 0.5 : argmax_more_than_half += 1 else: argmax_less_than_half += 1 if(focus == fore_idx[j] and predicted[j] == labels[j]): focus_true_pred_true += 1 elif(focus != fore_idx[j] and predicted[j] == labels[j]): focus_false_pred_true += 1 elif(focus == fore_idx[j] and predicted[j] != labels[j]): focus_true_pred_false += 1 elif(focus != fore_idx[j] and predicted[j] != 
labels[j]): focus_false_pred_false += 1 total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 1000 test images: %d %%' % ( 100 * correct / total)) print("total correct", correct) print("total train set images", total) print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) ) print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) ) print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) ) print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) ) print("argmax_more_than_half ==================> ",argmax_more_than_half) print("argmax_less_than_half ==================> ",argmax_less_than_half) col8.append(argmax_more_than_half) col9.append(argmax_less_than_half) col10.append(focus_true_pred_true) col11.append(focus_false_pred_true) col12.append(focus_true_pred_false) col13.append(focus_false_pred_false) nos_epochs = 1000 focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 for epoch in range(nos_epochs): # loop over the dataset multiple times focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 running_loss = 0.0 epoch_loss = [] cnt=0 iteration = desired_num // batch #training data set for i, data in enumerate(train_loader): inputs , labels , fore_idx = data inputs, labels = inputs.to("cuda"), labels.to("cuda") inputs = inputs.double() # zero the parameter gradients optimizer_focus.zero_grad() optimizer_classify.zero_grad() alphas, avg_images = focus_net(inputs) outputs = classify(avg_images) _, predicted = torch.max(outputs.data, 1) # 
print(outputs) # print(outputs.shape,labels.shape , torch.argmax(outputs, dim=1)) loss = criterion(outputs, labels) loss.backward() optimizer_focus.step() optimizer_classify.step() running_loss += loss.item() mini = 3 if cnt % mini == mini-1: # print every 40 mini-batches print('[%d, %5d] loss: %.3f' %(epoch + 1, cnt + 1, running_loss / mini)) epoch_loss.append(running_loss/mini) running_loss = 0.0 cnt=cnt+1 if epoch % 5 == 0: for j in range (batch): focus = torch.argmax(alphas[j]) if(alphas[j][focus] >= 0.5): argmax_more_than_half +=1 else: argmax_less_than_half +=1 if(focus == fore_idx[j] and predicted[j] == labels[j]): focus_true_pred_true += 1 elif(focus != fore_idx[j] and predicted[j] == labels[j]): focus_false_pred_true +=1 elif(focus == fore_idx[j] and predicted[j] != labels[j]): focus_true_pred_false +=1 elif(focus != fore_idx[j] and predicted[j] != labels[j]): focus_false_pred_false +=1 if(np.mean(epoch_loss) <= 0.001): break; if epoch % 5 == 0: col1.append(epoch + 1) col2.append(argmax_more_than_half) col3.append(argmax_less_than_half) col4.append(focus_true_pred_true) col5.append(focus_false_pred_true) col6.append(focus_true_pred_false) col7.append(focus_false_pred_false) # print("="*20) # print("Train FTPT : ", col4) # print("Train FFPT : ", col5) #************************************************************************ #testing data set # focus_net.eval() with torch.no_grad(): focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 for data in test_loader: inputs, labels , fore_idx = data inputs = inputs.double() inputs, labels = inputs.to("cuda"), labels.to("cuda") alphas, avg_images = focus_net(inputs) outputs = classify(avg_images) _, predicted = torch.max(outputs.data, 1) for j in range (batch): focus = torch.argmax(alphas[j]) if(alphas[j][focus] >= 0.5): argmax_more_than_half +=1 else: argmax_less_than_half +=1 if(focus == fore_idx[j] and predicted[j] == 
labels[j]): focus_true_pred_true += 1 elif(focus != fore_idx[j] and predicted[j] == labels[j]): focus_false_pred_true +=1 elif(focus == fore_idx[j] and predicted[j] != labels[j]): focus_true_pred_false +=1 elif(focus != fore_idx[j] and predicted[j] != labels[j]): focus_false_pred_false +=1 col8.append(argmax_more_than_half) col9.append(argmax_less_than_half) col10.append(focus_true_pred_true) col11.append(focus_false_pred_true) col12.append(focus_true_pred_false) col13.append(focus_false_pred_false) # print("Test FTPT : ", col10) # print("Test FFPT : ", col11) # print("="*20) print('Finished Training') df_train = pd.DataFrame() df_test = pd.DataFrame() columns = ["epochs", "argmax > 0.5" ,"argmax < 0.5", "focus_true_pred_true", "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false" ] df_train[columns[0]] = col1 df_train[columns[1]] = col2 df_train[columns[2]] = col3 df_train[columns[3]] = col4 df_train[columns[4]] = col5 df_train[columns[5]] = col6 df_train[columns[6]] = col7 df_test[columns[0]] = col1 df_test[columns[1]] = col8 df_test[columns[2]] = col9 df_test[columns[3]] = col10 df_test[columns[4]] = col11 df_test[columns[5]] = col12 df_test[columns[6]] = col13 df_train # plt.figure(12,12) plt.plot(col1,np.array(col2)/10, label='argmax > 0.5') plt.plot(col1,np.array(col3)/10, label='argmax < 0.5') plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.xlabel("epochs") plt.ylabel("training data") plt.title("On Training set") plt.show() plt.plot(col1,np.array(col4)/10, label ="focus_true_pred_true ") plt.plot(col1,np.array(col5)/10, label ="focus_false_pred_true ") plt.plot(col1,np.array(col6)/10, label ="focus_true_pred_false ") plt.plot(col1,np.array(col7)/10, label ="focus_false_pred_false ") plt.title("On Training set") plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.xlabel("epochs") plt.ylabel("training data") plt.show() df_test # plt.figure(12,12) plt.plot(col1,np.array(col8)/10, label='argmax > 0.5') 
plt.plot(col1,np.array(col9)/10, label='argmax < 0.5') plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.xlabel("epochs") plt.ylabel("Testing data") plt.title("On Testing set") plt.show() plt.plot(col1,np.array(col10)/10, label ="focus_true_pred_true ") plt.plot(col1,np.array(col11)/10, label ="focus_false_pred_true ") plt.plot(col1,np.array(col12)/10, label ="focus_true_pred_false ") plt.plot(col1,np.array(col13)/10, label ="focus_false_pred_false ") plt.title("On Testing set") plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.xlabel("epochs") plt.ylabel("Testing data") plt.show() correct = 0 total = 0 count = 0 flag = 1 focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 with torch.no_grad(): for data in train_loader: inputs, labels , fore_idx = data inputs = inputs.double() inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda") alphas, avg_images = focus_net(inputs) outputs = classify(avg_images) _, predicted = torch.max(outputs.data, 1) for j in range(labels.size(0)): focus = torch.argmax(alphas[j]) if alphas[j][focus] >= 0.5 : argmax_more_than_half += 1 else: argmax_less_than_half += 1 if(focus == fore_idx[j] and predicted[j] == labels[j]): focus_true_pred_true += 1 elif(focus != fore_idx[j] and predicted[j] == labels[j]): focus_false_pred_true += 1 elif(focus == fore_idx[j] and predicted[j] != labels[j]): focus_true_pred_false += 1 elif(focus != fore_idx[j] and predicted[j] != labels[j]): focus_false_pred_false += 1 total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 1000 train images: %d %%' % ( 100 * correct / total)) print("total correct", correct) print("total train set images", total) print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) ) print("focus_false_pred_true %d =============> 
FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) ) print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) ) print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) ) print("argmax_more_than_half ==================> ",argmax_more_than_half) print("argmax_less_than_half ==================> ",argmax_less_than_half) correct = 0 total = 0 count = 0 flag = 1 focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 with torch.no_grad(): for data in test_loader: inputs, labels , fore_idx = data inputs = inputs.double() inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda") alphas, avg_images = focus_net(inputs) outputs = classify(avg_images) _, predicted = torch.max(outputs.data, 1) for j in range(labels.size(0)): focus = torch.argmax(alphas[j]) if alphas[j][focus] >= 0.5 : argmax_more_than_half += 1 else: argmax_less_than_half += 1 if(focus == fore_idx[j] and predicted[j] == labels[j]): focus_true_pred_true += 1 elif(focus != fore_idx[j] and predicted[j] == labels[j]): focus_false_pred_true += 1 elif(focus == fore_idx[j] and predicted[j] != labels[j]): focus_true_pred_false += 1 elif(focus != fore_idx[j] and predicted[j] != labels[j]): focus_false_pred_false += 1 total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 1000 test images: %d %%' % ( 100 * correct / total)) print("total correct", correct) print("total train set images", total) print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) ) print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) ) print("focus_true_pred_false %d 
=============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) ) print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) ) print("argmax_more_than_half ==================> ",argmax_more_than_half) print("argmax_less_than_half ==================> ",argmax_less_than_half) correct = 0 total = 0 with torch.no_grad(): for data in train_loader: inputs, labels , fore_idx = data inputs = inputs.double() inputs, labels = inputs.to("cuda"), labels.to("cuda") alphas, avg_images = focus_net(inputs) outputs = classify(avg_images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 1000 train images: %d %%' % ( 100 * correct / total)) print("total correct", correct) print("total train set images", total) correct = 0 total = 0 with torch.no_grad(): for data in test_loader: inputs, labels , fore_idx = data inputs = inputs.double() inputs, labels = inputs.to("cuda"), labels.to("cuda") alphas, avg_images = focus_net(inputs) outputs = classify(avg_images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 1000 test images: %d %%' % ( 100 * correct / total)) print("total correct", correct) print("total train set images", total) focus_net.fc1.weight, focus_net.fc1.bias classify.fc1.weight, classify.fc1.bias ```
github_jupyter
<a href="https://colab.research.google.com/github/AliaksandrSiarohin/first-order-model/blob/master/demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Demo for paper "First Order Motion Model for Image Animation" To try the demo, press the 2 play buttons in order and scroll to the bottom. Note that it may take several minutes to load. ``` !pip install ffmpy &> /dev/null !git init -q . !git remote add origin https://github.com/AliaksandrSiarohin/first-order-model !git pull -q origin master !git clone -q https://github.com/graphemecluster/first-order-model-demo demo import IPython.display import PIL.Image import cv2 import imageio import io import ipywidgets import numpy import os.path import requests import skimage.transform import warnings from base64 import b64encode from demo import load_checkpoints, make_animation from ffmpy import FFmpeg from google.colab import files, output from IPython.display import HTML, Javascript from skimage import img_as_ubyte warnings.filterwarnings("ignore") os.makedirs("user", exist_ok=True) display(HTML(""" <style> .widget-box > * { flex-shrink: 0; } .widget-tab { min-width: 0; flex: 1 1 auto; } .widget-tab .p-TabBar-tabLabel { font-size: 15px; } .widget-upload { background-color: tan; } .widget-button { font-size: 18px; width: 160px; height: 34px; line-height: 34px; } .widget-dropdown { width: 250px; } .widget-checkbox { width: 650px; } .widget-checkbox + .widget-checkbox { margin-top: -6px; } .input-widget .output_html { text-align: center; width: 266px; height: 266px; line-height: 266px; color: lightgray; font-size: 72px; } div.stream { display: none; } .title { font-size: 20px; font-weight: bold; margin: 12px 0 6px 0; } .warning { display: none; color: red; margin-left: 10px; } .warn { display: initial; } .resource { cursor: pointer; border: 1px solid gray; margin: 5px; width: 160px; height: 160px; min-width: 160px; min-height: 160px; max-width: 160px; 
max-height: 160px; -webkit-box-sizing: initial; box-sizing: initial; } .resource:hover { border: 6px solid crimson; margin: 0; } .selected { border: 6px solid seagreen; margin: 0; } .input-widget { width: 266px; height: 266px; border: 1px solid gray; } .input-button { width: 268px; font-size: 15px; margin: 2px 0 0; } .output-widget { width: 256px; height: 256px; border: 1px solid gray; } .output-button { width: 258px; font-size: 15px; margin: 2px 0 0; } .uploaded { width: 256px; height: 256px; border: 6px solid seagreen; margin: 0; } .label-or { align-self: center; font-size: 20px; margin: 16px; } .loading { align-items: center; width: fit-content; } .loader { margin: 32px 0 16px 0; width: 48px; height: 48px; min-width: 48px; min-height: 48px; max-width: 48px; max-height: 48px; border: 4px solid whitesmoke; border-top-color: gray; border-radius: 50%; animation: spin 1.8s linear infinite; } .loading-label { color: gray; } .comparison-widget { width: 256px; height: 256px; border: 1px solid gray; margin-left: 2px; } .comparison-label { color: gray; font-size: 14px; text-align: center; position: relative; bottom: 3px; } @keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } } </style> """)) def thumbnail(file): return imageio.get_reader(file, mode='I', format='FFMPEG').get_next_data() def create_image(i, j): image_widget = ipywidgets.Image( value=open('demo/images/%d%d.png' % (i, j), 'rb').read(), format='png' ) image_widget.add_class('resource') image_widget.add_class('resource-image') image_widget.add_class('resource-image%d%d' % (i, j)) return image_widget def create_video(i): video_widget = ipywidgets.Image( value=cv2.imencode('.png', cv2.cvtColor(thumbnail('demo/videos/%d.mp4' % i), cv2.COLOR_RGB2BGR))[1].tostring(), format='png' ) video_widget.add_class('resource') video_widget.add_class('resource-video') video_widget.add_class('resource-video%d' % i) return video_widget def create_title(title): title_widget = 
ipywidgets.Label(title) title_widget.add_class('title') return title_widget def download_output(button): complete.layout.display = 'none' loading.layout.display = '' files.download('output.mp4') loading.layout.display = 'none' complete.layout.display = '' def convert_output(button): complete.layout.display = 'none' loading.layout.display = '' FFmpeg(inputs={'output.mp4': None}, outputs={'scaled.mp4': '-vf "scale=1080x1080:flags=lanczos,pad=1920:1080:420:0" -y'}).run() files.download('scaled.mp4') loading.layout.display = 'none' complete.layout.display = '' def back_to_main(button): complete.layout.display = 'none' main.layout.display = '' label_or = ipywidgets.Label('or') label_or.add_class('label-or') image_titles = ['Peoples', 'Cartoons', 'Dolls', 'Game of Thrones', 'Statues'] image_lengths = [8, 4, 8, 9, 4] image_tab = ipywidgets.Tab() image_tab.children = [ipywidgets.HBox([create_image(i, j) for j in range(length)]) for i, length in enumerate(image_lengths)] for i, title in enumerate(image_titles): image_tab.set_title(i, title) input_image_widget = ipywidgets.Output() input_image_widget.add_class('input-widget') upload_input_image_button = ipywidgets.FileUpload(accept='image/*', button_style='primary') upload_input_image_button.add_class('input-button') image_part = ipywidgets.HBox([ ipywidgets.VBox([input_image_widget, upload_input_image_button]), label_or, image_tab ]) video_tab = ipywidgets.Tab() video_tab.children = [ipywidgets.HBox([create_video(i) for i in range(5)])] video_tab.set_title(0, 'All Videos') input_video_widget = ipywidgets.Output() input_video_widget.add_class('input-widget') upload_input_video_button = ipywidgets.FileUpload(accept='video/*', button_style='primary') upload_input_video_button.add_class('input-button') video_part = ipywidgets.HBox([ ipywidgets.VBox([input_video_widget, upload_input_video_button]), label_or, video_tab ]) model = ipywidgets.Dropdown( description="Model:", options=[ 'vox', 'vox-adv', 'taichi', 'taichi-adv', 
'nemo', 'mgif', 'fashion', 'bair' ] ) warning = ipywidgets.HTML('<b>Warning:</b> Upload your own images and videos (see README)') warning.add_class('warning') model_part = ipywidgets.HBox([model, warning]) relative = ipywidgets.Checkbox(description="Relative keypoint displacement (Inherit object proporions from the video)", value=True) adapt_movement_scale = ipywidgets.Checkbox(description="Adapt movement scale (Don’t touch unless you know want you are doing)", value=True) generate_button = ipywidgets.Button(description="Generate", button_style='primary') main = ipywidgets.VBox([ create_title('Choose Image'), image_part, create_title('Choose Video'), video_part, create_title('Settings'), model_part, relative, adapt_movement_scale, generate_button ]) loader = ipywidgets.Label() loader.add_class("loader") loading_label = ipywidgets.Label("This may take several minutes to process…") loading_label.add_class("loading-label") loading = ipywidgets.VBox([loader, loading_label]) loading.add_class('loading') output_widget = ipywidgets.Output() output_widget.add_class('output-widget') download = ipywidgets.Button(description='Download', button_style='primary') download.add_class('output-button') download.on_click(download_output) convert = ipywidgets.Button(description='Convert to 1920×1080', button_style='primary') convert.add_class('output-button') convert.on_click(convert_output) back = ipywidgets.Button(description='Back', button_style='primary') back.add_class('output-button') back.on_click(back_to_main) comparison_widget = ipywidgets.Output() comparison_widget.add_class('comparison-widget') comparison_label = ipywidgets.Label('Comparison') comparison_label.add_class('comparison-label') complete = ipywidgets.HBox([ ipywidgets.VBox([output_widget, download, convert, back]), ipywidgets.VBox([comparison_widget, comparison_label]) ]) display(ipywidgets.VBox([main, loading, complete])) display(Javascript(""" var images, videos; function deselectImages() { 
images.forEach(function(item) { item.classList.remove("selected"); }); } function deselectVideos() { videos.forEach(function(item) { item.classList.remove("selected"); }); } function invokePython(func) { google.colab.kernel.invokeFunction("notebook." + func, [].slice.call(arguments, 1), {}); } setTimeout(function() { (images = [].slice.call(document.getElementsByClassName("resource-image"))).forEach(function(item) { item.addEventListener("click", function() { deselectImages(); item.classList.add("selected"); invokePython("select_image", item.className.match(/resource-image(\d\d)/)[1]); }); }); images[0].classList.add("selected"); (videos = [].slice.call(document.getElementsByClassName("resource-video"))).forEach(function(item) { item.addEventListener("click", function() { deselectVideos(); item.classList.add("selected"); invokePython("select_video", item.className.match(/resource-video(\d)/)[1]); }); }); videos[0].classList.add("selected"); }, 1000); """)) selected_image = None def select_image(filename): global selected_image selected_image = resize(PIL.Image.open('demo/images/%s.png' % filename).convert("RGB")) input_image_widget.clear_output(wait=True) with input_image_widget: display(HTML('Image')) input_image_widget.remove_class('uploaded') output.register_callback("notebook.select_image", select_image) selected_video = None def select_video(filename): global selected_video selected_video = 'demo/videos/%s.mp4' % filename input_video_widget.clear_output(wait=True) with input_video_widget: display(HTML('Video')) input_video_widget.remove_class('uploaded') output.register_callback("notebook.select_video", select_video) def resize(image, size=(256, 256)): w, h = image.size d = min(w, h) r = ((w - d) // 2, (h - d) // 2, (w + d) // 2, (h + d) // 2) return image.resize(size, resample=PIL.Image.LANCZOS, box=r) def upload_image(change): global selected_image for name, file_info in upload_input_image_button.value.items(): content = file_info['content'] if content is 
not None: selected_image = resize(PIL.Image.open(io.BytesIO(content)).convert("RGB")) input_image_widget.clear_output(wait=True) with input_image_widget: display(selected_image) input_image_widget.add_class('uploaded') display(Javascript('deselectImages()')) upload_input_image_button.observe(upload_image, names='value') def upload_video(change): global selected_video for name, file_info in upload_input_video_button.value.items(): content = file_info['content'] if content is not None: selected_video = 'user/' + name preview = resize(PIL.Image.fromarray(thumbnail(content)).convert("RGB")) input_video_widget.clear_output(wait=True) with input_video_widget: display(preview) input_video_widget.add_class('uploaded') display(Javascript('deselectVideos()')) with open(selected_video, 'wb') as video: video.write(content) upload_input_video_button.observe(upload_video, names='value') def change_model(change): if model.value.startswith('vox'): warning.remove_class('warn') else: warning.add_class('warn') model.observe(change_model, names='value') def generate(button): main.layout.display = 'none' loading.layout.display = '' filename = model.value + ('' if model.value == 'fashion' else '-cpk') + '.pth.tar' if not os.path.isfile(filename): download = requests.get(requests.get('https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key=https://yadi.sk/d/lEw8uRm140L_eQ&path=/' + filename).json().get('href')) with open(filename, 'wb') as checkpoint: checkpoint.write(download.content) reader = imageio.get_reader(selected_video, mode='I', format='FFMPEG') fps = reader.get_meta_data()['fps'] driving_video = [] for frame in reader: driving_video.append(frame) generator, kp_detector = load_checkpoints(config_path='config/%s-256.yaml' % model.value, checkpoint_path=filename) predictions = make_animation( skimage.transform.resize(numpy.asarray(selected_image), (256, 256)), [skimage.transform.resize(frame, (256, 256)) for frame in driving_video], generator, kp_detector, 
relative=relative.value, adapt_movement_scale=adapt_movement_scale.value ) if selected_video.startswith('user/') or selected_video == 'demo/videos/0.mp4': imageio.mimsave('temp.mp4', [img_as_ubyte(frame) for frame in predictions], fps=fps) FFmpeg(inputs={'temp.mp4': None, selected_video: None}, outputs={'output.mp4': '-c copy -y'}).run() else: imageio.mimsave('output.mp4', [img_as_ubyte(frame) for frame in predictions], fps=fps) loading.layout.display = 'none' complete.layout.display = '' with output_widget: display(HTML('<video id="left" controls src="data:video/mp4;base64,%s" />' % b64encode(open('output.mp4', 'rb').read()).decode())) with comparison_widget: display(HTML('<video id="right" muted src="data:video/mp4;base64,%s" />' % b64encode(open(selected_video, 'rb').read()).decode())) display(Javascript(""" (function(left, right) { left.addEventListener("play", function() { right.play(); }); left.addEventListener("pause", function() { right.pause(); }); left.addEventListener("seeking", function() { right.currentTime = left.currentTime; }); })(document.getElementById("left"), document.getElementById("right")); """)) generate_button.on_click(generate) loading.layout.display = 'none' complete.layout.display = 'none' select_image('00') select_video('0') ```
github_jupyter
<a href="https://colab.research.google.com/github/Jun-629/20MA573/blob/master/src/bsm_price_change.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

**Consider** a European option with
- call type
- strike = 110
- maturity = T

underlying a Gbm stock with
- initial: 100
- interest rate: 4.75%
- vol ratio: $\sigma$

We denote this bsm price by $f(\sigma, T)$.

- Let $\sigma = 20\%$ fixed. plot $T \mapsto f(0.2, T)$ when $T$ is ranging over $(0.5, 2)$.
- Let $T = 1$ fixed. plot $\sigma \mapsto f(\sigma, 1)$ when $\sigma$ is ranging over $(.05, 0.5)$
- Describe your observations. Do you think the same behavior is also true for put?
- Could you prove your observations?

```
import numpy as np  # added: bsm_price below uses np but this cell did not import it
import scipy.stats as stats


class VanillaOption:
    """A plain European option contract."""

    def __init__(
            self,
            otype = 1,  # 1: 'call'  -1: 'put'
            strike = 110.,
            maturity = 1.,
            market_price = 10.):
        self.otype = otype
        self.strike = strike
        self.maturity = maturity
        self.market_price = market_price


class Gbm:
    """Geometric Brownian motion stock model."""

    def __init__(self,
                 init_state = 100.,
                 drift_ratio = .0475,
                 vol_ratio = .2):
        self.init_state = init_state
        self.drift_ratio = drift_ratio
        self.vol_ratio = vol_ratio


def bsm_price(self, vanilla_option):
    """Black-Scholes-Merton price of `vanilla_option` under this Gbm.

    Uses the standard d1/d2 formulas; `otype` (+1 call, -1 put) flips the
    signs so one expression covers both option types.
    """
    s0 = self.init_state
    sigma = self.vol_ratio
    r = self.drift_ratio

    otype = vanilla_option.otype
    k = vanilla_option.strike
    maturity = vanilla_option.maturity

    d1 = (np.log(s0 / k) + (r + 0.5 * sigma ** 2) * maturity) / (sigma * np.sqrt(maturity))
    d2 = d1 - sigma * np.sqrt(maturity)

    return (otype * s0 * stats.norm.cdf(otype * d1)
            - otype * np.exp(-r * maturity) * k * stats.norm.cdf(otype * d2))


# Attach the pricer as a method of Gbm (notebook-style monkey patch).
Gbm.bsm_price = bsm_price
```

**Soln:**

- Let $\sigma = 20\%$ fixed. plot $T \mapsto f(0.2, T)$ when $T$ is ranging over $(0.5, 2)$.
```
import numpy as np
import matplotlib.pyplot as plt

# f(0.2, T) for T in (0.5, 2.0): call price as a function of maturity.
T = np.arange(0.5, 2.1, 0.1)
gbm1 = Gbm()
option1 = VanillaOption(otype=1, strike=110, maturity=T)
BS_price1 = gbm1.bsm_price(option1)
plt.plot(T, BS_price1, label = 'BSM price of f(0.2, T=(0.5,2.0))')
plt.legend()
```

- Let $T = 1$ fixed. plot $\sigma \mapsto f(\sigma, 1)$ when $\sigma$ is ranging over $(.05, 0.5)$

```
# f(sigma, 1) for sigma in (0.05, 0.5): call price as a function of volatility.
sigma = np.arange(0.05, 0.53, 0.03)
gbm2 = Gbm(100., .0475, sigma)
option2 = VanillaOption()
BS_price2 = gbm2.bsm_price(option2)
plt.plot(sigma, BS_price2, label = 'BSM price of f(σ=(0.05,0.5),1)')
plt.legend()
```

__Observation:__

**1.** For fixed volatility $\sigma$, the bsm price of the call option increases as the maturity increases.

**2.** For fixed maturity, the bsm price of the call option increases as the volatility $\sigma$ increases.

The put option shares the same conclusion in **observation 2.** as the call option.

__Pf:__

Firstly, we will show a counter-example to **observation 1.** for the put option.

```
# Put price over the wider maturity range T in (0.2, 4.0): not monotone in T.
# (The original plot label wrongly said T=(0.5,2.0) and omitted "put".)
T = np.arange(0.2, 4., 0.1)
gbm1 = Gbm()
option3 = VanillaOption(otype=-1, strike=110, maturity=T)
BS_price1 = gbm1.bsm_price(option3)
plt.plot(T, BS_price1, label = 'BSM price of put f(0.2, T=(0.2,4.0))')
plt.legend()
```

The plot shows that when the maturity is large enough, the bsm price of the put option decreases as the maturity increases.

Now we will prove **observation 2.** and the corresponding conclusion for the put option. By the put-call parity
$$C_t - P_t = S(t) - Ke^{-r(T-t)},$$
when $C_t$ increases as the volatility increases, $P_t$ has to increase as well, since $S(t) - Ke^{-r(T-t)}$ does not depend on $\sigma$ — so it suffices to prove **observation 2.** for the call.
$$C_t = \mathbb E [e^{-r(T-t)} (S(T) - K)^+] = S_t \Phi(d_1) - K e^{-r(T-t)} \Phi(d_2),$$
positive, which means the BSM price increases with the increasing of volatility $\sigma$. __Q.E.D.__
github_jupyter
# Uploading a Qiskit runtime program <div class="alert alert-block alert-info"> <b>Note:</b> Qiskit Runtime allows authorized users to upload runtime programs. Access to the Qiskit Runtime service may not mean you have access to upload a runtime program. </div> Here we provide an overview on how to construct and upload a runtime program. A runtime program is a piece of Python code that lives in the cloud and can be invoked by passing in just its parameters. Runtime programs are private by default, which means only you can see and access your programs. Some authorized users can also mark their programs as public, making them visible and accessible by everyone. ## Constructing a runtime program Below is a template of a runtime program. You can find the template file in the [`qiskit-ibmq-provider`](https://github.com/Qiskit/qiskit-ibmq-provider/blob/master/qiskit/providers/ibmq/runtime/program/program_template.py) repository. ``` import sys import json from qiskit.providers.ibmq.runtime import UserMessenger, ProgramBackend def program(backend: ProgramBackend, user_messenger: UserMessenger, **kwargs): """Function that does classical-quantum calculation.""" # UserMessenger can be used to publish interim results. user_messenger.publish("This is an interim result.") return "final result" def main(backend: ProgramBackend, user_messenger: UserMessenger, **kwargs): """This is the main entry point of a runtime program. The name of this method must not change. It also must have ``backend`` and ``user_messenger`` as the first two positional arguments. Args: backend: Backend for the circuits to run on. user_messenger: Used to communicate with the program user. kwargs: User inputs. """ # Massage the input if necessary. result = program(backend, user_messenger, **kwargs) # Final result can be directly returned return result ``` Each runtime program must have a `main()` function, which serves as the entry point to the program. 
This function must have `backend` and `user_messenger` as the first two positional arguments: - `backend` is an instance of [`ProgramBackend`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.ProgramBackend.html#qiskit.providers.ibmq.runtime.ProgramBackend) and has a [`run()`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.ProgramBackend.run.html#qiskit.providers.ibmq.runtime.ProgramBackend.run) method that can be used to submit circuits. - `user_messenger` is an instance of [`UserMessenger`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.UserMessenger.html#qiskit.providers.ibmq.runtime.UserMessenger) and has a [`publish()`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.UserMessenger.publish.html#qiskit.providers.ibmq.runtime.UserMessenger.publish) method that can be used to send interim and final results to the program user. This method takes a parameter `final` that indicates whether it's a final result. However, it is recommended to return the final result directly from the `main()` function. Currently only final results are stored after a program execution finishes. There are several runtime programs in the `qiskit_runtime` directory in this repository. `qiskit_runtime/sample_program/sample_program.py` is one of them. It is a sample runtime program that submits random circuits for user-specified iterations: ``` """A sample runtime program that submits random circuits for user-specified iterations.""" import random from qiskit import transpile from qiskit.circuit.random import random_circuit def prepare_circuits(backend): """Generate a random circuit. Args: backend: Backend used for transpilation. Returns: Generated circuit. """ circuit = random_circuit(num_qubits=5, depth=4, measure=True, seed=random.randint(0, 1000)) return transpile(circuit, backend) def main(backend, user_messenger, **kwargs): """Main entry point of the program. 
Args: backend: Backend to submit the circuits to. user_messenger: Used to communicate with the program consumer. kwargs: User inputs. """ iterations = kwargs.pop('iterations', 5) for it in range(iterations): qc = prepare_circuits(backend) result = backend.run(qc).result() user_messenger.publish({"iteration": it, "counts": result.get_counts()}) return "All done!" ``` ## Data serialization Runtime programs live in the cloud, and JSON is the standard way of passing data to and from cloud services. Therefore, when a user invokes a runtime program, the input parameters must first be serialized into the JSON format and then deserialized once received by the server. By default, this serialization and deserialization is done automatically using the [`RuntimeEncoder`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.RuntimeEncoder.html#qiskit.providers.ibmq.runtime.RuntimeEncoder) and [`RuntimeDecoder`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.RuntimeDecoder.html#qiskit.providers.ibmq.runtime.RuntimeDecoder) classes. ### Custom classes `RuntimeEncoder` and `RuntimeDecoder` only support types commonly used in Qiskit, such as complex numbers and numpy arrays. If your program uses custom Python classes for input or output, these two methods only have partial support for that. Your custom class should have the following methods: - a `to_json()` method that returns a JSON string representation of the object - a `from_json()` class method that accepts a JSON string and returns the corresponding object. When `RuntimeEncoder` serializes a Python object, it checks whether the object has a `to_json()` method. If so, it calls the method to serialize the object. `RuntimeDecoder`, however, does _not_ invoke `from_json()` to convert the data back because it doesn't know how to import your custom class. Therefore the deserialization needs to be done explicitly. Here is an example of serializing and deserializing a custom class. 
First we define the class `MyCustomClass`: ``` import json class MyCustomClass: def __init__(self, foo, bar): self._foo = foo self._bar = bar def to_json(self): """Convert this instance to a JSON string.""" return json.dumps({"foo": self._foo, "bar": self._bar}) @classmethod def from_json(cls, json_str): """Return a MyCustomClass instance based on the input JSON string.""" return cls(**json.loads(json_str)) ``` Note that it has the `to_json()` method that converts a `MyCustomClass` instance to a JSON string, and a `from_json()` class method that converts a JSON string back to a `MyCustomClass` instance. Here is how one would use `MyCustomClass` as an **input** to your program: ``` program_inputs = { 'my_obj': MyCustomClass("my foo", "my bar") } options = {"backend_name": "ibmq_qasm_simulator"} job = provider.runtime.run(program_id="some-program", options=options, inputs=program_inputs ) ``` Since `MyCustomClass` has a `to_json()` method, the method is automatically called to convert the instance to a JSON string when `provider.runtime.run()` is invoked. Your program can then use the `from_json()` method to restore the JSON string back to a `MyCustomClass` instance: ``` def main(backend, user_messenger, **kwargs): """Main entry point of the program.""" my_obj_str = kwargs.pop('my_obj') my_obj = MyCustomClass.from_json(my_obj_str) ``` Similarly, if you pass a `MyCustomClass` instance as an **output** of your program, it is automatically converted to a JSON string (via the `to_json()` method): ``` def main(backend, user_messenger, **kwargs): """Main entry point of the program.""" return MyCustomClass("this foo", "that bar") ``` Now when the user of this program calls `job.result()`, they will receive a JSON string rather than a `MyCustomClass` instance. The user can convert the string back to `MyCustomClass` themselves: ``` output_str = job.result() output = MyCustomClass.from_json(output_str) ``` Alternatively, you can provide a decoder for the users. 
Your decoder class should inherit [`ResultDecoder`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.ResultDecoder.html#qiskit.providers.ibmq.runtime.ResultDecoder) and overwrites the `decode()` method: ``` from qiskit.providers.ibmq.runtime import ResultDecoder class MyResultDecoder(ResultDecoder): @classmethod def decode(cls, data): data = super().decoded(data) # Perform any preprocessing. return MyCustomClass.from_json(data) ``` Your user can then use this `MyResultDecoder` to decode the result of your program: ``` output = job.result(decoder=MyResultDecoder) ``` ## Testing your runtime program You can test your runtime program using a local simulator or a real backend before uploading it. Simply import and invoke the `main()` function of your program and pass the following parameters: - the `backend` instance you want to use - a new `UserMessenger` instance. - program input parameters that are serialized and then deserialized using the correct encoder and decoder. While this may seem redundant, it is to ensure input parameters can be passed to your program properly once it's uploaded to the cloud. The following example tests the `sample-program` program we saw earlier. It uses the `qasm_simulator` from Qiskit Aer as the test backend. It serializes and deserializes input data using `RuntimeEncoder` and `RuntimeDecoder`, which are the default en/decoders used by runtime. 
``` import sys import json sys.path.insert(0, '..') # Add qiskit_runtime directory to the path
Below shows the metadata JSON file of the `sample-program` program as an example: ``` import os sample_program_json = os.path.join(os.getcwd(), "../qiskit_runtime/sample_program/sample_program.json") with open(sample_program_json, 'r') as file: data = file.read() print(data) ``` ## Uploading a program You can use the [`IBMRuntimeService.upload_program()`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.IBMRuntimeService.html#qiskit.providers.ibmq.runtime.IBMRuntimeService.upload_program) method to upload your program. In the example below, the program data lives in the file `sample_program.py`, and its metadata, as described above, is in `sample_program.json`. ``` import os from qiskit import IBMQ IBMQ.load_account() provider = IBMQ.get_provider(project='qiskit-runtime') # Substitute with your provider. sample_program_data = os.path.join(os.getcwd(), "../qiskit_runtime/sample_program/sample_program.py") sample_program_json = os.path.join(os.getcwd(), "../qiskit_runtime/sample_program/sample_program.json") program_id = provider.runtime.upload_program( data=sample_program_data, metadata=sample_program_json ) print(program_id) ``` `upload_program()` returns a program ID, which uniquely identifies the program. It is derived from the program name, usually with a randomly-generated suffix. Program ID is needed for users to invoke the program ## Updating a program You can use the [`IBMRuntimeService.update_program()`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.IBMRuntimeService.update_program.html#qiskit.providers.ibmq.runtime.IBMRuntimeService.update_program) method to update the source code and/or metadata of a program: ``` provider.runtime.update_program(program_id=program_id, description="A new description.") ``` This method allows you to make changes to your program while retaining the same program ID. 
## Deleting a program You can use the [`IBMRuntimeService.delete_program()`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.IBMRuntimeService.html#qiskit.providers.ibmq.runtime.IBMRuntimeService.delete_program) method to delete a program. Only the person who uploaded the program can delete it.
github_jupyter
# Installing Tensorflow We will creat an environment for tensorflow that will activate every time we use th package ### NOTE: it will take some time! ``` %pip install --upgrade pip %pip install tensorflow==2.5.0 ``` #### If you see the message below, restart the kernel please from the panel above (Kernels>restart)! 'Note: you may need to restart the kernel to use updated packages.' #### Let's check if you have everything! ``` import tensorflow as tf print(tf.__version__) reachout='Please repeat the steps above. If it still does not work, reach out to me (asa279@cornell.edu)' try: import tensorflow print('tensorflow is all good!') except: print("An exception occurred in tensorflow installation."+reachout) try: import keras print('keras is all good!') except: print("An exception occurred in keras installation."+reachout) ``` ### Now let's explore tensorflow! From its name tensorflow stores constants as tensor objects! Let's create our first constant! ``` import tensorflow as tf import tensorflow.compat.v1 as tf tf.disable_v2_behavior() myfirstconst = tf.constant('Hello World') myfirstconst x = tf.constant(130.272) x ``` ### TF Sessions Let's create a TensorFlow Session. It can be thought of as a class for running TensorFlow operations. The session encapsulates the environment in which operations take place. Let's do a quick example: ``` a = tf.constant(1) b = tf.constant(5) with tf.Session() as Session: print('TF simple Operations') print('Multiply',Session.run(a*b)) print('Divide',Session.run(a/b)) print('Add',Session.run(a+b)) print('Subtract',Session.run(b-a)) ``` #### Now let's multiply a matrix ``` import numpy as np m = np.array([[1.0,2.0]]) n = np.array([[3.0],[4.0]]) multi = tf.matmul(m,n) multi with tf.Session() as Session: res = Session.run(multi) print(res) ``` ### TF Variables Sometimes you want to define a variable rsulting from operations. **tf.variable is ideal for this case!** Let's see how to use it! ``` #We have to start a session! 
sess = tf.InteractiveSession() atensor = tf.random_uniform((2,2),0,1) atensor var = tf.Variable(initial_value=atensor) var try: with tf.Session() as Session: res = Session.run(var) print(res) except: print("error!") initialize = tf.global_variables_initializer() initialize.run() var.eval() sess.run(var) ``` ## Now let's custom build our first neural networks! ``` xd = np.linspace(0,10,100) + np.random.uniform(-3,.5,100) yd = np.linspace(0,10,100) + np.random.uniform(-.5,2,100) import matplotlib.pyplot as plt plt.plot(xd,yd,'o') ``` ### Let's define our variables here $y=m*x+b$ ``` #Let's intialize with a guess m = tf.Variable(1.0) b = tf.Variable(0.1) #Let's build or objective function! #initialize error e=0 for x,y in zip(xd,yd): #our model y_pred = m*x + b # our error e += (y-y_pred)**2 ## tensorflow optimizer optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0001) ## we want to minimize error training = optimizer.minimize(e) ## initilize our variables with tensorflow initalize = tf.global_variables_initializer() #start the session for 1000 epochs! with tf.Session() as sess: sess.run(initalize) epochs = 100 for i in range(epochs): sess.run(training) # Get results mf, bf = sess.run([m,b]) print("The slope is {} and the intercept is {}".format(mf, bf)) #Let's evalute our results x_v = np.linspace(-3,11,300) y_v = mf*x_v + bf plt.plot(x_v,y_v,'r') plt.plot(xd,yd,'o') ```
github_jupyter
# Classifying Fashion-MNIST Now it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world. <img src='assets/fashion-mnist-sprite.png' width=500px> In this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this. First off, let's load the dataset through torchvision. ``` import torch from torchvision import datasets, transforms import helper # Define a transform to normalize the data transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))] ) # Download and load the training data trainset = datasets.FashionMNIST( "~/.pytorch/F_MNIST_data/", download=True, train=True, transform=transform ) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) # Download and load the test data testset = datasets.FashionMNIST( "~/.pytorch/F_MNIST_data/", download=True, train=False, transform=transform ) testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) ``` Here we can see one of the images. ``` image, label = next(iter(trainloader)) helper.imshow(image[0, :]); ``` ## Building the network Here you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. 
We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers. ``` from torch import nn, optim import torch.nn.functional as F # TODO: Define your network architecture here class Classifier(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(784, 256) self.fc2 = nn.Linear(256, 128) self.fc3 = nn.Linear(128, 64) self.fc4 = nn.Linear(64, 10) def forward(self, x): # make sure input tensor is flattened x = x.view(x.shape[0], -1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = F.relu(self.fc3(x)) x = F.log_softmax(self.fc4(x), dim=1) return x ``` # Train the network Now you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) (something like `nn.CrossEntropyLoss` or `nn.NLLLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`). Then write the training code. Remember the training pass is a fairly straightforward process: * Make a forward pass through the network to get the logits * Use the logits to calculate the loss * Perform a backward pass through the network with `loss.backward()` to calculate the gradients * Take a step with the optimizer to update the weights By adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4. 
``` # TODO: Create the network, define the criterion and optimizer model = Classifier() criterion = nn.NLLLoss() optimizer = optim.Adam(model.parameters(), lr=0.003) # TODO: Train the network here epochs = 5 for e in range(epochs): running_loss = 0 for images, labels in trainloader: log_ps = model(images) loss = criterion(log_ps, labels) optimizer.zero_grad() loss.backward() optimizer.step() running_loss += loss.item() else: print(f"Training loss: {running_loss/len(trainloader)}") %matplotlib inline %config InlineBackend.figure_format = 'retina' import helper # Test out your network! dataiter = iter(testloader) images, labels = dataiter.next() img = images[1] # TODO: Calculate the class probabilities (softmax) for img ps = torch.exp(model(img)) # Plot the image and probabilities helper.view_classify(img, ps, version="Fashion") ```
github_jupyter
# Classification and Prediction in GenePattern Notebook This notebook will show you how to use k-Nearest Neighbors (kNN) to build a predictor, use it to classify leukemia subtypes, and assess its accuracy in cross-validation. ### K-nearest-neighbors (KNN) KNN classifies an unknown sample by assigning it the phenotype label most frequently represented among the k nearest known samples. Additionally, you can select a weighting factor for the 'votes' of the nearest neighbors. For example, one might weight the votes by the reciprocal of the distance between neighbors to give closer neighors a greater vote. <h2>1. Log in to GenePattern</h2> <ul> <li>Select Broad Institute as the server</li> <li>Enter your username and password.</li> <li>Click <em>Login to GenePattern</em>.</li> <li>When you are logged in, you can click the - button in the upper right hand corner to collapse the cell.</li> <li>Alternatively, if you are prompted to Login as your username, just click that button and give it a couple seconds to authenticate.</li> </ul> ``` # Requires GenePattern Notebook: pip install genepattern-notebook import gp import genepattern # Username and password removed for security reasons. genepattern.GPAuthWidget(genepattern.register_session("https://gp-beta-ami.genepattern.org/gp", "", "")) ``` ## 2. Run k-Nearest Neighbors Cross Validation <div class="alert alert-info"> - Drag [BRCA_HUGO_symbols.preprocessed.gct](https://datasets.genepattern.org/data/ccmi_tutorial/2017-12-15/BRCA_HUGO_symbols.preprocessed.gct) to the **data filename** field below. - Drag [BRCA_HUGO_symbols.preprocessed.cls](https://datasets.genepattern.org/data/ccmi_tutorial/2017-12-15/BRCA_HUGO_symbols.preprocessed.cls) to the **class filename** field. - Click **Run**. 
``` knnxvalidation_task = gp.GPTask(genepattern.get_session(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00013') knnxvalidation_job_spec = knnxvalidation_task.make_job_spec() knnxvalidation_job_spec.set_parameter("data.filename", "") knnxvalidation_job_spec.set_parameter("class.filename", "") knnxvalidation_job_spec.set_parameter("num.features", "10") knnxvalidation_job_spec.set_parameter("feature.selection.statistic", "0") knnxvalidation_job_spec.set_parameter("min.std", "") knnxvalidation_job_spec.set_parameter("num.neighbors", "3") knnxvalidation_job_spec.set_parameter("weighting.type", "1") knnxvalidation_job_spec.set_parameter("distance.measure", "1") knnxvalidation_job_spec.set_parameter("pred.results.file", "<data.filename_basename>.pred.odf") knnxvalidation_job_spec.set_parameter("feature.summary.file", "<data.filename_basename>.feat.odf") genepattern.GPTaskWidget(knnxvalidation_task) ``` ## 3. View a list of features used in the prediction model <div class="alert alert-info"> - Select the XXXXXX.KNNXvalidation job result cell by clicking anywhere in it. - Click on the i icon next to the `<filename>.**feat**.odf` file - Select "Send to DataFrame" - You will see a new cell created below the job result cell. - Execute this cell. - You will see a table of features, descriptions, and the number of times each feature was included in a model in a cross-validation loop. ## 4. View prediction results <div class="alert alert-info"> - For the **prediction results file** parameter below, click the down arrow in the file input box. - Select the `BRCA_HUGO_symbols.preprocessed.pred.odf` file. - Click **Run**. - You will see the prediction results in an interactive viewer. 
``` predictionresultsviewer_task = gp.GPTask(genepattern.get_session(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.visualizer:00019') predictionresultsviewer_job_spec = predictionresultsviewer_task.make_job_spec() predictionresultsviewer_job_spec.set_parameter("prediction.results.file", "") genepattern.GPTaskWidget(predictionresultsviewer_task) ``` ## References Breiman, L., Friedman, J. H., Olshen, R. A., & Stone, C. J. 1984. [Classification and regression trees](https://www.amazon.com/Classification-Regression-Wadsworth-Statistics-Probability/dp/0412048418?ie=UTF8&*Version*=1&*entries*=0). Wadsworth & Brooks/Cole Advanced Books & Software, Monterey, CA. Golub, T.R., Slonim, D.K., Tamayo, P., Huard, C., Gaasenbeek, M., Mesirov, J.P., Coller, H., Loh, M., Downing, J.R., Caligiuri, M.A., Bloomfield, C.D., and Lander, E.S. 1999. Molecular Classification of Cancer: Class Discovery and Class Prediction by Gene Expression. [Science 286:531-537](http://science.sciencemag.org/content/286/5439/531.long). Lu, J., Getz, G., Miska, E.A., Alvarez-Saavedra, E., Lamb, J., Peck, D., Sweet-Cordero, A., Ebert, B.L., Mak, R.H., Ferrando, A.A, Downing, J.R., Jacks, T., Horvitz, H.R., Golub, T.R. 2005. MicroRNA expression profiles classify human cancers. [Nature 435:834-838](http://www.nature.com/nature/journal/v435/n7043/full/nature03702.html). Rifkin, R., Mukherjee, S., Tamayo, P., Ramaswamy, S., Yeang, C-H, Angelo, M., Reich, M., Poggio, T., Lander, E.S., Golub, T.R., Mesirov, J.P. 2003. An Analytical Method for Multiclass Molecular Cancer Classification. [SIAM Review 45(4):706-723](http://epubs.siam.org/doi/abs/10.1137/S0036144502411986). Slonim, D.K., Tamayo, P., Mesirov, J.P., Golub, T.R., Lander, E.S. 2000. Class prediction and discovery using gene expression data. In [Proceedings of the Fourth Annual International Conference on Computational Molecular Biology (RECOMB)](http://dl.acm.org/citation.cfm?id=332564). ACM Press, New York. pp. 263-272.
github_jupyter
# Time Series Analysis 1 In the first lecture, we are mainly concerned with how to manipulate and smooth time series data. ``` %matplotlib inline import matplotlib.pyplot as plt import os import time import numpy as np import pandas as pd ! python3 -m pip install --quiet gmaps import gmaps import gmaps.datasets ``` ## Dates and times ### Timestamps ``` now = pd.to_datetime('now') now now.year, now.month, now.week, now.day, now.hour, now.minute, now.second, now.microsecond now.month_name(), now.day_name() ``` ### Formatting timestamps See format [codes](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior) ``` now.strftime('%I:%m%p %d-%b-%Y') ``` ### Parsing time strings #### `pandas` can handle standard formats ``` ts = pd.to_datetime('6-Dec-2018 4:45 PM') ts ``` #### For unusual formats, use `strptime` ``` from datetime import datetime ts = datetime.strptime('10:11PM 02-Nov-2018', '%I:%m%p %d-%b-%Y') ts ``` ### Intervals ``` then = pd.to_datetime('now') time.sleep(5) now = pd.to_datetime('now') now - then ``` ### Date ranges A date range is just a collection of time stamps. ``` dates = pd.date_range(then, now, freq='s') dates (then - pd.to_timedelta('1.5s')) in dates ``` ### Periods Periods are intervals, not a collection of timestamps. ``` span = dates.to_period() span (then + pd.to_timedelta('1.5s')) in span ``` ## Lag and lead with `shift` We will use a periodic time series as an example. Periodicity is important because many biological phenomena are linked to natural periods (seasons, diurnal, menstrual cycle) or are intrinsically periodic (e.g. EEG, EKG measurements). ``` index = pd.date_range('1-1-2018', '31-1-2018', freq='12h') ``` You can shift by periods or by frequency. Shifting by frequency maintains boundary data. 
``` wave = pd.Series(np.sin(np.arange(len(index))), index=index) wave.shift(periods=1).head(3) wave.shift(periods=1).tail(3) wave.shift(freq=pd.Timedelta(1, freq='D')).head(3) wave.shift(freq=pd.Timedelta(1, freq='D')).tail(3) ``` #### Visualizing shifts ``` wave.plot() pass wave.plot(c='blue') wave.shift(-1).plot(c='red') pass wave.plot(c='blue') wave.shift(1).plot(c='red') pass (wave - wave.shift(-6)).plot(c='blue') (wave - wave.shift(-3)).plot(c='red') pass ``` Embedding the time series with its lagged version reveals its periodic nature. ``` plt.scatter(wave, wave.shift(-1)) pass ``` ### Find percent change from previous period ``` wave.pct_change().head() ``` `pct_change` is just a convenience wrapper around the use of `shift` ``` ((wave - wave.shift(-1, freq='12h'))/wave).head() ``` ## Resampling and window functions The `resample` and window method have the same syntax as `groupby`, in that you can apply an aggregate function to the new intervals. ### Resampling Sometimes there is a need to generate new time intervals, for example, to regularize irregularly timed observations. #### Down-sampling ``` index = pd.date_range(pd.to_datetime('1-1-2018'), periods=365, freq='d') series = pd.Series(np.arange(len(index)), index=index) series.head() sereis_weekly_average = series.resample('w').mean() sereis_weekly_average.head() sereis_monthly_sum = series.resample('m').sum() sereis_monthly_sum.head() sereis_10day_median = series.resample('10d').median() sereis_10day_median.head() ``` #### Up-sampling For up-sampling, we need to figure out what we want to do with the missing values. The usual choices are forward fill, backward fill, or interpolation using one of many built-in methods. ``` upsampled = series.resample('12h') upsampled.asfreq()[:5] upsampled.ffill().head() upsampled.bfill().head() upsampled.interpolate('linear').head() ``` ### Window functions Window functions are typically used to smooth time series data. 
There are 3 variants - rolling, expanding and exponentially weighted. We use the Nile flooding data for these examples. ``` df = pd.read_csv('data/nile.csv', index_col=0) df.head() df.plot() pass ``` #### Rolling windows generate windows of a specified width ``` ts = pd.DataFrame(dict(ts=np.arange(5))) ts['rolling'] = ts.rolling(window=3).sum() ts rolling10 = df.rolling(window=10) rolling100 = df.rolling(window=100) df.plot() plt.plot(rolling10.mean(), c='orange') plt.plot(rolling100.mean(), c='red') pass ``` #### Expanding windows grow as the time series progresses ``` ts['expanding'] = ts.ts.expanding().sum() ts df.plot() plt.plot(df.expanding(center=True).mean(), c='orange') plt.plot(df.expanding().mean(), c='red') pass ``` #### Exponentially weighted windows place more weight on center of mass ``` n = 10 xs = np.arange(n, dtype='float')[::-1] xs ``` Exponentially weighted windows without adjustment. ``` pd.Series(xs).ewm(alpha=0.8, adjust=False).mean() ``` Re-implementation for insight. ``` α = 0.8 ys = np.zeros_like(xs) ys[0] = xs[0] for i in range(1, len(xs)): ys[i] = (1-α)*ys[i-1] + α*xs[i] ys ``` Exponentially weighted windows with adjustment (default) ``` pd.Series(xs).ewm(alpha=0.8, adjust=True).mean() ``` Re-implementation for insight. ``` α = 0.8 ys = np.zeros_like(xs) ys[0] = xs[0] for i in range(1, len(xs)): ws = np.array([(1-α)**(i-t) for t in range(i+1)]) ys[i] = (ws * xs[:len(ws)]).sum()/ws.sum() ys df.plot() plt.plot(df.ewm(alpha=0.8).mean(), c='orange') plt.plot(df.ewm(alpha=0.2).mean(), c='red') pass ``` Alternatives to $\alpha$ Using `span` $$ \alpha = \frac{2}{\text{span} + 1} $$ Using `halflife` $$ \alpha = 1 - e^\frac{-\log{2}}{t_{1/2}} $$ Using `com` $$ \alpha = \frac{1}{1 + \text{com}} $$ ``` df.plot() plt.plot(df.ewm(span=10).mean(), c='orange') plt.plot(1+ df.ewm(alpha=2/11).mean(), c='red') # offfset for visibility pass ``` ## Correlation between time series Suppose we had a reference time series. 
It is often of interest to know how any particular time series is correlated with the reference. Often the reference might be a population average, and we want to see where a particular time series deviates in behavior. ``` ! python3 -m pip install --quiet pandas_datareader import pandas_datareader.data as web ``` We will look at the correlation of some stocks. ``` QQQ tracks Nasdaq MSFT is Microsoft GOOG is Google BP is British Petroleum ``` We expect that the technology stocks should be correlated with Nasdaq, but maybe not BP. ``` df = web.DataReader(['QQQ', 'MSFT','GOOG', 'BP'], 'stooq') # api_key=os.environ['IEX_SECRET_KEY']) df = df[['Close']].reset_index() df df = df.set_index(( 'Date', '')) df.head() df.columns df.rolling(100).corr(df[('Close', 'QQQ')]).plot() pass ``` ## Visualizing space and time data Being able to visualize events in space and time can be impressive. With Python, often you need a trivial amount of code to produce an impressive visualization. For example, let's generate a heatmap of crimes in Sacramento in 2006, and highlight the crimes committed 10 seconds before midnight. See the [gmaps](https://github.com/pbugnion/gmaps) package for more information. ``` sacramento_crime = pd.read_csv('data/SacramentocrimeJanuary2006.csv', index_col=0) sacramento_crime.index = pd.to_datetime(sacramento_crime.index) sacramento_crime.head() gmaps.configure(api_key=os.environ["GOOGLE_API_KEY"]) locations = sacramento_crime[['latitude', 'longitude']] late_locations = sacramento_crime.between_time('23:59', '23:59:59')[['latitude', 'longitude']] fig = gmaps.figure() fig.add_layer(gmaps.heatmap_layer(locations)) markers = gmaps.marker_layer(late_locations) fig.add_layer(markers) fig ```
github_jupyter
Your name here. Your Workshop section here. # Homework 3: Arrays, File I/O and Plotting **Submit this notebook to bCourses to receive a grade for this Workshop.** Please complete homework activities in code cells in this iPython notebook. Be sure to comment your code well so that anyone who reads it can follow it and use it. Enter your name in the cell at the top of the notebook. When you are ready to submit it, you should download it as a python notebook (click "File", "Download as", "Notebook (.ipynb)") and upload it on bCourses under the Assignments tab. Please also save the notebook as PDF and upload to bCourses. ## Problem 1: Sunspots [Adapted from Newman, Exercise 3.1] At <a href="http://www-personal.umich.edu/~mejn/computational-physics/sunspots.txt">this link</a> (and also in your current directory on datahub) you will find a file called `sunspots.txt`, which contains the observed number of sunspots on the Sun for each month since January 1749. The file contains two columns of numbers, the first being the month and the second being the sunspot number. a. Write a program that reads in the data and makes a graph of sunspots as a function of time. Adjust the $x$ axis so that the data fills the whole horizontal width of the graph. b. Modify your code to display two subplots in a single figure: The plot from Part 1 with all the data, and a second subplot with the first 1000 data points on the graph. c. Write a function `running_average(y, r)` that takes an array or list $y$ and calculates the running average of the data, defined by $$ Y_k = \frac{1}{2r+1} \sum_{m=-r}^r y_{k+m},$$ where $y_k$ are the sunspot numbers in our case. Use this function and modify your second subplot (the one with the first 1000 data points) to plot both the original data and the running average on the same graph, again over the range covered by the first 1000 data points. Use $r=5$, but make sure your program allows the user to easily change $r$. 
The next two parts may require you to google for how to do things. Make a strong effort to do these parts on your own without asking for help. If you do ask for help from a GSI or friend, first ask them to point you to the resource they used, and do your best to learn the necessary techniques from that resource yourself. Finding and learning from online documentation and forums is a very important skill. (Hint: Stack Exchange/Stack Overflow is often a great resource.) d. Add legends to each of your subplots, but make them partially transparent, so that you can still see any data that they might overlap. *Note: In your program, you should only have to change $r$ for the running average in one place to adjust both the graph and the legend.* e. Since the $x$ and $y$ axes in both subplots have the same units, add shared $x$ and $y$ labels to your plot that are centered on the horizontal and vertical dimensions of your figure, respectively. Also add a single title to your figure. When you are finished, your plot should look something close to this: ``` # Don't rerun this snippet of code. # If you accidentally do, uncomment the lines below and rerun #from IPython.display import Image #Image(filename="img/p1_output.png") ``` #### Hints * The running average is not defined for the first and last few points that you're taking a running average over. (Why is that?) Notice, for instance, that the black curve in the plot above doesn't extend quite as far on either side as the red curve. For making your plot, it might be helpful if your `running_average` function returns an array of the $x$-values $x_k$ (or their corresponding indices $k$) along with an array of the $y$-values $Y_k$ that you compute for the running average. * You can use the Latex code `$\pm$` for the $\pm$ symbol in the legend. You can also just write `+/-` if you prefer. 
## Problem 2: Variety Plot In this problem, you will reproduce the following as a single figure with four subplots, as best you can: ``` # Don't rerun this snippet of code. # If you accidentally do, uncomment the lines below and rerun #from IPython.display import Image #Image(filename="img/p2_output.png") ``` Here are some hints and directions for each one: **Upper-left:** This is an image of silicon taken with an electron microscope. You can find the data file `stm.txt` [here](http://www-personal.umich.edu/~mejn/computational-physics/stm.txt) and in your datahub directory, among resources for the [Newman](http://www-personal.umich.edu/~mejn/computational-physics/) text. You may assume that the upper-left of the array is indeed the upper-left of the image. Both axes should run from 0 to 5.5. This subplot uses the `gray` colormap. **Upper-Right:** Matplotlib can plot any list of $(x,y)$ points you give it, including parametric or polar curves. The curve in this subplot is called a "deltoid", and is the result of the equations $$ \begin{align*} x &= 2\cos\theta + \cos2\theta \\ y &= 2\sin\theta - \sin2\theta \end{align*} $$ over a range of $\theta$ from $0$ to $2\pi$. To get the aspect ratio equal with nice spacing around the curve, try one of the following, depending on how you are making your subplots: - if you're using `plt.subplot(...)` to get each subplot (the "state-machine" approach), add the `aspect='equal'` and `adjustable='datalim'` arguments to the deltoid subplot, so your command will look something like `plt.subplot(..., aspect='equal', adjustable='datalim')`. - if you're using `... = plt.subplots(...)` (note the 's'!) or `ax = fig.add_subplot(...)` on a figure `fig` to get subplots with axes objects (the "object-oriented" approach), add the line `ax.set_aspect(aspect='equal', adjustable='datalim')`, where `ax` is the axes object you want to affect. **Lower-Left:** This kind of plot is called a log-log plot, where both axes are on a logarithmic scale. 
Google or look in the matplotlib gallery to learn how to make this kind of plot. The three curves are $y = x$, $y = x^2$, and $y = x^3$, where $x$ ranges over $10^{-1}$ to $10^1$. (Note: You can write powers of ten in python using the shorthand `1e-1` for $10^{-1}$, `1e1` for $10^1$, and so on.) To make the pretty mathematical labels you see in the sample figure above, you can use * `r'$y = x, x^2, x^3$'` for the title * `r'$x$'` for the $x$-axis, and * `r'$y$'` for the $y$-axis. Just put these bits of code as you see them (with the **`r`** outside the quotes!) where you would normally put a string for the title or axes labels. **Lower-Right:** Here you see a density plot with contours of the function $$f(x,y) = \cos^2(\pi\,x\,y ) e^{-\frac{x^2 + 4 y}{8}},$$ over $x$ from -2 to 2 and $y$ from -3 to 0.2. Use `meshgrid` to generate the $x$ and $y$ values. Be careful to make sure that the point $(-2,-3)$ is in the bottom left corner of the plot. You'll need to use both `imshow` and `contour` to generate the density plot and then overlay it with contours. This plot uses the default contour spacing, so you don't need to worry about adjusting that. The colormap is `jet`, matplotlib's current default. (The default colormap will be changing to `viridis` in the next version.) To get the ticks spaced out like you see here, use matplotlib's `xticks` or `set_xticks` functions for the $x$-axis (depending on how you're making your plots), and similar functions for the $y$-axis. You can pass each of these a single argument: a simple list or array of the numbers you want ticked on each axis. **Spacing the subplots:** Once all is said and done and you run `plt.show()`, you may notice your plots are cramped and overlapping each other. Add the line `plt.tight_layout()` before `plt.show()`, and matplotlib will space things out in an attempt to avoid overlapping subplots. ## Problem 3: Wind Statistics In this problem, you'll create a new array that summarizes a larger dataset. 
In the folder for this Homework assignment, you'll find a dataset "`wind.data`", and a description of the dataset "`wind.desc`". Read the description `wind.desc` to understand the format of `wind.data`. Then, write a program which loads the data and generates a new array with the following format: each row should contain the year, month, mean wind speed, min wind speed, and max wind speed, in that order. The means, mins, and maxes should be taken over all days in that particular month, over all the cities that `wind.data` includes. The rows should be in chronological order. Printing your array should give you something like this: [[ 61. 1. 11.38064516 0.5 27.71 ] [ 61. 2. 13.49235119 2.21 29.63 ] [ 61. 3. 11.07236559 1.46 23.45 ] ..., [ 78. 10. 9.23389785 0.21 28.21 ] [ 78. 11. 12.72102778 0.96 30.21 ] [ 78. 12. 12.70357527 0.42 41.46 ]] Next, plot the means, mins, and maxes over time using just a single `plt.plot()` command, along with whatever other commands you need to label and display the figure. (Hint: What happens when you plot a 2D array?) Be sure to label your figure with proper units, etc. The x-axis may be labeled using year/month or just the years (i.e. have ticks along the axis which are labeled with some of the years, like 1961,1964,1967,1970,etc.). There are several ways to accomplish this. One way is to use `plt.xticks()`, which will give you the most explicit control over which points on the x axis get labeled and what labels they get. To understand this function, check out the documentation for `matplotlib.pyplot.xticks` on the `matplotlib` website. ** Just for fun ** If you look at your plot, you'll notice some periodicity in the wind speeds. To better see the period, use the `running_average` function you wrote for the Sunspots problem to plot the running average of the means, mins, and maxes along with the original curves. 
Depending on how you wrote your `running_average` function, it may work immediately on this new 2D array without any modifications! From visual inspection of your running average, how long is the period, and when are wind speeds fastest and slowest in Ireland? #### Hints Depending on how you do this problem, you may find it useful to use boolean indexing along with numpy's `logical_and` function. It's okay to use lists as intermediate steps in generating your array. That is, not everything has to be an array, though it is possible to do this with only arrays. (Challenge!)
github_jupyter
# Pandas Cheat Sheet ## Inspect **df.info( )** - tells you the type of object you have eg object, int, float AND the amount of memory your DataFrame is using up! **df.describe( )** - gives you a series of information about your DataFrame - mean, stdev, count, max, min values... **df.shape** - gives you a tuple of the dimensions of your data | command | description | |:-----------------:|:------------------:| | df.head( ) | shows n first rows | | df.tail( ) | shows n last rows | | df.info( ) | lists columns and their types | | df.describe( ) | calculates descriptive statistics | | df['Series'].value_counts( ) | counts distinct values in a column | | df.count( ) | calculates number of non-empty rows | | df.sum( ) | calculates sum of each column | | df.mean( ) | calculates mean of each column | | df.shape |tuple with the number of rows and columns| | df.values | contents as a NumPy array | ## Selecting Rows and Columns | Operation | Description | |:----------------:|:------------------:| | df[col] |select one column as a Series| | df[[col1, col2]] | select 2+ columns as a DataFrame| | df.loc[row] | select one row as a Series | | df.loc[row, col] | select 2+ rows as a DataFrame| | df.iloc[a:b,c:d] | select rows/columns by index | | df[df[col] > x] | select rows by condition | |df[df[col].between(x, y) | select rows by range | |df[df[col].str.startswith('A')] | apply string function| |df[col].str.lower( ) | apply string function| |df[col] = x | assign to existing or new column| | del df[col]| delete column | | df.columns | list with column names | | df.index | list with row index | | df.iterrows( ) | iterate over rows | ## Plots | command | description | |:--------------:|:------------------:| | df.plot( ) | line plot of each column| | df.plot.bar( ) |one bar for each column | |df.plot.scatter( ) | creates a single scatter plot | | df.hist( ) | draws a histogram for each column | |df.boxplot( ) | draws a boxplot for each column | |df.scatter_matrix( )| draws scatterplot 
matrix | ## Manipulating DataFrames | command | description | |:----------------:|:----------------:| | df.transpose( ) | swaps rows and columns | | df.stack( ) | moves columns to hierarchical row index | | df.unstack( ) | creates columns from hierarchical row index| | df.pivot_table | Create a spreadsheet-style pivot table as a DF | |df.groupby([ x, y]) [ z ] | Groups DF or Series using a mapper or by a Series of columns| | df.fillna( ) | Fills Na values with value indicated | | df.isna( ) | Detect missing (Na) values | | df.dropna( ) | Removes rows from DF with Na values | ## Data Wrangling | command | description | |:------------------------:|:------------------------:| | df.sort_values( ) | sort the rows by one or more columns| | pd.concat([df1, df2, ...]) | sticks two DataFrames together | | df.join( ) | joins two DataFrames on specific keys/columns| | df.merge( ) | join with more options | | df['Series'].astype( ) | convert type of a column | | df.replace( ) | replace values by others | | df.set_index( ) | moves one column into the index | | df.reset_index( ) | moves the index into a column | | df.iterrows( ) | iterate over rows (LAST RESORT)| | del df[col] | delete column | ## Aggregation | Command | Description | |:----------------:|:------------------:| | df.groupby( )| Groups DF or Series using a mapper or by a Series of columns | | df.transform( )| Call func on self producing a DF with transformed values and that has the same axis length as self | | df.apply( ) | apply a function along an axis of the DF | | df.mean( ) | gives you the mean of a numerical column | | df.max( ) | gives you max value for the column | | df.min( ) | gives you min value for the column | | df.sum( ) | return the sum of the values for requested axis | | df.cumsum( ) | gives you the cumulative sum for column | | df.count( ) | counts non-NA cells for each column or row | | df.std( ) | returns standard deviation over requested axis | | df.median( ) | returns median over requested 
axis | | df.quantile(0.9) | Return values at the given quantile over axis | | df.describe( ) | gives you a series of information about your DF - mean, stdev, count, max, min values... | | df.corr( ) | Compute pairwise correlation of columns, excluding null values | ## Reading and Writing DataFrames - **pd.read_csv( )** - reads your file and copies data to your notebook - **pd.read_excel( )** - reads your excel file - **df.to_csv( )** - saves your DataFrame to a csv file - **df.to_excel( )** - saves your DF to an xlsx file - **df.to_json( )** - saves your DF to a json file **Reading and using the first column as index** df = pd.read_csv("large_countries_2015.csv", index_col=0) The following parameters may be useful: - `sep` - column separator when reading CSV - `header` - whether there is a row with the header (boolean) - `names` - column names when there is no header - `index_col` - which column to use as index ## Useful Pandas links https://www.google.de/search?q=pandas+cookbook&rlz=1C5CHFA_enGB842GB842&oq=pandas+cook&aqs=chrome.0.0j69i57j0l4.2929j0j7&sourceid=chrome&ie=UTF-8 https://pandas.pydata.org/pandas-docs/stable/user_guide/cookbook.html https://pandas.pydata.org/pandas-docs/stable/index.html https://www.dropbox.com/sh/mxzo38txgdja9g7/AAAQTIlurE4OsQ1xd9EwW43aa?dl=0 https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf https://s3.amazonaws.com/assets.datacamp.com/blog_assets/PandasPythonForDataScience.pdf file:///Users/maximcondon/Documents/datascience/_downloads/pandas-cheat-sheet.pdf ## Exercise 1 ### 1. Read a .csv file (or.txt) with .read_csv ``` import pandas as pd df = pd.read_csv ('yob2017.txt', names = ['name', 'gender', 'count'], header = None) df.head() ``` ### 2. Save it to an excel file ``` df.to_excel('names.xlsx') ``` ### Read the spreadsheet again ``` pd.read_excel('names.xlsx').head(10) ``` ### 3. Select all rows with boys ``` df[df['gender'] == 'M'].head(10) boys = df[df['gender'] == 'M'] ``` ### 4. 
Select the top 10 names that occur more than 10,000 times ``` boys_10 = boys[boys['count'] > 10000].head(10) boys_10 ``` ### 5. Select names that occur between 100 and 200 times ``` df[(df['count'] > 100) & (df['count'] < 200)].head(20) ``` #### Can also do this snazzy bit of code - df.between(..., ...): ``` #but it includes 200!!! df[df['count'].between(100,200)].head(10) ``` ### 6. Count the boys and girls ``` df.groupby('gender').sum() ``` ### 7. Extract first and last character as separate columns, using df.apply( )! ``` def first(string): return string[0] df['first'] = df['name'].apply(first) df.head(10) def last(string): return string[-1] df['last'] = df['name'].apply(last) df.head(10) ``` #### For this particular operation there is a nice shortcut on pandas, df.str : ``` df['name'].str[0].head(10) ``` #### For the last letter just do -1 index position!! ``` df['name'].str[-1].head(10) ``` ### 8. Finding the length of a string inside a DataFrame ##### Can use a function just like above ##### Or a super-handy function .str.len( ) !!! ``` df['name'].str.len().head(10) def length(string): return len(string) df['Length of name'] = df['name'].apply(length) df.head(10) ``` ### 9. Count frequency combinations of first/last characters ``` count = df.groupby(['first','last'])['count'].count() count count.sort_values(ascending=False).head(10) ``` ### This gives us a 26 x 26 DataFrame of number of names with their first character and last character being the letters in the rows/columns: ``` cross = count.unstack() cross = cross.fillna(0.0) cross ``` ### 10. Use Seaborn to visualise this matrix! ``` %matplotlib inline from matplotlib import pyplot as plt import seaborn as sns plt.figure(figsize=(10,8)) sns.heatmap(cross) ``` **We used cross.fillna(0) to fill the NaN spots so we don't have white space!** ### Change the colour of your heatmap using cmap! 
sns.heatmap(cross, cmap='RRR') - cmap changes the colour scheme ``` plt.figure(figsize=(10,8)) sns.heatmap(cross, cmap='viridis') ``` ## Exercise 2 df = pd.read_csv('yob2000.txt', names=['name', 'gender', 'count']) ### 1. Create two separate DataFrames for boys and girls ``` df = pd.read_csv('yob2000.txt', names=['name', 'gender', 'count']) boys = df[df['gender'] == 'M'] boys.head() girls = df[df['gender'] == 'F'] girls.head() ``` ### 2. Put the two DataFrames together again ``` dfs = [girls, boys] df = pd.concat(dfs) df.iloc[17648:17658] # Can see from this that we've managed to put the dataframes # back together again! ``` ### 3. Connect boys and girls horizontally only where the names match ``` merged = girls.merge(boys, on='name', suffixes=('_female', '_male')) merged ``` ### 4. Create a gender column with the values ‘male’ and ‘female’ ``` #merged['gender'] = None #merged # def sex(string): # if merged['gender_female'] == 'F': # merged['gender'] = 'female' # else: # merged['gender'] = 'male' # merged['gender'] = merged['gender_female'].apply(sex) # merged # for x in merged: # if merged['gender_female'] == 'F': # merged['gender'].apply # df['gender'] = ``` ### 5. Use the name as an index ## Exercise 3 Combine two DataFrames ### 1. load the data in the file data/gapminder_lifeexpectancy.xlsx ### 2. select a year of your choice ``` df = pd.read_excel('gapminder_lifeexpectancy.xlsx', index_col=0) df = df[[2000]] df.head(5) df = df.reset_index() df.head() ``` ### 3. create an separate DataFrame that has two columns: country and continent, and manually fill it with data for 10-20 countries ### 4. merge both DataFrames ### 5. 
remove all rows with empty values ``` d = {'Life expectancy': ['United Kingdom', 'France', 'Germany', 'Italy', 'Spain', 'Brazil', 'United States', 'Argentina', 'Colombia', 'Peru', 'Russia', 'Myanmar', 'China', 'India', 'Vietnam', 'Senegal', 'Nigeria', 'Niger', 'South Africa', 'Lesotho'], 'continent': ['Europe', 'Europe', 'Europe', 'Europe', 'Europe', 'America', 'America', 'America', 'America', 'America', 'Asia', 'Asia', 'Asia', 'Asia', 'Asia', 'Africa', 'Africa', 'Africa', 'Africa', 'Africa']} df2 = pd.DataFrame(data=d) df2 merge = df.merge(df2, on='Life expectancy') merge ``` ### 6. print the highest and lowest fertility for each continent ``` continents = list(merge['continent'].unique()) continents america = merge[merge['continent'] == 'America'] europe = merge[merge['continent'] == 'Europe'] africa = merge[merge['continent'] == 'Africa'] asia = merge[merge['continent'] == 'Asia'] print(f'Country in {continents[0]} with the highest fertility is {america.max()[0]}') print(f'Country in {continents[0]} with the lowest fertility is {america.min()[0]}\n') print(f'Country in {continents[1]} with the highest fertility is {europe.max()[0]}') print(f'Country in {continents[1]} with the lowest fertility is {europe.min()[0]}\n') print(f'Country in {continents[2]} with the highest fertility is {africa.max()[0]}') print(f'Country in {continents[2]} with the lowest fertility is {africa.min()[0]}\n') print(f'Country in {continents[3]} with the highest fertility is {asia.max()[0]}') print(f'Country in {continents[3]} with the lowest fertility is {asia.min()[0]}\n') merge[merge['continent'] == 'America'] merge[merge['continent'] == 'America'].max()[0] continents[0] merge['continent'].unique() def high_low(df): continents = list(df['continent'].unique()) america = df[df['continent'] == 'America'] europe = df[df['continent'] == 'Europe'] africa = df[df['continent'] == 'Africa'] asia = df[df['continent'] == 'Asia'] # for i in continents: # print(i) return f'Country in 
{continents[0]} with the highest fertility is {america.max()[0]}' # america = # merge[merge['continent'] == 'America'].max()[0] # print(df[df['continent'] == 'America'].max()) # print(f'Country in {continents[0]} with the highest fertility is {america.max()[0]}') # print(f'Country in {continents[0]} with the lowest fertility is {america.min()[0]}\n') # print(f'Country in {continents[1]} with the highest fertility is {europe.max()[0]}') # print(f'Country in {continents[1]} with the lowest fertility is {europe.min()[0]}\n') # print(f'Country in {continents[2]} with the highest fertility is {africa.max()[0]}') # print(f'Country in {continents[2]} with the lowest fertility is {africa.min()[0]}\n') # print(f'Country in {continents[3]} with the highest fertility is {asia.max()[0]}') # print(f'Country in {continents[3]} with the lowest fertility is {asia.min()[0]}\n') high_low(merge) def high_low(df): continents = list(df['continent'].unique()) # return continents for i in continents: return df[df[i] == i[0].max()[0]] # i + df[df[i] == i[0].max()[0]] + df[df[i] == i[0].min()[0]] ```
github_jupyter
## BERT model for MITMovies Dataset I was going to make this repository a package with setup.py and everything but because of my deadlines and responsibilities at my current workplace I haven't got the time to do that so I shared the structure of the project in README.md file. ``` # If any issues open the one that gives error # !pip install transformers # !pip install torch==1.5.1 # !pip install tqdm # !pip install tensorboard # !pip install seqeval # ! pip install tqdm # ! pip install seaborn # !pip install gensim import os import sys import json import numpy as np from tqdm import tqdm sys.path.append("..") import torch from torch import nn import torch.nn.functional as F from torch.utils import tensorboard from seqeval.metrics import classification_report from transformers import Trainer, TrainingArguments from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from transformers import (WEIGHTS_NAME, AdamW, BertConfig, BertForTokenClassification, BertTokenizerFast, get_linear_schedule_with_warmup) from src.namedentityrecognizer.models.bertner import BertNerModel from src.namedentityrecognizer.data.analyze_dataset import Analyzer from src.namedentityrecognizer.data.build_dataset import BuildData from src.namedentityrecognizer.data.make_dataset import MakeData from src.namedentityrecognizer.utils.processors import NerPreProcessor, NerDataset from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score # Some initializers train = True num_train_epochs = 5 train_batch_size = 32 eval_batch_size = 8 # Weight decay for regularization weight_decay = 0.01 # Now 1 but if batches wont fit RAM can be increased gradient_accumulation_steps = 1 # %10 warm up warmup_proportion = 0.1 # Adam variables adam_epsilon = 1e-8 learning_rate = 5e-5 # 16 floating point instead of 32 fp16 = False if fp16: # Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'] fp16_opt_level # max seq length (for engtrain.bio since 
the lengths are pretty short 128 is alright) max_seq_length = 128 # For gradient clipping max_grad_norm = 1.0 # For having flexibility over hardware device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Data path data_dir = "/content/drive/MyDrive/MovieEntityRecognizer/data/modified" # Tensorboard Name tensorboard_writer = tensorboard.SummaryWriter("bert_base_uncased_default") validate = True test = True # For downloading data, doesn't require ssl so if downloaded no need to run it again # dataset_names = ["engtrain.bio", "engtest.bio", "trivia10k13train.n,bio", "trivia10k13test.bio"] # (MakeData.download_data(os.path.join("http://groups.csail.mit.edu/sls/downloads/movie", dataset_name) for dataset_name in dataset_names) # Count also word frequencies and lengths or sentences train_labels = Analyzer.count_labels("/home/karaz/Desktop/MovieEntityRecognizer/data/raw/mitmovies/engtrain.bio", without_o=True) Analyzer.plot_data(train_labels) test_labels = Analyzer.count_labels("/home/karaz/Desktop/MovieEntityRecognizer/data/raw/mitmovies/engtest.bio", without_o=True) Analyzer.plot_data(test_labels) # Get distinct labels label_list = sorted(list(train_labels.keys())) label_list.append("O") num_labels = len(label_list) label_map = {label: id for id, label in enumerate(label_list)} print(f"Size of labels of regular dataset: {len(label_list)}\n{label_map}") # model configurations and tokenizer config = BertConfig.from_pretrained("bert-large-uncased", num_labels=num_labels, finetuning_task="ner") tokenizer = BertTokenizerFast.from_pretrained("bert-large-uncased") # Change home karaz desktop path to your home directory (basically where the repository is) dataset = BuildData.create_dataset("/home/karaz/Desktop/MovieEntityRecognizer/data/modified/mitmovies") id2label = {id: label for (label,id) in label_map.items()} id2label[-100] = 'X' id2label if train: num_train_optimization_steps = int( len(dataset['train_instances']) / train_batch_size / 
gradient_accumulation_steps) * num_train_epochs print(f"Number of training steps {num_train_optimization_steps}") print(f"Number of training instances {len(dataset['train_instances'])}") if test: test_steps = int( len(dataset['test_instances']) / eval_batch_size) print(f"Number of test steps {test_steps}") print(f"Number of test instances {len(dataset['test_instances'])}") # Tokenize the datasets train_tokens = tokenizer(dataset["train_instances"], is_split_into_words=True, return_offsets_mapping=True, padding=True, truncation=True) test_tokens = tokenizer(dataset['test_instances'], is_split_into_words=True, return_offsets_mapping=True, padding=True, truncation=True) # Encode labels and give -100 to tokens which you dont want to backpropagate (basically mask them out) train_labels = NerPreProcessor.convert_labels(dataset["train_labels"], label_map, train_tokens) test_labels = NerPreProcessor.convert_labels(dataset['test_labels'], label_map, test_tokens) # Get rid of unnecessary data and create final data if train_tokens["offset_mapping"]: train_tokens.pop("offset_mapping") if test_tokens["offset_mapping"]: test_tokens.pop("offset_mapping") train_dataset = NerDataset(train_tokens, train_labels) test_dataset = NerDataset(test_tokens, test_labels) # Model initialization for high level api of huggingface def model_init(): model = BertForTokenClassification.from_pretrained('bert-large-uncased', num_labels=len(label_map)) return model # I left the compute metrics here in order to show how the evaluation def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [id2label[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [id2label[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] preds_stretched = [label for doc in true_predictions for label in doc] 
trues_stretched = [label for doc in true_labels for label in doc] return { "accuracy_score": accuracy_score(trues_stretched, preds_stretched), "precision": precision_score(trues_stretched, preds_stretched, labels=np.unique(preds_stretched), average='macro'), "recall": recall_score(trues_stretched, preds_stretched, labels=np.unique(preds_stretched), average='macro'), "f1_macro": f1_score(trues_stretched, preds_stretched, labels=np.unique(preds_stretched), average='macro'), "f1_micro": f1_score(trues_stretched, preds_stretched, average='micro'), } model_name = "bert-large-uncased-micro-10epoch" training_args = TrainingArguments( output_dir = "/home/kemalaraz/Desktop/MovieEntityRecognizer/pretrained_models/" + model_name, # output directory overwrite_output_dir = True, evaluation_strategy='epoch', num_train_epochs = 10, # total number of training epochs per_device_train_batch_size=16, # batch size per device during training per_device_eval_batch_size=64, # batch size for evaluation warmup_steps=500, # number of warmup steps for learning rate scheduler weight_decay=0.01, # strength of weight decay logging_dir = "/home/kemalaraz/Desktop/MovieEntityRecognizer/pretrained_models/" + model_name + '/logs', # directory for storing logs logging_steps=10, load_best_model_at_end=True, learning_rate = 5e-5, seed = 42 ) # The high level api of the trainer trainer = Trainer( model_init = model_init, args = training_args, train_dataset = train_dataset, eval_dataset = test_dataset, compute_metrics = compute_metrics ) training_results = trainer.train() evaluate_results_with_best_epoch = trainer.evaluate() # For basic inference model = BertForTokenClassification.from_pretrained(path_to_the_model, num_labels=len(label_map)) tokens = tokenizer.tokenize(tokenizer.decode(tokenizer.encode("list the five star movies starring john lennon"))) inputs = tokenizer.encode("list the four star movies starring john lennon", return_tensors="pt") outputs = model(inputs)[0] predictions = 
torch.argmax(outputs, dim=2) print([(token, label_list[prediction]) for token, prediction in zip(tokens, predictions[0].tolist())]) ``` ## Attachments ![image.png](attachment:image.png) ![image.png](attachment:image.png) ![image.png](attachment:image.png) ![image.png](attachment:image.png) ![image.png](attachment:image.png)
github_jupyter
``` # Import common packages and create database connection import pandas as pd import sqlite3 as db conn = db.connect('Db-IMDB.db') ``` 1. List all the directors who directed a 'Comedy' movie in a leap year. (You need to check that the genre is 'Comedy’ and year is a leap year) Your query should return director name, the movie name, and the year. ``` %%time # List all the distinct directors who directed a 'Comedy' movie in a leap year. # citation https://stackoverflow.com/questions/6534788/check-for-leap-year # https://www.mathsisfun.com/leap-years.html result = pd.read_sql_query( ''' SELECT DISTINCT trim(P.NAME) as director, M.title as movie, M.year, G.Name FROM Movie M JOIN M_Director MD ON M.MID = MD.MID JOIN Person P on trim(MD.PID) = trim(P.PID) JOIN M_Genre MG on M.MID = MG.MID JOIN Genre G on MG.GID = G.GID WHERE G.Name LIKE '%Comedy%' AND (((M.year % 4 = 0) AND (M.year % 100 != 0)) OR (M.year % 400 = 0)) GROUP BY director ORDER BY director ''' , conn); result %%time # List all the directors who directed a 'Comedy' movie in a leap year. A director can direct multiple movies in leap year. # citation https://stackoverflow.com/questions/6534788/check-for-leap-year # https://www.mathsisfun.com/leap-years.html result = pd.read_sql_query( ''' SELECT DISTINCT trim(P.NAME) as director, M.title as movie, M.year, G.Name FROM Movie M JOIN M_Director MD ON M.MID = MD.MID JOIN Person P on trim(MD.PID) = trim(P.PID) JOIN M_Genre MG on M.MID = MG.MID JOIN Genre G on MG.GID = G.GID WHERE G.Name LIKE '%Comedy%' AND (((M.year % 4 = 0) AND (M.year % 100 != 0)) OR (M.year % 400 = 0)) ORDER BY director ''' , conn); result ``` 2. List the names of all the actors who played in the movie 'Anand' (1971) ``` %%time result = pd.read_sql_query( ''' SELECT p.Name FROM Movie m JOIN M_Cast mc ON m.MID=mc.MID JOIN Person p ON trim(mc.PID)=trim(p.PID) WHERE m.title='Anand' AND m.year=1971 ''' , conn) result ``` 3. List all the actors who acted in a film before 1970 and in a film after 1990.
(That is: < 1970 and > 1990.) ``` %%time result = pd.read_sql_query( ''' SELECT DISTINCT trim(p.PID) as pid, p.Name FROM Movie m JOIN M_Cast mc ON m.MID = mc.MID JOIN Person p ON trim(mc.PID) = trim(p.PID) WHERE m.year > 1990 AND trim(p.PID) IN (SELECT DISTINCT trim(p.PID) as pid FROM Movie m JOIN M_Cast mc ON m.MID = mc.MID JOIN Person p ON trim(mc.PID) = trim(p.PID) WHERE m.year < 1970) GROUP BY trim(p.PID) ''', conn) result ``` 4. List all directors who directed 10 movies or more, in descending order of the number of movies they directed. Return the directors' names and the number of movies each of them directed. ``` %%time result = pd.read_sql_query( ''' SELECT p.Name, count(md.ID) movieCount FROM M_Director md JOIN Person p ON md.PID=p.PID GROUP BY md.PID HAVING movieCount >= 10 ORDER BY movieCount DESC ''', conn) result ``` 5a. For each year, count the number of movies in that year that had only female actors. ``` %%time result = pd.read_sql_query( ''' SELECT count(m.year) movie_count, m.year as movie_year FROM Movie m where m.MID not in (SELECT mc.MID FROM Person p JOIN M_Cast mc ON trim(p.PID)=trim(mc.PID) WHERE p.Gender='Male') GROUP BY movie_year ORDER BY movie_count DESC ''', conn) result ``` 5b.Now include a small change: report for each year the percentage of movies in that year with only female actors, and the total number of movies made that year. For example, one answer will be:1990 31.81 13522 meaning that in 1990 there were 13,522 movies, and 31.81% had only female actors. You do not need to round your answer. 
``` %%time result = pd.read_sql_query( ''' SELECT y.allMov as 'movie_count', x.year as movie_year, ((x.Movies_Cnt*100.0)/y.allMov) as Percent FROM (SELECT count(*) Movies_Cnt , m.year FROM Movie m where m.MID not in (SELECT mc.MID FROM Person p JOIN M_Cast mc ON trim(p.PID) = trim(mc.PID) WHERE p.Gender='Male') GROUP BY m.year) x INNER JOIN (SELECT count(*) allMov, m.year FROM Movie m GROUP BY m.year) y on x.year=y.year ''', conn) result ``` 6. Find the film(s) with the largest cast. Return the movie title and the size of the cast. By "cast size" we mean the number of distinct actors that played in that movie: if an actor played multiple roles, or if it simply occurs multiple times in casts,we still count her/him only once. ``` %%time result = pd.read_sql_query( ''' SELECT count(DISTINCT mc.PId) as cast_count, m.title FROM Movie m JOIN M_Cast mc ON m.MID=mc.MID JOIN Person p ON trim(mc.PID)=trim(p.PID) GROUP BY m.MID ORDER BY cast_count DESC limit 1 ''', conn) result ``` 7. A decade is a sequence of 10 consecutive years. For example,say in your database you have movie information starting from 1965. Then the first decade is 1965, 1966, ..., 1974; the second one is 1967, 1968, ..., 1976 and so on. Find the decade D with the largest number of films and the total number of films in D. ``` %%time # citation https://stackoverflow.com/questions/25955049/sql-how-to-sum-up-count-for-many-decades?rq=1 # result = pd.read_sql_query( # ''' # SELECT (ROUND(m.year / 10) * 10) AS Decade, COUNT(1) AS total_movies # FROM Movie m # GROUP BY ROUND(m.year/ 10) # ORDER BY total_movies DESC LIMIT 1 # ''', conn) # result result = pd.read_sql_query(''' SELECT d_year.year AS start, d_year.year+9 AS end, count(1) AS total_movies FROM (SELECT DISTINCT(year) FROM Movie) d_year JOIN Movie m WHERE m.year>=start AND m.year<=end GROUP BY end ORDER BY total_movies DESC LIMIT 1 ''', conn) result ``` 8. Find the actors that were never unemployed for more than 3 years at a stretch. 
(Assume that the actors remain unemployed between two consecutive movies). ``` %%time # citation https://stackoverflow.com/questions/57733454/to-find-actors-who-were-never-unemployed-for-more-than-3-years-in-a-stretch # Here I am using window function (LEAD) that provides comparing current row with next row result = pd.read_sql_query( ''' SELECT *, (next_year - year) AS gap FROM (SELECT * , LEAD(year, 1, 0) OVER (PARTITION BY Name ORDER BY year ASC) AS next_year FROM (SELECT p.Name, m.title, m.year FROM Movie m JOIN M_Cast mc ON m.MID=mc.MID JOIN Person p ON trim(mc.PID)=trim(p.PID))) WHERE gap <=3 and gap >=0 GROUP BY Name ORDER BY Name ASC ''', conn) result ``` 9. Find all the actors that made more movies with Yash Chopra than any other director. ``` # %%time # The following query is correct but didn't give the results, # Running below query gives "database or disk is full" error # result = pd.read_sql_query( # ''' # SELECT P1.PID, P1.Name, count(Movie.MID) AS movies_with_yc from Person as P1 # JOIN M_Cast # JOIN Movie # JOIN M_Director ON (trim(Movie.MID) = trim(M_Director.MID)) # JOIN Person as P2 ON (trim(M_Director.PID) = trim(P2.PID)) where P2.Name = 'Yash Chopra' # GROUP BY P1.PID HAVING count(Movie.MID) > # ( # SELECT count(Movie.MID) FROM Person AS P3 # JOIN M_Cast # JOIN Movie # JOIN M_Director ON (trim(Movie.MID) = trim(M_Director.MID)) # JOIN Person AS P4 ON (trim(M_Director.PID) = trim(P4.PID)) # WHERE P1.PID = P3.PID AND P4.Name != 'Yash Chopra' # GROUP BY P4.PID # ) # ORDER BY movies_with_yc DESC; # ''', conn) # result %%time result = pd.read_sql_query( ''' SELECT Director, Actor, Count(1) AS Movies_with_YashChopra FROM ( SELECT p.Name AS Director, m.title AS Movie FROM Person p JOIN M_Director md ON trim(md.PID)=trim(p.PID) JOIN Movie m ON trim(md.MID)=m.MID and p.Name LIKE 'Yash%' GROUP BY p.Name, m.title ) t1 JOIN ( SELECT p.Name AS Actor, m.title AS Movie FROM Person p JOIN M_Cast mc ON trim(mc.PID)=trim(p.PID) JOIN Movie m ON trim(mc.MID)=m.MID 
GROUP BY p.Name, m.title ) t2 ON t1.Movie=t2.Movie GROUP BY t1.Director, t2.Actor ORDER By Movies_with_YashChopra DESC ''', conn) result ``` 10. The Shahrukh number of an actor is the length of the shortest path between the actor and Shahrukh Khan in the "co-acting" graph. That is, Shahrukh Khan has Shahrukh number 0; all actors who acted in the same film as Shahrukh have Shahrukh number 1; all actors who acted in the same film as some actor with Shahrukh number 1 have Shahrukh number 2, etc. Return all actors whoseShahrukh number is 2 ``` %%time result = pd.read_sql_query( ''' SELECT Name FROM Person WHERE trim(Name) LIKE '%shah rukh khan%' ''', conn) result ``` <h2>Using below steps we can get Shah Rukh Khan 2nd Degree Connection</h2> - Logic to Build following Query - Select movies in which Shah Rukh Khan worked - Select Shah Rukh level 1 i.e. 1st Degree connection of Shah Rukh Khan - Select movies in which Shah Rukh level 1 worked but exclude movies with Shah Rukh Khan - Select Shah Rukh level 2 who worked in some movie with Shah Rukh level 1 ``` %%time result = pd.read_sql_query(''' SELECT DISTINCT P.Name FROM Person p JOIN M_Cast mc ON trim(p.PID) = trim(mc.PID) WHERE mc.MID IN (SELECT mc.MID FROM M_Cast mc WHERE trim(mc.PID) IN ( SELECT trim(p.PID) as pid FROM Person p JOIN M_Cast mc ON trim(p.PID) = trim(mc.PID) WHERE mc.MID IN ( SELECT mc.MID FROM Person p JOIN M_Cast mc ON trim(p.PID) = trim(mc.PID) WHERE trim(p.Name) LIKE '%shah rukh khan%' ) AND trim(p.Name) NOT LIKE '%shah rukh khan%' ) AND mc.MID NOT IN (SELECT mc.MID FROM Person p JOIN M_Cast mc ON trim(p.PID) = trim(mc.PID) WHERE trim(p.Name) LIKE '%shah rukh khan%')) ''', conn) result ```
github_jupyter
``` !pip install torchvision==0.2.2 !pip install https://download.pytorch.org/whl/cu100/torch-1.1.0-cp36-cp36m-linux_x86_64.whl !pip install typing !pip install opencv-python !pip install slackweb !pip list | grep torchvision !pip list | grep torch # import cv2 import audioread import logging import os import random import time import warnings import glob from tqdm import tqdm import librosa import numpy as np import pandas as pd import soundfile as sf import torch import torch.optim as optim import torch.nn as nn import torch.nn.functional as F import torch.utils.data as data from contextlib import contextmanager from pathlib import Path from typing import Optional from fastprogress import progress_bar from sklearn.metrics import f1_score from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from torchvision import models import matplotlib.pyplot as plt import slackweb def set_seed(seed: int = 42): random.seed(seed) np.random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) # type: ignore torch.backends.cudnn.deterministic = True # type: ignore torch.backends.cudnn.benchmark = True # type: ignore def get_logger(out_file=None): logger = logging.getLogger() formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") logger.handlers = [] logger.setLevel(logging.INFO) handler = logging.StreamHandler() handler.setFormatter(formatter) handler.setLevel(logging.INFO) logger.addHandler(handler) if out_file is not None: fh = logging.FileHandler(out_file) fh.setFormatter(formatter) fh.setLevel(logging.INFO) logger.addHandler(fh) logger.info("logger set up") return logger @contextmanager def timer(name: str, logger: Optional[logging.Logger] = None): t0 = time.time() msg = f"[{name}] start" if logger is None: print(msg) else: logger.info(msg) yield msg = f"[{name}] done in {time.time() - t0:.2f} s" if logger is None: print(msg) else: logger.info(msg) logger = 
get_logger("main.log") set_seed(1213) DATA_PATH = '/home/knikaido/work/Cornell-Birdcall-Identification/data/birdsong_recognition/' TRAIN_PATH = DATA_PATH + 'train_audio/' MEL_PATH = '/home/knikaido/work/Cornell-Birdcall-Identification/data/feature/08_06_melspectrogram_small/' class ResNet(nn.Module): def __init__(self, base_model_name: str, pretrained=False, num_classes=264): super().__init__() base_model = models.__getattribute__(base_model_name)( pretrained=pretrained) layers = list(base_model.children())[:-2] layers.append(nn.AdaptiveMaxPool2d(1)) self.encoder = nn.Sequential(*layers) in_features = base_model.fc.in_features self.classifier = nn.Sequential( nn.Linear(in_features, 1024), nn.ReLU(), nn.Dropout(p=0.2), nn.Linear(1024, 1024), nn.ReLU(), nn.Dropout(p=0.2), nn.Linear(1024, num_classes)) def forward(self, x): batch_size = x.size(0) x = self.encoder(x).view(batch_size, -1) x = self.classifier(x) multiclass_proba = F.softmax(x, dim=1) multilabel_proba = torch.sigmoid(x) return { "logits": x, "multiclass_proba": multiclass_proba, "multilabel_proba": multilabel_proba } model_config = { "base_model_name": "resnet50", "pretrained": False, "num_classes": 264 } BIRD_CODE = { 'aldfly': 0, 'ameavo': 1, 'amebit': 2, 'amecro': 3, 'amegfi': 4, 'amekes': 5, 'amepip': 6, 'amered': 7, 'amerob': 8, 'amewig': 9, 'amewoo': 10, 'amtspa': 11, 'annhum': 12, 'astfly': 13, 'baisan': 14, 'baleag': 15, 'balori': 16, 'banswa': 17, 'barswa': 18, 'bawwar': 19, 'belkin1': 20, 'belspa2': 21, 'bewwre': 22, 'bkbcuc': 23, 'bkbmag1': 24, 'bkbwar': 25, 'bkcchi': 26, 'bkchum': 27, 'bkhgro': 28, 'bkpwar': 29, 'bktspa': 30, 'blkpho': 31, 'blugrb1': 32, 'blujay': 33, 'bnhcow': 34, 'boboli': 35, 'bongul': 36, 'brdowl': 37, 'brebla': 38, 'brespa': 39, 'brncre': 40, 'brnthr': 41, 'brthum': 42, 'brwhaw': 43, 'btbwar': 44, 'btnwar': 45, 'btywar': 46, 'buffle': 47, 'buggna': 48, 'buhvir': 49, 'bulori': 50, 'bushti': 51, 'buwtea': 52, 'buwwar': 53, 'cacwre': 54, 'calgul': 55, 'calqua': 56, 'camwar': 
57, 'cangoo': 58, 'canwar': 59, 'canwre': 60, 'carwre': 61, 'casfin': 62, 'caster1': 63, 'casvir': 64, 'cedwax': 65, 'chispa': 66, 'chiswi': 67, 'chswar': 68, 'chukar': 69, 'clanut': 70, 'cliswa': 71, 'comgol': 72, 'comgra': 73, 'comloo': 74, 'commer': 75, 'comnig': 76, 'comrav': 77, 'comred': 78, 'comter': 79, 'comyel': 80, 'coohaw': 81, 'coshum': 82, 'cowscj1': 83, 'daejun': 84, 'doccor': 85, 'dowwoo': 86, 'dusfly': 87, 'eargre': 88, 'easblu': 89, 'easkin': 90, 'easmea': 91, 'easpho': 92, 'eastow': 93, 'eawpew': 94, 'eucdov': 95, 'eursta': 96, 'evegro': 97, 'fiespa': 98, 'fiscro': 99, 'foxspa': 100, 'gadwal': 101, 'gcrfin': 102, 'gnttow': 103, 'gnwtea': 104, 'gockin': 105, 'gocspa': 106, 'goleag': 107, 'grbher3': 108, 'grcfly': 109, 'greegr': 110, 'greroa': 111, 'greyel': 112, 'grhowl': 113, 'grnher': 114, 'grtgra': 115, 'grycat': 116, 'gryfly': 117, 'haiwoo': 118, 'hamfly': 119, 'hergul': 120, 'herthr': 121, 'hoomer': 122, 'hoowar': 123, 'horgre': 124, 'horlar': 125, 'houfin': 126, 'houspa': 127, 'houwre': 128, 'indbun': 129, 'juntit1': 130, 'killde': 131, 'labwoo': 132, 'larspa': 133, 'lazbun': 134, 'leabit': 135, 'leafly': 136, 'leasan': 137, 'lecthr': 138, 'lesgol': 139, 'lesnig': 140, 'lesyel': 141, 'lewwoo': 142, 'linspa': 143, 'lobcur': 144, 'lobdow': 145, 'logshr': 146, 'lotduc': 147, 'louwat': 148, 'macwar': 149, 'magwar': 150, 'mallar3': 151, 'marwre': 152, 'merlin': 153, 'moublu': 154, 'mouchi': 155, 'moudov': 156, 'norcar': 157, 'norfli': 158, 'norhar2': 159, 'normoc': 160, 'norpar': 161, 'norpin': 162, 'norsho': 163, 'norwat': 164, 'nrwswa': 165, 'nutwoo': 166, 'olsfly': 167, 'orcwar': 168, 'osprey': 169, 'ovenbi1': 170, 'palwar': 171, 'pasfly': 172, 'pecsan': 173, 'perfal': 174, 'phaino': 175, 'pibgre': 176, 'pilwoo': 177, 'pingro': 178, 'pinjay': 179, 'pinsis': 180, 'pinwar': 181, 'plsvir': 182, 'prawar': 183, 'purfin': 184, 'pygnut': 185, 'rebmer': 186, 'rebnut': 187, 'rebsap': 188, 'rebwoo': 189, 'redcro': 190, 'redhea': 191, 'reevir1': 192, 
'renpha': 193, 'reshaw': 194, 'rethaw': 195, 'rewbla': 196, 'ribgul': 197, 'rinduc': 198, 'robgro': 199, 'rocpig': 200, 'rocwre': 201, 'rthhum': 202, 'ruckin': 203, 'rudduc': 204, 'rufgro': 205, 'rufhum': 206, 'rusbla': 207, 'sagspa1': 208, 'sagthr': 209, 'savspa': 210, 'saypho': 211, 'scatan': 212, 'scoori': 213, 'semplo': 214, 'semsan': 215, 'sheowl': 216, 'shshaw': 217, 'snobun': 218, 'snogoo': 219, 'solsan': 220, 'sonspa': 221, 'sora': 222, 'sposan': 223, 'spotow': 224, 'stejay': 225, 'swahaw': 226, 'swaspa': 227, 'swathr': 228, 'treswa': 229, 'truswa': 230, 'tuftit': 231, 'tunswa': 232, 'veery': 233, 'vesspa': 234, 'vigswa': 235, 'warvir': 236, 'wesblu': 237, 'wesgre': 238, 'weskin': 239, 'wesmea': 240, 'wessan': 241, 'westan': 242, 'wewpew': 243, 'whbnut': 244, 'whcspa': 245, 'whfibi': 246, 'whtspa': 247, 'whtswi': 248, 'wilfly': 249, 'wilsni1': 250, 'wiltur': 251, 'winwre3': 252, 'wlswar': 253, 'wooduc': 254, 'wooscj2': 255, 'woothr': 256, 'y00475': 257, 'yebfly': 258, 'yebsap': 259, 'yehbla': 260, 'yelwar': 261, 'yerwar': 262, 'yetvir': 263 } INV_BIRD_CODE = {v: k for k, v in BIRD_CODE.items()} train_path = DATA_PATH + 'train.csv' train = pd.read_csv(train_path) le = LabelEncoder() encoded = le.fit_transform(train['channels'].values) decoded = le.inverse_transform(encoded) train['channels'] = encoded for i in tqdm(range(len(train))): train['ebird_code'][i] = BIRD_CODE[train['ebird_code'][i]] train['filename'] = train['filename'].str.replace(".mp3", "") train.head() mel_list = sorted(glob.glob(MEL_PATH + '*.npy')) mel_list = pd.Series(mel_list) len(mel_list) import joblib target_list = joblib.load(MEL_PATH+'target_list.pkl') for i in tqdm(range(len(target_list))): target_list[i] = BIRD_CODE[target_list[i]] len(target_list) X_train_mel, X_valid_mel, target_train, taret_valid = train_test_split(mel_list, target_list, test_size=0.2, stratify=target_list) class TrainDateset(torch.utils.data.Dataset): def __init__(self, mel_list, train, transform=None): 
self.transform = transform self.mel_list = mel_list self.data_num = len(mel_list) def __len__(self): return self.data_num def __getitem__(self, idx): if self.transform: pass # out_data = self.transform(self.data)[0][idx] # out_label = self.label[idx] else: # print(idx) out_data = np.array(np.load(mel_list[idx])) out_mel_list = mel_list[idx] out_label = target_list[idx] # out_label = self.label[idx] return out_data, out_label train_dataset = TrainDateset(X_train_mel, target_train) train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True) valid_dataset = TrainDateset(X_valid_mel, taret_valid) valid_dataloader = torch.utils.data.DataLoader(valid_dataset, batch_size=128, shuffle=True) WEIGHT_DECAY = 0.005 LEARNING_RATE = 0.0001 EPOCH = 100 device = 'cuda' if torch.cuda.is_available() else 'cpu' print(torch.cuda.is_available()) net = ResNet('resnet50') net = net.to(device) criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY) %%time train_losses = [] valid_losses = [] for epoch in tqdm(range(EPOCH)): # loop over the dataset multiple times train_loss = 0.0 valid_loss = 0.0 net.train() for i, data in enumerate(train_dataloader): # 第二引数は,スタート位置で,0なのでenumerate(trainloader)と同じ # https://docs.python.org/3/library/functions.html#enumerate # get the inputs inputs, labels = data inputs = inputs.to(device) labels = labels.to(device) # wrap them in Variable # inputs, labels = Variable(inputs), Variable(labels) # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion(outputs['logits'], labels) loss.backward() optimizer.step() # print statistics # running_loss += loss.data[0] train_loss += loss.to('cpu').detach().numpy().copy() print('[%d, %5d] train loss: %.3f' % (epoch + 1, i + 1, train_loss / (i+1))) train_losses.append(train_loss / (i+1)) net.eval() for i, data in enumerate(valid_dataloader): # 
第二引数は,スタート位置で,0なのでenumerate(trainloader)と同じ # https://docs.python.org/3/library/functions.html#enumerate # get the inputs inputs, labels = data inputs = inputs.to(device) labels = labels.to(device) # wrap them in Variable # inputs, labels = Variable(inputs), Variable(labels) # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion(outputs['logits'], labels) # print statistics # running_loss += loss.data[0] valid_loss += loss.to('cpu').detach().numpy().copy() print('[%d, %5d] valid loss: %.3f' % (epoch + 1, i + 1, valid_loss / (i+1))) valid_losses.append(valid_loss / (i+1)) # break ``` https://qiita.com/derodero24/items/f4cc46f144f404054501 ``` import cloudpickle with open('model.pkl', 'wb') as f: cloudpickle.dump(net, f) slack = slackweb.Slack(url="https://hooks.slack.com/services/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") slack.notify(text="おわた") with open('model.pkl', 'rb') as f: net = cloudpickle.load(f) ``` ## plot loss ``` plt.figure(figsize=(16,5), dpi= 80) plt.plot(train_losses, color='tab:red', label='valid') plt.plot(valid_losses, color='tab:blue', label='train') plt.legend() ```
github_jupyter
Python programmers will often suggest that there are many ways the language can be used to solve a particular problem. But that some are more appropriate than others. The best solutions are celebrated as Idiomatic Python and there are lots of great examples of this on StackOverflow and other websites. A sort of sub-language within Python, Pandas has its own set of idioms. We've alluded to some of these already, such as using vectorization whenever possible, and not using iterative loops if you don't need to. Several developers and users within the Pandas community have used the term __pandorable__ for these idioms. I think it's a great term. So, I wanted to share with you a couple of key features of how you can make your code pandorable. ``` # Let's start by bringing in our data processing libraries import pandas as pd import numpy as np # And we'll bring in some timing functionality too, from the timeit module import timeit # And lets look at some census data from the US df = pd.read_csv('datasets/census.csv') df.head() # The first of the pandas idioms I would like to talk about is called method chaining. The general idea behind # method chaining is that every method on an object returns a reference to that object. The beauty of this is # that you can condense many different operations on a DataFrame, for instance, into one line or at least one # statement of code. # Here's the pandorable way to write code with method chaining. In this code I'm going to pull out the state # and city names as a multiple index, and I'm going to do so only for data which has a summary level of 50, # which in this dataset is county-level data. I'll rename a column too, just to make it a bit more readable. (df.where(df['SUMLEV']==50) .dropna() .set_index(['STNAME','CTYNAME']) .rename(columns={'ESTIMATESBASE2010': 'Estimates Base 2010'})) # Lets walk through this.
First, we use the where() function on the dataframe and pass in a boolean mask which # is only true for those rows where the SUMLEV is equal to 50. This indicates in our source data that the data # is summarized at the county level. With the result of the where() function evaluated, we drop missing # values. Remember that .where() doesn't drop missing values by default. Then we set an index on the result of # that. In this case I've set it to the state name followed by the county name. Finally. I rename a column to # make it more readable. Note that instead of writing this all on one line, as I could have done, I began the # statement with a parenthesis, which tells python I'm going to span the statement over multiple lines for # readability. # Here's a more traditional, non-pandorable way, of writing this. There's nothing wrong with this code in the # functional sense, you might even be able to understand it better as a new person to the language. It's just # not as pandorable as the first example. # First create a new dataframe from the original df = df[df['SUMLEV']==50] # I'll use the overloaded indexing operator [] which drops nans # Update the dataframe to have a new index, we use inplace=True to do this in place df.set_index(['STNAME','CTYNAME'], inplace=True) # Set the column names df.rename(columns={'ESTIMATESBASE2010': 'Estimates Base 2010'}) # Now, the key with any good idiom is to understand when it isn't helping you. In this case, you can actually # time both methods and see which one runs faster # We can put the approach into a function and pass the function into the timeit function to count the time the # parameter number allows us to choose how many times we want to run the function. 
Here we will just set it to # 10 # Lets write a wrapper for our first function def first_approach(): global df # And we'll just paste our code right here return (df.where(df['SUMLEV']==50) .dropna() .set_index(['STNAME','CTYNAME']) .rename(columns={'ESTIMATESBASE2010': 'Estimates Base 2010'})) # Read in our dataset anew df = pd.read_csv('datasets/census.csv') # And now lets run it timeit.timeit(first_approach, number=10) # Now let's test the second approach. As you may notice, we use our global variable df in the function. # However, changing a global variable inside a function will modify the variable even in a global scope and we # do not want that to happen in this case. Therefore, for selecting summary levels of 50 only, I create a new # dataframe for those records # Let's run this for once and see how fast it is def second_approach(): global df new_df = df[df['SUMLEV']==50] new_df.set_index(['STNAME','CTYNAME'], inplace=True) return new_df.rename(columns={'ESTIMATESBASE2010': 'Estimates Base 2010'}) # Read in our dataset anew df = pd.read_csv('datasets/census.csv') # And now lets run it timeit.timeit(second_approach, number=10) # As you can see, the second approach is much faster! So, this is a particular example of a classic time # readability trade off. # You'll see lots of examples on stack overflow and in documentation of people using method chaining in their # pandas. And so, I think being able to read and understand the syntax is really worth your time. But keep in # mind that following what appears to be stylistic idioms might have performance issues that you need to # consider as well. # Here's another pandas idiom. Python has a wonderful function called map, which is sort of a basis for # functional programming in the language. When you want to use map in Python, you pass it some function you # want called, and some iterable, like a list, that you want the function to be applied to. 
The results are # that the function is called against each item in the list, and there's a resulting list of all of the # evaluations of that function. # Pandas has a similar function called applymap. In applymap, you provide some function which should operate # on each cell of a DataFrame, and the return set is itself a DataFrame. Now I think applymap is fine, but I # actually rarely use it. Instead, I find myself often wanting to map across all of the rows in a DataFrame. # And pandas has a function that I use heavily there, called apply. Let's look at an example. # Let's take a look at our census DataFrame. In this DataFrame, we have five columns for population estimates, # with each column corresponding with one year of estimates. It's quite reasonable to want to create some new # columns for minimum or maximum values, and the apply function is an easy way to do this. # First, we need to write a function which takes in a particular row of data, finds the minimum and maximum # values, and returns a new row of data. We'll call this function min_max, this # is pretty straight forward. We can create some small slice of a row by projecting the population columns. # Then use the NumPy min and max functions, and create a new series with labels that represent the new # values we want to apply. def min_max(row): data = row[['POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012', 'POPESTIMATE2013', 'POPESTIMATE2014', 'POPESTIMATE2015']] return pd.Series({'min': np.min(data), 'max': np.max(data)}) # Then we just need to call apply on the DataFrame. # Apply takes the function and the axis on which to operate as parameters. Now, we have to be a bit careful, # we've talked about axis zero being the rows of the DataFrame in the past. But this parameter is really the # parameter of the index to use. So, to apply across all rows, which is applying on all columns, you pass axis # equal to 'columns'.
df.apply(min_max, axis='columns').head() # Of course there's no need to limit yourself to returning a new series object. If you're doing this as part # of data cleaning you're likely to find yourself wanting to add new data to the existing DataFrame. In that # case you just take the row values and add in new columns indicating the max and minimum scores. This is a # regular part of my workflow when bringing in data and building summary or descriptive statistics, and is # often used heavily with the merging of DataFrames. # Here's an example where we have a revised version of the function min_max. Instead of returning a separate # series to display the min and max we add two new columns in the original dataframe to store min and max def min_max(row): data = row[['POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012', 'POPESTIMATE2013', 'POPESTIMATE2014', 'POPESTIMATE2015']] # Create a new entry for max row['max'] = np.max(data) # Create a new entry for min row['min'] = np.min(data) return row # Now just apply the function across the dataframe df.apply(min_max, axis='columns') # Apply is an extremely important tool in your toolkit. The reason I introduced apply here is because you # rarely see it used with large function definitions, like we did. Instead, you typically see it used with # lambdas. To get the most of the discussions you'll see online, you're going to need to know how to at least # read lambdas. # You can imagine how you might chain several apply calls with lambdas together to create a readable # yet succinct data manipulation script. Here's a one line example of how you might calculate the max of the columns # using the apply function. rows = ['POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012', 'POPESTIMATE2013','POPESTIMATE2014', 'POPESTIMATE2015'] # Now we'll just apply this across the dataframe with a lambda df.apply(lambda x: np.max(x[rows]), axis=1).head() # If you don't remember lambdas just pause the video for a moment and look up the syntax.
A lambda is just an # unnamed function in python, in this case it takes a single parameter, x, and returns a single value, in this # case the maximum over all columns associated with row x. # The beauty of the apply function is that it allows flexibility in doing whatever manipulation that you # desire, as the function you pass into apply can be any customized however you want. Let's say we want to # divide the states into four categories: Northeast, Midwest, South, and West We can write a customized # function that returns the region based on the state the state regions information is obtained from Wikipedia def get_state_region(x): northeast = ['Connecticut', 'Maine', 'Massachusetts', 'New Hampshire', 'Rhode Island','Vermont','New York','New Jersey','Pennsylvania'] midwest = ['Illinois','Indiana','Michigan','Ohio','Wisconsin','Iowa', 'Kansas','Minnesota','Missouri','Nebraska','North Dakota', 'South Dakota'] south = ['Delaware','Florida','Georgia','Maryland','North Carolina', 'South Carolina','Virginia','District of Columbia','West Virginia', 'Alabama','Kentucky','Mississippi','Tennessee','Arkansas', 'Louisiana','Oklahoma','Texas'] west = ['Arizona','Colorado','Idaho','Montana','Nevada','New Mexico','Utah', 'Wyoming','Alaska','California','Hawaii','Oregon','Washington'] if x in northeast: return "Northeast" elif x in midwest: return "Midwest" elif x in south: return "South" else: return "West" # Now we have the customized function, let's say we want to create a new column called Region, which shows the # state's region, we can use the customized function and the apply function to do so. The customized function # is supposed to work on the state name column STNAME. So we will set the apply function on the state name # column and pass the customized function into the apply function df['state_region'] = df['STNAME'].apply(lambda x: get_state_region(x)) # Now let's see the results df[['STNAME','state_region']].head() ``` So there are a couple of Pandas idioms. 
But I think there's many more, and I haven't talked about them here. So here's an unofficial assignment for you. Go look at some of the top ranked questions on pandas on Stack Overflow, and look at how some of the more experienced authors, answer those questions. Do you see any interesting patterns? Feel free to share them with myself and others in the class.
github_jupyter
# 讀取字典 ``` import pandas as pd import numpy as np import os filepath = '/Volumes/backup_128G/z_repository/Yumin_data/玉敏_俄羅斯課本的研究' file_dic = '華語八千詞(內含注音字型檔)/Chinese_8000W_20190515_v1.xlsx' book_file = '實用漢語教科書2010_生詞表.xlsx' to_file = 'processed/chinese_8000Words_results.xlsx' # write_level_doc = '{0}/{1}'.format(filepath, to_level_doc) read_dic = '{0}/{1}'.format(filepath, file_dic) read_book = '{0}/{1}'.format(filepath, book_file) write_file = '{0}/{1}'.format(filepath, to_file) dicDf = pd.DataFrame() with pd.ExcelFile(read_dic) as reader: # read sheet by sheet for sheet in reader.sheet_names: # print(sheet) sheetDf = pd.read_excel(reader, sheet, header=None) sheetDf = sheetDf.fillna(0) dicDf = dicDf.append(sheetDf, ignore_index=True) # change to lowercase len(dicDf.index) dicDf.head() dicList = {} for idx in range(0, len(dicDf)): row = dicDf.loc[idx] dicWord = row[0] dicLevel = row[1] if dicWord not in dicList: dicList[dicWord] = [dicLevel] else: # print(dicWord, dicLevel) dicList[dicWord].append(dicLevel) # dicList ``` # 讀取待分析檔 ``` bookDf = pd.read_excel(read_book) bookDf.head() wordDifferentLevel = [] def wordLevel(word): foundLevel = 9 if word in dicList: foundLevel = dicList[word][0] return foundLevel levelList = [] for idx in range(0, len(bookDf)): row = bookDf.loc[idx] chapter = row[0] wtype = row[1] word = row[3] level = wordLevel(word) levelList.append([word, level, wtype, chapter]) # print(chapter, wtype, word) levelDf = pd.DataFrame(levelList) levelDf = levelDf.sort_values(by=[1, 3, 2, 0]) levelDf.head() # levelDf.loc[levelDf[2] == 'A'] # levelDf.loc[levelDf[2] == 'B'] levelDf[~levelDf[2].isin(['A', 'B'])] def statsLevel(INdf): levelCountList = [] for level in range(1, 10): levelCount = INdf[1].loc[INdf[1] == level].count() levelCountList.append(levelCount) levelCountDf = pd.DataFrame(levelCountList) return levelCountDf def statsLessonLevel(INdf): levels = list(range(1, 10)) statDf = pd.DataFrame(levels) lessons = INdf[3].unique() lessons = 
np.sort(lessons) for lesson in lessons: lessonDf = INdf.loc[INdf[3] == lesson] statDf[lesson] = statsLevel(lessonDf) return statDf headers = ['Word', 'Level', 'A/B', 'Lesson'] with pd.ExcelWriter(write_file) as writer: # 1.列出每一個詞的等級 levelDf.to_excel(writer, 'All', index=False, header=headers) # 2.統計每一個等級共有多少字 levels = list(range(1, 10)) levelCountDf = pd.DataFrame(levels) ## A.主要詞彙的統計 major = levelDf.loc[levelDf[2] == 'A'] levelCountDf['A'] = statsLevel(major) ## B.補充詞彙的統計 minor = levelDf.loc[levelDf[2] == 'B'] levelCountDf['B'] = statsLevel(minor) ## C.主要詞彙+補充詞彙的統計 levelCountDf['A/B'] = statsLevel(levelDf) levelCountDf.to_excel(writer, 'Stats', index=False, header=['Level', 'A', 'B', 'A/B']) # 3.統計每一個等級共有多少字 by lesson lessonDf = statsLessonLevel(levelDf) lessonDf.T.to_excel(writer, 'lessons', header=False) # 4.列出不在8000詞的生詞有哪些 wordsNotIn = levelDf.loc[levelDf[1] == 9] wordsNotInDf = pd.DataFrame(wordsNotIn) wordsNotInDf.to_excel(writer, 'WordsNotIn', index=False, header=headers) writer.save() ```
github_jupyter
# The Great Pyramid This is an estimate of the number of people needed to raise stones to the top of the [great pyramid](https://en.wikipedia.org/wiki/Great_Pyramid_of_Giza) using basic physics, such as force, energy, and power. It relies solely on reasonable estimates of known dimensions of the great pyramid and typical human labor capacity. The analysis will show that it is possible for crews of workers to raise 2.5 ton limestones to almost any level using ropes alone. Each crew would stand on an unfinished level and pull wooden sleds carrying stones up the 51.86 degree incline of the pyramid. This solution does not require ramps, pulleys, levers or any other mechanical advantage. It only requires coordination, rope, and well fed crews. If a crew tires after raising a set of stones, they could be quickly replaced by another well rested crew. The analysis will estimate the minimum crew size, number of crews required, the rate at which stones can be raised, and the maneuvering area available at each level. 
The dimensions of the great pyramid are shown below: ![Pyramid](data/Khufu.png) | Parameter | Value | | ----- | ----:| | Total number of stones| 2.5 million | | Average mass of each stone | 2.5 tons | | Total build time | 20 years | | Power available per worker | 200 Calories/day | | Active build time | 3 months/year | | Pyramid slope | 51.86 degrees | | Pyramid height | 146.5 meters | | Pyramid base | 230 m | | Coefficient of friction | 0.3 | | Number of layers | 210 | | Course | Height | Amount of Material | | ------ |:------:| ------------------:| | 1 | 15m | 30% | | 2 | 30m | 22% | | 3 | 50m | 30% | | 4 | 100m | 15% | | 5 | 146m | 3% | ``` from math import * import pandas as pd import matplotlib.pyplot as plt # All values are in SI (MKS) units lbm_per_kg = 2.20462 newtons_per_lbf = 4.44822 joules_per_kcal = 4184 sec_per_day = 24 * 3600 watts_per_hp = 746 # Total number of stones N_s = 2.5e6 # Mass of one stone in kg m_s = 2.5 * 2000 / lbm_per_kg # Total build time in seconds T_b = 20 * 365.25 * sec_per_day # Average available power per crew member in kilocalories (nutrition calorie) P_w_kcal = 200 # Average available power on crew member in Watts P_w = P_w_kcal * joules_per_kcal / sec_per_day # Pyramid slope in radians theta = 51.86*pi/180 # Pyramid base length in meters l_b = 230 # Coefficient of friction between limestone and wood sleds mu = 0.3 # Acceleration of gravity in m/2^s g = 9.81 # Number of layers N_l = 210 # Height of pyramid in meters h_max = 146.5 ``` # Pulling Force It is possible for a crew of men to stand on top of one flat level and simply pull a single stone up the side of a pyramid covered with smooth casing stones. It is expected that smooth casing stones were added at the same time each layer of rough blocks were added, which is very likely. This simple approach does not require large ramps, elaborate machines, deep knowledge, or alien intervention. It just requires many crews of workers pulling on ropes attached to rough stones. 
Of course, a number of additional crews are needed to place stones and align stones properly, but the solutions to those problems are well documented. This analysis focuses solely on the rigging problem of raising stones to the proper level just prior to final placement. The [force required](https://en.wikipedia.org/wiki/Inclined_plane) to pull one stone up the side of the pyramid is $$ F_p = m_s g (sin \theta + \mu cos \theta)$$ Where $m_s$ is the mass of one stone, $g$ is acceleration of gravity, $\theta$ is the pyramid slope, and $\mu$ is the coefficient of friction. Given the parameters above, the pulling force is ``` F_p = m_s * g * (sin(theta) + mu*cos(theta)) print('%.4f N' % F_p) print('%.4f lbf' % (F_p / newtons_per_lbf) ) ``` $$ F_p \approx 21620 N $$ or $$ F_p \approx 4860 lbf $$ This is slightly less than the 5000 lb weight of each stone, which is due to the slope of incline and static friction. Dynamic friction is perhaps lower, so the actual pulling force while in motion may be less. # Energy to Raise Stones Energy is force times distance moved. The distance along the slope up to a height $h$ is $$ d = \frac{h}{sin \theta} $$ Given the force derived earlier, energy required to raise a single stone to a height $h$ is $$ E_s = \frac{F_p h}{sin \theta} $$ For all stones the total energy is $$ E_t = \frac{F_p}{sin \theta} \sum_{i=1}^{m} h N_{blocks} $$ An approximate estimate for comparison is: $$ E_t = \frac{F_p N_s}{sin \theta} (15m \times 0.3 + 30m \times 0.22 + 50m \times 0.3 + 100m \times 0.15 + 146m \times 0.03) $$ The total energy is estimate in two steps: * Compute the total volume to get average block volume * Compute energy per layer given average block volume The iterative computation will be compared with the approximate estimate. 
The total energy is ``` dh = h_max / N_l total_volume = 0 h = 0 tan_theta2 = tan(theta)**2 for i in range(N_l): th = (h_max - h)**2 A_f = 4 * th / tan_theta2 total_volume += dh * A_f h += dh print('Total volume: %.3e m^3' % total_volume) block_volume = total_volume/N_s print('Block volume: %.3e m^3' % block_volume) E_t = 0 h = 0 for i in range(N_l): th = (h_max - h)**2 A_f = 4 * th / tan_theta2 num_blocks = dh * A_f / block_volume E_t += F_p * num_blocks * h / sin(theta) h += dh print('Total energy: %.2e Joules' % E_t) print('Total energy: %.2e kcal' % (E_t/joules_per_kcal)) E_t_approx = F_p * N_s * (15*0.3 + 30*0.22 + 50*0.3 + 100*0.15 + 146*0.03) / sin(theta) print('Approximate: %.2e Joules' % E_t_approx) print('Approximate: %.2e kcal' % (E_t_approx/joules_per_kcal)) ``` The iterative estimate is somewhat less than the approximate energy, which is reasonable. $$ E_t \approx 2.5 \times 10^{12} J $$ or $$ E_t \approx 5.97 \times 10^8 {kcal} $$ # Average Power The average power required to raise all stones is $$ P_{avg} = \frac{E_t}{T_b} $$ ``` P_avg = E_t/T_b print('%.2f W' % (P_avg)) print('%.2f HP' % (P_avg/watts_per_hp)) ``` In watts, the value is: $$ P_{avg} \approx 3960 W $$ In horse power: $$ P_{avg} \approx 5.31 {HP} $$ This surprisingly modest number is due to the 20 year build time for the pyramid. Even though the size of the pyramid is staggering, the build time is equally large. By inspection, we can imagine the number of workers needed to deliver this power, which is not as large as might be expected. 5.3 horse power would be easily available using a few draught animals, but that would require coaxing animals to climb to high levels and repeatedly pulling over a significant distance. This presents several logistical challenges, which might explain why there is little evidence of animal power used to raise stones. 
Humans can stand in one place and pull ropes hand over hand with upper body power or two crews could alternate pulling one set of ropes using lower body power. Perhaps different techniques were used depending on available maneuvering area. # Workforce Size Human are not efficient machines, perhaps 20% thermal efficiency. Given a modest diet where 1000 calories are burned, one worker might deliver 200 calories/day of mechanical work. This is an average power of 9.7 Watts. Assuming work is performed during only one season (one quarter of a year), the total number of workers required to raise all blocks is given by $$ N_w = 4 \frac{P_{avg}}{P_w} $$ The approximate number of workers is ``` N_w = 4 * P_avg / P_w print('%d workers' % N_w) ``` $$ N_w \approx 1635 $$ Other estimates of total workforce are about 10 times this value, which makes sense given resting time, and many other tasks, such as cutting and transporting stones, finish work, food preparation, management, accounting, and other support activities. To lift a single stone, a crew of workers would be required to raise each stone. Assuming each worker can pull 75 lbs, the size of a single lifting crew is $$ N_{lc} = \frac{F_p}{75 lbf} $$ The number of workers in a lifting crew is ``` F_1p = 75 * newtons_per_lbf N_lc = F_p / F_1p print('%.1f workers per lifting crew' % N_lc) ``` $$ N_{lc} \approx 65 $$ That's 65 workers per lifting crew. The total number of crews is $$ N_c = \frac{N_w}{N_{lc}} $$ ``` N_c = N_w / N_lc print('%.1f crews' % N_c) ``` Roughly 25 concurrent crews of 65 people are required just to raise all stones over 20 years. # Stone Raising Rate Assuming all 25 crews are operating concurrently, it is possible to estimate the block raising rate. 200 calories per day of worker output is an average number. Humans are not machines and need rest, so in practice, crews may only raise blocks as little as 4 hours per day. 
Assuming all 200 calories per worker is delivered in a four hour shift, the available peak crew power would be six times the average daily power: $$ P_{cp} = 6 N_{lc} P_w$$ ``` P_cp = 6 * N_lc * P_w print('%.2f W' % (P_cp)) print('%.2f HP' % (P_cp/watts_per_hp)) ``` This value is about 3.8 kW or just a little over 5 horsepower for a crew of 65 workers. This suggests about 13 humans can do the same amount of work as one horse for four hours a day, which seems reasonable. The average velocity of a single block raised by a crew is given by $$ v_{bc} = \frac{P_{cp}}{F_p} $$ ``` feet_per_meter = 3.28084 v_bc = P_cp / F_p print('%.3f m/s' % (v_bc)) print('%.3f ft/s' % (v_bc * feet_per_meter)) ``` The rate along the slope is about 0.17 $m/s$ or 0.57 $ft/s$. To raise one stone to a height h, the time required is $$ t = \frac{h}{v_{bc} sin \theta} $$ ``` h = 30 t = h/(v_bc * sin(theta)) print('%.1f seconds' % (t)) print('%.1f minutes' % (t/60)) ``` To raise one block to a height of 30m, which includes more than 50% of all stones, the time is about 219 seconds or 3.6 minutes. With all 25 crews operating concurrently, one stone could be raised every nine seconds or less. # Logistics Fitting 1635 workers on a level at one time requires room to maneuver. The area available is reduced higher up the pyramid. Assuming all 25 crews are operating concurrently and each worker requires at least $1 m^2$, the minimum area required is $A_c \approx 1635 m^2$. The available area at a height $h$ is $$ A_l = \left(\frac{2 (h_{max} - h)}{tan \theta}\right)^2 $$ Where $l_b$ is the length of the base of the pyramid. The fraction of available maneuvering area is $$ r_m = \frac{A_l-A_c}{A_l} $$ A plot of available maneuvering area and completed volume is shown below. 
``` A_c = N_w dh = h_max / N_l h = 0 tan_theta2 = tan(theta)**2 heights = [] areas = [] volumes = [] volume = 0 for i in range(N_l): th = (h_max - h)**2 A_l = 4 * th / tan_theta2 volume += dh * A_l r_a = (A_l-A_c)/A_l heights.append(h) areas.append(100*r_a) volumes.append(100*(volume/total_volume)) h += dh limit = -40 plt.plot(heights[0:limit], areas[0:limit], label='Maneuvering area', color='blue') plt.plot(heights[0:limit], volumes[0:limit], label='Completed volume', color='red') plt.ylabel('Percentage (%)') plt.xlabel('Height (m)') plt.legend(loc='best') plt.show() limit = -66 print('At a height of %.1f m, %.1f %% of the pyramid is complete.' % (heights[limit], volumes[limit])) ``` Even at a height of 100m, where only 3% of the pyramid remains, more than two times the area required by all 25 lifting crews is still available. This should leave sufficient room for others to position stones after they have been lifted. At 117m, there is just enough room for all 25 crews, so stone placement will slow down. Fortunately, fewer stones are required at the highest levels. # Ramps and Stone Size This theory might explain why there is little evidence of external or internal ramps, simply because a smooth pyramid can act as the ramp itself. It might also explain how large granite blocks were hauled up to the kings chamber. Considering the required rate of block raising, a wide ramp is needed. Narrow ramps that can only support one or two blocks side by side seem like a bottleneck. Ramps with right angles require more time to rotate and orient blocks. Using the sides of the pyramid offers the largest ramp possible on all four sides, so the only limitation would be the number of workers that could be on top at any one time. Even if one set of crews becomes fatigued raising stones, they could be relieved by another crew later in the day. It is possible that two or more shifts of lifting crews were used to minimize fatigue or injury. 
If ropes were long enough, it is possible that workers could have walked down the opposite slope of the pyramid, using their own weight to counter the weight of stones they were attempting to lift. A similar energy analysis can be done using conventional shallow ramps to raise stones. Interestingly, a ramp with a 7% grade requires almost 5 times more energy to raise all 2.5 million stones than using the side of pyramid. Although a shallow ramp reduces the amount of force required to move stones, the distance travelled is much farther, so more energy is lost in friction. Additionally, a conventional ramp requires workers to climb the height of the pyramid along with the stone they are pulling, so they must lift their own weight in addition to the weight of the stone. This requires more energy, which is not used to lift stones. Clearly, it's a highly inefficient strategy. The Egyptians were free to decide how big to make the rough limestone blocks. They could have made them small enough for one person to carry, but they chose not to. After many pyramid construction attempts, they decided that 2.5 ton blocks were small enough to handle without too much difficulty, so raising these stones had to be straightforward. It seems that simply dragging blocks up the side of a smooth pyramid is a straightforward solution that they could have easily developed on their own. It probably seemed so obvious to them that it made no sense to document it. # Summary A crew of about 65 workers can raise 2.5 ton stones using simple ropes alone. Over a 20 year period, 25 concurrent crews totalling roughly 1625 workers are sufficient to raise all 2.5 million stones. There are a number of factors that could reduce the number of workers required. Friction could be reduced using available lubricants or particularly strong, well fed workers could have been selected for this critical role. 
Building the pyramids seems staggering to us today, but that may be due more to our short attention span and availability of powerful machines to do the heavy lifting. We don't stop to consider that a large, organized workforce, all pulling together at the same time, can do a lot of work. It's not magic, just dedication and arithmetic. In the modern day, we expect a return on our investment in a reasonable time, perhaps five or ten years for large public works projects. For the pharoahs, 20 years was a completely acceptable delivery schedule for their investment and exit strategy. To achieve higher rates of return, we build powerful machines that could be operated by a single person. We just don't accept slow progress over a long period of time because our expectations and labor costs are so high. The pharoahs on the other hand, were in the opposite position. They had a large workforce that was willing dedicate themselves to a single cause over a significant part of their lifetime. This dedication is perhaps the real achievement we should admire. Copyright (c) Madhu Siddalingaiah 2020
github_jupyter
# Aeff toy MC ``` import numpy as np from matplotlib import pyplot as plt plt.rcParams['text.usetex'] = False ``` define the function for the selection efficiency, as a function of log10(E): ``` def Eeff(logE): x0=2.7 y=1-1./(np.exp((logE-x0)/0.5)+1) return y ``` <img src="plotEff.png" alt="efficiency" width="300px"> generatig random distributions using numpy arrays: ``` Nev=10000000 # n. of events we simulate E_MIN=100 E_MAX=10000000 # generate energies with E^-1 spectrum, using the inversion method u1=np.random.rand(Nev) energies=E_MIN*np.exp(u1*np.log(E_MAX/E_MIN)) #generate phi angles (even if this is not used at the moment) u2=np.random.rand(Nev) phi = 2*np.pi*u2 #generate theta angles u3=np.random.rand(Nev) theta= np.arccos(u3) #generate x,y impact points with -1<x<1, -1<y<1 (2x2 m^2) x= -1+2*np.random.rand(Nev) y=-1+2*np.random.rand(Nev) #theta1=[40/180 *np.pi for i in range(0,int(0.6*Nev))] #theta2=[60/180 *np.pi for i in range(0,int(0.4*Nev))] #theta=np.concatenate((theta1,theta2)) ``` apply cuts using array masks: ``` #energy mask using the hit/miss method. 
u5=np.random.rand(Nev) pe=Eeff(np.log10(energies)) e_mask=u5<pe # keep the cases where u < y #combine all cuts: totalMask=(e_mask& (x>-np.sqrt(2)/2)& (x<np.sqrt(2)/2) & (y>-np.sqrt(2)/2)& (y<np.sqrt(2)/2))# detector dimensions -0.5-0.5 on both axis ``` plot the distributions: Energy: (note that the resulting histo include all cuts, not only on energy) ``` plt.figure(1,figsize=(7,5)) plt.hist(np.log10(energies[totalMask]), bins=50, alpha=1, histtype='step', label='final E') plt.hist(np.log10(energies), bins=50, alpha=1, histtype='step',label='simulated E') plt.legend() plt.xlabel('log10(E)') plt.ylabel('counts') plt.savefig("counts(E).png") plt.show() ``` theta and phi: ``` plt.figure(2,figsize=(7,5)) plt.hist(theta[totalMask], bins=50, alpha=1, histtype='step', label='theta final') plt.hist(theta, bins=50, alpha=1, histtype='step', label='theta simulated') #plt.figure(3) plt.hist(phi[totalMask], bins=50, alpha=1, histtype='step', label='phi final') plt.hist(phi, bins=50, alpha=1, histtype='step', label='phi simulated') plt.xlabel('[rad]') plt.ylabel('counts') plt.legend() plt.savefig("counts(angle).png") plt.show() ``` x,y ``` plt.figure(3,figsize=(7,5)) plt.hist(x[totalMask], bins=50, alpha=1, histtype='step', label='x final') plt.hist(x, bins=50, alpha=1, histtype='step', label='x simulated') #plt.figure(3) plt.hist(y[totalMask], bins=50, alpha=1, histtype='step', label='y final') plt.hist(y, bins=50, alpha=1, histtype='step', label='y simulated') plt.xlabel('[m]') plt.ylabel('counts') plt.legend() plt.savefig("counts(area).png") plt.show() ``` create rensponse matrix: ``` nBins2d=40 H,xbins,ybins=np.histogram2d(np.log10(energies[totalMask]),np.cos(theta[totalMask]),nBins2d) # histogram normaliztion normFactor=Nev/(2*nBins2d**2) # the factor 4 comes form the area reatio H=H/(normFactor) # plot #y=[i/np.pi*180 for i in ybins] fig, ax1 = plt.subplots(figsize=(8, 5)) p=ax1.pcolormesh(xbins,ybins,H.T, cmap='rainbow') bar=fig.colorbar(p, ax=ax1, extend='both') 
bar.ax.set_ylabel('Aeff [m^2]', rotation=270,labelpad=13,fontsize=13) ax1.set_xlabel('log10(E)',fontsize=14) ax1.set_ylabel('theta[°]',fontsize=14) plt.savefig("Aeff_costheta.png") plt.show() y ybins nBins2d=40 H,xbins,ybins=np.histogram2d(np.log10(energies[totalMask]),theta[totalMask],nBins2d) # histogram normaliztion normFactor=Nev/(2*nBins2d**2) # the factor 4 comes form the area reatio H=H/(normFactor) # plot y=[i/np.pi*180 for i in ybins] fig, ax1 = plt.subplots(figsize=(8, 5)) p=ax1.pcolormesh(xbins,y,H.T, cmap='rainbow') bar=fig.colorbar(p, ax=ax1, extend='both') bar.ax.set_ylabel('Aeff [m^2]', rotation=270,labelpad=13,fontsize=13) ax1.set_xlabel('log10(E)',fontsize=14) ax1.set_ylabel('theta[°]',fontsize=14) #ax1.set_xscale("log") plt.savefig("Aeff_theta.png") plt.show() # Finallly slect one bin in cos(theta) and plot Aeff vs log10(E) for that bin plt.figure(5) halfxbin=(xbins[1]-xbins[0])/2. plt.plot(xbins[:-1]+halfxbin, H.T[18],label="40°" ) plt.plot(xbins[:-1]+halfxbin, H.T[27],label="60°" ) plt.legend() plt.xlabel('log10(E)') plt.ylabel('Aeff [m]') plt.savefig("40_60") plt.show() mean=[0.6*H.T[18][i]+0.4*H.T[27][i] for i in range(0,len(H.T[18]))] # Finallly slect one bin in cos(theta) and plot Aeff vs log10(E) for that bin plt.figure(5) halfxbin=(xbins[1]-xbins[0])/2. plt.plot(xbins[:-1]+halfxbin, mean ) plt.title("Mean" ) plt.xlabel('log10(E)') plt.ylabel('Aeff [m]') plt.savefig("Mean") plt.show() # Finallly slect one bin in cos(theta) and plot Aeff vs log10(E) for that bin plt.figure(5) halfxbin=(xbins[1]-xbins[0])/2. plt.plot(xbins[:-1]+halfxbin, H.T[18],label="40°" ) plt.plot(xbins[:-1]+halfxbin, H.T[27],label="60°" ) plt.plot(xbins[:-1]+halfxbin,mean,label="mean" ) plt.legend() plt.xlabel('log10(E)') plt.ylabel('Aeff [m]') plt.savefig("Aeff_all") plt.show() ```
github_jupyter
# Importing from the COVID Tracking Project This script pulls data from the API provided by the [COVID Tracking Project](https://covidtracking.com/). They're collecting data from 50 US states, the District of Columbia, and five U.S. territories to provide the most comprehensive testing data. They attempt to include positive and negative results, pending tests and total people tested for each state or district currently reporting that data.. ``` import pandas as pd import requests import json import datetime import pycountry # papermill parameters output_folder = '../output/' raw_response = requests.get("https://covidtracking.com/api/states/daily").text raw_data = pd.DataFrame.from_dict(json.loads(raw_response)) ``` ### Data Quality 1. Replace empty values with zero 2. Convert "date" int column to "Date" datetime column 4. Rename columns in order to match with other source 5. Drop unnecessary columns 6. Add "Country/Region" column, since the source contains data from US states, it can be hardcoded ``` data = raw_data.fillna(0) data['Date'] = pd.to_datetime(data['date'].astype(str), format='%Y%m%d') data = data.rename( columns={ "state": "ISO3166-2", "positive": "Positive", "negative": "Negative", "pending": "Pending", "death": "Death", "totalTestResults": "Total", "hospitalized": "Hospitalized" }) data = data.drop(labels=['dateChecked', "date"], axis='columns') data['Country/Region'] = "United States" data['ISO3166-1'] = "US" states = {k.code.replace("US-", ""): k.name for k in pycountry.subdivisions.get(country_code="US")} data["Province/State"] = data["ISO3166-2"].apply(lambda x: states[x]) ``` ## Sorting data by Province/State before calculating the daily differences ``` sorted_data = data.sort_values(by=['Province/State'] + ['Date'], ascending=True) sorted_data['Positive_Since_Previous_Day'] = sorted_data['Positive'] - sorted_data.groupby(['Province/State'])["Positive"].shift(1, fill_value=0) sorted_data['Total_Since_Previous_Day'] = sorted_data['Total'] - 
sorted_data.groupby(['Province/State'])["Total"].shift(1, fill_value=0) sorted_data['Negative_Since_Previous_Day'] = sorted_data['Negative'] - sorted_data.groupby(['Province/State'])["Negative"].shift(1, fill_value=0) sorted_data['Pending_Since_Previous_Day'] = sorted_data['Pending'] - sorted_data.groupby(['Province/State'])["Pending"].shift(1, fill_value=0) sorted_data['Death_Since_Previous_Day'] = sorted_data['Death'] - sorted_data.groupby(['Province/State'])["Death"].shift(1, fill_value=0) sorted_data['Hospitalized_Since_Previous_Day'] = sorted_data['Hospitalized'] - sorted_data.groupby(['Province/State'])["Hospitalized"].shift(1, fill_value=0) ``` ## Rearrange columns ``` rearranged_data = sorted_data.filter(items=['Country/Region', 'Province/State', 'Date', 'Positive', 'Positive_Since_Previous_Day', 'Negative', 'Negative_Since_Previous_Day', 'Pending', 'Pending_Since_Previous_Day', 'Death', 'Death_Since_Previous_Day', 'Hospitalized', 'Hospitalized_Since_Previous_Day', 'Total', 'Total_Since_Previous_Day', 'ISO3166-1', 'ISO3166-2']) ``` ## Add `Last_Update_Date` ``` rearranged_data.loc[:, "Last_Update_Date"] = datetime.datetime.utcnow() ``` ## Export to CSV ``` rearranged_data.to_csv(output_folder + "CT_US_COVID_TESTS.csv", index=False) ```
github_jupyter
# Time series analysis and visualization ``` # Hide all warnings import warnings warnings.simplefilter('ignore') import numpy as np import pandas as pd %matplotlib inline import matplotlib.pyplot as plt import statsmodels as sm import statsmodels.api from tqdm import tqdm from pylab import rcParams # Run-Control (default) parameters rcParams['figure.figsize'] = 16, 8 rcParams['lines.linewidth'] = 4 rcParams['font.size'] = 26 ``` <br> ## Time series analysis is for * compact **dynamics description** of observable processes * interpretation of dynamics and **estimation of impulse response** * **forecasting** and simulation * solution **optimal control** problems <br> ## The objective of time series analysis Construct a model of time series for _current value_ of **endogeneous** variable $y_t$ * by the _history_ of itself $$y_{:t} = (y_{t-1}, y_{t-2}, \ldots)$$ * by _current value_ of **exogeneous** variables $x_t$ and possibly by its _history_ too $$ y_t \approx \text{model}\bigl( t,\, y_{:t},\, x_t,\, x_{:t} \bigr) \,. $$ Usually one forecasts a single time step ahead. <br> ## Difference from other Machine Learning tasks * Data are sequential * order of **time** has to be respected strictly due to not break the causality * Much attention to **extrapolation** — a forecast of future values related to observed sample * It is important to be sure that data do not leak from future to current and to past observations of train subsample during feature engineering and training the model Thus features of the model can depend only on * **endogeneous** variables $y_{t-1}, y_{t-2}, \ldots$, i.e. they are available to the moment $t-1$ _inclusively_ * **exogeneous** variables $x_t, x_{t-1}, \ldots$, i.e. 
they are available to the moment $t$ _inclusively_ <br> ## $CO_2$ concentration in atmosphere [dataset](https://www.co2.earth/weekly-co2) ``` dataset = pd.read_csv('./mauna_loa_atmospheric_c02.csv', index_col=None, usecols=['date', 'WMLCO2']) dataset.head() ``` When you loads a time series within `Pandas` you have to set format of date and time explicitly ``` dataset['date'] = pd.to_datetime(dataset['date'], format='%Y-%m-%d') ``` Create the index for loaded data: it will be **weekly periodical index**. We will get data with regular frequency. ``` dataset = dataset.set_index('date').to_period('W') dataset.head() ``` Plot dynamics of the time series ``` dataset.plot() plt.grid(which='major', axis='both') ``` Aggregate weekly data to monthly ``` dataset = dataset.to_timestamp() dataset = dataset.resample('M').mean() dataset.head() dataset.plot() plt.grid(which='major', axis='both') ``` Create summary statistics ``` print('Series {1}, Observations {0}'.format(*dataset.shape)) dataset.describe().T.head() dataset.loc['1960':'1967'].plot() plt.grid(which='major', axis='both') ``` ### Missed values ``` maginfy_slice = slice('1960', '1967') ``` Missed values can be filled by 1) last known observable * **+** doesn't look through the future * **-** can't fill the beginning of the series * **-** doesn't account specificity of the series ``` dataset_ff = dataset.fillna(method='ffill') dataset_ff.loc[maginfy_slice].plot() plt.grid(which='major', axis='both') ``` 2) iterpolation of the neighboring values * **+** smooth peaks * **-** doesn't fill the ends of the series * **-** slightly look through the future ``` dataset_linterp = dataset.interpolate(method='linear') dataset_pinterp = dataset.interpolate(method='polynomial', order=2) ax = dataset_pinterp.loc[maginfy_slice].plot() dataset_linterp.loc[maginfy_slice].plot(ax=ax, linewidth=4, alpha=0.7) plt.grid(which='major', axis='both') ``` 3) exlude at all * **+** doesn't change the values * **-** break the regularity and related 
periodicity * **-** deplete the sampling ``` dataset_drop = dataset.dropna() dataset_drop.loc[maginfy_slice].plot() plt.grid(which='major', axis='both') ``` 4) estimate by probabilty model * **+** filling based on extracted patterns (learned dependencies) * **-** it is needed to specify the model and to train it 5) smooth by splines or by local kernel model * **+** explicitly accounts close in time observations * **+** allows to increase the frequency of observations ("_resolution_") * **+** allows to fill missed boundary values * **-** look through the future far * **-** it is needed to define th kernel and the model for extrapolation Looking into the future can be ignorred if **missed values are minority**. But if missed values are majority then it is needed to understand why it is happened in the sampling. ``` full_dataset = dataset_pinterp ``` Prepare train and test samplings in the ratio 3 to 1 ``` holdout = full_dataset.loc['1991-01-01':] dataset = full_dataset.loc[:'1990-12-31'] print(len(dataset), len(holdout)) ``` Make sure the parts don't intersect ``` pd.concat([ dataset.tail(), holdout.head() ], axis=1) ``` Store the bounds of the intervals explicitly ``` holdout_slice = slice(*holdout.index[[0, -1]]) print('Train sample from {} to {}'.format(*dataset.index[[0, -1]])) print('Test sample from {} to {}'.format(holdout_slice.start, holdout_slice.stop)) ``` Select the column of target variable ``` target_column = 'WMLCO2' fig = plt.figure() ax = fig.add_subplot(111, xlabel='Date', ylabel='value', title=target_column) # 111 means 1 row 1 column 1st axes on the "grid" # plot dynamics of entire time series full_dataset[target_column].plot(ax=ax) # highlight delayed interval for testing ax.axvspan(holdout_slice.start, holdout_slice.stop, color='C1', alpha=0.25, zorder=-99) ax.grid(which='major', axis='both'); ``` <br> # A property **Stationarity** is a property of a process $\{y_t\}_{t\geq0}$ meaning > probabilistic interconnections in the set 
$(y_{t_1},\,\ldots,\,y_{t_m})$ are invariant with respect to shift $s \neq 0$. That means * **there are no special moments** in the time when statistical properties of observables are changing * patterns are stable in time and are determined by **indentation of observables** relative to each other: * mean, dispersion, and autocorrelation doesn't depend on moment of time ## A ghost property Stochastic processes in real problems are **almost always non-stationary** * mean depends on time (there is a trend in the dynamics) * calendar events (holidays or vacations) * season periodicity * daily rhythm of power grid load * season temperature * yearly peak of monthly inflation in the beginning of year * unpredictable structural drift * political decisions * blackouts * hysteresis Thus majority of time series especially economic, climatic, and financial are non-stationary. <br> # Visualization and diagnosis of non-stationarity Visualization in time series analysis allows to * get preliminary picture of correlations * select reasonable strategy of validation a model * estimate if there is structural drift * leaps and gaps * clusters of intensive oscillations or periods of plateau * diagnose non-stationarity: trend, seasonality, etc. 
def rolling_diagnostics(series, window=500):
    """Plot moving average and moving standard deviation of *series*.

    Parameters
    ----------
    series : pd.Series
        Time series to diagnose.
    window : int, optional
        Length of the rolling window (number of observations).

    Returns
    -------
    matplotlib.figure.Figure
        Figure with two stacked panels: rolling mean (top) and
        rolling standard deviation (bottom), sharing the x axis.
    """
    windowed = series.rolling(window)

    # Two stacked panels with a shared time axis.
    fig = plt.figure()
    ax_mean = fig.add_subplot(211, title='Moving average',
                              xlabel='Date', ylabel='value')
    ax_std = fig.add_subplot(212, title='Moving standard deviation',
                            sharex=ax_mean, xlabel='Date', ylabel='std.')

    # Top panel: rolling mean over a faded copy of the raw series.
    windowed.mean().plot(ax=ax_mean)
    series.plot(ax=ax_mean, color='black', lw=2, alpha=.25, zorder=-10)
    ax_mean.grid(which='major', axis='both')

    # Bottom panel: rolling scatter (standard deviation).
    windowed.std().plot(ax=ax_std)
    ax_std.grid(which='major', axis='both')

    fig.tight_layout()
    return fig
def monthly_seasonality_diagnostics(series, fraction=0.66, period='month'):
    """Plot the LOWESS trend of *series* and the seasonality of its residuals.

    Parameters
    ----------
    series : pd.Series
        Time series with a datetime-like index.
    fraction : float, optional
        Fraction of the data used for each local regression in LOWESS.
    period : str, optional
        Name of the calendar attribute of the index to group by
        (e.g. 'month', 'week', 'dayofweek').

    Returns
    -------
    matplotlib.figure.Figure
        Top panel: series with its estimated trend.
        Bottom panel: mean seasonal deviation with a 90% normal band.
    """
    # Non-parametric local linear regression gives a rough trend estimate.
    trend = sm.api.nonparametric.lowess(series, np.r_[:len(series)],
                                        frac=fraction, it=5)

    # Group de-trended residuals by the calendar attribute named by *period*.
    # BUG FIX: the original fallback was the literal string 'month', which
    # pandas groupby would treat as a level/column name instead of the
    # index's month attribute; fall back to that attribute explicitly.
    by = getattr(series.index, period, series.index.month)
    season_groupby = (series - trend[:, 1]).groupby(by)
    seas_mean, seas_std = season_groupby.mean(), season_groupby.std()

    # Create subplots
    fig = plt.figure()
    ax_top = fig.add_subplot(211, title='Trend', xlabel='Date')
    ax_bottom = fig.add_subplot(212, title='Seasonality', xlabel=period)

    # The series and the trend
    pd.Series(trend[:, 1], index=series.index).plot(ax=ax_top)
    series.plot(ax=ax_top, color="black", lw=2, alpha=.25, zorder=-10)
    ax_top.grid(which="major", axis="both")

    # Seasonality and 90% normal confidence interval
    ax_bottom.plot(1 + np.r_[:len(seas_mean)], seas_mean, lw=2)
    ax_bottom.fill_between(1 + np.r_[:len(seas_mean)],
                           seas_mean - 1.96 * seas_std,
                           seas_mean + 1.96 * seas_std,
                           zorder=-10, color="C1", alpha=0.15)
    ax_bottom.grid(which="major", axis="both")

    fig.tight_layout()
    return fig
def correlation_diagnostics(series, lags=60):
    """Plot sample ACF and PACF of *series* side by side.

    Parameters
    ----------
    series : pd.Series
        Time series to analyse.
    lags : int, optional
        Number of lags to display on both plots.

    Returns
    -------
    matplotlib.figure.Figure
        Left panel: sample autocorrelation; right panel: sample
        partial autocorrelation, with shared axes and 95% bands.
    """
    fig = plt.figure(figsize=(20, 6))
    acf_ax, pacf_ax = fig.subplots(
        nrows=1, ncols=2, sharey=True, sharex=True,
        subplot_kw={'xlabel': 'lag', 'ylim': (-1.1, 1.1)})

    # Delegate the actual estimation and banding to statsmodels.
    plot_acf(series, acf_ax, lags=lags, zero=False, alpha=0.05,
             title='Sample autocorrelation', marker=None)
    plot_pacf(series, pacf_ax, lags=lags, zero=False, alpha=0.05,
              title='Sample partial autocorrelation', marker=None)

    fig.tight_layout()
    return fig
github_jupyter
<center> <img src="../../img/ods_stickers.jpg"> ## Открытый курс по машинному обучению <center>Автор материала: Ефремова Дина (@ldinka). # <center>Исследование возможностей BigARTM</center> ## <center>Тематическое моделирование с помощью BigARTM</center> #### Интро BigARTM — библиотека, предназначенная для тематической категоризации текстов; делает разбиение на темы без «учителя». Я собираюсь использовать эту библиотеку для собственных нужд в будущем, но так как она не предназначена для обучения с учителем, решила, что для начала ее стоит протестировать на какой-нибудь уже размеченной выборке. Для этих целей был использован датасет "20 news groups". Идея экперимента такова: - делим выборку на обучающую и тестовую; - обучаем модель на обучающей выборке; - «подгоняем» выделенные темы под действительные; - смотрим, насколько хорошо прошло разбиение; - тестируем модель на тестовой выборке. #### Поехали! **Внимание!** Данный проект был реализован с помощью Python 3.6 и BigARTM 0.9.0. Методы, рассмотренные здесь, могут отличаться от методов в других версиях библиотеки. <img src="../../img/bigartm_logo.png"/> ### <font color="lightgrey">Не</font>множко теории У нас есть словарь терминов $W = \{w \in W\}$, который представляет из себя мешок слов, биграмм или n-грамм; Есть коллекция документов $D = \{d \in D\}$, где $d \subset W$; Есть известное множество тем $T = \{t \in T\}$; $n_{dw}$ — сколько раз термин $w$ встретился в документе $d$; $n_{d}$ — длина документа $d$. 
Мы считаем, что существует матрица $\Phi$ распределения терминов $w$ в темах $t$: (фи) $\Phi = (\phi_{wt})$ и матрица распределения тем $t$ в документах $d$: (тета) $\Theta = (\theta_{td})$, переумножение которых дает нам тематическую модель, или, другими словами, представление наблюдаемого условного распределения $p(w|d)$ терминов $w$ в документах $d$ коллекции $D$: <center>$\large p(w|d) = \Phi \Theta$</center> <center>$$\large p(w|d) = \sum_{t \in T} \phi_{wt} \theta_{td}$$</center> где $\phi_{wt} = p(w|t)$ — вероятности терминов $w$ в каждой теме $t$ и $\theta_{td} = p(t|d)$ — вероятности тем $t$ в каждом документе $d$. <img src="../../img/phi_theta.png"/> Нам известны наблюдаемые частоты терминов в документах, это: <center>$ \large \hat{p}(w|d) = \frac {n_{dw}} {n_{d}} $</center> Таким образом, наша задача тематического моделирования становится задачей стохастического матричного разложения матрицы $\hat{p}(w|d)$ на стохастические матрицы $\Phi$ и $\Theta$. Напомню, что матрица является стохастической, если каждый ее столбец представляет дискретное распределение вероятностей, сумма значений каждого столбца равна 1. Воспользовавшись принципом максимального правдоподобия, т. е. максимизируя логарифм правдоподобия, мы получим: <center>$ \begin{cases} \sum_{d \in D} \sum_{w \in d} n_{dw} \ln \sum_{t \in T} \phi_{wt} \theta_{td} \rightarrow \max\limits_{\Phi,\Theta};\\ \sum_{w \in W} \phi_{wt} = 1, \qquad \phi_{wt}\geq0;\\ \sum_{t \in T} \theta_{td} = 1, \quad\quad\;\; \theta_{td}\geq0. \end{cases} $</center> Чтобы из множества решений выбрать наиболее подходящее, введем критерий регуляризации $R(\Phi, \Theta)$: <center>$ \begin{cases} \sum_{d \in D} \sum_{w \in d} n_{dw} \ln \sum_{t \in T} \phi_{wt} \theta_{td} + R(\Phi, \Theta) \rightarrow \max\limits_{\Phi,\Theta};\\ \sum_{w \in W} \phi_{wt} = 1, \qquad \phi_{wt}\geq0;\\ \sum_{t \in T} \theta_{td} = 1, \quad\quad\;\; \theta_{td}\geq0. 
\end{cases} $</center> Два наиболее известных частных случая этой системы уравнений: - **PLSA**, вероятностный латентный семантический анализ, когда $R(\Phi, \Theta) = 0$ - **LDA**, латентное размещение Дирихле: $$R(\Phi, \Theta) = \sum_{t,w} (\beta_{w} - 1) \ln \phi_{wt} + \sum_{d,t} (\alpha_{t} - 1) \ln \theta_{td} $$ где $\beta_{w} > 0$, $\alpha_{t} > 0$ — параметры регуляризатора. Однако оказывается запас неединственности решения настолько большой, что на модель можно накладывать сразу несколько ограничений, такой подход называется **ARTM**, или аддитивной регуляризацией тематических моделей: <center>$ \begin{cases} \sum_{d,w} n_{dw} \ln \sum_{t} \phi_{wt} \theta_{td} + \sum_{i=1}^k \tau_{i} R_{i}(\Phi, \Theta) \rightarrow \max\limits_{\Phi,\Theta};\\ \sum_{w \in W} \phi_{wt} = 1, \qquad \phi_{wt}\geq0;\\ \sum_{t \in T} \theta_{td} = 1, \quad\quad\;\; \theta_{td}\geq0. \end{cases} $</center> где $\tau_{i}$ — коэффициенты регуляризации. Теперь давайте познакомимся с библиотекой BigARTM и разберем еще некоторые аспекты тематического моделирования на ходу. Если Вас очень сильно заинтересовала теоретическая часть категоризации текстов и тематического моделирования, рекомендую посмотреть видеолекции из курса Яндекса на Coursera «Поиск структуры в данных» четвертой недели: <a href="https://www.coursera.org/learn/unsupervised-learning/home/week/4">Тематическое моделирование</a>. ### BigARTM #### Установка Естественно, для начала работы с библиотекой ее надо установить. 
Вот несколько видео, которые рассказывают, как это сделать в зависимости от вашей операционной системы: - <a href="https://www.coursera.org/learn/unsupervised-learning/lecture/qmsFm/ustanovka-bigartm-v-windows">Установка BigARTM в Windows</a> - <a href="https://www.coursera.org/learn/unsupervised-learning/lecture/zPyO0/ustanovka-bigartm-v-linux-mint">Установка BigARTM в Linux</a> - <a href="https://www.coursera.org/learn/unsupervised-learning/lecture/nuIhL/ustanovka-bigartm-v-mac-os-x">Установка BigARTM в Mac OS X</a> Либо можно воспользоваться инструкцией с официального сайта, которая, скорее всего, будет гораздо актуальнее: <a href="https://bigartm.readthedocs.io/en/stable/installation/index.html">здесь</a>. Там же указано, как можно установить BigARTM в качестве <a href="https://bigartm.readthedocs.io/en/stable/installation/docker.html">Docker-контейнера</a>. #### Использование BigARTM ``` import artm import re import numpy as np import seaborn as sns; sns.set() from sklearn.metrics import classification_report, confusion_matrix from sklearn.preprocessing import normalize from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from matplotlib import pyplot as plt %matplotlib inline artm.version() ``` Скачаем датасет ***the 20 news groups*** с заранее известным количеством категорий новостей: ``` from sklearn.datasets import fetch_20newsgroups newsgroups = fetch_20newsgroups('../../data/news_data') newsgroups['target_names'] ``` Приведем данные к формату *Vowpal Wabbit*. Так как BigARTM не рассчитан на обучение с учителем, то мы поступим следующим образом: - обучим модель на всем корпусе текстов; - выделим ключевые слова тем и по ним определим, к какой теме они скорее всего относятся; - сравним наши полученные результаты разбиения с истинными значенями. 
# Name of the single text modality/namespace used in the VW files.
TEXT_FIELD = "text"

def to_vw_format(document, label=None):
    """Convert *document* to a one-line Vowpal Wabbit example.

    Parameters
    ----------
    document : str
        Raw text; it is lowercased and only word tokens of 3+ word
        characters are kept.
    label : optional
        Class label; falsy labels (including None) become the
        placeholder '0'.

    Returns
    -------
    str
        A line of the form ``'<label> |text tok1 tok2 ...\\n'``.
    """
    # Raw string fixes the invalid '\w' escape sequence of the original
    # (a SyntaxWarning on modern Python); runtime pattern is unchanged.
    tokens = re.findall(r'\w{3,}', document.lower())
    return str(label or '0') + ' |' + TEXT_FIELD + ' ' + ' '.join(tokens) + '\n'
После того, как BigARTM создал батчи из данных, можно использовать их для загрузки: ``` batch_vectorizer = artm.BatchVectorizer(data_path="news_batches", data_format='batches') ``` Инициируем модель с известным нам количеством тем. Количество тем — это гиперпараметр, поэтому если он заранее нам неизвестен, то его необходимо настраивать, т. е. брать такое количество тем, при котором разбиение кажется наиболее удачным. **Важно!** У нас 20 предметных тем, однако некоторые из них довольно узкоспециализированны и смежны, как например 'comp.os.ms-windows.misc' и 'comp.windows.x', или 'comp.sys.ibm.pc.hardware' и 'comp.sys.mac.hardware', тогда как другие размыты и всеобъемлющи: talk.politics.misc' и 'talk.religion.misc'. Скорее всего, нам не удастся в чистом виде выделить все 20 тем — некоторые из них окажутся слитными, а другие наоборот раздробятся на более мелкие. Поэтому мы попробуем построить 40 «предметных» тем и одну фоновую. Чем больше вы будем строить категорий, тем лучше мы сможем подстроиться под данные, однако это довольно трудоемкое занятие сидеть потом и распределять в получившиеся темы по реальным категориям (<strike>я правда очень-очень задолбалась!</strike>). Зачем нужны фоновые темы? Дело в том, что наличие общей лексики в темах приводит к плохой ее интерпретируемости. Выделив общую лексику в отдельную тему, мы сильно снизим ее количество в предметных темах, таким образом оставив там лексическое ядро, т. е. ключевые слова, которые данную тему характеризуют. Также этим преобразованием мы снизим коррелированность тем, они станут более независимыми и различимыми. ``` T = 41 model_artm = artm.ARTM(num_topics=T, topic_names=[str(i) for i in range(T)], class_ids={TEXT_FIELD:1}, num_document_passes=1, reuse_theta=True, cache_theta=True, seed=4) ``` Передаем в модель следующие параметры: - *num_topics* — количество тем; - *topic_names* — названия тем; - *class_ids* — название модальности и ее вес. 
Дело в том, что кроме самих текстов, в данных может содержаться такая информация, как автор, изображения, ссылки на другие документы и т. д., по которым также можно обучать модель; - *num_document_passes* — количество проходов при обучении модели; - *reuse_theta* — переиспользовать ли матрицу $\Theta$ с предыдущей итерации; - *cache_theta* — сохранить ли матрицу $\Theta$ в модели, чтобы в дальнейшем ее использовать. Далее необходимо создать словарь; передадим ему какое-нибудь название, которое будем использовать в будущем для работы с этим словарем. ``` DICTIONARY_NAME = 'dictionary' dictionary = artm.Dictionary(DICTIONARY_NAME) dictionary.gather(batch_vectorizer.data_path) ``` Инициализируем модель с тем именем словаря, что мы передали выше, можно зафиксировать *random seed* для вопроизводимости результатов: ``` np.random.seed(1) model_artm.initialize(DICTIONARY_NAME) ``` Добавим к модели несколько метрик: - перплексию (*PerplexityScore*), чтобы индентифицировать сходимость модели * Перплексия — это известная в вычислительной лингвистике мера качества модели языка. Можно сказать, что это мера неопределенности или различности слов в тексте. - специальный *score* ключевых слов (*TopTokensScore*), чтобы в дальнейшем мы могли идентифицировать по ним наши тематики; - разреженность матрицы $\Phi$ (*SparsityPhiScore*); - разреженность матрицы $\Theta$ (*SparsityThetaScore*). ``` model_artm.scores.add(artm.PerplexityScore(name='perplexity_score', dictionary=DICTIONARY_NAME)) model_artm.scores.add(artm.SparsityPhiScore(name='sparsity_phi_score', class_id="text")) model_artm.scores.add(artm.SparsityThetaScore(name='sparsity_theta_score')) model_artm.scores.add(artm.TopTokensScore(name="top_words", num_tokens=15, class_id=TEXT_FIELD)) ``` Следующая операция *fit_offline* займет некоторое время, мы будем обучать модель в режиме *offline* в 40 проходов. Количество проходов влияет на сходимость модели: чем их больше, тем лучше сходится модель. 
``` %%time model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=40) ``` Построим график сходимости модели и увидим, что модель сходится довольно быстро: ``` plt.plot(model_artm.score_tracker["perplexity_score"].value); ``` Выведем значения разреженности матриц: ``` print('Phi', model_artm.score_tracker["sparsity_phi_score"].last_value) print('Theta', model_artm.score_tracker["sparsity_theta_score"].last_value) ``` После того, как модель сошлась, добавим к ней регуляризаторы. Для начала сглаживающий регуляризатор — это *SmoothSparsePhiRegularizer* с большим положительным коэффициентом $\tau$, который нужно применить только к фоновой теме, чтобы выделить в нее как можно больше общей лексики. Пусть тема с последним индексом будет фоновой, передадим в *topic_names* этот индекс: ``` model_artm.regularizers.add(artm.SmoothSparsePhiRegularizer(name='SparsePhi', tau=1e5, dictionary=dictionary, class_ids=TEXT_FIELD, topic_names=str(T-1))) ``` Дообучим модель, сделав 20 проходов по ней с новым регуляризатором: ``` %%time model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=20) ``` Выведем значения разреженности матриц, заметим, что значение для $\Theta$ немного увеличилось: ``` print('Phi', model_artm.score_tracker["sparsity_phi_score"].last_value) print('Theta', model_artm.score_tracker["sparsity_theta_score"].last_value) ``` Теперь добавим к модели разреживающий регуляризатор, это тот же *SmoothSparsePhiRegularizer* резуляризатор, только с отрицательным значением $\tau$ и примененный ко всем предметным темам: ``` model_artm.regularizers.add(artm.SmoothSparsePhiRegularizer(name='SparsePhi2', tau=-5e5, dictionary=dictionary, class_ids=TEXT_FIELD, topic_names=[str(i) for i in range(T-1)]), overwrite=True) %%time model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=20) ``` Видим, что значения разреженности увеличились еще больше: ``` print(model_artm.score_tracker["sparsity_phi_score"].last_value) 
print(model_artm.score_tracker["sparsity_theta_score"].last_value) ``` Посмотрим, сколько категорий-строк матрицы $\Theta$ после регуляризации осталось, т. е. не занулилось/выродилось. И это одна категория: ``` len(model_artm.score_tracker["top_words"].last_tokens.keys()) ``` Теперь выведем ключевые слова тем, чтобы определить, каким образом прошло разбиение, и сделать соответствие с нашим начальным списком тем: ``` for topic_name in model_artm.score_tracker["top_words"].last_tokens.keys(): tokens = model_artm.score_tracker["top_words"].last_tokens res_str = topic_name + ': ' + ', '.join(tokens[topic_name]) print(res_str) ``` Далее мы будем подгонять разбиение под действительные темы с помощью *confusion matrix*. ``` target_dict = { 'alt.atheism': 0, 'comp.graphics': 1, 'comp.os.ms-windows.misc': 2, 'comp.sys.ibm.pc.hardware': 3, 'comp.sys.mac.hardware': 4, 'comp.windows.x': 5, 'misc.forsale': 6, 'rec.autos': 7, 'rec.motorcycles': 8, 'rec.sport.baseball': 9, 'rec.sport.hockey': 10, 'sci.crypt': 11, 'sci.electronics': 12, 'sci.med': 13, 'sci.space': 14, 'soc.religion.christian': 15, 'talk.politics.guns': 16, 'talk.politics.mideast': 17, 'talk.politics.misc': 18, 'talk.religion.misc': 19 } mixed = [ 'comp.sys.ibm.pc.hardware', 'talk.politics.mideast', 'sci.electronics', 'rec.sport.hockey', 'sci.med', 'rec.motorcycles', 'comp.graphics', 'rec.sport.hockey', 'talk.politics.mideast', 'talk.religion.misc', 'rec.autos', 'comp.graphics', 'sci.space', 'soc.religion.christian', 'comp.os.ms-windows.misc', 'sci.crypt', 'comp.windows.x', 'misc.forsale', 'sci.space', 'sci.crypt', 'talk.religion.misc', 'alt.atheism', 'comp.os.ms-windows.misc', 'alt.atheism', 'sci.med', 'comp.os.ms-windows.misc', 'soc.religion.christian', 'talk.politics.guns', 'rec.autos', 'rec.autos', 'talk.politics.mideast', 'rec.sport.baseball', 'talk.religion.misc', 'talk.politics.misc', 'rec.sport.hockey', 'comp.sys.mac.hardware', 'misc.forsale', 'sci.space', 'talk.politics.guns', 'rec.autos', '-' ] ``` 
Построим небольшой отчет о правильности нашего разбиения: ``` theta_train = model_artm.get_theta() model_labels = [] keys = np.sort([int(i) for i in theta_train.keys()]) for i in keys: max_val = 0 max_idx = 0 for j in theta_train[i].keys(): if j == str(T-1): continue if theta_train[i][j] > max_val: max_val = theta_train[i][j] max_idx = j topic = mixed[int(max_idx)] if topic == '-': print(i, '-') label = target_dict[topic] model_labels.append(label) print(classification_report(train_labels, model_labels)) print(classification_report(train_labels, model_labels)) mat = confusion_matrix(train_labels, model_labels) sns.heatmap(mat.T, annot=True, fmt='d', cbar=False) plt.xlabel('True label') plt.ylabel('Predicted label'); accuracy_score(train_labels, model_labels) ``` Нам удалось добиться 80% *accuracy*. По матрице ответов мы видим, что для модели темы *comp.sys.ibm.pc.hardware* и *comp.sys.mac.hardware* практически не различимы (<strike>честно говоря, для меня тоже</strike>), в остальном все более или менее прилично. Проверим модель на тестовой выборке: ``` batch_vectorizer_test = artm.BatchVectorizer(data_path="../../data/news_data/20news_test_mult.vw", data_format="vowpal_wabbit", target_folder="news_batches_test") theta_test = model_artm.transform(batch_vectorizer_test) test_score = [] for i in range(len(theta_test.keys())): max_val = 0 max_idx = 0 for j in theta_test[i].keys(): if j == str(T-1): continue if theta_test[i][j] > max_val: max_val = theta_test[i][j] max_idx = j topic = mixed[int(max_idx)] label = target_dict[topic] test_score.append(label) print(classification_report(test_labels, test_score)) mat = confusion_matrix(test_labels, test_score) sns.heatmap(mat.T, annot=True, fmt='d', cbar=False) plt.xlabel('True label') plt.ylabel('Predicted label'); accuracy_score(test_labels, test_score) ``` Итого почти 77%, незначительно хуже, чем на обучающей. 
**Вывод:** безумно много времени пришлось потратить на подгонку категорий к реальным темам, но в итоге я осталась довольна результатом. Такие смежные темы, как *alt.atheism*/*soc.religion.christian*/*talk.religion.misc* или *talk.politics.guns*/*talk.politics.mideast*/*talk.politics.misc* разделились вполне неплохо. Думаю, что я все-таки попробую использовать BigARTM в будущем для своих <strike>корыстных</strike> целей.
github_jupyter
<img align="left" src="https://lever-client-logos.s3.amazonaws.com/864372b1-534c-480e-acd5-9711f850815c-1524247202159.png" width=200> <br></br> <br></br> ## *Data Science Unit 4 Sprint 3 Assignment 2* # Convolutional Neural Networks (CNNs) # Assignment Load a pretrained network from Keras, [ResNet50](https://tfhub.dev/google/imagenet/resnet_v1_50/classification/1) - a 50 layer deep network trained to recognize [1000 objects](https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt). Starting usage: ```python import numpy as np from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions ResNet50 = ResNet50(weights='imagenet') features = model.predict(x) ``` Next you will need to remove the last layer from the ResNet model. Here, we loop over the layers to use the sequential API. There are easier ways to add and remove layers using the Keras functional API, but doing so introduces other complexities. ```python # Remote the Last Layer of ResNEt ResNet50._layers.pop(0) # Out New Model model = Sequential() # Add Pre-trained layers of Old Model to New Model for layer in ResNet50.layers: model.add(layer) # Turn off additional training of ResNet Layers for speed of assignment for layer in model.layers: layer.trainable = False # Add New Output Layer to Model model.add(Dense(1, activation='sigmoid')) ``` Your assignment is to apply the transfer learning above to classify images of Mountains (`./data/mountain/*`) and images of forests (`./data/forest/*`). Treat mountains as the postive class (1) and the forest images as the negative (zero). Steps to complete assignment: 1. Load in Image Data into numpy arrays (`X`) 2. Create a `y` for the labels 3. Train your model with pretrained layers from resnet 4. 
# Lazily-built ImageNet classifier shared across calls; the original
# constructed a fresh ResNet50 on every invocation, which is very slow.
_BANANA_MODEL = None

def img_contains_banana(img):
    """Return the predicted probability that *img* contains a banana.

    Parameters
    ----------
    img : PIL.Image.Image
        Image already resized to the network's input size (224x224).

    Returns
    -------
    float
        ImageNet softmax score of the 'banana' class if it appears in
        the top-3 predictions, otherwise 0.0.

    Side effects
    ------------
    Prints the decoded top-3 predictions.
    """
    global _BANANA_MODEL
    if _BANANA_MODEL is None:
        # Build the full classifier once and reuse it on later calls.
        _BANANA_MODEL = ResNet50(weights='imagenet')

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)      # add batch dimension
    x = preprocess_input(x)
    features = _BANANA_MODEL.predict(x)
    results = decode_predictions(features, top=3)[0]
    print(results)
    for entry in results:
        if entry[1] == 'banana':
            return entry[2]
    return 0.0
y_train, batch_size=20, epochs=15, validation_data=(X_test, y_test)) ``` # Resources and Stretch Goals Stretch goals - Enhance your code to use classes/functions and accept terms to search and classes to look for in recognizing the downloaded images (e.g. download images of parties, recognize all that contain balloons) - Check out [other available pretrained networks](https://tfhub.dev), try some and compare - Image recognition/classification is somewhat solved, but *relationships* between entities and describing an image is not - check out some of the extended resources (e.g. [Visual Genome](https://visualgenome.org/)) on the topic - Transfer learning - using images you source yourself, [retrain a classifier](https://www.tensorflow.org/hub/tutorials/image_retraining) with a new category - (Not CNN related) Use [piexif](https://pypi.org/project/piexif/) to check out the metadata of images passed in to your system - see if they're from a national park! (Note - many images lack GPS metadata, so this won't work in most cases, but still cool) Resources - [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) - influential paper (introduced ResNet) - [YOLO: Real-Time Object Detection](https://pjreddie.com/darknet/yolo/) - an influential convolution based object detection system, focused on inference speed (for applications to e.g. self driving vehicles) - [R-CNN, Fast R-CNN, Faster R-CNN, YOLO](https://towardsdatascience.com/r-cnn-fast-r-cnn-faster-r-cnn-yolo-object-detection-algorithms-36d53571365e) - comparison of object detection systems - [Common Objects in Context](http://cocodataset.org/) - a large-scale object detection, segmentation, and captioning dataset - [Visual Genome](https://visualgenome.org/) - a dataset, a knowledge base, an ongoing effort to connect structured image concepts to language
github_jupyter
# Question 2 You're an aspiring computational biologist, working with some alveolar (lung) cells to study some of the cellular machinery involved in disease progression. You've tagged the proteins you're interested in, run your experiment, and collected your data from the confocal microscope in your advisor's lab. Unfortunately, someone didn't properly secure the confocal microscope, because some dust or something got shaken loose during your imaging slot and it seems to have corrupted your images! <img src="noisy.png" width="60%" /> You don't have enough time to completely re-do the experiments, so you'll need to use your computational skills to clean up the data post-acquisition. ### Part A The `scipy.ndimage` submodule has lots of "filters" you can use to process your images. In the lecture we saw how the Gaussian filter worked for smoothing; we'll use that again here, in addition to a median filter. The functions you'll want to use are [**`ndimage.gaussian_filter`**](http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.gaussian_filter.html#scipy.ndimage.gaussian_filter) and [**`ndimage.median_filter`**](http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.median_filter.html#scipy.ndimage.median_filter). Check out their respective documentation pages to see how to use them. The upshot is both functions have 2 required arguments: the first is the image (of course), and the second is an integer that indicates the filter size; for the Gaussian filter, this argument is **`sigma`**; for the median filter, this argument is **`size`**. **Experiment with both filters, and with a few filter sizes. Plot the results of your filters using `plt.imshow()`, which has already been imported for you.** Make sure you post the results! Create new cells if you need to, but please try to show multiple plots of your different "experiments" (different filters with different parameter values, and the resulting images). 
``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np import scipy.ndimage as ndimage img = ndimage.imread("noisy.png", flatten = True) ### BEGIN SOLUTION ### END SOLUTION ``` ### Part B Compare and contrast the two types of filters (Gaussian and median). Are there similarities between their effects? Are there differences? How do the filter sizes affect the outputs? Can you speculate as to how these filters work under-the-hood? ### Part C Use your function from Question 1, Part B to count the number of cells in this image. Write a function which: - is named `count_cells` - takes 3 arguments: a NumPy image, an optional median filter size (default: 5), and an optional pixel threshold (default: 0) - returns 1 number: the number of cells found in the image It's pretty much the same deal as Part B on the last question, except this time we're also performing a median filter on the image to try and get rid of some of the noise in the image. The threshold procedure is also different. Before, you simply set any pixel below a certain value to 0. In this case, you're still doing that, but in addition **you will also set all *other* pixels to 1.** This is known as **binarization**: every pixel in the entire image is either a 1 or a 0. You can use `scipy.ndimage`, `skimage`, `numpy`, and `matplotlib`, but no other built-in functions or imported Python packages. 
``` import scipy.ndimage as ndimage img = ndimage.imread("noisy.png", flatten = True) t1 = 30 s1 = 5 a1 = 33 assert a1 == count_cells(img, s1, t1) img = ndimage.imread("noisy.png", flatten = True) t2 = 30 s2 = 20 a2 = 21 assert a2 == count_cells(img, s2, t2) img = ndimage.imread("noisy.png", flatten = True) t3 = 100 s3 = 5 a3 = 97 assert a3 == count_cells(img, s3, t3) img = ndimage.imread("noisy.png", flatten = True) t4 = 100 s4 = 20 a4 = 21 assert a4 == count_cells(img, s4, t4) ``` ### Part D Using the function you created in the previous question, re-run the cell counter, but this time on the *original* noisy image. Run it a few times, changing the pixel threshold you set (but using the original noisy image each time). How does the number of objects your function finds change with the pixel threshold? Now run it on a *filtered* image, but change the filter size. Make it really small and count the number of objects. Make it really large and count the number of objects. Keep the pixel threshold constant for this. How does the number of objects your function finds change with the filter size? Put your code in the box below, and write your responses in the box below that.
github_jupyter
# SIR-X This notebook exemplifies how Open-SIR can be used to fit the SIR-X model by [Maier and Dirk (2020)](https://science.sciencemag.org/content/early/2020/04/07/science.abb4557.full) to existing data and make predictions. The SIR-X model is a standard generalization of the Susceptible-Infectious-Removed (SIR) model, which includes the influence of exogenous factors such as policy changes, lockdown of the whole population and quarantine of the infectious individuals. The Open-SIR implementation of the SIR-X model will be validated reproducing the parameter fitting published in the [supplementary material](https://science.sciencemag.org/cgi/content/full/science.abb4557/DC1) of the original article published by [Maier and Brockmann (2020)](https://science.sciencemag.org/content/early/2020/04/07/science.abb4557.full). For simplicity, the validation will be performed only for the city of Guangdong, China. ## Import modules ``` # Uncomment this cell to activate black code formatter in the notebook # %load_ext nb_black # Import packages import pandas as pd import matplotlib.pyplot as plt import numpy as np %matplotlib inline ``` ## Data sourcing We will source data from the repository of the [John Hopkins University COVID-19 dashboard] (https://coronavirus.jhu.edu/map.html) published formally as a correspondence in [The Lancet](https://www.thelancet.com/journals/laninf/article/PIIS1473-3099(20)30120-1/fulltext#seccestitle10). This time series data contains the number of reported cases $C(t)$ per day for a number of cities. 
``` # Source data from Johns Hopkins University repository # jhu_link = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/who_covid_19_situation_reports/who_covid_19_sit_rep_time_series/who_covid_19_sit_rep_time_series.csv" jhu_link = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv" jhu_df = pd.read_csv(jhu_link) # Explore the dataset jhu_df.head(10) ``` It is observed that the column "Province/State" contains the name of the cities, and from the fourth column onward a time series stamp (or index) is provided to record daily data of reported cases. Additionally, there are many days without recorded data for a number of Chinese cities. This won't be an issue for parameter fitting as Open-SIR doesn't require uniform spacing of the observed data. ### Data preparation In the following lines, the time series for Guangdong reported cases $C(t)$ is extracted from the original dataframe. Thereafter, the columns are converted to a pandas date time index in order to perform further data preparation steps. 
``` China = jhu_df[jhu_df[jhu_df.columns[1]] == "China"] city_name = "Guangdong" city = China[China["Province/State"] == city_name] city = city.drop(columns=["Province/State", "Country/Region", "Lat", "Long"]) time_index = pd.to_datetime(city.columns) data = city.values # Visualize the time ts = pd.Series(data=city.values[0], index=time_index) ``` Using the function ts.plot() a quick visualization of the dataset is obtained: ``` ts.plot() plt.title("Guangdong COVID-19 cases") plt.ylabel("$C(t)$: Number of reported cases", size=12) plt.show() ``` Data cleaning ``` ts_clean = ts.dropna() # Extract data ts_fit = ts_clean["2020-01-21":"2020-02-12"] # Convert index to numeric ts_num = pd.to_numeric(ts_fit.index) t0 = ts_num[0] # Convert datetime to days t_days = (ts_num - t0) / (10 ** 9 * 86400) t_days = t_days.astype(int).values # t_days is an input for SIR # Define the X number nX = ts_fit.values # Number of infected N = 104.3e6 # Population size of Guangdong ``` Exploration of the dataset ``` ts_fit.plot(style="ro") plt.xlabel("Number of infected") plt.show() ``` ### Setting up SIR and SIR-X models The population $N$ of the city is a necessary input for the model. In this notebook, this was hardocded, but it can be sourced directly from a web source. 
Note that whilst the SIR model estimates directly the number of infected people, $N I(t)$, SIR-X estimates the number of infected people based on the number of tested cases that are in quarantine or in an hospital $N X(t)$ ``` # These lines are required only if opensir wasn't installed using pip install, or if opensir is running in the pipenv virtual environment import sys path_opensir = "../../" sys.path.append(path_opensir) # Import SIR and SIRX models from opensir.models import SIR, SIRX nX = ts_fit.values # Number of observed infections of the time series N = 104.3e6 # Population size of Guangdong params = [0.95, 0.38] w0 = (N - nX[0], nX[0], 0) G_sir = SIR() G_sir.set_params(p=params, initial_conds=w0) G_sir.fit_input = 2 G_sir.fit(t_days, nX) G_sir.solve(t_days[-1], t_days[-1] + 1) t_SIR = G_sir.fetch()[:, 0] I_SIR = G_sir.fetch()[:, 2] ``` ### Try to fit a SIR model to Guangdong data ``` ax = plt.axes() ax.tick_params(axis="both", which="major", labelsize=14) plt.plot(t_SIR, I_SIR) plt.plot(t_days, nX, "ro") plt.show() ``` The SIR model is clearly not appropriate to fit this data, as it cannot resolve the effect of exogeneous containment efforts such as quarantines or lockdown. We will repeat the process with a SIR-X model. 
### Fit SIR-X to Guangdong Data ``` g_sirx = SIRX() params = [6.2 / 8, 1 / 8, 0.05, 0.05, 5] # X_0 can be directly ontained from the statistics n_x0 = nX[0] # Number of people tested positive n_i0 = nX[0] w0 = (N - n_x0 - n_i0, n_i0, 0, n_x0) g_sirx.set_params(p=params, initial_conds=w0) # Fit all parameters fit_index = [False, False, True, True, True] g_sirx.fit(t_days, nX, fit_index=fit_index) g_sirx.solve(t_days[-1], t_days[-1] + 1) t_sirx = g_sirx.fetch()[:, 0] inf_sirx = g_sirx.fetch()[:, 4] plt.figure(figsize=[6, 6]) ax = plt.axes() plt.plot(t_sirx, inf_sirx, "b-", linewidth=2) plt.plot(t_SIR, I_SIR, "g-", linewidth=2) plt.plot(t_days, nX, "ro") plt.legend( ["SIR-X model fit", "SIR model fit", "Number of reported cases"], fontsize=13 ) plt.title("SARS-CoV-2 evolution in Guangdong, China", size=15) plt.xlabel("Days", fontsize=14) plt.ylabel("COVID-19 confirmed cases", fontsize=14) ax.tick_params(axis="both", which="major", labelsize=14) plt.show() ``` After fitting the parameters, the effective infectious period $T_{I,eff}$ and the effective reproduction rate $R_{0,eff}$ can be obtained from the model properties $$T_{I,eff} = (\beta + \kappa + \kappa_0)^{-1}$$ $$R_{0,eff} = \alpha T_{I,eff}$$ Aditionally, the Public containment leverage $P$ and the quarantine probability $Q$ can be calculated through: $$P = \frac{\kappa_0}{\kappa_0 + \kappa}$$ $$Q = \frac{\kappa_0 + \kappa}{\beta + \kappa_0 + \kappa}$$ ``` print("Effective infectious period T_I_eff = %.2f days " % g_sirx.t_inf_eff) print( "Effective reproduction rate R_0_eff = %.2f, Maier and Brockmann = %.2f" % (g_sirx.r0_eff, 3.02) ) print( "Public containment leverage = %.2f, Maier and Brockmann = %.2f" % (g_sirx.pcl, 0.75) ) print( "Quarantine probability = %.2f, Maier and Brockmann = %.2f" % (g_sirx.q_prob, 0.51) ) ``` ### Make predictions using `model.predict` ``` # Make predictions and visualize # Obtain the results 14 days after the train data ends sirx_pred = g_sirx.predict(14) print("T n_S \t n_I 
\tn_R \tn_X") for i in sirx_pred: print(*i.astype(int)) ``` Prepare date time index to plot predictions ``` # Import datetime module from the standard library import datetime # Obtain the last day from the data used to train the model last_time = ts_fit.index[-1] # Create a date time range based on the number of rows of the prediction numdays = sirx_pred.shape[0] day_zero = datetime.datetime(last_time.year, last_time.month, last_time.day) date_list = [day_zero + datetime.timedelta(days=x) for x in range(numdays)] ``` Plot predictions ``` # Extract figure and axes fig, ax = plt.subplots(figsize=[5, 5]) # Create core plot attributes plt.plot(date_list, sirx_pred[:, 4], color="blue", linewidth=2) plt.title("Prediction of Guangdong Cases", size=14) plt.ylabel("Number of infected", size=14) # Remove trailing space plt.xlim(date_list[0], date_list[-1]) # Limit the amount of data displayed ax.xaxis.set_major_locator(plt.MaxNLocator(3)) # Increase the size of the ticks ax.tick_params(labelsize=12) plt.show() ``` ### Calculation of predictive confidence intervals The confidence intervals on the predictions of the SIR-X model can be calculated using a block cross validation. This technique is widely used in Time Series Analysis. In the open-sir API, the function `model.ci_block_cv` calculates the average mean squared error of the predictions, a list of the rolling mean squared errors and the list of parameters which shows how much each parameter changes taking different number of days for making predictions. The three first parameters are the same as the fit function, while the last two parameters are the `lags` and the `min_sample`. The `lags` parameter indicates how many periods in the future will be forecasted in order to calculate the mean squared error of the model prediction. The `min_sample` parameter indicates the initial number of observations and days that will be taken to perform the block cross validation. 
In the following example, `model.ci_block_cv` is used to estimate the average mean squared error of *1-day* predictions taking *6* observations as the starting point of the cross validation. For Guangdong, a `min_sample=6` higher than the default 3 is required to handle well the missing data. This way, both the data on the four first days, and two days after the data starts again, are considered for cross validation. ``` # Calculate confidence intervals mse_avg, mse_list, p_list, pred_data = g_sirx.block_cv(lags=1, min_sample=6) ``` If it is assumed that the residuals distribute normally, then a good estimation of a 95% confidence interval on the one-day prediction of the number of confirmed cases is $$\sigma \sim \mathrm{MSE} \rightarrow n_{X,{t+1}} \sim \hat{n}_{X,{t+1}} \pm 2 \sigma$$ Where $n_{X,{t+1}}$ is the real number of confirmed cases in the next day, and $\hat{n}_{X,{t+1}}$ is the estimation using the SIR-X model using cross validation. We can use the `PredictionResults` instance `pred_data` functionality to explore the mean-squared errors and the predictions confidence intervals: ``` pred_data.print_mse() ``` The predictive accuracy of the model is quite impressive, even for 9-day predictions. Let's take advantage of the relatively low mean squared error to forecast a 10 days horizon with confidence intervals using `pred_data.plot_predictions(n_days=9)` ``` pred_data.plot_pred_ci(n_days=9) ``` If it is assumed that the residuals distribute normally, then a good estimation of a 95% confidence interval on the one-day prediction of the number of confirmed cases is $$\sigma \sim \mathrm{MSE} \rightarrow n_{X,{t+1}} \sim \hat{n}_{X,{t+1}} \pm 2 \sigma$$ Where $n_{X,{t+1}}$ is the real number of confirmed cases in the next day, and $\hat{n}_{X,{t+1}}$ is the estimation using the SIR-X model using cross validation. We use solve to make a 1-day prediction and append the 95% confidence interval. 
``` # Predict g_sirx.solve(t_days[-1] + 1, t_days[-1] + 2) n_X_tplusone = g_sirx.fetch()[-1, 4] print("Estimation of n_X_{t+1} = %.0f +- %.0f " % (n_X_tplusone, 2 * mse_avg[0])) # Transform parameter list into a DataFrame par_block_cv = pd.DataFrame(p_list) # Rename dataframe columns based on SIR-X parameter names par_block_cv.columns = g_sirx.PARAMS # Add the day. Note that we take the days from min_sample until the end of the array, as days # 0,1,2 are used for the first sampling in the block cross-validation par_block_cv["Day"] = t_days[5:] # Explore formatted dataframe for parametric analysis par_block_cv.head(len(p_list)) plt.figure(figsize=[5, 5]) ax = plt.axes() ax.tick_params(axis="both", which="major", labelsize=14) plt.plot(mse_list[0], "ro") plt.xlabel("Number of days used to predict the next day", size=14) plt.ylabel("MSE", size=14) plt.show() ``` There is an outlier on day 1, as this is when the missing date starts. A more reliable approach would be to take the last 8 values of the mean squared error to calculate a new average assuming that there will be no more missing data. #### Variation of fitted parameters Finally, it is possible to observe how the model parameters change as more days and number of confirmed cases are introduced in the block cross validation. It is clear to observe that after day 15 all parameters except kappa begin to converge. Therefore, care must be taken when performing inference over the parameter kappa. 
### Long term prediction Now we can use the model to predict when the peak will occur and what will be the maximum number of infected ``` # Predict plt.figure(figsize=[6, 6]) ax = plt.axes() ax.tick_params(axis="both", which="major", labelsize=14) g_sirx.solve(40, 41) # Plot plt.plot(g_sirx.fetch()[:, 4], "b-", linewidth=2) # X(t) plt.plot(g_sirx.fetch()[:, 2], "b--", linewidth=2) # I(t) plt.xlabel("Day", size=14) plt.ylabel("Number of people", size=14) plt.legend(["X(t): Confirmed", "I(t) = Infected"], fontsize=13) plt.title(city_name) plt.show() ``` The model was trained with a limited amount of data. It is clear to observe that since the measures took place in Guangdong, at least 6 weeks of quarantine were necessary to control the pandemics. Note that a limitation of this model is that it predicts an equilibrium where the number of infected, denoted by the yellow line in the figure above, is 0 after a short time. In reality, this amount will decrease to a small number. After the peak of infections is reached, it is necessary to keep the quarantine and effective contact tracing for at least 30 days more. ### Validate long term plot using model.plot() The function `model.plot()` offers a handy way to visualize model fitting and predictions. Custom visualizations can be validated against the `model.plot()` function. ``` g_sirx.plot() ```
github_jupyter
**Chapter 3 – Classification** _This notebook contains all the sample code and solutions to the exercises in chapter 3._ # Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures: ``` # To support both python 2 and python 3 from __future__ import division, print_function, unicode_literals # Common imports import numpy as np import os # to make this notebook's output stable across runs np.random.seed(42) # To plot pretty figures %matplotlib inline import matplotlib import matplotlib.pyplot as plt plt.rcParams['axes.labelsize'] = 14 plt.rcParams['xtick.labelsize'] = 12 plt.rcParams['ytick.labelsize'] = 12 # Where to save the figures PROJECT_ROOT_DIR = "." CHAPTER_ID = "classification" def save_fig(fig_id, tight_layout=True): path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png") print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format='png', dpi=300) ``` # MNIST ``` from sklearn.datasets import fetch_mldata try: mnist = fetch_mldata('MNIST original') except Exception as ex: from six.moves import urllib from scipy.io import loadmat import os mnist_path = os.path.join(".", "datasets", "mnist-original.mat") # download dataset from github. 
mnist_alternative_url = "https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat" response = urllib.request.urlopen(mnist_alternative_url) with open(mnist_path, "wb") as f: content = response.read() f.write(content) mnist_raw = loadmat(mnist_path) mnist = { "data": mnist_raw["data"].T, "target": mnist_raw["label"][0], "COL_NAMES": ["label", "data"], "DESCR": "mldata.org dataset: mnist-original", } print("Done!") X, y = mnist["data"], mnist["target"] X.shape y.shape 28*28 %matplotlib inline import matplotlib import matplotlib.pyplot as plt some_digit = X[36000] some_digit_image = some_digit.reshape(28, 28) plt.imshow(some_digit_image, cmap = matplotlib.cm.binary, interpolation="nearest") plt.axis("off") save_fig("some_digit_plot") plt.show() def plot_digit(data): image = data.reshape(28, 28) plt.imshow(image, cmap = matplotlib.cm.binary, interpolation="nearest") plt.axis("off") # EXTRA def plot_digits(instances, images_per_row=10, **options): size = 28 images_per_row = min(len(instances), images_per_row) images = [instance.reshape(size,size) for instance in instances] n_rows = (len(instances) - 1) // images_per_row + 1 row_images = [] n_empty = n_rows * images_per_row - len(instances) images.append(np.zeros((size, size * n_empty))) for row in range(n_rows): rimages = images[row * images_per_row : (row + 1) * images_per_row] row_images.append(np.concatenate(rimages, axis=1)) image = np.concatenate(row_images, axis=0) plt.imshow(image, cmap = matplotlib.cm.binary, **options) plt.axis("off") plt.figure(figsize=(9,9)) example_images = np.r_[X[:12000:600], X[13000:30600:600], X[30600:60000:590]] plot_digits(example_images, images_per_row=10) save_fig("more_digits_plot") plt.show() y[36000] X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:] import numpy as np shuffle_index = np.random.permutation(60000) X_train, y_train = X_train[shuffle_index], y_train[shuffle_index] ``` # Binary classifier ``` y_train_5 = (y_train 
== 5) y_test_5 = (y_test == 5) from sklearn.linear_model import SGDClassifier sgd_clf = SGDClassifier(max_iter=5, random_state=42) sgd_clf.fit(X_train, y_train_5) sgd_clf.predict([some_digit]) from sklearn.model_selection import cross_val_score cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy") from sklearn.model_selection import StratifiedKFold from sklearn.base import clone skfolds = StratifiedKFold(n_splits=3, random_state=42) for train_index, test_index in skfolds.split(X_train, y_train_5): clone_clf = clone(sgd_clf) X_train_folds = X_train[train_index] y_train_folds = (y_train_5[train_index]) X_test_fold = X_train[test_index] y_test_fold = (y_train_5[test_index]) clone_clf.fit(X_train_folds, y_train_folds) y_pred = clone_clf.predict(X_test_fold) n_correct = sum(y_pred == y_test_fold) print(n_correct / len(y_pred)) from sklearn.base import BaseEstimator class Never5Classifier(BaseEstimator): def fit(self, X, y=None): pass def predict(self, X): return np.zeros((len(X), 1), dtype=bool) never_5_clf = Never5Classifier() cross_val_score(never_5_clf, X_train, y_train_5, cv=3, scoring="accuracy") from sklearn.model_selection import cross_val_predict y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3) from sklearn.metrics import confusion_matrix confusion_matrix(y_train_5, y_train_pred) y_train_perfect_predictions = y_train_5 confusion_matrix(y_train_5, y_train_perfect_predictions) from sklearn.metrics import precision_score, recall_score precision_score(y_train_5, y_train_pred) 4344 / (4344 + 1307) recall_score(y_train_5, y_train_pred) 4344 / (4344 + 1077) from sklearn.metrics import f1_score f1_score(y_train_5, y_train_pred) 4344 / (4344 + (1077 + 1307)/2) y_scores = sgd_clf.decision_function([some_digit]) y_scores threshold = 0 y_some_digit_pred = (y_scores > threshold) y_some_digit_pred threshold = 200000 y_some_digit_pred = (y_scores > threshold) y_some_digit_pred y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, 
method="decision_function") ``` Note: there was an [issue](https://github.com/scikit-learn/scikit-learn/issues/9589) in Scikit-Learn 0.19.0 (fixed in 0.19.1) where the result of `cross_val_predict()` was incorrect in the binary classification case when using `method="decision_function"`, as in the code above. The resulting array had an extra first dimension full of 0s. Just in case you are using 0.19.0, we need to add this small hack to work around this issue: ``` y_scores.shape # hack to work around issue #9589 in Scikit-Learn 0.19.0 if y_scores.ndim == 2: y_scores = y_scores[:, 1] from sklearn.metrics import precision_recall_curve precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores) def plot_precision_recall_vs_threshold(precisions, recalls, thresholds): plt.plot(thresholds, precisions[:-1], "b--", label="Precision", linewidth=2) plt.plot(thresholds, recalls[:-1], "g-", label="Recall", linewidth=2) plt.xlabel("Threshold", fontsize=16) plt.legend(loc="upper left", fontsize=16) plt.ylim([0, 1]) plt.figure(figsize=(8, 4)) plot_precision_recall_vs_threshold(precisions, recalls, thresholds) plt.xlim([-700000, 700000]) save_fig("precision_recall_vs_threshold_plot") plt.show() (y_train_pred == (y_scores > 0)).all() y_train_pred_90 = (y_scores > 70000) precision_score(y_train_5, y_train_pred_90) recall_score(y_train_5, y_train_pred_90) def plot_precision_vs_recall(precisions, recalls): plt.plot(recalls, precisions, "b-", linewidth=2) plt.xlabel("Recall", fontsize=16) plt.ylabel("Precision", fontsize=16) plt.axis([0, 1, 0, 1]) plt.figure(figsize=(8, 6)) plot_precision_vs_recall(precisions, recalls) save_fig("precision_vs_recall_plot") plt.show() ``` # ROC curves ``` from sklearn.metrics import roc_curve fpr, tpr, thresholds = roc_curve(y_train_5, y_scores) def plot_roc_curve(fpr, tpr, label=None): plt.plot(fpr, tpr, linewidth=2, label=label) plt.plot([0, 1], [0, 1], 'k--') plt.axis([0, 1, 0, 1]) plt.xlabel('False Positive Rate', fontsize=16) 
plt.ylabel('True Positive Rate', fontsize=16) plt.figure(figsize=(8, 6)) plot_roc_curve(fpr, tpr) save_fig("roc_curve_plot") plt.show() from sklearn.metrics import roc_auc_score roc_auc_score(y_train_5, y_scores) from sklearn.ensemble import RandomForestClassifier forest_clf = RandomForestClassifier(random_state=42) y_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3, method="predict_proba") y_scores_forest = y_probas_forest[:, 1] # score = proba of positive class fpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_5,y_scores_forest) plt.figure(figsize=(8, 6)) plt.plot(fpr, tpr, "b:", linewidth=2, label="SGD") plot_roc_curve(fpr_forest, tpr_forest, "Random Forest") plt.legend(loc="lower right", fontsize=16) save_fig("roc_curve_comparison_plot") plt.show() roc_auc_score(y_train_5, y_scores_forest) y_train_pred_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3) precision_score(y_train_5, y_train_pred_forest) recall_score(y_train_5, y_train_pred_forest) ``` # Multiclass classification ``` sgd_clf.fit(X_train, y_train) sgd_clf.predict([some_digit]) some_digit_scores = sgd_clf.decision_function([some_digit]) some_digit_scores np.argmax(some_digit_scores) sgd_clf.classes_ sgd_clf.classes_[5] from sklearn.multiclass import OneVsOneClassifier ovo_clf = OneVsOneClassifier(SGDClassifier(max_iter=5, random_state=42)) ovo_clf.fit(X_train, y_train) ovo_clf.predict([some_digit]) len(ovo_clf.estimators_) forest_clf.fit(X_train, y_train) forest_clf.predict([some_digit]) forest_clf.predict_proba([some_digit]) cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring="accuracy") from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train.astype(np.float64)) cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring="accuracy") y_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3) conf_mx = confusion_matrix(y_train, y_train_pred) conf_mx def 
plot_confusion_matrix(matrix): """If you prefer color and a colorbar""" fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(111) cax = ax.matshow(matrix) fig.colorbar(cax) plt.matshow(conf_mx, cmap=plt.cm.gray) save_fig("confusion_matrix_plot", tight_layout=False) plt.show() row_sums = conf_mx.sum(axis=1, keepdims=True) norm_conf_mx = conf_mx / row_sums np.fill_diagonal(norm_conf_mx, 0) plt.matshow(norm_conf_mx, cmap=plt.cm.gray) save_fig("confusion_matrix_errors_plot", tight_layout=False) plt.show() cl_a, cl_b = 3, 5 X_aa = X_train[(y_train == cl_a) & (y_train_pred == cl_a)] X_ab = X_train[(y_train == cl_a) & (y_train_pred == cl_b)] X_ba = X_train[(y_train == cl_b) & (y_train_pred == cl_a)] X_bb = X_train[(y_train == cl_b) & (y_train_pred == cl_b)] plt.figure(figsize=(8,8)) plt.subplot(221); plot_digits(X_aa[:25], images_per_row=5) plt.subplot(222); plot_digits(X_ab[:25], images_per_row=5) plt.subplot(223); plot_digits(X_ba[:25], images_per_row=5) plt.subplot(224); plot_digits(X_bb[:25], images_per_row=5) save_fig("error_analysis_digits_plot") plt.show() ``` # Multilabel classification ``` from sklearn.neighbors import KNeighborsClassifier # y_train_large = (y_train >= 7) # y_train_odd = (y_train % 2 == 1) # y_multilabel = np.c_[y_train_large, y_train_odd] # knn_clf = KNeighborsClassifier() # knn_clf.fit(X_train, y_multilabel) # knn_clf.predict([some_digit]) ``` **Warning**: the following cell may take a very long time (possibly hours depending on your hardware). 
``` # y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3, n_jobs=-1) # f1_score(y_multilabel, y_train_knn_pred, average="macro") ``` # Multioutput classification ``` noise = np.random.randint(0, 100, (len(X_train), 784)) X_train_mod = X_train + noise noise = np.random.randint(0, 100, (len(X_test), 784)) X_test_mod = X_test + noise y_train_mod = X_train y_test_mod = X_test some_index = 5500 plt.subplot(121); plot_digit(X_test_mod[some_index]) plt.subplot(122); plot_digit(y_test_mod[some_index]) save_fig("noisy_digit_example_plot") plt.show() # knn_clf.fit(X_train_mod, y_train_mod) # clean_digit = knn_clf.predict([X_test_mod[some_index]]) # plot_digit(clean_digit) # save_fig("cleaned_digit_example_plot") ``` # Extra material ## Dummy (ie. random) classifier ``` from sklearn.dummy import DummyClassifier dmy_clf = DummyClassifier() y_probas_dmy = cross_val_predict(dmy_clf, X_train, y_train_5, cv=3, method="predict_proba") y_scores_dmy = y_probas_dmy[:, 1] fprr, tprr, thresholdsr = roc_curve(y_train_5, y_scores_dmy) plot_roc_curve(fprr, tprr) ``` ## KNN classifier ``` from sklearn.neighbors import KNeighborsClassifier # knn_clf = KNeighborsClassifier(n_jobs=2, weights='distance', n_neighbors=4) # knn_clf.fit(X_train, y_train) # y_knn_pred = knn_clf.predict(X_test) from sklearn.metrics import accuracy_score # accuracy_score(y_test, y_knn_pred) from scipy.ndimage.interpolation import shift def shift_digit(digit_array, dx, dy, new=0): return shift(digit_array.reshape(28, 28), [dy, dx], cval=new).reshape(784) plot_digit(shift_digit(some_digit, 5, 1, new=100)) X_train_expanded = [X_train] y_train_expanded = [y_train] for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)): shifted_images = np.apply_along_axis(shift_digit, axis=1, arr=X_train, dx=dx, dy=dy) X_train_expanded.append(shifted_images) y_train_expanded.append(y_train) X_train_expanded = np.concatenate(X_train_expanded) y_train_expanded = np.concatenate(y_train_expanded) X_train_expanded.shape, 
y_train_expanded.shape # knn_clf.fit(X_train_expanded, y_train_expanded) # y_knn_expanded_pred = knn_clf.predict(X_test) # accuracy_score(y_test, y_knn_expanded_pred) # ambiguous_digit = X_test[2589] # knn_clf.predict_proba([ambiguous_digit]) # plot_digit(ambiguous_digit) ``` # Exercise solutions # Exercise 1 ``` from sklearn.model_selection import GridSearchCV knn2 = KNeighborsClassifier() search_space = [{"weights" : ["uniform", "distance"], "n_neighbors" : [2, 4, 6, 8]}] clf = GridSearchCV(knn2, search_space, cv=3, n_jobs=2) best_model = clf.fit(X_train_scaled, y_train) predictions = best_model.predict(X_train_scaled) cross_val_score(best_model, X_train_scaled, y_train, cv=3, scoring="accuracy") ``` # Exercise 2 ``` def shift_image(arr, direction): """Shift a flattened 28x28 digit image one pixel in the given direction ('up', 'down', 'left' or 'right') and return it re-flattened to length 784.""" direction_dict = {"up" : [-1, 0], "down" : [1, 0], "left" : [0, -1], "right" : [0, 1]} return shift(arr.reshape(28,28), direction_dict[direction]).reshape(784) ```
github_jupyter
``` import os import numpy as np import yaml from astropy.io import ascii as asc from astropy.time import Time import astropy.units as u import astropy.constants as c from astropy.modeling import models, fitting from matplotlib import pyplot as plt %matplotlib inline import supernova TEST_FILE_DIR = '../data/line_info/testing/' FIG_DIR = '../figures/' DATA_DIR = '../data/line_info' HA = 6563.0 SiII = 6355.0 FeII = 5169.0 IR_dates = Time(['2015-09-05','2015-10-05', '2015-10-10']) sn15oz = supernova.LightCurve2('asassn-15oz') texpl = Time(sn15oz.jdexpl, format='jd') old_fitting = asc.read(os.path.join(TEST_FILE_DIR, 'HA-cachito_old.tab')) new_fit_together = asc.read(os.path.join(TEST_FILE_DIR, 'HA-cachito.tab')) new_fit_cachito = asc.read(os.path.join(TEST_FILE_DIR, 'cachito.tab')) new_fit_HA = asc.read(os.path.join(TEST_FILE_DIR, 'HA.tab')) def calc_velocity(obs_wl, rest_wl): velocity = c.c*(obs_wl/rest_wl - 1) return velocity fig = plt.figure(figsize=[10, 5]) ax_HA = fig.add_subplot(1,2,1) ax_cachito = fig.add_subplot(1,2,2) ax_HA.plot((Time(old_fitting['date'])-texpl).value, -1*calc_velocity(old_fitting['vel1'], HA).to(u.km/u.s), 'o', label='old fit') ax_HA.plot((Time(new_fit_together['date'])-texpl).value, -1*calc_velocity(new_fit_together['vel1'], HA).to(u.km/u.s), 's', label='new fit together') ax_HA.plot((Time(new_fit_HA['date'])-texpl).value, -1*calc_velocity(new_fit_HA['vel0'], HA).to(u.km/u.s), '^', label='new fit separate') ax_HA.set_ylim(7500, 12000) ax_HA.set_xticks(np.arange(0, 90, 10)) ax_HA.legend() ax_HA.set_title(r'H-$\alpha$ Velocity') ax_cachito.plot((Time(old_fitting['date'])-texpl).value, -1*calc_velocity(old_fitting['vel0'], HA).to(u.km/u.s), 'o', label='old fit') ax_cachito.plot((Time(new_fit_together['date'])-texpl).value, -1*calc_velocity(new_fit_together['vel0'], HA).to(u.km/u.s), 's', label='new fit together') ax_cachito.plot((Time(new_fit_cachito['date'])-texpl).value, -1*calc_velocity(new_fit_cachito['vel0'], HA).to(u.km/u.s), '^', 
label='new fit separate') ax_cachito.set_xticks(np.arange(0, 90, 10)) ax_cachito.grid() ax_cachito.set_title('Cachito Velocity (if Hydrogen)') ax_cachito.vlines((IR_dates-texpl).value, linestyle='--', ymin=10000, ymax=20000, label='IR spectra') ax_cachito.legend(loc='lower left') plt.savefig(os.path.join(FIG_DIR, 'HA-cachito_velocity_test.pdf')) fig = plt.figure(figsize=[10, 5]) ax_HA = fig.add_subplot(1,1,1) ax_cachito = ax_HA.twinx() #ax_HA.plot((Time(old_fitting['date'])-texpl).value, -1*calc_velocity(old_fitting['vel1'], HA).to(u.km/u.s), 'o', label='old fit') #ax_HA.plot((Time(new_fit_together['date'])-texpl).value, -1*calc_velocity(new_fit_together['vel1'], HA).to(u.km/u.s), 's', label='new fit together') ax_HA.plot((Time(new_fit_HA['date'])-texpl).value, -1*calc_velocity(new_fit_HA['vel0'], HA).to(u.km/u.s), '^', label='new fit separate') ax_HA.set_ylim(7500, 12000) ax_HA.set_xticks(np.arange(0, 90, 10)) ax_HA.legend() ax_HA.set_title(r'H-$\alpha$/Cachito Velocity') #ax_cachito.plot((Time(old_fitting['date'])-texpl).value, -1*calc_velocity(old_fitting['vel0'], HA).to(u.km/u.s), 'co', label='old fit') #ax_cachito.plot((Time(new_fit_together['date'])-texpl).value, -1*calc_velocity(new_fit_together['vel0'], HA).to(u.km/u.s), 'rs', label='new fit together') ax_cachito.plot((Time(new_fit_cachito['date'])-texpl).value, -1*calc_velocity(new_fit_cachito['vel0'], HA).to(u.km/u.s), '^', color='lime', label='new fit separate') ax_cachito.set_xticks(np.arange(0, 90, 10)) ax_cachito.grid() ax_cachito.vlines((IR_dates-texpl).value, linestyle='--', ymin=10000, ymax=20000, label='IR spectra') ax_cachito.legend(loc='lower left') ``` # Fit Velocity ## Cachito Fit ``` phase_cachito = (Time(new_fit_cachito['date'])-texpl).value velocity_cachito = -1*calc_velocity(new_fit_cachito['vel0'], HA).to(u.km/u.s).value fitter_power = fitting.LevMarLSQFitter() fitter_linear = fitting.LinearLSQFitter() power_model = models.PowerLaw1D() poly_model3 = models.Polynomial1D(degree=3) 
poly_model4 = models.Polynomial1D(degree=4) poly_model5 = models.Polynomial1D(degree=5) power_fit_cachito = fitter_power(power_model, phase_cachito, velocity_cachito) poly_fit3_cachito = fitter_linear(poly_model3, phase_cachito, velocity_cachito) poly_fit4_cachito = fitter_linear(poly_model4, phase_cachito, velocity_cachito) poly_fit5_cachito = fitter_linear(poly_model5, phase_cachito, velocity_cachito) fit_time = np.arange(1, phase_cachito[-1]+1,1) fig = plt.figure(figsize=[10, 5]) ax_cachito = fig.add_subplot(2,1,1) ax_resid = fig.add_subplot(2,1,2, sharex=ax_cachito) ax_cachito.plot(phase_cachito, velocity_cachito, '^', color='lime', label='new fit separate') ax_cachito.set_xticks(np.arange(0, 90, 10)) ax_cachito.grid() ax_cachito.plot(fit_time, power_fit_cachito(fit_time), label='Power Law') ax_cachito.plot(fit_time, poly_fit4_cachito(fit_time), label='Polynomial deg={}'.format(poly_model4.degree)) ax_cachito.set_title('Cachito Velocity (if Hydrogen)') ax_cachito.vlines((IR_dates-texpl).value, linestyle='--', ymin=12000, ymax=21000, label='IR spectra') ax_cachito.set_ylabel('Velocity (km/s)') ax_cachito.set_ylim(ymin=12000, ymax=21000) ax_cachito.legend(loc='best') ax_resid.axhline(0, color='k') ax_resid.vlines((IR_dates-texpl).value, linestyle='--', ymin=-500, ymax=500, label='IR spectra') ax_resid.plot(phase_cachito, velocity_cachito - power_fit_cachito(phase_cachito), 'o', label='Power') ax_resid.plot(phase_cachito, velocity_cachito - poly_fit3_cachito(phase_cachito), 'o', label='deg3') ax_resid.plot(phase_cachito, velocity_cachito - poly_fit4_cachito(phase_cachito), 'o', label='deg4') ax_resid.plot(phase_cachito, velocity_cachito - poly_fit5_cachito(phase_cachito), 'o', label='deg5') ax_resid.set_yticks([-500, -250, 0, 250, 500]) ax_resid.grid() ax_resid.legend(loc='best', ncol=3) ax_resid.set_ylabel('Residual (km/s)') ax_resid.set_xlabel('Phase (days)') plt.savefig(os.path.join(FIG_DIR, 'cachito_velocity_fit.pdf')) print('Power law std = 
{}'.format(np.std(velocity_cachito - power_fit_cachito(phase_cachito)))) print('Deg 4 polynomial std = {}'.format(np.std(velocity_cachito - poly_fit4_cachito(phase_cachito)))) print('Deg 3 polynomial std = {}'.format(np.std(velocity_cachito - poly_fit3_cachito(phase_cachito)))) ``` Speaking with Stefano - we're going to use the power law fit; Nugent (2006) and Faran (2014) both fit power laws ## H-Alpha Fit ``` phase_HA = (Time(new_fit_HA['date'])-texpl).value velocity_HA = -1*calc_velocity(new_fit_HA['vel0'], HA).to(u.km/u.s).value fitter_power = fitting.LevMarLSQFitter() fitter_linear = fitting.LinearLSQFitter() power_model = models.PowerLaw1D() poly_model3 = models.Polynomial1D(degree=3) poly_model4 = models.Polynomial1D(degree=4) poly_model5 = models.Polynomial1D(degree=5) power_fit_HA = fitter_power(power_model, phase_HA, velocity_HA) poly_fit3_HA = fitter_linear(poly_model3, phase_HA, velocity_HA) poly_fit4_HA = fitter_linear(poly_model4, phase_HA, velocity_HA) poly_fit5_HA = fitter_linear(poly_model5, phase_HA, velocity_HA) fit_time = np.arange(1, phase_HA[-1]+1,1) fig = plt.figure(figsize=[10, 5]) ax_HA = fig.add_subplot(2,1,1) ax_resid = fig.add_subplot(2,1,2, sharex=ax_HA) ax_HA.plot(phase_HA, velocity_HA, '^', color='lime', label='new fit separate') ax_HA.set_xticks(np.arange(0, 90, 10)) ax_HA.grid() ax_HA.plot(fit_time, power_fit_HA(fit_time), label='Power Law') ax_HA.plot(fit_time, poly_fit4_HA(fit_time), label='Polynomial deg={}'.format(poly_model4.degree)) ax_HA.set_title('HA Velocity (if Hydrogen)') ax_HA.vlines((IR_dates-texpl).value, linestyle='--', ymin=8000, ymax=12000, label='IR spectra') ax_HA.set_ylim(ymin=8000, ymax=12000) ax_HA.legend(loc='best') ax_HA.set_ylabel('velocity (km/s)') ax_resid.axhline(0, color='k') ax_resid.vlines((IR_dates-texpl).value, linestyle='--', ymin=-500, ymax=500, label='IR spectra') ax_resid.plot(phase_HA, velocity_HA - power_fit_HA(phase_HA), 'o', label='Power') ax_resid.plot(phase_HA, velocity_HA - 
poly_fit3_HA(phase_HA), 'o', label='deg3') ax_resid.plot(phase_HA, velocity_HA - poly_fit4_HA(phase_HA), 'o', label='deg4') ax_resid.plot(phase_HA, velocity_HA - poly_fit5_HA(phase_HA), 'o', label='deg5') ax_resid.grid() ax_resid.legend(loc='best', ncol=2) ax_resid.set_xlabel('Phase (days)') ax_resid.set_ylabel('Residual') print('Power law std = {}'.format(np.std(velocity_HA - power_fit_HA(phase_HA)))) print('Deg 4 polynomial std = {}'.format(np.std(velocity_HA - poly_fit4_HA(phase_HA)))) print('Deg 3 polynomial std = {}'.format(np.std(velocity_HA - poly_fit3_HA(phase_HA)))) plt.savefig(os.path.join(FIG_DIR, 'HA_velocity_fit.pdf')) ``` # Look at Silicon Velocity and fit the FeII Velocity ``` tbdata_feII = asc.read(os.path.join(DATA_DIR, 'FeII_multi.tab')) tbdata_feII.remove_columns(['vel1', 'vel_err_left_1', 'vel_err_right_1', 'vel_pew_1', 'vel_pew_err1']) tbdata_feII.rename_column('vel0', 'velocity') tbdata_feII.rename_column('vel_err_left_0', 'vel_err_left') tbdata_feII.rename_column('vel_err_right_0', 'vel_err_right') tbdata_feII.rename_column('vel_pew_0', 'pew') tbdata_feII.rename_column('vel_pew_err0', 'pew_err') phase_feII = (Time(tbdata_feII['date'])-texpl).value velocity_feII = -1*calc_velocity(tbdata_feII['velocity'], FeII).to(u.km/u.s) power_model_feII = models.PowerLaw1D(alpha=power_fit_cachito.alpha, x_0=power_fit_cachito.x_0) power_fit_feII = fitter_power(power_model_feII, phase_feII, velocity_feII) fig = plt.figure(figsize=[10, 5]) ax_Fe = fig.add_subplot(2,1,1) ax_resid = fig.add_subplot(2,1,2, sharex=ax_Fe) ax_Fe.plot(phase_feII, velocity_feII, '^', label='FeII (5169)') ax_Fe.plot((Time(new_fit_cachito['date'])-texpl).value, -1*calc_velocity(new_fit_cachito['vel0'], SiII).to(u.km/u.s), '^', label='Cachito (as SiII 6533)') ax_Fe.plot(fit_time, power_fit_feII(fit_time)) ax_Fe.vlines((IR_dates-texpl).value, linestyle='--', ymin=-3000, ymax=12000, label='IR spectra') ax_Fe.set_xticks(np.arange(0, 90, 10)) ax_Fe.legend() ax_Fe.set_title(r'FeII 5169 
Velocity') ax_Fe.set_ylim(3000, 11000) ax_resid.axhline(0, color='k') ax_resid.plot(phase_feII, velocity_feII - power_fit_feII(phase_feII), 'o') ax_resid.set_yticks([-500, -250, 0, 250, 500]) ax_resid.grid() ax_resid.vlines((IR_dates-texpl).value, linestyle='--', ymin=-500, ymax=500, label='IR spectra') print('Power law std = {}'.format(np.std(velocity_feII - power_fit_feII(phase_feII)))) fig = plt.figure() ax_Fe = fig.add_subplot(1,1,1) ax_Fe.plot((Time(new_fit_cachito['date'])-texpl).value, -1*calc_velocity(new_fit_cachito['vel0'], SiII).to(u.km/u.s), '^', label='Cachito (as SiII 6533)') #ax_Fe.plot((Time(new_fit_cachito['date'])-texpl).value, -1*calc_velocity(new_fit_together['vel0'], SiII).to(u.km/u.s), '^', label='Cachito (as SiII 6533); new joint fit', alpha=0.25) #ax_Fe.plot((Time(new_fit_cachito['date'])-texpl).value, -1*calc_velocity(old_fitting['vel0'], SiII).to(u.km/u.s), '^', label='Cachito (as SiII 6533); old joint fit', alpha=0.25) ax_Fe.plot(phase_feII, velocity_feII, 'o', label='FeII (5169)') ax_Fe.set_xticks(np.arange(0, 90, 10)) ax_Fe.legend() ax_Fe.set_title(r'FeII 5169 Velocity') ax_Fe.set_ylim(5000, 11000) ax_Fe.set_xlim(0, 40) ax_Fe.set_xlabel('Phase (days)') ax_Fe.set_ylabel('Velocity (km/s)') plt.savefig(os.path.join(FIG_DIR, 'cachito_fe_vel_comp.pdf')) cp ../figures/cachito_fe_vel_comp.pdf ../paper/figures/ ```
github_jupyter
# Huggingface SageMaker-SDK - BERT Japanese QA example 1. [Introduction](#Introduction) 2. [Development Environment and Permissions](#Development-Environment-and-Permissions) 1. [Installation](#Installation) 2. [Permissions](#Permissions) 3. [Uploading data to sagemaker_session_bucket](#Uploading-data-to-sagemaker_session_bucket) 3. [(Optional) Deepen your understanding of SQuAD](#(Optional)-Deepen-your-understanding-of-SQuAD) 4. [Fine-tuning & starting Sagemaker Training Job](#Fine-tuning-\&-starting-Sagemaker-Training-Job) 1. [Creating an Estimator and start a training job](#Creating-an-Estimator-and-start-a-training-job) 2. [Estimator Parameters](#Estimator-Parameters) 3. [Download fine-tuned model from s3](#Download-fine-tuned-model-from-s3) 4. [Question Answering on Local](#Question-Answering-on-Local) 5. [_Coming soon_:Push model to the Hugging Face hub](#Push-model-to-the-Hugging-Face-hub) # Introduction このnotebookはHuggingFaceの[run_squad.py](https://github.com/huggingface/transformers/blob/master/examples/legacy/question-answering/run_squad.py)を日本語データで動作する様に変更を加えたものです。 データは[運転ドメインQAデータセット](https://nlp.ist.i.kyoto-u.ac.jp/index.php?Driving%20domain%20QA%20datasets)を使用します。 このデモでは、AmazonSageMakerのHuggingFace Estimatorを使用してSageMakerのトレーニングジョブを実行します。 _**NOTE: このデモは、SagemakerNotebookインスタンスで動作検証しています**_ _**データセットは各自許諾に同意の上ダウンロードしていただけますようお願いいたします(データサイズは約4MBです)**_ # Development Environment and Permissions ## Installation このNotebookはSageMakerの`conda_pytorch_p36`カーネルを利用しています。 日本語処理のため、`transformers`ではなく`transformers[ja]`をインスールします。 **_Note: このnotebook上で推論テストを行う場合、(バージョンが古い場合は)pytorchのバージョンアップが必要になります。_** ``` # localで推論のテストを行う場合 !pip install torch==1.7.1 !pip install "sagemaker>=2.31.0" "transformers[ja]==4.6.1" "datasets[s3]==1.6.2" --upgrade ``` ## Permissions ローカル環境でSagemakerを使用する場合はSagemakerに必要な権限を持つIAMロールにアクセスする必要があります。[こちら](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html)を参照してください ``` import sagemaker sess = sagemaker.Session() # sagemaker 
session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it not exists sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() role = sagemaker.get_execution_role() sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker bucket: {sess.default_bucket()}") print(f"sagemaker session region: {sess.boto_region_name}") ``` # データの準備 事前にデータ(`DDQA-1.0.tar.gz`)をこのnotobookと同じ階層に配置してください 以下では、データをダウンロードして解凍 (unzip) します。 ``` # Unzip !tar -zxvf DDQA-1.0.tar.gz ``` ## Uploading data to `sagemaker_session_bucket` S3へデータをアップロードします。 ``` s3_prefix = 'samples/datasets/driving-domain-qa' input_train = sess.upload_data( path='./DDQA-1.0/RC-QA/DDQA-1.0_RC-QA_train.json', key_prefix=f'{s3_prefix}/train' ) input_validation = sess.upload_data( path='./DDQA-1.0/RC-QA/DDQA-1.0_RC-QA_dev.json', key_prefix=f'{s3_prefix}/valid' ) # データのUpload path print(input_train) print(input_validation) ``` # (Optional) Deepen your understanding of SQuAD **このセクションはオプションであり、Fine-tuning & starting Sagemaker Training Jobまでスキップできます** ## 運転ドメインQAデータセットについて 運転ドメインQAデータセットはSQuAD2.0形式となっており、`run_squad.py`でそのまま実行できます。 トレーニングジョブの実行とは関連しませんが、ここでは少しデータについて理解を深めたいと思います。 QAデータセットの形式(README_ja.txt) -------------------- 本QAデータセットの形式はSQuAD2.0と同じです。SQuAD2.0の問題は、「文章」、「質問」、「答え」の三つ組になっており、「答え」は「文章」の中の一部になっています。一部の問題は、「文章」の中に「答え」が無いなど、答えられない問題になっています。詳細は以下の論文をご参照ください。 Pranav Rajpurkar, Robin Jia, and Percy Liang. Know what you don’t know: Unanswerable questions for SQuAD, In ACL2018, pages 784–789. 
https://www.aclweb.org/anthology/P18-2124.pdf 以下に、jsonファイル中のQAデータセットを例示します。 注)jsonファイル中の"context"は「文章」 ```json { "version": "v2.0", "data": [ { "title": "運転ドメイン", "paragraphs": [ { "context": "著者は以下の文章を書きました。本日お昼頃、梅田方面へ自転車で出かけました。ちょっと大きな交差点に差し掛かりました。自転車にまたがった若い女性が信号待ちしています。その後で私も止まって信号が青になるのを待っていました。", "qas": [ { "id": "55604556390008_00", "question": "待っていました、の主語は何か?", "answers": [ { "text": "私", "answer_start": 85 }, { "text": "著者", "answer_start": 0 } ], "is_impossible": false } ] } ] } ] } ``` 参考文献 -------- 高橋 憲生、柴田 知秀、河原 大輔、黒橋 禎夫 ドメインを限定した機械読解モデルに基づく述語項構造解析 言語処理学会 第25回年次大会 発表論文集 (2019年3月)  https://www.anlp.jp/proceedings/annual_meeting/2019/pdf_dir/B1-4.pdf   ※データセットの構築方法について記載 Norio Takahashi, Tomohide Shibata, Daisuke Kawahara and Sadao Kurohashi. Machine Comprehension Improves Domain-Specific Japanese Predicate-Argument Structure Analysis, In Proceedings of 2019 Conference on Empirical Methods in Natural Language Processing and 9th International Joint Conference on Natural Language Processing, Workshop MRQA: Machine Reading for Question Answering, 2019.  
https://mrqa.github.io/assets/papers/42_Paper.pdf   ※データセットの構築方法、文章中に答えが無い問題について記載 ``` # データの読み込み import json with open("./DDQA-1.0/RC-QA/DDQA-1.0_RC-QA_train.json", "r") as f: squad = json.load(f) squad['data'][0]['paragraphs'][0] ``` SQuAD2.0形式は少し複雑なjson形式となっています。 次に`run_squad.py`内でどのような前処理が実行されているかについて少し触れます。 このparagraphsにはコンテキストが1つと質問が2つ、回答が6つ含まれていますが、後の処理ではここから **2つの「コンテキスト」、「質問」、「答え」の三つ組**が作成されます。 回答は1番目のものが使用されます。 ``` from transformers.data.processors.squad import SquadV2Processor from transformers import squad_convert_examples_to_features data_dir = './DDQA-1.0/RC-QA' train_file = 'DDQA-1.0_RC-QA_train.json' max_seq_length = 384 # トークン化後の最大入力シーケンス長。これより長いシーケンスは切り捨てられ、これより短いシーケンスはパディングされます doc_stride = 128 # 長いドキュメントをチャンクに分割する場合、チャンク間でどのくらいのストライドを取るか max_query_length = 64 # 質問のトークンの最大数。 これより長い質問はこの長さに切り捨てられます threads = 1 from transformers import AutoTokenizer # Tokenizer tokenizer = AutoTokenizer.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking') # jsonファイルを読みこみ、複雑な構造を分解します processor = SquadV2Processor() examples = processor.get_train_examples(data_dir, filename=train_file) # QuestionAnsweringモデルへ入力できるようにトークナイズします # 以下の実行に数分時間がかかります features, dataset = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, is_training=True, return_dataset="pt", threads=threads, ) ``` `dataset`は後に`dataloader`に渡され、以下のように使用されます。 ```python for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], "start_positions": batch[3], "end_positions": batch[4], } ``` 
`input_ids`, `attention_mask`, `token_type_ids`はTransformerベースのモデルで一般的な入力形式です QuestionAnsweringモデル特有のものとして`start_positions`, `end_positions`が挙げられます ``` # 参考に一つ目の中身を見てみます i = 0 dataset[i] # すでに テキスト→トークン化→ID化されているため、逆の操作で元に戻します。 # 質問と文章が含まれていることが確認できます tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(dataset[i][0])) # ID化→トークン化まで tokenizer.convert_ids_to_tokens(dataset[i][0]) # 回答は、start_positionsのトークンで始まり、end_positionsでトークンで終わるように表現されます # 試しに該当箇所のトークンを文字に戻してみます。 print(tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens([dataset[i][0][dataset[i][3]]]))) print(tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens([dataset[i][0][dataset[i][4]]]))) ``` これから実行する`QuestionAnswering`は、**「コンテキスト」**内から**「質問」**に対する**「答え」**となる`start_positions`と`end_positions`を予測し、そのスパンを抽出するタスクとなります。 # Fine-tuning & starting Sagemaker Training Job `HuggingFace`のトレーニングジョブを作成するためには`HuggingFace` Estimatorが必要になります。 Estimatorは、エンドツーエンドのAmazonSageMakerトレーニングおよびデプロイタスクを処理します。 Estimatorで、どのFine-tuningスクリプトを`entry_point`として使用するか、どの`instance_type`を使用するか、どの`hyperparameters`を渡すかなどを定義します。 ```python huggingface_estimator = HuggingFace( entry_point='train.py', source_dir='./scripts', base_job_name='huggingface-sdk-extension', instance_type='ml.p3.2xlarge', instance_count=1, transformers_version='4.4', pytorch_version='1.6', py_version='py36', role=role, hyperparameters={ 'epochs': 1, 'train_batch_size': 32, 'model_name':'distilbert-base-uncased' } ) ``` SageMakerトレーニングジョブを作成すると、SageMakerは`huggingface`コンテナを実行するために必要なec2インスタンスの起動と管理を行います。 Fine-tuningスクリプト`train.py`をアップロードし、`sagemaker_session_bucket`からコンテナ内の`/opt/ml/input/data`にデータをダウンロードして、トレーニングジョブを実行します。 ```python /opt/conda/bin/python train.py --epochs 1 --model_name distilbert-base-uncased --train_batch_size 32 ``` `HuggingFace estimator`で定義した`hyperparameters`は、名前付き引数として渡されます。 またSagemakerは、次のようなさまざまな環境変数を通じて、トレーニング環境に関する有用なプロパティを提供しています。 * 
`SM_MODEL_DIR`:トレーニングジョブがモデルアーティファクトを書き込むパスを表す文字列。トレーニング後、このディレクトリのアーティファクトはモデルホスティングのためにS3にアップロードされます。 * `SM_NUM_GPUS`:ホストで使用可能なGPUの数を表す整数。 * `SM_CHANNEL_XXXX`:指定されたチャネルの入力データを含むディレクトリへのパスを表す文字列。たとえば、HuggingFace estimatorのfit呼び出しで`train`と`test`という名前の2つの入力チャネルを指定すると、環境変数`SM_CHANNEL_TRAIN`と`SM_CHANNEL_TEST`が設定されます。 このトレーニングジョブをローカル環境で実行するには、`instance_type='local'`、GPUの場合は`instance_type='local_gpu'`で定義できます。 **_Note:これはSageMaker Studio内では機能しません_** ``` # requirements.txtはトレーニングジョブの実行前に実行されます(コンテナにライブラリを追加する際に使用します) # 残念なことにSageMakerのHuggingFaceコンテナは日本語処理(トークナイズ)に必要なライブラリが組み込まれていません # したがってtransformers[ja]==4.6.1をジョブ実行前にインストールしています(fugashiとipadic)でも構いません # tensorboardも組み込まれていないため、インストールします !pygmentize ./scripts/requirements.txt # トレーニングジョブで実行されるコード !pygmentize ./scripts/run_squad.py from sagemaker.huggingface import HuggingFace # hyperparameters, which are passed into the training job hyperparameters={ 'model_type': 'bert', 'model_name_or_path': 'cl-tohoku/bert-base-japanese-whole-word-masking', 'output_dir': '/opt/ml/model', 'data_dir':'/opt/ml/input/data', 'train_file': 'train/DDQA-1.0_RC-QA_train.json', 'predict_file': 'validation/DDQA-1.0_RC-QA_dev.json', 'version_2_with_negative': 'True', 'do_train': 'True', 'do_eval': 'True', 'fp16': 'True', 'per_gpu_train_batch_size': 16, 'per_gpu_eval_batch_size': 16, 'max_seq_length': 384, 'doc_stride': 128, 'max_query_length': 64, 'learning_rate': 5e-5, 'num_train_epochs': 2, #'max_steps': 100, # If > 0: set total number of training steps to perform. Override num_train_epochs. 
'save_steps': 1000, } # metric definition to extract the results metric_definitions=[ {"Name": "train_runtime", "Regex": "train_runtime.*=\D*(.*?)$"}, {'Name': 'train_samples_per_second', 'Regex': "train_samples_per_second.*=\D*(.*?)$"}, {'Name': 'epoch', 'Regex': "epoch.*=\D*(.*?)$"}, {'Name': 'f1', 'Regex': "f1.*=\D*(.*?)$"}, {'Name': 'exact_match', 'Regex': "exact_match.*=\D*(.*?)$"}] ``` ## Creating an Estimator and start a training job ``` # estimator huggingface_estimator = HuggingFace( entry_point='run_squad.py', source_dir='./scripts', metric_definitions=metric_definitions, instance_type='ml.p3.8xlarge', instance_count=1, volume_size=200, role=role, transformers_version='4.6', pytorch_version='1.7', py_version='py36', hyperparameters=hyperparameters ) # starting the train job with our uploaded datasets as input huggingface_estimator.fit({'train': input_train, 'validation': input_validation}) # ml.p3.8xlarge, 2 epochでの実行時間の目安 # Training seconds: 758 # Billable seconds: 758 ``` ## Estimator Parameters ``` # container image used for training job print(f"container image used for training job: \n{huggingface_estimator.image_uri}\n") # s3 uri where the trained model is located print(f"s3 uri where the trained model is located: \n{huggingface_estimator.model_data}\n") # latest training job name for this estimator print(f"latest training job name for this estimator: \n{huggingface_estimator.latest_training_job.name}\n") # access the logs of the training job huggingface_estimator.sagemaker_session.logs_for_job(huggingface_estimator.latest_training_job.name) ``` ## Download-fine-tuned-model-from-s3 ``` import os OUTPUT_DIR = './output/' if not os.path.exists(OUTPUT_DIR): os.makedirs(OUTPUT_DIR) from sagemaker.s3 import S3Downloader # 学習したモデルのダウンロード S3Downloader.download( s3_uri=huggingface_estimator.model_data, # s3 uri where the trained model is located local_path='.', # local path where *.targ.gz is saved sagemaker_session=sess # sagemaker session used for training 
the model ) # OUTPUT_DIRに解凍します !tar -zxvf model.tar.gz -C output ``` ## Question Answering on Local ``` from transformers import AutoTokenizer, AutoModelForQuestionAnswering import torch model = AutoModelForQuestionAnswering.from_pretrained('./output') tokenizer = AutoTokenizer.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking') ``` 以下のセルは`./DDQA-1.0/RC-QA/DDQA-1.0_RC-QA_dev.json`からコピーしたものです ``` context = '実は先週、CBR600RRで事故りました。たまにはCBRにも乗らなきゃなーと思い久々にCBRで出勤したところ、家から10分ほど走ったところにある片側一車線の交差点で対向右折車と衝突してしまいました。自分が直進青信号で交差点へ進入したところで対向右折車線の車が突然右折を開始。とっさに急ブレーキはかけましたが、止まることはできずに右折車に衝突、自分は空中で一回転して左斜め数メートル先の路上へと飛ばされました。' question='何に乗っていて事故りましたか?' #context = 'まぁ,何回か改正してるわけで,自転車を走らせる領域を変更しないって言うのは,怠慢っていうか責任逃れっていうか,道交法に携わってるヤツはみんな馬鹿なのか.大体の人はここまで極端な意見ではないだろうけど,自転車は歩道を走るほうが自然だとは考えているだろう.というのも, みんな自転車乗ってる時歩道を走るでしょ?自転車で歩道走ってても歩行者にそこまで危険な目に合わせないと考えているし,車道に出たら明らかに危険な目に合うと考えている.' #question='大体の人は自転車はどこを走るのが自然だと思っている?' #context = '幸いけが人が出なくて良かったものの、タイヤの脱落事故が後を絶たない。先日も高速道路でトラックのタイヤがはずれ、中央分離帯を越え、反対車線を通行していた観光バスに直撃した。不幸にもバスを運転していた運転手さんがお亡くなりになった。もし、僕がこんな場面に遭遇していたら、この運転手さんのように、乗客の安全を考えて冷静に止まっただろうか?' #question = '後を絶たないのは何ですか?' #context = '右折待ちの一般ドライバーの方は、直進車線からの右折タクシーに驚いて右折のタイミングを失ってしまい、更なる混雑を招いているようでした」と述べていました。2004年8月6日付けには、ある女性が「道を譲っても挨拶をしない人が多い。特に女性の方。そのため意地悪ですが対向車のドライバーが女性だと譲りません。私はまだ人間が出来ていないので受け流すことが出来ません」ということを言っていましたが、その気持ち良く分かります。私は横断歩道の歩行者に対しては特別真面目で、歩行者がいるかどうかを常に注意して、いるときは必ず止まるよう心掛けています。それでも気付かずに止まることができなかったときは、「ああ、悪いことしちゃったな…」と、バックミラーを見ながら思います。' #question = '歩行者がいるかどうかを常に注意しているのは誰ですか?' # 推論 inputs = tokenizer.encode_plus(question, context, add_special_tokens=True, return_tensors="pt") input_ids = inputs["input_ids"].tolist()[0] output = model(**inputs) answer_start = torch.argmax(output.start_logits) answer_end = torch.argmax(output.end_logits) + 1 answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end])) # 結果 print("質問: "+question) print("回答: "+answer) ```
github_jupyter
``` from idf_analysis.idf_class import IntensityDurationFrequencyAnalyse from idf_analysis.definitions import * import pandas as pd %matplotlib inline ``` # Intensity Duration Frequency Analyse ## Parameter **series_kind**: `PARTIAL` = Partielle Serie (partial duration series, PDS) (peak over threshold, POT) `ANNUAL` = Jährliche Serie (annual maximum series, AMS) **worksheet**: `DWA`: - DWA-A 531 - KOSTRA - empfohlen - Stützstellen: 60min und 12h `DWA-adv`: - DWA-A 531 - Unterscheidung in überwiegend konvektiv und advektiv verursachte Starkregen - Stützstellen: 3h und 24h `ATV`: - ATV-A 121 - Stützstellen: 3h und 48h **output_path** = Pfad zum Speichern **extended_durations** = Inkludiert die Dauerstufen `[720, 1080, 1440, 2880, 4320, 5760, 7200, 8640]` in der Analyse ``` out = 'example' name = 'EXAMPLE' idf = IntensityDurationFrequencyAnalyse(series_kind=PARTIAL, worksheet=DWA, output_path=out, extended_durations=True, output_filename=name, auto_save=True, unix=True) ``` Es wird nun ein Ordner `<name>_data` erstellt in `<out>` ``` data = pd.read_parquet('example/expample_rain_data.parquet') data.head() data.tail() idf.set_series(data['precipitation']) ``` Bei jeder neuen Berechnung werden Zwischenergebnisse erstellt, welche nur abhängig von der gewählten Serie `series_kind`sind. Dieser Vorgang dauert einige Sekunden. Abgerufen können diese Zwischenergebnisse mit: (Dies Operation geschieht im Hintergrund und muss nicht explizit durchgeführt werden.) ``` idf.interim_results ``` Ist `auto_save=True` werden die Zwischenergebnisse je Serie (abhängig von `output_path` und `output_filename`) in die csv-Datei `<name>_interim_results.csv` geschrieben. Dies wird empfohlen da jeder neue Aufruf etwas Zeit beansprucht und dadurch die Bereichnung verkürzt wird. ``` ! 
tree example/EXAMPLE_data pd.read_csv(idf.output_filename + '_interim_results.csv', index_col=0) ``` Aus diesen Zwischenergebnissen werden in weiterer Folge die Parameter zur Berechnung der Regenhöhe und Regenspende ermittelt. Hier sind bereist die Berechnungsverfahren und Stückpunkte laut dem gewählten `worksheet` berücksichtigt. (Dies Operation geschieht ebenfalls im Hintergrund und muss nicht explizit durchgeführt werden.) ``` idf.parameter ``` ## Berechnungen ``` from IPython.display import Latex def print_latex(string): Latex('$' + string.replace(' ', '\;')+ '$') idf.depth_of_rainfall(duration=15, return_period=1) idf.print_depth_of_rainfall(duration=15, return_period=1) idf.rain_flow_rate(duration=15, return_period=1) idf.print_rain_flow_rate(duration=15, return_period=1) idf.r_720_1() idf.get_return_period(height_of_rainfall=10, duration=15) idf.get_duration(height_of_rainfall=10, return_period=1) idf.result_table() idf.result_table(add_names=True) ``` To save the table as a csv: ``` idf.write_table() ! tree example/EXAMPLE_data fig = idf.result_figure(color=True) ``` ---------------------------------------------------------------------------------- *Dieser Block funktioniert nur in einen laufenden Jupyter Notebook!* ``` from ipywidgets import interact, fixed, interact_manual import ipywidgets as widgets def f(min_duration = 5, max_duration=720, color=True, logx=False): fig = idf.result_figure(return_periods=[1,2,5,10,50], min_duration=min_duration, max_duration=max_duration, color=color, logx=logx) interact(f, min_duration = (0,60,5), max_duration=(60,8640, 60), color=True, logx=False) ``` ---------------------------------------------------------------------------------- To save the plot as a png use: ``` idf.result_plot() ! tree example/EXAMPLE_data ```
github_jupyter
``` # Compute overlap proportion between: # 1) clustering of individuals based on similar FCI responses # 2) FCI question clusters results and their behavioral interpretations # Author: Jessica Bartley # Last edited: 11/6/17 %matplotlib inline # libraries import numpy as np from __future__ import division import matplotlib.pyplot as plt import pandas as pd from math import pi #import seaborn as sns # read in files f_idKey = "idKey.csv" #Key from study PIDs to IDs Eric used in R code (lets call those rIDs) f_fciQcomm = "fci_charac.csv" #List of question (lets shorten to Q) community membership for all FCI answer choices. f_fciPcomm = "communities.csv" #List of rIDs making up each person (lets shorten to P) cluster f_fciresp = "responses.csv" #Each subject's response for the 9 in-scanner FCI questions. """ Global definitions """ # Number of P clusters observed nclusters = 13 # all possible in-scanner FCI Q responses (named via Eric's Q coding scheme) Qs = ['X2a', 'X2b', 'X2c', 'X2d', 'X3a', 'X3b', 'X3c', \ 'X3e', 'X6a', 'X6b', 'X6c', 'X6e', 'X7a', 'X7b', \ 'X7c', 'X7e', 'X8a', 'X8b', 'X8d', 'X8e', 'X12b', \ 'X12c', 'X12d', 'X12e', 'X14a', 'X14b', 'X14c', \ 'X14d', 'X27a', 'X27b', 'X27c', 'X27d', 'X29a', \ 'X29b', 'X29d', 'X29e'] # Incorrect FCI answer choices not included in Eric's origional Q community analysis (due to infrequent responses). missingQs = ['X6e', 'X12e', 'X29a', 'X29e'] # Eric's names for each FCI Q community membership. 
Cluster '10' has the correct answer choices and cluster 'NA' has the above missingQs allmems = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'NA'] # Number of subjects observed for each P cluster, ordered sequentially from cluster 1 to cluster 13 clustersizes = [24, 17, 10, 10, 8, 7, 7, 5, 5, 5, 4, 4, 2] # All P clusters by name clusterlist = ('cluster1','cluster2','cluster3','cluster4','cluster5',\ 'cluster6','cluster7','cluster8','cluster9','cluster10',\ 'cluster11','cluster12','cluster13') def dequote(a_string): """ If a string has single or double quotes around it, remove them. Make sure the pair of quotes match. If a matching pair of quotes is not found, return the string unchanged. """ if (a_string[0] == a_string[-1]) and a_string.startswith(("'", '"')): return a_string[1:-1] return a_string def listToFloat(a_list): """ If all elements in a list are numbers being interpreted as strings then turn them into floats. """ a = [] for i in range(0,len(a_list)): a.append(int(dequote(a_list[i]))) a_list = a return a_list def dropExtraQs(allQs, scannerQs): """ Remove all quesiton answer choices that were not shown in the scanner. """ qlist = [] for item in allQs: if item[0] in scannerQs: qlist.append(item) return qlist def computeProportion(a_list): """ Creates dictionay where keys = Q community membership value = proportion of Q responses falling within any given Q community. e.g. 
for Q2 in cluster 1: {'1':0.75, '2': 0.2, '3': 0 ...} """ proportion = {} for item in a_list: key = str(item) if key in proportion.keys(): proportion[key] += 1 else: proportion[key] = 1 total = sum(proportion.itervalues(), 0.0) proportion = {item: value / total for item, value in proportion.iteritems()} for mem in allmems: if mem not in proportion.keys(): proportion.update({mem: 0}) return proportion # convert files to readable format list_idKey = np.genfromtxt(f_idKey, names=True, delimiter=',', dtype=None) list_pComm = np.genfromtxt(f_fciPcomm, names=True, delimiter=',', dtype=None) list_Qcomm = np.genfromtxt(f_fciQcomm, names=True, delimiter=',', dtype=None) list_Qcomm = dropExtraQs(list_Qcomm, Qs) # keep only Q responses shown within the scanning session list_fciresp = np.genfromtxt(f_fciresp, names=True, delimiter=',', dtype=None) # make the above lists into separate strings communities = zip(*list_pComm)[0] # P clusters string {"1.1", "1.2"...} subjects = zip(*list_pComm)[1] # rIDs ordered by appearence in P clusters rIDs = zip(*list_idKey)[0] rIDs = listToFloat(rIDs) # rIDs ordered numerically IDs = zip(*list_idKey)[1] # Study IDs QIDs = zip(*list_Qcomm)[0] # Eric's Q response IDs questionlist = list(list_fciresp.dtype.names) # Qs asked within scanner questionlist.pop(0) # remove empty first element from list # generate dictionaries commKey = {} # rID : P cluster for counter, communitymem in enumerate(communities): commKey.update({subjects[counter]: communitymem}) idKey_pid2rid = {} # study ID : rID for counter, rID in enumerate(rIDs): idKey_pid2rid.update({IDs[counter]: rID}) idKey_rid2pid = {} # rID : study ID for counter, rID in enumerate(rIDs): idKey_rid2pid.update({rID: IDs[counter]}) questKey = {} # FCI Q response code : Q community membership for counter, qid in enumerate(QIDs): questKey.update({qid: list_Qcomm[counter][1]}) for question in questionlist: x = "{0}".format(question.replace('Q','X')) questKey.update({x+'N':0}) for missingQ in missingQs: 
questKey.update({missingQ: 'NA'})  # body of the `for missingQ in missingQs:` loop above

# P cluster number -> list of member rIDs, e.g. {'1': [rid, rid, ...], ...}
# Sub-cluster names like "1.2" are collapsed to their major number ("1").
pComm = {}
for pair in list_pComm:
    key = str(pair[0].split('.')[0].replace('\"', ''))
    value = pair[1]
    if key not in pComm:
        pComm[key] = []
    pComm[key].append(value)


def _answer_letter(qnumber, x):
    """Map an in-scanner numeric response `x` (1..4) for FCI question
    `qnumber` to the original FCI answer letter; 'N' for anything else
    (no response).

    Some original FCI answer choices were not shown in the scanner (the
    in-scanner FCI showed only 4 of the original 5 choices), so for some
    questions the numeric response must be re-ordered to map to the correct
    original FCI letter.

    NOTE: hoisted out of respToQcode() -- the original re-defined this
    function inside the innermost loop on every iteration.
    """
    if qnumber in (3, 6, 7):
        letters = {1: 'a', 2: 'b', 3: 'c', 4: 'e'}
    elif qnumber in (8, 29):
        letters = {1: 'a', 2: 'b', 3: 'd', 4: 'e'}
    elif qnumber == 12:
        letters = {1: 'b', 2: 'c', 3: 'd', 4: 'e'}
    else:
        letters = {1: 'a', 2: 'b', 3: 'c', 4: 'd'}
    return letters.get(x, 'N')


def respToQcode():
    """
    Converts in-scanner responses (1="a", 2="b"...) to FCI response codes
    ("X2a", "X2b"...), e.g. takes in
        ('"216"', 3, 4, 3, 4, 2, 1, 2, 3, 2)
    and gives back
        ('"216"', 'X2c', 'X3e', 'X6b', 'X7e', 'X8b', 'X12b', 'X14b', 'X27c', 'X29b')
    """
    names = list_fciresp.dtype.names
    list_fciresp_mod = []
    for participant in list_fciresp:
        sublist = []
        for name in names:
            if 'q' not in name.lower():
                # Non-question column (the subject ID): pass through unchanged.
                sublist.append(participant[name])
            else:
                qnumber = int(name[1:])
                # Build e.g. "X12b" from column "Q12" and the mapped letter.
                x = "{0}{1}".format(name.replace('Q', 'X'),
                                    _answer_letter(qnumber, participant[name]))
                sublist.append(x)
        list_fciresp_mod.append(tuple(sublist))
    return list_fciresp_mod


# Compute the coded responses ONCE. The original called respToQcode() inside
# every loop below, re-parsing the whole response table O(n^2) times.
resp_codes = respToQcode()

# The below gives each subject's index in the resp_codes list of answer choices.
idToClusterIndex = {}  # quoted study ID : list of row indices in resp_codes
person_list = []
for row in resp_codes:
    person_list.append(dequote(row[0]))
for counter, person in enumerate(person_list):
    # Index of subject in resp_codes (kept as a list to preserve the
    # original value shape; callers take element [0]).
    personindex = [i for i, r in enumerate(resp_codes) if resp_codes[counter][0] in r]
    idToClusterIndex.update({resp_codes[counter][0]: personindex})

"""
Compute the proportion of Q community membership represented within each P cluster
"""

# Step 1: compute proportion of Q community membership for each Q within a P cluster
# NOTE(review): the original declared `personlist = []` here but appended to
# `person_list`, silently duplicating every entry. The duplication was
# harmless (person_list is not read again), so the redundant loop is dropped.

# Create nested dictionary with:
# dict structure: allMemberships['P cluster']['Question']['Q membership'] = Proportion
# e.g.
allMemberships['11']['Q2']['1'] = 0.0046 # key = P cluster name # value = dictionary w/ # key = Q# # value = dictionary w/ # key = Q community membership name # value = proportion of Q community membership allMemberships = {} accuracy = {} # P cluster : average accuracy assoicated with P cluster for cluster in pComm.keys(): #loop through P clusters clustMembership = {} for qcounter, question in enumerate(questionlist): #loop through questions in P cluster FCImembership_list = [] for idcounter, id in enumerate(pComm[cluster]): #loop through subject responses to question person = '"' + str(idKey_rid2pid[id]) + '"' num = str(idKey_rid2pid[id]) numAsString = '"' + num + '"' index = idToClusterIndex[numAsString][0] #map Q response code to Q community membership FCImembership_list.append(questKey[respToQcode()[index][qcounter+1]]) Qmembership = computeProportion(FCImembership_list) clustMembership.update({question: Qmembership}) #get average accuracy in P cluster to use later acc_list = [] for question in questionlist: acc_list.append(clustMembership[question]['10']) avg_acc = reduce(lambda x, y: x + y, acc_list) / len(acc_list) accuracy.update({cluster: avg_acc}) allMemberships.update({cluster: clustMembership}) print accuracy # Step 2: compute proportion of Q community membership per cluster (average across Q's within a P cluster) # Create nested dictionary with: # dict structure: clusterdict['P cluster']['Q membership'] = Average proportion # e.g. clusterdict['2']['10'] = 0.732 # key = P cluster name # value = dictionary w/ # key = Q community membership name # value = Average proportion across cluster clusterdict = {} for cluster in pComm.keys(): #loop through P clusters d = {} for mem in allmems: #loop through Q community memberships #get Q community membership proportion across questions proportion_list = [] for question in questionlist: #loop through questions in P cluster proportion_list.append(allMemberships[cluster][question][mem]) #gives [propQ1,propQ2,...] 
for each mem in allmems #compute average proportion of Q community membership for P cluster d.update({mem: reduce(lambda x, y: x + y, proportion_list) / len(proportion_list)}) clusterdict.update({cluster: d}) # Step 3: scale Q community membership proportions by P cluster size for visualization scaledProportions = {} for cluster in pComm.keys(): #loop through P clusters scaledNestedDict = {} keys = clusterdict[cluster].keys() #Q community memberships values = clusterdict[cluster].values() #average (unscaled) proportions scaledValues = [] #scale average Q community membership proportions by cluster size for value in values: scaledValues.append(value*(len(pComm[cluster])/sum(clustersizes))) for index, key in enumerate(keys): scaledNestedDict.update({key: scaledValues[index]}) scaledProportions.update({cluster: scaledNestedDict}) """ Create Facited radar plots """ # Set data df = pd.DataFrame({ 'group': ['1','2','3','4','5','6','7','8','9','10','11','12','13'], # The below are scaled by cluster size. Probably the best way to do it. 
'm1': [scaledProportions['1']['1'],scaledProportions['2']['1'],scaledProportions['3']['1'],scaledProportions['4']['1'],scaledProportions['5']['1'],scaledProportions['6']['1'],scaledProportions['7']['1'],scaledProportions['8']['1'],scaledProportions['9']['1'],scaledProportions['10']['1'],scaledProportions['11']['1'],scaledProportions['12']['1'],scaledProportions['13']['1']], 'm2': [scaledProportions['1']['2'],scaledProportions['2']['2'],scaledProportions['3']['2'],scaledProportions['4']['2'],scaledProportions['5']['2'],scaledProportions['6']['2'],scaledProportions['7']['2'],scaledProportions['8']['2'],scaledProportions['9']['2'],scaledProportions['10']['2'],scaledProportions['11']['2'],scaledProportions['12']['2'],scaledProportions['13']['2']], 'm3': [scaledProportions['1']['3'],scaledProportions['2']['3'],scaledProportions['3']['3'],scaledProportions['4']['3'],scaledProportions['5']['3'],scaledProportions['6']['3'],scaledProportions['7']['3'],scaledProportions['8']['3'],scaledProportions['9']['3'],scaledProportions['10']['3'],scaledProportions['11']['3'],scaledProportions['12']['3'],scaledProportions['13']['3']], 'm4': [scaledProportions['1']['4'],scaledProportions['2']['4'],scaledProportions['3']['4'],scaledProportions['4']['4'],scaledProportions['5']['4'],scaledProportions['6']['4'],scaledProportions['7']['4'],scaledProportions['8']['4'],scaledProportions['9']['4'],scaledProportions['10']['4'],scaledProportions['11']['4'],scaledProportions['12']['4'],scaledProportions['13']['4']], 'm5': [scaledProportions['1']['5'],scaledProportions['2']['5'],scaledProportions['3']['5'],scaledProportions['4']['5'],scaledProportions['5']['5'],scaledProportions['6']['5'],scaledProportions['7']['5'],scaledProportions['8']['5'],scaledProportions['9']['5'],scaledProportions['10']['5'],scaledProportions['11']['5'],scaledProportions['12']['5'],scaledProportions['13']['5']], 'm6': 
[scaledProportions['1']['6'],scaledProportions['2']['6'],scaledProportions['3']['6'],scaledProportions['4']['6'],scaledProportions['5']['6'],scaledProportions['6']['6'],scaledProportions['7']['6'],scaledProportions['8']['6'],scaledProportions['9']['6'],scaledProportions['10']['6'],scaledProportions['11']['6'],scaledProportions['12']['6'],scaledProportions['13']['6']], 'm7': [scaledProportions['1']['7'],scaledProportions['2']['7'],scaledProportions['3']['7'],scaledProportions['4']['7'],scaledProportions['5']['7'],scaledProportions['6']['7'],scaledProportions['7']['7'],scaledProportions['8']['7'],scaledProportions['9']['7'],scaledProportions['10']['7'],scaledProportions['11']['7'],scaledProportions['12']['7'],scaledProportions['13']['7']], 'm8': [scaledProportions['1']['8'],scaledProportions['2']['8'],scaledProportions['3']['8'],scaledProportions['4']['8'],scaledProportions['5']['8'],scaledProportions['6']['8'],scaledProportions['7']['8'],scaledProportions['8']['8'],scaledProportions['9']['8'],scaledProportions['10']['8'],scaledProportions['11']['8'],scaledProportions['12']['8'],scaledProportions['13']['8']], 'm9': [scaledProportions['1']['9'],scaledProportions['2']['9'],scaledProportions['3']['9'],scaledProportions['4']['9'],scaledProportions['5']['9'],scaledProportions['6']['9'],scaledProportions['7']['9'],scaledProportions['8']['9'],scaledProportions['9']['9'],scaledProportions['10']['9'],scaledProportions['11']['9'],scaledProportions['12']['9'],scaledProportions['13']['9']], #'m10': [scaledProportions['1']['10'],scaledProportions['2']['10'],scaledProportions['3']['10'],scaledProportions['4']['10'],scaledProportions['5']['10'],scaledProportions['6']['10'],scaledProportions['7']['10'],scaledProportions['8']['10'],scaledProportions['9']['10'],scaledProportions['10']['10'],scaledProportions['11']['10'],scaledProportions['12']['10'],scaledProportions['13']['10']], #'mN': 
[scaledProportions['1']['NA'],scaledProportions['2']['NA'],scaledProportions['3']['NA'],scaledProportions['4']['NA'],scaledProportions['5']['NA'],scaledProportions['6']['NA'],scaledProportions['7']['NA'],scaledProportions['8']['NA'],scaledProportions['9']['NA'],scaledProportions['10']['NA'],scaledProportions['11']['NA'],scaledProportions['12']['NA'],scaledProportions['13']['NA']] }) def make_spider( row, title, color): # number of variable categories=list(df)[1:] N = len(categories) # What will be the angle of each axis in the plot? (we divide the plot / number of variable) angles = [n / float(N) * 2 * pi for n in range(N)] angles += angles[:1] # Initialise the spider plot ax = plt.subplot(4,4,row+1, polar=True, ) # If you want the first axis to be on top: ax.set_theta_offset(pi / 2) ax.set_theta_direction(-1) # Draw one axis per variable + add labels plt.xticks(angles[:-1], categories, color='grey', size=7) # Extend margins plt.subplots_adjust(left=0.2, hspace=0.4) #plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0) # Draw ylabels ax.set_rlabel_position(0) #values if I'm plotting the scaled values and including cluster 10 (correct answers): # plt.yticks([.5,.1,.15], [".5",".1",".15"], color="grey", size=6) # plt.ylim(0,0.2) #values if I'm plotting the scaled values and not including cluster 10 (correct answers) plt.yticks([.01,.02], ["",""], color="grey", size=6) plt.ylim(0,0.03) # Ind1 values=df.loc[row].drop('group').values.flatten().tolist() values += values[:1] ax.plot(angles, values, color=color, linewidth=2, linestyle='solid') ax.fill(angles, values, color=color, alpha=0.4) # Add a title ttl = plt.title(title, size=11, color=color, y=1.052) ttl.set_position([.5, 1.12]) # ------- PART 2: Apply to all individuals # initialize the figure my_dpi=250 plt.figure(figsize=(3200/my_dpi, 3200/my_dpi), dpi=my_dpi) # Create a color palette: # good options: "tab10", "Set2", "gist_stern", "tab20", "tab20b", "Paired", "Dark2", "Set1" my_palette = 
plt.cm.get_cmap("tab10", len(df.index)) # Plot the three normative groups for row in range(0, 3): make_spider( row=row, title='cluster '+df['group'][row]+' (n='+str(clustersizes[row])+')', color=my_palette(row+1)) # plt.text(1, 0.037, r'accuracy {0}%'.format(acc), color="gray", size=8.5) plt.text(-.815, 0.04651, r'{0}% correct'.format(int(accuracy[df['group'][row]]*100)), color="gray", size=8) plt.show() plt.clf() """ Create singular radar plot Results are basically unreadable... """ # Set data df = pd.DataFrame({ 'group': ['1 (n=27)','2 (n=18)','3 (n=10)','4 (n=10)','5 (n=8)',\ '6 (n=7)','7 (n=7)','8 (n=5)','9 (n=5)','10 (n=5)',\ '11 (n=4)','12 (n=4)','13 (n=2)'], # The below are scaled by cluster size 'm1': [scaledProportions['1']['1'],scaledProportions['2']['1'],scaledProportions['3']['1'],scaledProportions['4']['1'],scaledProportions['5']['1'],scaledProportions['6']['1'],scaledProportions['7']['1'],scaledProportions['8']['1'],scaledProportions['9']['1'],scaledProportions['10']['1'],scaledProportions['11']['1'],scaledProportions['12']['1'],scaledProportions['13']['1']], 'm2': [scaledProportions['1']['2'],scaledProportions['2']['2'],scaledProportions['3']['2'],scaledProportions['4']['2'],scaledProportions['5']['2'],scaledProportions['6']['2'],scaledProportions['7']['2'],scaledProportions['8']['2'],scaledProportions['9']['2'],scaledProportions['10']['2'],scaledProportions['11']['2'],scaledProportions['12']['2'],scaledProportions['13']['2']], 'm3': [scaledProportions['1']['3'],scaledProportions['2']['3'],scaledProportions['3']['3'],scaledProportions['4']['3'],scaledProportions['5']['3'],scaledProportions['6']['3'],scaledProportions['7']['3'],scaledProportions['8']['3'],scaledProportions['9']['3'],scaledProportions['10']['3'],scaledProportions['11']['3'],scaledProportions['12']['3'],scaledProportions['13']['3']], 'm4': 
[scaledProportions['1']['4'],scaledProportions['2']['4'],scaledProportions['3']['4'],scaledProportions['4']['4'],scaledProportions['5']['4'],scaledProportions['6']['4'],scaledProportions['7']['4'],scaledProportions['8']['4'],scaledProportions['9']['4'],scaledProportions['10']['4'],scaledProportions['11']['4'],scaledProportions['12']['4'],scaledProportions['13']['4']], 'm5': [scaledProportions['1']['5'],scaledProportions['2']['5'],scaledProportions['3']['5'],scaledProportions['4']['5'],scaledProportions['5']['5'],scaledProportions['6']['5'],scaledProportions['7']['5'],scaledProportions['8']['5'],scaledProportions['9']['5'],scaledProportions['10']['5'],scaledProportions['11']['5'],scaledProportions['12']['5'],scaledProportions['13']['5']], 'm6': [scaledProportions['1']['6'],scaledProportions['2']['6'],scaledProportions['3']['6'],scaledProportions['4']['6'],scaledProportions['5']['6'],scaledProportions['6']['6'],scaledProportions['7']['6'],scaledProportions['8']['6'],scaledProportions['9']['6'],scaledProportions['10']['6'],scaledProportions['11']['6'],scaledProportions['12']['6'],scaledProportions['13']['6']], 'm7': [scaledProportions['1']['7'],scaledProportions['2']['7'],scaledProportions['3']['7'],scaledProportions['4']['7'],scaledProportions['5']['7'],scaledProportions['6']['7'],scaledProportions['7']['7'],scaledProportions['8']['7'],scaledProportions['9']['7'],scaledProportions['10']['7'],scaledProportions['11']['7'],scaledProportions['12']['7'],scaledProportions['13']['7']], 'm8': [scaledProportions['1']['8'],scaledProportions['2']['8'],scaledProportions['3']['8'],scaledProportions['4']['8'],scaledProportions['5']['8'],scaledProportions['6']['8'],scaledProportions['7']['8'],scaledProportions['8']['8'],scaledProportions['9']['8'],scaledProportions['10']['8'],scaledProportions['11']['8'],scaledProportions['12']['8'],scaledProportions['13']['8']], 'm9': 
[scaledProportions['1']['9'],scaledProportions['2']['9'],scaledProportions['3']['9'],scaledProportions['4']['9'],scaledProportions['5']['9'],scaledProportions['6']['9'],scaledProportions['7']['9'],scaledProportions['8']['9'],scaledProportions['9']['9'],scaledProportions['10']['9'],scaledProportions['11']['9'],scaledProportions['12']['9'],scaledProportions['13']['9']], #'m10': [scaledProportions['1']['10'],scaledProportions['2']['10'],scaledProportions['3']['10'],scaledProportions['4']['10'],scaledProportions['5']['10'],scaledProportions['6']['10'],scaledProportions['7']['10'],scaledProportions['8']['10'],scaledProportions['9']['10'],scaledProportions['10']['10'],scaledProportions['11']['10'],scaledProportions['12']['10'],scaledProportions['13']['10']], #'mN': [scaledProportions['1']['NA'],scaledProportions['2']['NA'],scaledProportions['3']['NA'],scaledProportions['4']['NA'],scaledProportions['5']['NA'],scaledProportions['6']['NA'],scaledProportions['7']['NA'],scaledProportions['8']['NA'],scaledProportions['9']['NA'],scaledProportions['10']['NA'],scaledProportions['11']['NA'],scaledProportions['12']['NA'],scaledProportions['13']['NA']] }) # ------- PART 1: Create background # number of variable categories=list(df)[1:] N = len(categories) # What will be the angle of each axis in the plot? 
(we divide the plot / number of variable) angles = [n / float(N) * 2 * pi for n in range(N)] angles += angles[:1] # Initialise the spider plot ax = plt.subplot(111, polar=True) # If you want the first axis to be on top: ax.set_theta_offset(pi / 2) ax.set_theta_direction(-1) # Draw one axe per variable + add labels labels yet plt.xticks(angles[:-1], categories) # Draw ylabels ax.set_rlabel_position(0) #good values if I'm including cluster 10 (all correct answers) #plt.yticks([.5,.1,.15], [".5",".1",".15"], color="grey", size=6) #plt.ylim(0,0.2) plt.yticks([.01,.02], [".01",".02"], color="grey", size=6) plt.ylim(0,0.03) # ------- PART 2: Add plots # Plot each individual = each line of the data # Ind1: Normaitve Group A values=df.loc[0].drop('group').values.flatten().tolist() values += values[:1] ax.plot(angles, values, linewidth=1, linestyle='solid', label="cluster 1") ax.fill(angles, values, 'b', alpha=0.1) # Ind2: Normaitve Group B values=df.loc[1].drop('group').values.flatten().tolist() values += values[:1] ax.plot(angles, values, linewidth=1, linestyle='solid', label="cluster 2") ax.fill(angles, values, 'r', alpha=0.1) # Ind3: Normative Group C values=df.loc[2].drop('group').values.flatten().tolist() values += values[:1] ax.plot(angles, values, linewidth=1, linestyle='solid', label="cluster 3") ax.fill(angles, values, 'g', alpha=0.1) # Add legend plt.legend(loc='upper right', bbox_to_anchor=(1.65, .97)) plt.show() plt.clf() ```
github_jupyter
## DreliaCalc LOC Report ``` import arrow dateformat='DD.MM.YYYY - HH:mm' print(arrow.now('Europe/Vienna').format(dateformat)) %cd /opt/notebooks/dmyplant2 !git pull --rebase %cd ../dReliaCalc import dmyplant2 import pandas as pd import numpy as np from pprint import pprint as pp dval = pd.read_csv("input.csv",sep=';', encoding='utf-8') dval['val start'] = pd.to_datetime(dval['val start'], format='%d.%m.%Y') failures = pd.read_csv("failures.csv",sep=';', encoding='utf-8') failures['date'] = pd.to_datetime(failures['date'], format='%d.%m.%Y') from dmyplant2 import cred mp = dmyplant2.MyPlant(7200) vl = dmyplant2.Validation(mp,dval, cui_log=False) import ipywidgets as widgets from IPython.display import display le = vl.engines[0] w = widgets.Dropdown( options=['Pick Engine'] + [e.__str__() for e in vl.engines], value='Pick Engine', description='Engine:', ) def on_change(change): global le if change['type'] == 'change' and change['name'] == 'value': le = vl.eng_serialNumber(change['new'][:7]) print(le) w.observe(on_change) display(w) from pprint import pprint as pp id = le.id print(le) # fetch Lube Oil Consuption data locdef = {227: 'OilConsumption', 237: 'DeltaOpH', 228: 'OilVolume', 225: 'ActiveEnergy', 226: 'AvgPower'} limit = 2500 # call myplant dfr = le.batch_hist_dataItems(itemIds=locdef, p_limit=2500 ,timeCycle=30) # Set Type of time column to DateTime df = dfr df['datetime'] = pd.to_datetime(df['time'] * 1000000) # Filter to Validation Period df = df[df.datetime > pd.to_datetime(le._d['val start'])] print(dfr['time'].count(), df['time'].count()) # Filter Oil Consumption outliers by < 3 * stdev df = df[np.abs(df.OilConsumption-df.OilConsumption.mean()) <= (3*df.OilConsumption.std())] # Calc Rolling Mean values df['LOC'] = df.OilConsumption.rolling(50).mean() df['Pow'] = df.AvgPower.rolling(50).mean() dfl=df[['datetime','OilConsumption','LOC','AvgPower','Pow']] dfl=df[['datetime','LOC','Pow']] ax = dfl.plot(subplots=False, x='datetime', 
secondary_y=['AvgPower','Pow'], ylim=(0,0.3), figsize=(16,10), title=le, grid=True) ax.set_ylim(1000,5000) #parameters tdef = {161: 'CountOph', 102: 'PowerAct'} #tlimit = 2500 #tfrom = arrow.get(le.valstart_ts) #tfrom = arrow.get('2020-02-07') tfrom = arrow.now('Europe/Vienna').shift(months=-2) #tfrom = arrow.now('Europe/Vienna').shift(days=-2) #tto = arrow.now('Europe/Vienna') #tto = arrow.get(2021,1,23,16,0) tto=arrow.now('Europe/Vienna') ttimecycle='1800' #tassetType='J-Engine' #tincludeMinMax='false' #tforceDownSampling='false' df = le.batch_hist_dataItems(itemIds=tdef, p_from=tfrom, p_to=tto,timeCycle=ttimecycle) # Set Type of time column to DateTime df['datetime'] = pd.to_datetime(df['time'] * 1000000) df['CountOph'] = df.CountOph - le._d['oph@start'] # Just include the data to plot dfp = df[['datetime','CountOph','PowerAct']] print(dfp.tail(3)) dfp['datetime'].count() dfp.plot(subplots=False, x='datetime', color=['red','blue'], secondary_y = ['CountOph'],ylim=(0,5000), figsize=(16,10), title=le) ```
github_jupyter
``` from google.colab import drive drive.mount('gdrive') %cd /content/gdrive/My\ Drive/colab from __future__ import print_function import json import keras import pickle import os.path from keras.datasets import cifar10 from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D, BatchNormalization from keras.callbacks import ModelCheckpoint from keras.callbacks import LambdaCallback from keras import optimizers from keras import regularizers from keras.utils import plot_model import numpy as np import matplotlib.pyplot as plt def build_model(x_shape, weight_decay, num_classes): # Build the network of vgg for 10 classes with massive dropout and weight decay as described in the paper. model = Sequential() weight_decay = weight_decay model.add(Conv2D(64, (3, 3), padding='same', input_shape=x_shape, kernel_regularizer=regularizers.l2(weight_decay))) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(Dropout(0.3)) model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay))) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(Flatten()) model.add(Dense(200, kernel_regularizer=regularizers.l2(weight_decay))) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Dense(100, kernel_regularizer=regularizers.l2(weight_decay))) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Dense(num_classes)) model.add(Activation('softmax')) return model def normalize(X_train, X_test): # this function normalize inputs for zero mean and unit variance # it is used when training a model. # Input: training set and test set # Output: normalized training set and test set according to the trianing set statistics. 
mean = np.mean(X_train, axis=(0, 1, 2, 3)) std = np.std(X_train, axis=(0, 1, 2, 3)) X_train = (X_train - mean) / (std + 1e-7) X_test = (X_test - mean) / (std + 1e-7) return X_train, X_test def normalize_production(x): # this function is used to normalize instances in production according to saved training set statistics # Input: X - a training set # Output X - a normalized training set according to normalization constants. # these values produced during first training and are general for the standard cifar10 training set normalization mean = 120.707 std = 64.15 return (x - mean)/(std+1e-7) def predict(x, normalize=True, batch_size=50): if normalize: x = normalize_production(x) return model.predict(x, batch_size) def updateEpoch(epoch, logs): to_save = num_epoch + epoch + 1 report_data['acc'].append(logs['acc']) report_data['loss'].append(logs['loss']) report_data['val_acc'].append(logs['val_acc']) report_data['val_loss'].append(logs['val_loss']) with open(epoch_file, "w") as file: file.write(str(to_save)) with open(data_file, "wb") as file: pickle.dump(report_data, file) with open(all_file, "a+") as file: all_data = [to_save, report_data['acc'], report_data['val_acc'], report_data['loss'], report_data['val_loss']] file.write(json.dumps(all_data)) print(epoch, logs) def train(model): # training parameters batch_size = 128 maxepoches = 100 learning_rate = 0.1 lr_decay = 1e-6 lr_drop = 20 # The data, shuffled and split between train and test sets: (x_train, y_train), (x_test, y_test) = cifar10.load_data() x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train, x_test = normalize(x_train, x_test) y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) def lr_scheduler(epoch): return learning_rate * (0.5 ** (epoch // lr_drop)) # data augmentation datagen = ImageDataGenerator( featurewise_center=False, # set input mean to 0 over the dataset samplewise_center=False, # set each sample mean 
to 0 featurewise_std_normalization=False, # divide inputs by std of the dataset samplewise_std_normalization=False, # divide each input by its std zca_whitening=False, # apply ZCA whitening rotation_range=15, # randomly rotate images in the range (degrees, 0 to 180) width_shift_range=0.1, # randomly shift images horizontally (fraction of total width) height_shift_range=0.1, # randomly shift images vertically (fraction of total height) horizontal_flip=True, # randomly flip images vertical_flip=False) # randomly flip images # (std, mean, and principal components if ZCA whitening is applied). datagen.fit(x_train) # optimization details sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9) model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy']) plot_model(model, to_file='model.png') reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler) checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max') lambdaCall = LambdaCallback(on_epoch_end=updateEpoch) callbacks_list = [reduce_lr,checkpoint,lambdaCall] # training process in a for loop with learning rate drop every 20 epoches. 
history = model.fit_generator( datagen.flow(x_train, y_train, batch_size=batch_size), steps_per_epoch=x_train.shape[0] // batch_size, epochs=maxepoches, validation_data=(x_test, y_test), callbacks=callbacks_list, verbose=1) model.save_weights('cifar10vgg_3.h5') # summarize history for accuracy plt.plot(report_data['acc']) plt.plot(report_data['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() # summarize history for loss plt.plot(report_data['loss']) plt.plot(report_data['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() return history num_classes = 10 weight_decay = 0.0005 x_shape = [32, 32, 3] train_bool = True epoch_file="hw1_3_epoch_num.txt" data_file="hw1_3_data.txt" filepath="hw1_3_weights.best.hdf5" all_file="hw1_3_all.txt" model = build_model(x_shape, weight_decay, num_classes) num_epoch = 0 if not os.path.isfile(epoch_file): with open(epoch_file, "w+") as file: file.write(str(num_epoch)) else: with open(epoch_file, "r") as file: num_epoch = int(file.read()) if os.path.isfile(filepath): model.load_weights(filepath) if os.path.isfile(data_file): with open(data_file, "rb") as file: report_data = pickle.load(file) # print the model summary model.summary() report_data = { "acc":[], "val_acc":[], "loss":[], "val_loss":[] } if train_bool: history = train(model) else: model.load_weights('cifar10vgg_3.h5') (x_train, y_train), (x_test, y_test) = cifar10.load_data() x_train = x_train.astype('float32') x_test = x_test.astype('float32') y_train = keras.utils.to_categorical(y_train, 10) y_test = keras.utils.to_categorical(y_test, 10) predicted_x = model.predict(x_test) residuals = np.argmax(predicted_x, 1) != np.argmax(y_test, 1) loss = sum(residuals)/len(residuals) print("the validation 0/1 loss is: ", loss) ```
github_jupyter
``` # -*- coding: utf-8 -*- # This work is part of the Core Imaging Library (CIL) developed by CCPi # (Collaborative Computational Project in Tomographic Imaging), with # substantial contributions by UKRI-STFC and University of Manchester. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2019 UKRI-STFC, The University of Manchester # Authored by: Evangelos Papoutsellis (UKRI-STFC) ``` <h1><center>Primal Dual Hybrid Gradient Algorithm </center></h1> In this demo, we learn how to use the **Primal Dual Hybrid Algorithm (PDHG)** introduced by [Chambolle & Pock](https://hal.archives-ouvertes.fr/hal-00490826/document) for Tomography Reconstruction. We will solve the following minimisation problem under three different regularisation terms, i.e., * $\|\cdot\|_{1}$ or * Tikhonov regularisation or * with $L=\nabla$ and Total variation: <a id='all_reg'></a> $$\begin{equation} u^{*} =\underset{u}{\operatorname{argmin}} \frac{1}{2} \| \mathcal{A} u - g\|^{2} + \underbrace{ \begin{cases} \alpha\,\|u\|_{1}, & \\[10pt] \alpha\,\|\nabla u\|_{2}^{2}, & \\[10pt] \alpha\,\mathrm{TV}(u) + \mathbb{I}_{\{u\geq 0\}}(u). \end{cases}}_{Regularisers} \tag{1} \end{equation}$$ where, 1. $g$ is the Acqusisition data obtained from the detector. 1. 
$\mathcal{A}$ is the projection operator ( _Radon transform_ ) that maps from an image-space to an acquisition space, i.e., $\mathcal{A} : \mathbb{X} \rightarrow \mathbb{Y}, $ where $\mathbb{X}$ is an __ImageGeometry__ and $\mathbb{Y}$ is an __AcquisitionGeometry__. 1. $\alpha$: regularising parameter that measures the trade-off between the fidelity and the regulariser terms. 1. The total variation (isotropic) is defined as $$\mathrm{TV}(u) = \|\nabla u \|_{2,1} = \sum \sqrt{ (\partial_{y}u)^{2} + (\partial_{x}u)^{2} }$$ 1. $\mathbb{I}_{\{u\geq 0\}}(u) : = \begin{cases} 0, & \mbox{ if } u\geq 0\\ \infty , & \mbox{ otherwise} \, \end{cases} $, $\quad$ a non-negativity constraint for the minimiser $u$. <h2><center><u> Learning objectives </u></center></h2> - Load the data using the CIL reader: `TXRMDataReader`. - Preprocess the data using the CIL processors: `Binner`, `TransmissionAbsorptionConverter`. - Run FBP and SIRT reconstructions. - Setup PDHG for 3 different regularisers: $L^{1}$, Tikhonov and Total variation. <!--- 1. Brief intro for non-smooth minimisation problems using PDHG algorithm. 1. Setup and run PDHG with (__non-smooth__) $L^{1}$ norm regulariser. __(No BlockFramework)__ 1. Use __BlockFunction__ and __Block Framework__ to setup PDHG for Tikhonov and TV reconstructions. 1. Run Total variation reconstruction with different regularising parameters and compared with FBP and SIRT reconstructions. At the end of this demo, we will be able to reproduce all the reconstructions presented in the figure below. One can observe that the __Tikhonov regularisation__ with $L = \nabla$ was able to remove the noise but could not preserve the edges. However, this can be achieved with the the total variation reconstruction. <img src="CIL-Demos/Notebooks/images/recon_all_tomo.jpeg" width="1500"/> ---> <!-- <h2><center><u> Prerequisites </u></center></h2> - AcquisitionData, AcquisitionGeometry, AstraProjectorSimple. - BlockOperator, Gradient. - FBP, SIRT, CGLS, Tikhonov. 
--> We first import all the necessary libraries for this notebook. <!--- In order to use the PDHG algorithm for the problem above, we need to express our minimisation problem into the following form: <a id='PDHG_form'></a> $$\min_{u} \mathcal{F}(K u) + \mathcal{G}(u)$$ where we assume that: 1. $\mathcal{F}$, $\mathcal{G}$ are __convex__ functionals - $\mathcal{F}: Y \rightarrow \mathbb{R}$ - $\mathcal{G}: X \rightarrow \mathbb{R}$ 2. $K$ is a continuous linear operator acting from a space X to another space Y : $$K : X \rightarrow Y \quad $$ with operator norm defined as $$\| K \| = \max\{ \|K x\|_{Y} : \|x\|_{X}\leq 1 \}.$$ **Note**: The Gradient operator has $\|\nabla\| = \sqrt{8} $ and for the projection operator we use the [Power Method](https://en.wikipedia.org/wiki/Power_iteration) to approximate the greatest eigenvalue of $K$. ---> ``` # Import libraries from cil.framework import BlockDataContainer from cil.optimisation.functions import L2NormSquared, L1Norm, BlockFunction, MixedL21Norm, IndicatorBox, TotalVariation from cil.optimisation.operators import GradientOperator, BlockOperator from cil.optimisation.algorithms import PDHG, SIRT from cil.plugins.astra.operators import ProjectionOperator from cil.plugins.astra.processors import FBP from cil.plugins.ccpi_regularisation.functions import FGP_TV from cil.utilities.display import show2D, show_geometry from cil.utilities.jupyter import islicer from cil.io import TXRMDataReader from cil.processors import Binner, TransmissionAbsorptionConverter, Slicer import matplotlib.pyplot as plt import numpy as np import os ``` # Data information In this demo, we use the **Walnut** found in [Jørgensen_et_all](https://zenodo.org/record/4822516#.YLXyAJMzZp8). In total, there are 6 individual micro Computed Tomography datasets in the native Zeiss TXRM/TXM format. 
The six datasets were acquired at the 3D Imaging Center at Technical University of Denmark in 2014 (HDTomo3D in 2016) as part of the ERC-funded project High-Definition Tomography (HDTomo) headed by Prof. Per Christian Hansen. # Load walnut data ``` reader = TXRMDataReader() pathname = os.path.abspath("/mnt/materials/SIRF/Fully3D/CIL/Walnut/valnut_2014-03-21_643_28/tomo-A") data_name = "valnut_tomo-A.txrm" filename = os.path.join(pathname,data_name ) reader.set_up(file_name=filename, angle_unit='radian') data3D = reader.read() # reorder data to match default order for Astra/Tigre operator data3D.reorder('astra') # Get Image and Acquisition geometries ag3D = data3D.geometry ig3D = ag3D.get_ImageGeometry() ``` ## Acquisition and Image geometry information ``` print(ag3D) print(ig3D) ``` # Show Acquisition geometry and full 3D sinogram. ``` show_geometry(ag3D) show2D(data3D, slice_list = [('vertical',512), ('angle',800), ('horizontal',512)], cmap="inferno", num_cols=3, size=(15,15)) ``` # Slice through projections ``` islicer(data3D, direction=1, cmap="inferno") ``` ## For demonstration purposes, we extract the central slice and select only 160 angles from the total 1601 angles. 1. We use the `Slicer` processor with step size of 10. 1. We use the `Binner` processor to crop and bin the acquisition data in order to reduce the field of view. 1. We use the `TransmissionAbsorptionConverter` to convert from transmission measurements to absorption based on the Beer-Lambert law. **Note:** To avoid circular artifacts in the reconstruction space, we subtract the mean value of a background Region of interest (ROI), i.e., ROI that does not contain the walnut. 
``` # Extract vertical slice data2D = data3D.subset(vertical='centre') # Select every 10 angles sliced_data = Slicer(roi={'angle':(0,1601,10)})(data2D) # Reduce background regions binned_data = Binner(roi={'horizontal':(120,-120,2)})(sliced_data) # Create absorption data absorption_data = TransmissionAbsorptionConverter()(binned_data) # Remove circular artifacts absorption_data -= np.mean(absorption_data.as_array()[80:100,0:30]) # Get Image and Acquisition geometries for one slice ag2D = absorption_data.geometry ag2D.set_angles(ag2D.angles, initial_angle=0.2, angle_unit='radian') ig2D = ag2D.get_ImageGeometry() print(" Acquisition Geometry 2D: {} with labels {}".format(ag2D.shape, ag2D.dimension_labels)) print(" Image Geometry 2D: {} with labels {}".format(ig2D.shape, ig2D.dimension_labels)) ``` ## Define Projection Operator We can define our projection operator using our __astra__ __plugin__ that wraps the Astra-Toolbox library. ``` A = ProjectionOperator(ig2D, ag2D, device = "gpu") ``` ## FBP and SIRT reconstructions Now, let's perform simple reconstructions using the **Filtered Back Projection (FBP)** and **Simultaneous Iterative Reconstruction Technique [SIRT](../appendix.ipynb/#SIRT) .** Recall, for FBP we type ```python fbp_recon = FBP(ig, ag, device = 'gpu')(absorption_data) ``` For SIRT, we type ```python x_init = ig.allocate() sirt = SIRT(initial = x_init, operator = A, data=absorption_data, max_iteration = 50, update_objective_interval=10) sirt.run(verbose=1) sirt_recon = sirt.solution ``` **Note**: In SIRT, a non-negative constraint can be used with ```python constraint=IndicatorBox(lower=0) ``` ## Exercise 1: Run FBP and SIRT reconstructions Use the code blocks described above and run FBP (`fbp_recon`) and SIRT (`sirt_recon`) reconstructions. 
**Note**: To display the results, use ```python show2D([fbp_recon,sirt_recon], title = ['FBP reconstruction','SIRT reconstruction'], cmap = 'inferno') ``` ``` # Setup and run the FBP algorithm fbp_recon = FBP(..., ..., device = 'gpu')(absorption_data) # Setup and run the SIRT algorithm, with non-negative constraint x_init = ig2D.allocate() sirt = SIRT(initial = x_init, operator = ..., data= ..., constraint = ..., max_iteration = 300, update_objective_interval=100) sirt.run(verbose=1) sirt_recon = sirt.solution # Show reconstructions show2D([fbp_recon,sirt_recon], title = ['FBP reconstruction','SIRT reconstruction'], cmap = 'inferno', fix_range=(0,0.05)) ``` ## Exercise 1: Solution ``` # Setup and run the FBP algorithm fbp_recon = FBP(ig2D, ag2D, device = 'gpu')(absorption_data) # Setup and run the SIRT algorithm, with non-negative constraint x_init = ig2D.allocate() sirt = SIRT(initial = x_init, operator = A , data = absorption_data, constraint = IndicatorBox(lower=0), max_iteration = 300, update_objective_interval=100) sirt.run(verbose=1) sirt_recon = sirt.solution # Show reconstructions show2D([fbp_recon,sirt_recon], title = ['FBP reconstruction','SIRT reconstruction'], cmap = 'inferno', fix_range=(0,0.05)) ``` <h2><center> Why PDHG? </center></h2> In the previous notebook, we presented the __Tikhonov regularisation__ for tomography reconstruction, i.e., <a id='Tikhonov'></a> $$\begin{equation} u^{*} =\underset{u}{\operatorname{argmin}} \frac{1}{2} \| \mathcal{A} u - g\|^{2} + \alpha\|L u\|^{2}_{2} \tag{Tikhonov} \end{equation}$$ where we can use either the `GradientOperator` ($L = \nabla) $ or the `IdentityOperator` ($L = \mathbb{I}$). Due to the $\|\cdot\|^{2}_{2}$ terms, one can observe that the above objective function is differentiable. 
As shown in the previous notebook, we can use the standard `GradientDescent` algorithm namely ```python f1 = LeastSquares(A, absorption_data) D = GradientOperator(ig2D) f2 = OperatorCompositionFunction(L2NormSquared(),D) f = f1 + alpha_tikhonov*f2 gd = GD(x_init=ig2D.allocate(), objective_function=f, step_size=None, max_iteration=1000, update_objective_interval = 10) gd.run(100, verbose=1) ``` However, this is not always the case. Consider for example an $L^{1}$ norm for the fidelity, i.e., $\|\mathcal{A} u - g\|_{1}$ or an $L^{1}$ norm of the regulariser i.e., $\|u\|_{1}$ or a non-negativity constraint $\mathbb{I}_{\{u>0\}}(u)$. An alternative is to use **Proximal Gradient Methods**, discussed in the previous notebook, e.g., the `FISTA` algorithm, where we require one of the functions to be differentiable and the other to have a __simple__ proximal method, i.e., "easy to solve". For more information, we refer to [Parikh_Boyd](https://web.stanford.edu/~boyd/papers/pdf/prox_algs.pdf#page=30). Using the __PDHG algorithm__, we can solve minimisation problems where the objective is not differentiable, and the only required assumption is convexity with __simple__ proximal problems. <h2><center> $L^{1}$ regularisation </center></h2> Let $L=$`IdentityOperator` in [Tikhonov regularisation](#Tikhonov) and replace the $$\alpha^{2}\|L u\|^{2}_{2}\quad\mbox{ with }\quad \alpha\|u\|_{1}, $$ which results in a non-differentiable objective function. Hence, we have <a id='Lasso'></a> $$\begin{equation} u^{*} =\underset{u}{\operatorname{argmin}} \frac{1}{2} \| \mathcal{A} u - g\|^{2} + \alpha\|u\|_{1} \tag{$L^{2}-L^{1}$} \end{equation}$$ <h2><center> How to setup and run PDHG? </center></h2> In order to use the PDHG algorithm for the problem above, we need to express our minimisation problem into the following form: <a id='PDHG_form'></a> $$\begin{equation} \min_{u\in\mathbb{X}} \mathcal{F}(K u) + \mathcal{G}(u) \label{PDHG_form} \tag{2} \end{equation}$$ where we assume that: 1. 
$\mathcal{F}$, $\mathcal{G}$ are __convex__ functionals: - $\mathcal{F}: \mathbb{Y} \rightarrow \mathbb{R}$ - $\mathcal{G}: \mathbb{X} \rightarrow \mathbb{R}$ 1. $K$ is a continuous linear operator acting from a space $\mathbb{X}$ to another space $\mathbb{Y}$ : $$K : \mathbb{X} \rightarrow \mathbb{Y} \quad $$ with operator norm defined as $$\| K \| = \max\{ \|K x\|_{\mathbb{Y}} : \|x\|_{\mathbb{X}}\leq 1 \}.$$ We can write the problem [($L^{2}-L^{1})$](#Lasso) into [(2)](#PDHG_form), if we let 1. $K = \mathcal{A} \quad \Longleftrightarrow \quad $ `K = A` 1. $\mathcal{F}: Y \rightarrow \mathbb{R}, \mbox{ with } \mathcal{F}(z) := \frac{1}{2}\| z - g \|^{2}, \quad \Longleftrightarrow \quad$ ` F = 0.5 * L2NormSquared(absorption_data)` 1. $\mathcal{G}: X \rightarrow \mathbb{R}, \mbox{ with } \mathcal{G}(z) := \alpha\|z\|_{1}, \quad \Longleftrightarrow \quad$ ` G = alpha * L1Norm()` Hence, we can verify that with the above setting we have that [($L^{2}-L^{1})$](#Lasso)$\Rightarrow$[(2)](#PDHG_form) for $x=u$, $$\underset{u}{\operatorname{argmin}} \frac{1}{2}\|\mathcal{A} u - g\|^{2}_{2} + \alpha\|u\|_{1} = \underset{u}{\operatorname{argmin}} \mathcal{F}(\mathcal{A}u) + \mathcal{G}(u) = \underset{x}{\operatorname{argmin}} \mathcal{F}(Kx) + \mathcal{G}(x) $$ The algorithm is described in the [Appendix](../appendix.ipynb/#PDHG) and for every iteration, we solve two (proximal-type) subproblems, i.e., __primal & dual problems__ where $\mbox{prox}_{\tau \mathcal{G}}(x)$ and $\mbox{prox}_{\sigma \mathcal{F^{*}}}(x)$ are the **proximal operators** of $\mathcal{G}$ and $\mathcal{F}^{*}$ (convex conjugate of $\mathcal{F}$), i.e., $$\begin{equation} \mbox{prox}_{\lambda \mathcal{F}}(x) = \underset{z}{\operatorname{argmin}} \frac{1}{2}\|z - x \|^{2} + \lambda \mathcal{F}(z) \end{equation} $$ One application of the proximal operator is similar to a gradient step but is defined for convex and not necessarily differentiable functions. 
To setup and run PDHG in CIL: ```python pdhg = PDHG(f = F, g = G, operator = K, max_iteration = 500, update_objective_interval = 100) pdhg.run(verbose=1) ``` **Note:** To monitor convergence, we use `pdhg.run(verbose=1)` that prints the objective value of the primal problem, or `pdhg.run(verbose=2)` that prints the objective value of the primal and dual problems, as well as the primal dual gap. Nothing is printed with `verbose=0`. <a id='sigma_tau'></a> ### Define operator $K$, functions $\mathcal{F}$ and $\mathcal{G}$ ``` K = A F = 0.5 * L2NormSquared(b=absorption_data) alpha = 0.01 G = alpha * L1Norm() ``` ### Setup and run PDHG ``` # Setup and run PDHG pdhg_l1 = PDHG(f = F, g = G, operator = K, max_iteration = 500, update_objective_interval = 100) pdhg_l1.run(verbose=1) # Show reconstuction and ground truth show2D([pdhg_l1.solution,fbp_recon], fix_range=(0,0.05), title = ['L1 regularisation', 'FBP'], cmap = 'inferno') # Plot middle line profile plt.figure(figsize=(30,8)) plt.rcParams.update({'font.size': 15}) plt.rcParams.update({'lines.linewidth': 5}) plt.plot(fbp_recon.subset(horizontal_y = int(ig2D.voxel_num_y/2)).as_array(), label = 'FBP') plt.plot(pdhg_l1.solution.subset(horizontal_y = int(ig2D.voxel_num_y/2)).as_array(), label = 'L1 regularisation') plt.legend() plt.title('Middle Line Profiles') plt.show() ``` <h2><center> PDHG for Total Variation Regularisation </center></h2> Now, we continue with the setup of the PDHG algorithm using the Total variation regulariser that appeared in [(1)](#all_reg). Similarly to the [($L^{2}-L^{1}$)](#Lasso) problem, we need to express [($L^{2}-TV$)](#all_reg) in the general form of [PDHG](#PDHG_form). This can be done using two different formulations: 1. Explicit formulation: All the subproblems in the PDHG algorithm have a closed form solution. 1. Implicit formulation: One of the subproblems in the PDHG algorithm is not solved explicitly but an inner solver is used. 
--- <h2><center> ($L^{2}-TV$) with Explicit PDHG </center></h2> For the setup of the **($L^{2}-TV$) Explicit PDHG**, we let $$\begin{align} & f_{1}: \mathbb{Y} \rightarrow \mathbb{R}, \quad f_{1}(z_{1}) = \alpha\,\|z_{1}\|_{2,1}, \mbox{ ( the TV term ) }\\ & f_{2}: \mathbb{X} \rightarrow \mathbb{R}, \quad f_{2}(z_{2}) = \frac{1}{2}\|z_{2} - g\|_{2}^{2}, \mbox{ ( the data-fitting term ). } \end{align}$$ ```python f1 = alpha * MixedL21Norm() f2 = 0.5 * L2NormSquared(b=absorption_data) ``` For $z = (z_{1}, z_{2})\in \mathbb{Y}\times \mathbb{X}$, we define a separable function, e.g., [BlockFunction,](../appendix.ipynb/#BlockFunction) $$\mathcal{F}(z) : = \mathcal{F}(z_{1},z_{2}) = f_{1}(z_{1}) + f_{2}(z_{2})$$ ```python F = BlockFunction(f1, f2) ``` In order to obtain an element $z = (z_{1}, z_{2})\in \mathbb{Y}\times \mathbb{X}$, we need to define a `BlockOperator` $K$, using the two operators involved in [$L^{2}-TV$](#TomoTV), i.e., the `GradientOperator` $\nabla$ and the `ProjectionOperator` $\mathcal{A}$. 
$$ \mathcal{K} = \begin{bmatrix} \nabla\\ \mathcal{A} \end{bmatrix} $$ ```python Grad = GradientOperator(ig) K = BlockOperator(Grad, A) ``` Finally, we enforce a non-negativity constraint by letting $\mathcal{G} = \mathbb{I}_{\{u>0\}}(u)$ $\Longleftrightarrow$ `G = IndicatorBox(lower=0)` Again, we can verify that with the above setting we can express our problem into [(2)](#PDHG_form), for $x=u$ $$ \begin{align} \underset{u}{\operatorname{argmin}}\alpha\|\nabla u\|_{2,1} + \frac{1}{2}\|\mathcal{A} u - g\|^{2}_{2} + \mathbb{I}_{\{u>0\}}(u) = \underset{u}{\operatorname{argmin}} f_{1}(\nabla u) + f_{2}(\mathcal{A}u) + \mathbb{I}_{\{u>0\}}(u) \\ = \underset{u}{\operatorname{argmin}} F( \begin{bmatrix} \nabla \\ \mathcal{A} \end{bmatrix}u) + \mathbb{I}_{\{u>0\}}(u) = \underset{u}{\operatorname{argmin}} \mathcal{F}(Ku) + \mathcal{G}(u) = \underset{x}{\operatorname{argmin}} \mathcal{F}(Kx) + \mathcal{G}(x) \end{align} $$ ``` # Define BlockFunction F alpha_tv = 0.0003 f1 = alpha_tv * MixedL21Norm() f2 = 0.5 * L2NormSquared(b=absorption_data) F = BlockFunction(f1, f2) # Define BlockOperator K Grad = GradientOperator(ig2D) K = BlockOperator(Grad, A) # Define Function G G = IndicatorBox(lower=0) # Setup and run PDHG pdhg_tv_explicit = PDHG(f = F, g = G, operator = K, max_iteration = 1000, update_objective_interval = 200) pdhg_tv_explicit.run(verbose=1) # Show reconstuction and ground truth show2D([pdhg_tv_explicit.solution,fbp_recon], fix_range=(0,0.055), title = ['TV regularisation','FBP'], cmap = 'inferno') # Plot middle line profile plt.figure(figsize=(30,8)) plt.rcParams.update({'font.size': 15}) plt.rcParams.update({'lines.linewidth': 5}) plt.plot(fbp_recon.subset(horizontal_y = int(ig2D.voxel_num_y/2)).as_array(), label = 'FBP') plt.plot(pdhg_tv_explicit.solution .subset(horizontal_y = int(ig2D.voxel_num_y/2)).as_array(), label = 'TV regularisation') plt.legend() plt.title('Middle Line Profiles') plt.show() ``` ## Speed of PDHG convergence The PDHG algorithm converges 
when $\sigma\tau\|K\|^{2}<1$, where the variable $\sigma$, $\tau$ are called the _primal and dual stepsizes_. When we setup the PDHG algorithm, the default values of $\sigma$ and $\tau$ are used: - $\sigma=1.0$ - $\tau = \frac{1.0}{\sigma\|K\|^{2}}$, and are not passed as arguments in the setup of PDHG. However, **the speed of the algorithm depends heavily on the choice of these stepsizes.** For the following, we encourage you to use different values, such as: - $\sigma=\frac{1}{\|K\|}$ - $\tau =\frac{1}{\|K\|}$ where $\|K\|$ is the operator norm of $K$. ```python normK = K.norm() sigma = 1./normK tau = 1./normK PDHG(f = F, g = G, operator = K, sigma=sigma, tau=tau, max_iteration = 2000, update_objective_interval = 500) ``` The operator norm is computed using the [Power Method](https://en.wikipedia.org/wiki/Power_iteration) to approximate the greatest eigenvalue of $K$. ## Exercise 2: Setup and run PDHG algorithm for Tikhonov regularisation Use exactly the same code as above and replace: $$f_{1}(z_{1}) = \alpha\,\|z_{1}\|_{2,1} \mbox{ with } f_{1}(z_{1}) = \alpha\,\|z_{1}\|_{2}^{2}.$$ ``` # Define BlockFunction F alpha_tikhonov = 0.05 f1 = ... 
F = BlockFunction(f1, f2) # Setup and run PDHG pdhg_tikhonov_explicit = PDHG(f = F, g = G, operator = K, max_iteration = 500, update_objective_interval = 100) pdhg_tikhonov_explicit.run(verbose=1) ``` ## Exercise 2: Solution ``` # Define BlockFunction F alpha_tikhonov = 0.05 f1 = alpha_tikhonov * L2NormSquared() F = BlockFunction(f1, f2) # Setup and run PDHG pdhg_tikhonov_explicit = PDHG(f = F, g = G, operator = K, max_iteration = 1000, update_objective_interval = 200) pdhg_tikhonov_explicit.run(verbose=1) # Show reconstuction and ground truth show2D([pdhg_tikhonov_explicit.solution,fbp_recon], fix_range=(0,0.055), title = ['Tikhonov regularisation','FBP'], cmap = 'inferno') # Plot middle line profile plt.figure(figsize=(30,8)) plt.rcParams.update({'font.size': 15}) plt.rcParams.update({'lines.linewidth': 5}) plt.plot(fbp_recon.subset(horizontal_y = int(ig2D.voxel_num_y/2)).as_array(), label = 'FBP') plt.plot(pdhg_tikhonov_explicit.solution .subset(horizontal_y = int(ig2D.voxel_num_y/2)).as_array(), label = 'Tikhonov regularisation') plt.legend() plt.title('Middle Line Profiles') plt.show() ``` --- <h2><center> ($L^{2}-TV$) with Implicit PDHG </center></h2> In the implicit PDHG, one of the proximal subproblems, i.e., $\mathrm{prox}_{\sigma\mathcal{F}^{*}}$ or $\mathrm{prox}_{\tau\mathcal{G}}$, is not solved exactly and an iterative solver is used. For the setup of the **Implicit PDHG**, we let $$\begin{align} & \mathcal{F}: \mathbb{Y} \rightarrow \mathbb{R}, \quad \mathcal{F}(z_{1}) = \frac{1}{2}\|z_{1} - g\|_{2}^{2}\\ & \mathcal{G}: \mathbb{X} \rightarrow \mathbb{R}, \quad \mathcal{G}(z_{2}) = \alpha\, \mathrm{TV}(z_{2}) = \alpha\,\|\nabla z_{2}\|_{2,1} \end{align}$$ For the function $\mathcal{G}$, we can use the `TotalVariation` `Function` class from `CIL`. 
Alternatively, we can use the `FGP_TV` `Function` class from our `cil.plugins.ccpi_regularisation` that wraps regularisation routines from the [CCPi-Regularisation Toolkit](https://github.com/vais-ral/CCPi-Regularisation-Toolkit). For these functions, the `proximal` method implements an iterative solver, namely the **Fast Gradient Projection (FGP)** algorithm that solves the **dual** problem of $$\begin{equation} \mathrm{prox}_{\tau G}(u) = \underset{z}{\operatorname{argmin}} \frac{1}{2} \| u - z\|^{2} + \tau\,\alpha\,\mathrm{TV}(z) + \mathbb{I}_{\{z>0\}}(z), \end{equation} $$ for every PDHG iteration. Hence, we need to specify the number of iterations for the FGP algorithm. In addition, we can enforce a non-negativity constraint using `lower=0.0`. For the `FGP_TV` class, we can either use `device=cpu` or `device=gpu` to speed up this inner solver. ```python G = alpha * FGP_TV(max_iteration=100, nonnegativity = True, device = 'gpu') G = alpha * TotalVariation(max_iteration=100, lower=0.) ``` ## Exercise 3: Setup and run implicit PDHG algorithm with the Total variation regulariser - Using the TotalVariation class, from CIL. This solves the TV denoising problem (using the FGP algorithm) in CPU. - Using the FGP_TV class from the CCPi regularisation plugin. **Note:** In the FGP_TV implementation no pixel size information is included in the forward and backward operations of the finite difference operator. Hence, we need to divide our regularisation parameter by the pixel size, e.g., $$\frac{\alpha}{\mathrm{ig2D.voxel\_size\_y}}$$ ## $(L^{2}-TV)$ Implicit PDHG: using FGP_TV ``` F = 0.5 * L2NormSquared(b=absorption_data) G = (alpha_tv/ig2D.voxel_size_y) * ... 
K = A # Setup and run PDHG pdhg_tv_implicit_regtk = PDHG(f = F, g = G, operator = K, max_iteration = 1000, update_objective_interval = 200) pdhg_tv_implicit_regtk.run(verbose=1) ``` ## Exercise 3: Solution ``` F = 0.5 * L2NormSquared(b=absorption_data) G = (alpha_tv/ig2D.voxel_size_y) * FGP_TV(max_iteration=100, device='gpu') K = A # Setup and run PDHG pdhg_tv_implicit_regtk = PDHG(f = F, g = G, operator = K, max_iteration = 1000, update_objective_interval = 200) pdhg_tv_implicit_regtk.run(verbose=1) # Show reconstuction and ground truth show2D([pdhg_tv_implicit_regtk.solution,pdhg_tv_explicit.solution, (pdhg_tv_explicit.solution-pdhg_tv_implicit_regtk.solution).abs()], fix_range=[(0,0.055),(0,0.055),(0,1e-3)], title = ['TV (Implicit CCPi-RegTk)','TV (Explicit)', 'Absolute Difference'], cmap = 'inferno', num_cols=3) # Plot middle line profile plt.figure(figsize=(30,8)) plt.rcParams.update({'font.size': 15}) plt.rcParams.update({'lines.linewidth': 5}) plt.plot(pdhg_tv_explicit.solution.subset(horizontal_y = int(ig2D.voxel_num_y/2)).as_array(), label = 'TV (explicit)') plt.plot(pdhg_tv_implicit_regtk.solution.subset(horizontal_y = int(ig2D.voxel_num_y/2)).as_array(), label = 'TV (implicit)') plt.legend() plt.title('Middle Line Profiles') plt.show() ``` In the above comparison between explicit and implicit TV reconstructions, we observe some differences in the reconstructions and in the middle line profiles. This is due to a) the number of iterations and b) $\sigma, \tau$ values used in both the explicit and implicit setup of the PDHG algorithm. You can try more iterations with different values of $\sigma$ and $\tau$ for both cases in order to be sure that they converge to the same solution. For example, you can use: * max_iteration = 2000 * $\sigma=\tau=\frac{1}{\|K\|}$ ## $(L^{2}-TV)$ Implicit PDHG: using TotalVariation ``` G = alpha_tv * TotalVariation(max_iteration=100, lower=0.) 
# Setup and run PDHG pdhg_tv_implicit_cil = PDHG(f = F, g = G, operator = K, max_iteration = 500, update_objective_interval = 100) pdhg_tv_implicit_cil.run(verbose=1) # Show reconstuction and ground truth show2D([pdhg_tv_implicit_regtk.solution, pdhg_tv_implicit_cil.solution, (pdhg_tv_implicit_cil.solution-pdhg_tv_implicit_regtk.solution).abs()], fix_range=[(0,0.055),(0,0.055),(0,1e-3)], num_cols=3, title = ['TV (CIL)','TV (CCPI-RegTk)', 'Absolute Difference'], cmap = 'inferno') # Plot middle line profile plt.figure(figsize=(30,8)) plt.rcParams.update({'font.size': 15}) plt.rcParams.update({'lines.linewidth': 5}) plt.plot(pdhg_tv_implicit_regtk.solution.subset(horizontal_y = int(ig2D.voxel_num_y/2)).as_array(), label = 'TV (CCPi-RegTk)') plt.plot(pdhg_tv_implicit_cil.solution.subset(horizontal_y = int(ig2D.voxel_num_y/2)).as_array(), label = 'TV (CIL)') plt.legend() plt.title('Middle Line Profiles') plt.show() ``` # FBP reconstruction with all the projection angles. ``` binned_data3D = Binner(roi={'horizontal':(120,-120,2)})(data3D) absorption_data3D = TransmissionAbsorptionConverter()(binned_data3D.subset(vertical=512)) absorption_data3D -= np.mean(absorption_data3D.as_array()[80:100,0:30]) ag3D = absorption_data3D.geometry ag3D.set_angles(ag3D.angles, initial_angle=0.2, angle_unit='radian') ig3D = ag3D.get_ImageGeometry() fbp_recon3D = FBP(ig3D, ag3D)(absorption_data3D) ``` # Show all reconstructions - FBP (1601 projections) - FBP (160 projections) - SIRT (160 projections) - $L^{1}$ regularisation (160 projections) - Tikhonov regularisation (160 projections) - Total variation regularisation (160 projections) ``` show2D([fbp_recon3D, fbp_recon, sirt_recon, pdhg_l1.solution, pdhg_tikhonov_explicit.solution, pdhg_tv_explicit.solution], title=['FBP 1601 projections', 'FBP', 'SIRT','$L^{1}$','Tikhonov','TV'], cmap="inferno",num_cols=3, size=(25,20), fix_range=(0,0.05)) ``` ## Zoom ROIs ``` show2D([fbp_recon3D.as_array()[175:225,150:250], 
fbp_recon.as_array()[175:225,150:250], sirt_recon.as_array()[175:225,150:250], pdhg_l1.solution.as_array()[175:225,150:250], pdhg_tikhonov_explicit.solution.as_array()[175:225,150:250], pdhg_tv_implicit_regtk.solution.as_array()[175:225,150:250]], title=['FBP 1601 projections', 'FBP', 'SIRT','$L^{1}$','Tikhonov','TV'], cmap="inferno",num_cols=3, size=(25,20), fix_range=(0,0.05)) ``` <h1><center>Conclusions</center></h1> In the PDHG algorithm, the step-sizes $\sigma, \tau$ play a significant role in terms of the convergence speed. In the above problems, we used the default values: * $\sigma = 1.0$, $\tau = \frac{1.0}{\sigma\|K\|^{2}}$ and we encourage you to try different values provided that $\sigma\tau\|K\|^{2}<1$ is satisfied. Certainly, these values are not the optimal ones and there are several acceleration methods in the literature to tune these parameters appropriately, see for instance [Chambolle_Pock2010](https://hal.archives-ouvertes.fr/hal-00490826/document), [Chambolle_Pock2011](https://ieeexplore.ieee.org/document/6126441), [Goldstein et al](https://arxiv.org/pdf/1305.0546.pdf), [Malitsky_Pock](https://arxiv.org/pdf/1608.08883.pdf). In the following notebook, we are going to present a stochastic version of PDHG, namely **SPDHG** introduced in [Chambolle et al](https://arxiv.org/pdf/1706.04957.pdf) which is extremely useful to reconstruct large datasets, e.g., 3D walnut data. The idea behind SPDHG is to split our initial dataset into smaller chunks and apply forward and backward operations to these randomly selected subsets of the data. SPDHG has been used for different imaging applications and produces significant computational improvements over the PDHG algorithm, see [Ehrhardt et al](https://arxiv.org/abs/1808.07150) and [Papoutsellis et al](https://arxiv.org/pdf/2102.06126.pdf).
github_jupyter
# Isolated skyrmion in confined helimagnetic nanostructure **Authors**: Marijan Beg, Marc-Antonio Bisotti, Weiwei Wang, Ryan Pepper, David Cortes-Ortuno **Date**: 26 June 2016 (Updated 24 Jan 2019) This notebook can be downloaded from the github repository, found [here](https://github.com/computationalmodelling/fidimag/blob/master/doc/ipynb/isolated_skyrmion.ipynb). ## Problem specification A thin film disk sample with thickness $t=10 \,\text{nm}$ and diameter $d=100 \,\text{nm}$ is simulated. The material is FeGe with material parameters [1]: - exchange energy constant $A = 8.78 \times 10^{-12} \,\text{J/m}$, - magnetisation saturation $M_\text{s} = 3.84 \times 10^{5} \,\text{A/m}$, and - Dzyaloshinskii-Moriya energy constant $D = 1.58 \times 10^{-3} \,\text{J/m}^{2}$. It is expected that when the system is initialised in the uniform out-of-plane direction $\mathbf{m}_\text{init} = (0, 0, 1)$, it relaxes to the isolated Skyrmion (Sk) state (See Supplementary Information in Ref. 1). (Note that LLG dynamics is important, which means that artificially disabling the precession term in LLG may lead to other states). ## Simulation using the LLG equation ``` from fidimag.micro import Sim from fidimag.common import CuboidMesh from fidimag.micro import UniformExchange, Demag, DMI from fidimag.common import plot import time %matplotlib inline ``` The cuboidal thin film mesh which contains the disk is created: ``` d = 100 # diameter (nm) t = 10 # thickness (nm) # Mesh discretisation. dx = dy = 2.5 # nm dz = 2 mesh = CuboidMesh(nx=int(d/dx), ny=int(d/dy), nz=int(t/dz), dx=dx, dy=dy, dz=dz, unit_length=1e-9) ``` Since the disk geometry is simulated, it is required to set the saturation magnetisation to zero in the regions of the mesh outside the disk. 
In order to do that, the following function is created: ``` def Ms_function(Ms): def wrapped_function(pos): x, y, z = pos[0], pos[1], pos[2] r = ((x-d/2.)**2 + (y-d/2.)**2)**0.5 # distance from the centre if r <= d/2: # Mesh point is inside the disk. return Ms else: # Mesh point is outside the disk. return 0 return wrapped_function ``` To reduce the relaxation time, we define a state using a python function. ``` def init_m(pos): x,y,z = pos x0, y0 = d/2., d/2. r = ((x-x0)**2 + (y-y0)**2)**0.5 if r<10: return (0,0, 1) elif r<30: return (0,0, -1) elif r<60: return (0, 0, 1) else: return (0, 0, -1) ``` Having the magnetisation saturation function, the simulation object can be created: ``` # FeGe material paremeters. Ms = 3.84e5 # saturation magnetisation (A/m) A = 8.78e-12 # exchange energy constant (J/m) D = 1.58e-3 # Dzyaloshinkii-Moriya energy constant (J/m**2) alpha = 1 # Gilbert damping gamma = 2.211e5 # gyromagnetic ration (m/As) # Create simulation object. sim = Sim(mesh) # sim = Sim(mesh, driver='steepest_descent') sim.Ms = Ms_function(Ms) sim.driver.alpha = alpha sim.driver.gamma = gamma # Add energies. sim.add(UniformExchange(A=A)) sim.add(DMI(D=D)) sim.add(Demag()) # Since the magnetisation dynamics is not important in this stage, # the precession term in LLG equation can be set to artificially zero. # sim.driver.do_precession = False # Initialise the system. sim.set_m(init_m) ``` This is the initial configuration used before relaxation: ``` plot(sim, component='all', z=0.0, cmap='RdBu') ``` Now the system is relaxed to find a metastable state of the system: ``` # Relax the system to its equilibrium. 
start = time.time() sim.driver.relax(dt=1e-13, stopping_dmdt=0.1, max_steps=10000, save_m_steps=None, save_vtk_steps=None, printing=False) end = time.time() #NBVAL_IGNORE_OUTPUT print('Timing: ', end - start) sim.save_vtk() ``` The magnetisation components of obtained equilibrium configuration can be plotted in the following way: We plot the magnetisation at the bottom of the sample: ``` plot(sim, component='all', z=0.0, cmap='RdBu') ``` and at the top of the sample: ``` plot(sim, component='all', z=10.0, cmap='RdBu') ``` and we plot the xy spin angle through the middle of the sample: ``` plot(sim, component='angle', z=5.0, cmap='hsv') ``` ## Simulation using Steepest Descent An alternative method for the minimisation of the energy is using a SteepestDescent method: ``` # Create simulation object. sim = Sim(mesh, driver='steepest_descent') sim.Ms = Ms_function(Ms) sim.driver.gamma = gamma # Add energies. sim.add(UniformExchange(A=A)) sim.add(DMI(D=D)) sim.add(Demag()) # The maximum timestep: sim.driver.tmax = 1 # Initialise the system. sim.set_m(init_m) ``` In this case the driver has a `minimise` method ``` start = time.time() sim.driver.minimise(max_steps=10000, stopping_dm=0.5e-4, initial_t_step=1e-2) end = time.time() #NBVAL_IGNORE_OUTPUT print('Timing: ', end - start) ``` And the final state is equivalent to the one found with the LLG technique ``` plot(sim, component='all', z=0.0, cmap='RdBu') ``` ## References [1] Beg, M. et al. Ground state search, hysteretic behaviour, and reversal mechanism of skyrmionic textures in confined helimagnetic nanostructures. *Sci. Rep.* **5**, 17137 (2015).
github_jupyter
``` import os import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline ``` ## 1. 加载并可视化数据 ``` path = 'LogiReg_data.txt' pdData = pd.read_csv(path, header=None, names=['Exam 1', 'Exam 2', 'Admitted']) pdData.head() pdData.shape positive = pdData[pdData['Admitted'] == 1] negative = pdData[pdData['Admitted'] == 0] fig, ax = plt.subplots(figsize=(10, 5)) ax.scatter(positive['Exam 1'], positive['Exam 2'], s=30, c='b', marker='o', label='Admitted') ax.scatter(negative['Exam 1'], negative['Exam 2'], s=30, c='r', marker='x', label='Not Admitted') ax.legend() ax.set_xlabel('Exam 1 Score') ax.set_ylabel('Exam 2 Score') ``` ## 2. Sigmoid函数 $$ g(z) = \frac{1}{1+e^{-z}} $$ ``` def sigmoid(z): return 1 / (1 + np.exp(-z)) nums = np.arange(-10, 10, step=1) fig, ax = plt.subplots(figsize=(12, 4)) ax.plot(nums, sigmoid(nums), 'r') ``` ## 3. 建立Model $$ \begin{array}{ccc} \begin{pmatrix}\theta_{0} & \theta_{1} & \theta_{2}\end{pmatrix} & \times & \begin{pmatrix}1\\ x_{1}\\ x_{2} \end{pmatrix}\end{array}=\theta_{0}+\theta_{1}x_{1}+\theta_{2}x_{2} $$ ``` def model(X, theta): return sigmoid(np.dot(X, theta.T)) # 在第0列插入1 pdData.insert(0, 'Ones', 1) # 获取<training data, y> orig_data = pdData.values cols = orig_data.shape[1] X = orig_data[:, 0:cols-1] y = orig_data[:, cols-1:cols] # 初始化参数 theta = np.zeros([1, 3]) X[:5] y[:5] theta ``` ## 4. 建立Loss Function 将对数似然函数去负号 $$ D(h_\theta(x), y) = -y\log(h_\theta(x)) - (1-y)\log(1-h_\theta(x)) $$ 求平均损失 $$ J(\theta)=\frac{1}{n}\sum_{i=1}^{n} D(h_\theta(x_i), y_i) $$ ``` def cost(X, y, theta): left = np.multiply(-y, np.log(model(X, theta))) right = np.multiply(1 - y, np.log(1 - model(X, theta))) return np.sum(left - right) / (len(X)) cost(X, y, theta) ``` ## 5. 
计算梯度 $$ \frac{\partial J}{\partial \theta_j}=-\frac{1}{m}\sum_{i=1}^n (y_i - h_\theta (x_i))x_{ij} $$ ``` def gradient(X, y, theta): grad = np.zeros(theta.shape) error = (model(X, theta) - y).ravel() # 对于每一个参数,取出相关列的数据进行更新 for j in range(len(theta.ravel())): term = np.multiply(error, X[:, j]) grad[0, j] = np.sum(term) / len(X) return grad ``` ## 6. 梯度下降 ``` import time import numpy.random STOP_ITER = 0 STOP_COST = 1 STOP_GRAD = 2 def stopCriterion(dtype, value, threshold): if dtype == STOP_ITER: return value > threshold elif dtype == STOP_COST: return abs(value[-1] - value[-2]) < threshold elif dtype == STOP_GRAD: return np.linalg.norm(value) < threshold def shuffleData(data): # 洗牌操作 np.random.shuffle(data) cols = data.shape[1] X = data[:, 0:cols-1] y = data[:, cols-1:] return X, y def descent(data, theta, batchSize, stopType, thresh, alpha): i = 0 k = 0 init_time = time.time() X, y = shuffleData(data) grad = np.zeros(theta.shape) costs = [cost(X, y, theta)] while True: grad = gradient(X[k: k+batchSize], y[k: k+batchSize], theta) k += batchSize if k >= n: k = 0 X, y = shuffleData(data) theta = theta - alpha*grad costs.append(cost(X, y, theta)) i += 1 if stopType == STOP_ITER: value = i elif stopType == STOP_COST: value = costs elif stopType == STOP_GRAD: value = grad if stopCriterion(stopType, value, thresh): break return theta, i-1, costs, grad, time.time()-init_time def runExpe(data, theta, batchSize, stopType, thresh, alpha): theta, iter, costs, grad, dur = descent(data, theta, batchSize, stopType, thresh, alpha) name = "Original" if (data[:,1]>2).sum() > 1 else "Scaled" name += " data - learning rate: {} - ".format(alpha) if batchSize == n: strDescType = "Gradient" elif batchSize == 1: strDescType = "Stochastic" else: strDescType = "Mini-batch ({})".format(batchSize) name += strDescType + " descent - Stop: " if stopType == STOP_ITER: strStop = "{} iterations".format(thresh) elif stopType == STOP_COST: strStop = "costs change < {}".format(thresh) else: strStop = 
"gradient norm < {}".format(thresh) name += strStop print ("***{}\nTheta: {} - Iter: {} - Last cost: {:03.2f} - Duration: {:03.2f}s".format( name, theta, iter, costs[-1], dur)) fig, ax = plt.subplots(figsize=(12,4)) ax.plot(np.arange(len(costs)), costs, 'r') ax.set_xlabel('Iterations') ax.set_ylabel('Cost') ax.set_title(name.upper() + ' - Error vs. Iteration') return theta ``` ## 7. 不同的停止策略 ### 设定迭代次数 ``` #选择的梯度下降方法是基于所有样本的 n=100 runExpe(orig_data, theta, n, STOP_ITER, thresh=5000, alpha=0.000001) ``` ### 根据损失值停止 ``` runExpe(orig_data, theta, n, STOP_COST, thresh=0.000001, alpha=0.001) ``` ### 根据梯度变化停止 ``` runExpe(orig_data, theta, n, STOP_GRAD, thresh=0.05, alpha=0.001) ``` ## 8. 不同的梯度下降方法 ### Stochastic descent ``` runExpe(orig_data, theta, 1, STOP_ITER, thresh=5000, alpha=0.001) # 降低学习率 runExpe(orig_data, theta, 1, STOP_ITER, thresh=15000, alpha=0.000002) ``` 结论: 速度快,但稳定性差,需要很小的学习率 ### Mini-batch descent ``` runExpe(orig_data, theta, 16, STOP_ITER, thresh=15000, alpha=0.001) from sklearn import preprocessing as pp # 对数据进行标准化 将数据按其属性(按列进行)减去其均值,然后除以其方差。最后得到的结果是,对每个属性/每列来说所有数据都聚集在0附近,方差值为1 scaled_data = orig_data.copy() scaled_data[:, 1:3] = pp.scale(orig_data[:, 1:3]) runExpe(scaled_data, theta, n, STOP_ITER, thresh=5000, alpha=0.001) ``` 结论: 原始数据为0.61,而预处理后0.38。数据做预处理非常重要 ``` runExpe(scaled_data, theta, n, STOP_GRAD, thresh=0.02, alpha=0.001) theta = runExpe(scaled_data, theta, 1, STOP_GRAD, thresh=0.002/5, alpha=0.001) runExpe(scaled_data, theta, 16, STOP_GRAD, thresh=0.002*2, alpha=0.001) ``` ## 9. 测试精度 ``` def predict(X, theta): return [1 if x >= 0.5 else 0 for x in model(X, theta)] scaled_X = scaled_data[:, :3] y = scaled_data[:, 3] predictions = predict(scaled_X, theta) correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y)] accuracy = (sum(map(int, correct)) % len(correct)) print ('accuracy = {0}%'.format(accuracy)) ```
github_jupyter
##### Copyright 2020 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # MinDiff Data Preparation <div class="devsite-table-wrapper"><table class="tfo-notebook-buttons" align="left"> <td><a target="_blank" href="https://tensorflow.org/responsible_ai/model_remediation/min_diff/guide/min_diff_data_preparation.ipynb"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/model-remediation/blob/master/docs/min_diff/guide/min_diff_data_preparation.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png">Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/model-remediation/blob/master/docs/min_diff/guide/min_diff_data_preparation.ipynb"> <img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a> </td> <td> <a target="_blank" href="https://storage.googleapis.com/tensorflow_docs/model-remediation/docs/min_diff/guide/min_diff_data_preparation.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table></div> ##Introduction When implementing MinDiff, you will need to make complex decisions as you choose and shape your input before passing it on to the model. These decisions will largely determine the behavior of MinDiff within your model. 
This guide will cover the technical aspects of this process, but will not discuss how to evaluate a model for fairness, or how to identify particular slices and metrics for evaluation. Please see the [Fairness Indicators guidance](https://www.tensorflow.org/responsible_ai/fairness_indicators/guide/guidance) for details on this. To demonstrate MinDiff, this guide uses the [UCI income dataset](https://archive.ics.uci.edu/ml/datasets/census+income). The model task is to predict whether an individual has an income exceeding $50k, based on various personal attributes. This guide assumes there is a problematic gap in the FNR (false negative rate) between `"Male"` and `"Female"` slices and the model owner (you) has decided to apply MinDiff to address the issue. For more information on the scenarios in which one might choose to apply MinDiff, see the [requirements page](https://www.tensorflow.org/responsible_ai/model_remediation/min_diff/guide/requirements). Note: We recognize the limitations of the categories used in the original dataset, and acknowledge that these terms do not encompass the full range of vocabulary used in describing gender. Further, we acknowledge that this task doesn’t represent a real-world use case, and is used only to demonstrate the technical details of the MinDiff library. MinDiff works by penalizing the difference in distribution scores between examples in two sets of data. This guide will demonstrate how to choose and construct these additional MinDiff sets as well as how to package everything together so that it can be passed to a model for training. ##Setup ``` !pip install --upgrade tensorflow-model-remediation import tensorflow as tf from tensorflow_model_remediation import min_diff from tensorflow_model_remediation.tools.tutorials_utils import uci as tutorials_utils ``` ## Original Data For demonstration purposes and to reduce runtimes, this guide uses only a sample fraction of the UCI Income dataset. 
In a real production setting, the full dataset would be utilized. ``` # Sampled at 0.3 for reduced runtimes. train = tutorials_utils.get_uci_data(split='train', sample=0.3) print(len(train), 'train examples') ``` ### Converting to `tf.data.Dataset` `MinDiffModel` requires that the input be a `tf.data.Dataset`. If you were using a different format of input prior to integrating MinDiff, you will have to convert your input data. Use `tf.data.Dataset.from_tensor_slices` to convert to `tf.data.Dataset`. ``` dataset = tf.data.Dataset.from_tensor_slices((x, y, weights)) dataset.shuffle(...) # Optional. dataset.batch(batch_size) ``` See [`Model.fit`](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit) documentation for details on equivalences between the two methods of input. In this guide, the input is downloaded as a Pandas DataFrame and therefore, needs this conversion. ``` # Function to convert a DataFrame into a tf.data.Dataset. def df_to_dataset(dataframe, shuffle=True): dataframe = dataframe.copy() labels = dataframe.pop('target') ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels)) if shuffle: ds = ds.shuffle(buffer_size=5000) # Reasonable but arbitrary buffer_size. return ds # Convert the train DataFrame into a Dataset. original_train_ds = df_to_dataset(train) ``` Note: The training dataset has not been batched yet but it will be later. ## Creating MinDiff data During training, MinDiff will encourage the model to reduce differences in predictions between two additional datasets (which may include examples from the original dataset). The selection of these two datasets is the key decision which will determine the effect MinDiff has on the model. The two datasets should be picked such that the disparity in performance that you are trying to remediate is evident and well-represented. 
Since the goal is to reduce a gap in FNR between `"Male"` and `"Female"` slices, this means creating one dataset with only _positively_ labeled `"Male"` examples and another with only _positively_ labeled `"Female"` examples; these will be the MinDiff datasets. Note: The choice of using only _positively_ labeled examples is directly tied to the target metric. This guide is concerned with _false negatives_ which, by definition, are _positively_ labeled examples that were incorrectly classified. First, examine the data present. ``` female_pos = train[(train['sex'] == ' Female') & (train['target'] == 1)] male_pos = train[(train['sex'] == ' Male') & (train['target'] == 1)] print(len(female_pos), 'positively labeled female examples') print(len(male_pos), 'positively labeled male examples') ``` It is perfectly acceptable to create MinDiff datasets from subsets of the original dataset. While there aren't 5,000 or more positive `"Male"` examples as recommended in the [requirements guidance](https://www.tensorflow.org/responsible_ai/model_remediation/min_diff/guide/requirements#how_much_data_do_i_need), there are over 2,000 and it is reasonable to try with that many before collecting more data. ``` min_diff_male_ds = df_to_dataset(male_pos) ``` Positive `"Female"` examples, however, are much scarcer at 385. This is probably too small for good performance and so will require pulling in additional examples. Note: Since this guide began by reducing the dataset via sampling, this problem (and the corresponding solution) may seem contrived. However, it serves as a good example of how to approach concerns about the size of your MinDiff datasets. 
``` full_uci_train = tutorials_utils.get_uci_data(split='train') augmented_female_pos = full_uci_train[((full_uci_train['sex'] == ' Female') & (full_uci_train['target'] == 1))] print(len(augmented_female_pos), 'positively labeled female examples') ``` Using the full dataset has more than tripled the number of examples that can be used for MinDiff. It’s still low but it is enough to try as a first pass. ``` min_diff_female_ds = df_to_dataset(augmented_female_pos) ``` Both the MinDiff datasets are significantly smaller than the recommended 5,000 or more examples. While it is reasonable to attempt to apply MinDiff with the current data, you may need to consider collecting additional data if you observe poor performance or overfitting during training. ### Using `tf.data.Dataset.filter` Alternatively, you can create the two MinDiff datasets directly from the converted original `Dataset`. Note: When using `.filter` it is recommended to use `.cache()` if the dataset can easily fit in memory for runtime performance. If it is too large to do so, consider storing your filtered datasets in your file system and reading them in. ``` # Male def male_predicate(x, y): return tf.equal(x['sex'], b' Male') and tf.equal(y, 0) alternate_min_diff_male_ds = original_train_ds.filter(male_predicate).cache() # Female def female_predicate(x, y): return tf.equal(x['sex'], b' Female') and tf.equal(y, 0) full_uci_train_ds = df_to_dataset(full_uci_train) alternate_min_diff_female_ds = full_uci_train_ds.filter(female_predicate).cache() ``` The resulting `alternate_min_diff_male_ds` and `alternate_min_diff_female_ds` will be equivalent in output to `min_diff_male_ds` and `min_diff_female_ds` respectively. ## Constructing your Training Dataset As a final step, the three datasets (the two newly created ones and the original) need to be merged into a single dataset that can be passed to the model. ### Batching the datasets Before merging, the datasets need to batched. 
* The original dataset can use the same batching that was used before integrating MinDiff. * The MinDiff datasets do not need to have the same batch size as the original dataset. In all likelihood, a smaller one will perform just as well. While they don't even need to have the same batch size as each other, it is recommended to do so for best performance. While not strictly necessary, it is recommended to use `drop_remainder=True` for the two MinDiff datasets as this will ensure that they have consistent batch sizes. Warning: The 3 datasets must be batched **before** they are merged together. Failing to do so will likely result in unintended input shapes that will cause errors downstream. ``` original_train_ds = original_train_ds.batch(128) # Same as before MinDiff. # The MinDiff datasets can have a different batch_size from original_train_ds min_diff_female_ds = min_diff_female_ds.batch(32, drop_remainder=True) # Ideally we use the same batch size for both MinDiff datasets. min_diff_male_ds = min_diff_male_ds.batch(32, drop_remainder=True) ``` ### Packing the Datasets with `pack_min_diff_data` Once the datasets are prepared, pack them into a single dataset which will then be passed along to the model. A single batch from the resulting dataset will contain one batch from each of the three datasets you prepared previously. You can do this by using the provided `utils` function in the `tensorflow_model_remediation` package: ``` train_with_min_diff_ds = min_diff.keras.utils.pack_min_diff_data( original_dataset=original_train_ds, sensitive_group_dataset=min_diff_female_ds, nonsensitive_group_dataset=min_diff_male_ds) ``` And that's it! You will be able to use other `util` functions in the package to unpack individual batches if needed. 
``` for inputs, original_labels in train_with_min_diff_ds.take(1): # Unpacking min_diff_data min_diff_data = min_diff.keras.utils.unpack_min_diff_data(inputs) min_diff_examples, min_diff_membership = min_diff_data # Unpacking original data original_inputs = min_diff.keras.utils.unpack_original_inputs(inputs) ``` With your newly formed data, you are now ready to apply MinDiff in your model! To learn how this is done, please take a look at the other guides starting with [Integrating MinDiff with MinDiffModel](./integrating_min_diff_with_min_diff_model). ### Using a Custom Packing Format (optional) You may decide to pack the three datasets together in whatever way you choose. The only requirement is that you will need to ensure the model knows how to interpret the data. The default implementation of `MinDiffModel` assumes that the data was packed using `min_diff.keras.utils.pack_min_diff_data`. One easy way to format your input as you want is to transform the data as a final step after you have used `min_diff.keras.utils.pack_min_diff_data`. ``` # Reformat input to be a dict. def _reformat_input(inputs, original_labels): unpacked_min_diff_data = min_diff.keras.utils.unpack_min_diff_data(inputs) unpacked_original_inputs = min_diff.keras.utils.unpack_original_inputs(inputs) return { 'min_diff_data': unpacked_min_diff_data, 'original_data': (unpacked_original_inputs, original_labels)} customized_train_with_min_diff_ds = train_with_min_diff_ds.map(_reformat_input) ``` Your model will need to know how to read this customized input as detailed in the [Customizing MinDiffModel guide](./customizing_min_diff_model#customizing_default_behaviors_of_mindiffmodel). 
``` for batch in customized_train_with_min_diff_ds.take(1): # Customized unpacking of min_diff_data min_diff_data = batch['min_diff_data'] # Customized unpacking of original_data original_data = batch['original_data'] ``` ## Additional Resources * For an in depth discussion on fairness evaluation see the [Fairness Indicators guidance](https://www.tensorflow.org/responsible_ai/fairness_indicators/guide/guidance) * For general information on Remediation and MinDiff, see the [remediation overview](https://www.tensorflow.org/responsible_ai/model_remediation). * For details on requirements surrounding MinDiff see [this guide](https://www.tensorflow.org/responsible_ai/model_remediation/min_diff/guide/requirements). * To see an end-to-end tutorial on using MinDiff in Keras, see [this tutorial](https://www.tensorflow.org/responsible_ai/model_remediation/min_diff/tutorials/min_diff_keras). ## Utility Functions for other Guides This guide outlines the process and decision making that you can follow whenever applying MinDiff. The rest of the guides build off this framework. To make this easier, logic found in this guide has been factored out into helper functions: * `get_uci_data`: This function is already used in this guide. It returns a `DataFrame` containing the UCI income data from the indicated split sampled at whatever rate is indicated (100% if unspecified). * `df_to_dataset`: This function converts a `DataFrame` into a `tf.data.Dataset` as detailed in this guide with the added functionality of being able to pass the batch_size as a parameter. * `get_uci_with_min_diff_dataset`: This function returns a `tf.data.Dataset` containing both the original data and the MinDiff data packed together using the Model Remediation Library util functions as described in this guide. Warning: These utility functions are **not** part of the official `tensorflow-model-remediation` package API and are subject to change at any time. 
The rest of the guides will build off of these to show how to use other parts of the library.
github_jupyter
# 重定义森林火灾模拟 在前面的例子中,我们定义了一个 `BurnableForest`,实现了一个循序渐进的生长和燃烧过程。 假设我们现在想要定义一个立即燃烧的过程(每次着火之后燃烧到不能燃烧为止,之后再生长,而不是每次只燃烧周围的一圈树木),由于燃烧过程不同,我们需要从 `BurnableForest` 中派生出两个新的子类 `SlowBurnForest`(原来的燃烧过程) 和 `InsantBurnForest`,为此 - 将 `BurnableForest` 中的 `burn_trees()` 方法改写,不做任何操作,直接 `pass`(因为在 `advance_one_step()` 中调用了它,所以不能直接去掉) - 在两个子类中定义新的 `burn_trees()` 方法。 ``` import numpy as np from scipy.ndimage.measurements import label class Forest(object): """ Forest can grow trees which eventually die.""" def __init__(self, size=(150,150), p_sapling=0.0025): self.size = size self.trees = np.zeros(self.size, dtype=bool) self.p_sapling = p_sapling def __repr__(self): my_repr = "{}(size={})".format(self.__class__.__name__, self.size) return my_repr def __str__(self): return self.__class__.__name__ @property def num_cells(self): """Number of cells available for growing trees""" return np.prod(self.size) @property def tree_fraction(self): """ Fraction of trees """ num_trees = self.trees.sum() return float(num_trees) / self.num_cells def _rand_bool(self, p): """ Random boolean distributed according to p, less than p will be True """ return np.random.uniform(size=self.trees.shape) < p def grow_trees(self): """ Growing trees. """ growth_sites = self._rand_bool(self.p_sapling) self.trees[growth_sites] = True def advance_one_step(self): """ Advance one step """ self.grow_trees() class BurnableForest(Forest): """ Burnable forest support fires """ def __init__(self, p_lightning=5.0e-6, **kwargs): super(BurnableForest, self).__init__(**kwargs) self.p_lightning = p_lightning self.fires = np.zeros((self.size), dtype=bool) def advance_one_step(self): """ Advance one step """ super(BurnableForest, self).advance_one_step() self.start_fires() self.burn_trees() @property def fire_fraction(self): """ Fraction of fires """ num_fires = self.fires.sum() return float(num_fires) / self.num_cells def start_fires(self): """ Start of fire. 
""" lightning_strikes = (self._rand_bool(self.p_lightning) & self.trees) self.fires[lightning_strikes] = True def burn_trees(self): pass class SlowBurnForest(BurnableForest): def burn_trees(self): """ Burn trees. """ fires = np.zeros((self.size[0] + 2, self.size[1] + 2), dtype=bool) fires[1:-1, 1:-1] = self.fires north = fires[:-2, 1:-1] south = fires[2:, 1:-1] east = fires[1:-1, :-2] west = fires[1:-1, 2:] new_fires = (north | south | east | west) & self.trees self.trees[self.fires] = False self.fires = new_fires class InstantBurnForest(BurnableForest): def burn_trees(self): # 起火点 strikes = self.fires # 找到连通区域 groves, num_groves = label(self.trees) fires = set(groves[strikes]) self.fires.fill(False) # 将与着火点相连的区域都烧掉 for fire in fires: self.fires[groves == fire] = True self.trees[self.fires] = False self.fires.fill(False) ``` 测试: ``` forest = Forest() sb_forest = SlowBurnForest() ib_forest = InstantBurnForest() forests = [forest, sb_forest, ib_forest] tree_history = [] for i in xrange(1500): for fst in forests: fst.advance_one_step() tree_history.append(tuple(fst.tree_fraction for fst in forests)) ``` 显示结果: ``` import matplotlib.pyplot as plt %matplotlib inline plt.figure(figsize=(10,6)) plt.plot(tree_history) plt.legend([f.__str__() for f in forests]) plt.show() ```
github_jupyter
# VIME: Self/Semi Supervised Learning for Tabular Data # Setup ``` import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import tensorflow as tf import umap from sklearn.metrics import (average_precision_score, mean_squared_error, roc_auc_score) from sklearn.model_selection import train_test_split from tqdm import tqdm from vime import VIME, VIME_Self from vime_data import ( labelled_loss_fn, mask_generator_tf, pretext_generator_tf, semi_supervised_generator, to_vime_dataset, unlabelled_loss_fn ) %matplotlib inline %load_ext autoreload %autoreload 2 plt.rcParams["figure.figsize"] = (20,10) ``` # Data The example data is taken from [Kaggle](https://www.kaggle.com/c/ieee-fraud-detection) but it's already pre-processed and ready to be used. You can checkout the pre-processing notebook in the same folder to get some understanding about what transformations were done to the features. ``` train = pd.read_csv("fraud_train_preprocessed.csv") test = pd.read_csv("fraud_test_preprocessed.csv") # Drop nan columns as they are not useful for reconstruction error nan_columns = [f for f in train.columns if 'nan' in f] train = train.drop(nan_columns, axis=1) test = test.drop(nan_columns, axis=1) # Also, using only numerical columns because NNs have issue with one-hot encoding num_cols = train.columns[:-125] # Validation size is 10% val_size = int(train.shape[0] * 0.1) X_train = train.iloc[:-val_size, :] X_val = train.iloc[-val_size:, :] # Labelled 1% of data, everything else is unlabelled X_train_labelled = train.sample(frac=0.01) y_train_labelled = X_train_labelled.pop('isFraud') X_val_labelled = X_val.sample(frac=0.01) y_val_labelled = X_val_labelled.pop('isFraud') X_train_unlabelled = X_train.loc[~X_train.index.isin(X_train_labelled.index), :].drop('isFraud', axis=1) X_val_unlabelled = X_val.loc[~X_val.index.isin(X_val_labelled.index), :].drop('isFraud', axis=1) X_train_labelled = X_train_labelled[num_cols] X_val_labelled = 
X_val_labelled[num_cols] X_train_unlabelled = X_train_unlabelled[num_cols] X_val_unlabelled = X_val_unlabelled[num_cols] X_val_labelled.shape, X_train_labelled.shape print("Labelled Fraudsters", y_train_labelled.sum()) print( "Labelled Proportion:", np.round(X_train_labelled.shape[0] / (X_train_unlabelled.shape[0] + X_train_labelled.shape[0]), 5) ) ``` The following model will be trained with these hyperparameters: ``` vime_params = { 'alpha': 4, 'beta': 10, 'k': 5, 'p_m': 0.36 } ``` ## Self-Supervised Learning ### Data Prep The model needs 1 input - corrupted X, and 2 outputs - mask and original X. ``` batch_size = 1024 # Datasets train_ds, train_m = to_vime_dataset(X_train_unlabelled, vime_params['p_m'], batch_size=batch_size, shuffle=True) val_ds, val_m = to_vime_dataset(X_val_unlabelled, vime_params['p_m'], batch_size=batch_size) num_features = X_train_unlabelled.shape[1] print('Proportion Corrupted:', np.round(train_m.numpy().mean(), 2)) # Training vime_s = VIME_Self(num_features) vime_s.compile( optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss={ 'mask': 'binary_crossentropy', 'feature': 'mean_squared_error'}, loss_weights={'mask':1, 'feature': vime_params['alpha']} ) cbs = [tf.keras.callbacks.EarlyStopping( monitor="val_loss", patience=10, restore_best_weights=True )] vime_s.fit( train_ds, validation_data=val_ds, epochs=1000, callbacks=cbs ) vime_s.save('./vime_self') vime_s = tf.keras.models.load_model('./vime_self') ``` ### Evaluation All the evaluation will be done on the validation set ``` val_self_preds = vime_s.predict(val_ds) ``` To evaluate the mask reconstruction ability we can simply check the ROC AUC score for mask predictions across all the features. 
``` feature_aucs = [] for i in tqdm(range(X_val_unlabelled.shape[1])): roc = roc_auc_score(val_m.numpy()[:, i], val_self_preds['mask'][:, i]) feature_aucs.append(roc) self_metrics = pd.DataFrame({"metric": 'mask_auc', "metric_values": feature_aucs}) ``` Now, we can evaluate the feature reconstruction ability using RMSE and correlation coefficients ``` feature_corrs = [] for i in tqdm(range(X_val_unlabelled.shape[1])): c = np.corrcoef(X_val_unlabelled.values[:, i], val_self_preds['feature'][:, i])[0, 1] feature_corrs.append(c) self_metrics = pd.concat([ self_metrics, pd.DataFrame({"metric": 'feature_correlation', "metric_values": feature_corrs}) ]) ``` From the plot and table above, we can see that the model has learned to reconstruct most of the features. Half of the features are reconstructed with relatively strong correlation with original data. Only a handful of features are not properly reconstructed. Let's check the RMSE across all the features ``` rmses = [] for i in tqdm(range(X_val_unlabelled.shape[1])): mse = mean_squared_error(X_val_unlabelled.values[:, i], val_self_preds['feature'][:, i]) rmses.append(np.sqrt(mse)) self_metrics = pd.concat([ self_metrics, pd.DataFrame({"metric": 'RMSE', "metric_values": rmses}) ]) sns.boxplot(x=self_metrics['metric'], y=self_metrics['metric_values']) plt.title("Self-Supervised VIME Evaluation") ``` RMSE distribution further indicates that mjority of the features are well-reconstructed. Another way to evaluate the self-supervised model is to look at the embeddings. Since the whole point of corrupting the dataset is to learn to generate robust embeddings, we can assume that if a sample was corrupted 5 times, all 5 embeddings should be relatively close to each other in the vector space. Let's check this hypothesis by corrupting 10 different samples 5 times and projecting their embeddings to 2-dimensional space using UMAP. 
``` from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Input, Dense, Dropout def generate_k_corrupted(x, k, p_m): x_u_list = [] for i in range(k): mask = mask_generator_tf(p_m, x) _, x_corr = pretext_generator_tf(mask, tf.constant(x, dtype=tf.float32)) x_u_list.append(x_corr) # Prepare input with shape (n, k, d) x_u_corrupted = np.zeros((x.shape[0], k, x.shape[1])) for i in range(x.shape[0]): for j in range(k): x_u_corrupted[i, j, :] = x_u_list[j][i, :] return x_u_corrupted vime_s = tf.keras.models.load_model('./vime_self') # Sequential model to produce embeddings encoding_model = Sequential( [ Input(num_features), vime_s.encoder ] ) dense_model = Sequential( [ Input(num_features), Dense(num_features, activation="relu"), ] ) # Create corrupted sample samples = X_val_unlabelled.sample(10) sample_corrupted = generate_k_corrupted( x=samples, k=5, p_m=0.4 ) val_encoding = encoding_model.predict(sample_corrupted, batch_size=batch_size) random_encoding = dense_model.predict(sample_corrupted, batch_size=batch_size) fig, axs = plt.subplots(1, 2) # Project corrupted samples u = umap.UMAP(n_neighbors=5, min_dist=0.8) corrupted_umap = u.fit_transform(val_encoding.reshape(-1, val_encoding.shape[2])) sample_ids = np.array([np.repeat(i, 5) for i in range(10)]).ravel() sns.scatterplot(corrupted_umap[:, 0], corrupted_umap[:, 1], hue=sample_ids, palette="tab10", ax=axs[0]) axs[0].set_title('VIME Embeddings of Corrupted Samples') plt.legend(title='Sample ID') # Project corrupted samples u = umap.UMAP(n_neighbors=5, min_dist=0.8) corrupted_umap = u.fit_transform(random_encoding.reshape(-1, random_encoding.shape[2])) sample_ids = np.array([np.repeat(i, 5) for i in range(10)]).ravel() sns.scatterplot(corrupted_umap[:, 0], corrupted_umap[:, 1], hue=sample_ids, palette="tab10", ax=axs[1]) axs[1].set_title('Not-trained Embeddings of Corrupted Samples') plt.legend(title='Sample ID') plt.show() ``` As you can see, the embeddings indeed put the same samples 
closer to each other, even though some of their values were corrupted. According to the authors, this means that the model has learned useful information about the feature correlations which can be helpful in the downstream tasks. Now, we can use this encoder in the next semi-supervised part. ## Semi-Supervised Learning ``` semi_batch_size = 512 num_features = X_train_unlabelled.shape[1] ``` Since we have different number of labelled and unlabelled examples we need to use generators. They will shuffle and select appropriate number of rows for each training iteration. ``` def train_semi_generator(): return semi_supervised_generator( X_train_labelled.values, X_train_unlabelled.values, y_train_labelled.values, bs=semi_batch_size ) def val_semi_generator(): return semi_supervised_generator( X_val_labelled.values, X_val_unlabelled.values, y_val_labelled.values, bs=semi_batch_size ) semi_train_dataset = tf.data.Dataset.from_generator( train_semi_generator, output_signature=( tf.TensorSpec(shape=(semi_batch_size, num_features), dtype=tf.float32), tf.TensorSpec(shape=(semi_batch_size), dtype=tf.float32), tf.TensorSpec(shape=(semi_batch_size, num_features), dtype=tf.float32) ) ) semi_val_dataset = tf.data.Dataset.from_generator( val_semi_generator, output_signature=( tf.TensorSpec(shape=(semi_batch_size, num_features), dtype=tf.float32), tf.TensorSpec(shape=(semi_batch_size), dtype=tf.float32), tf.TensorSpec(shape=(semi_batch_size, num_features), dtype=tf.float32) ) ) ``` ## Self Supervised VIME ``` def train_vime_semi(encoder, train_dataset, val_dataset, train_params, vime_params): # Model vime = VIME(encoder) # Training parameters iterations = train_params['iterations'] optimizer = tf.keras.optimizers.Adam(train_params['learning_rate']) early_stop = train_params['early_stop'] # Set metrics to track best_loss = 1e10 no_improve = 0 # Begining training loop for it in range(iterations): # Grab a batch for iteration it_train = iter(train_dataset) X_l, y_l, X_u = next(it_train) 
# Generate unlabelled batch with k corrupted examples per sample X_u_corrupted = generate_k_corrupted(X_u, vime_params['k'], vime_params['p_m']) with tf.GradientTape() as tape: # Predict labelled & unlabelled labelled_preds = vime(X_l) unlabelled_preds = vime(X_u_corrupted) # Calculate losses labelled_loss = labelled_loss_fn(y_l, labelled_preds) unlabelled_loss = unlabelled_loss_fn(unlabelled_preds) # Total loss semi_supervised_loss = unlabelled_loss + vime_params['beta'] * labelled_loss if it % 10 == 0: val_iter_losses = [] print(f"\nMetrics for Iteration {it}") for i in range(5): # Grab a batch it_val = iter(val_dataset) X_l_val, y_l_val, X_u_val = next(it_val) # Generate unlabelled batch with k corrupted examples per sample X_u_corrupted = generate_k_corrupted(X_u_val, vime_params['k'], vime_params['p_m']) # Predict labelled & unlabelled labelled_preds_val = vime(X_l_val) unlabelled_preds_val = vime(X_u_corrupted) # Calculate losses labelled_loss_val = labelled_loss_fn(y_l_val, labelled_preds_val) unlabelled_loss_val = unlabelled_loss_fn(unlabelled_preds_val) semi_supervised_loss_val = unlabelled_loss_val + vime_params['beta'] * labelled_loss_val val_iter_losses.append(semi_supervised_loss_val) # Average loss over 5 validation iterations semi_supervised_loss_val = np.mean(val_iter_losses) print(f"Train Loss {np.round(semi_supervised_loss, 5)}, Val Loss {np.round(semi_supervised_loss_val, 5)}") # Update metrics if val_loss is better if semi_supervised_loss_val < best_loss: best_loss = semi_supervised_loss_val no_improve = 0 vime.save('./vime') else: no_improve += 1 print(f"Validation loss not improved {no_improve} times") # Early stopping if no_improve == early_stop: break # Update weights grads = tape.gradient(semi_supervised_loss, vime.trainable_weights) optimizer.apply_gradients(zip(grads, vime.trainable_weights)) vime = tf.keras.models.load_model('./vime') return vime train_params = { 'num_features': num_features, 'iterations': 1000, 'early_stop': 20, 
    'learning_rate': 0.001
}

vime_self = tf.keras.models.load_model('./vime_self')

# Train the semi-supervised model on top of the pre-trained encoder.
vime_semi = train_vime_semi(
    encoder = vime_self.encoder,
    train_dataset = semi_train_dataset,
    val_dataset = semi_val_dataset,
    train_params = train_params,
    vime_params = vime_params
)

test_ds = tf.data.Dataset.from_tensor_slices(test[num_cols]).batch(batch_size)

vime_tuned_preds = vime_semi.predict(test_ds)
pr = average_precision_score(test['isFraud'], vime_tuned_preds)
print(pr)
```

## Evaluation

Re-training the model 10 times to get distribution of PR AUC scores.

```
vime_prs = []
test_ds = tf.data.Dataset.from_tensor_slices(test[num_cols]).batch(batch_size)

for i in range(10):
    train_params = {
        'num_features': num_features,
        'iterations': 1000,
        'early_stop': 10,
        'learning_rate': 0.001
    }
    vime_self = tf.keras.models.load_model('./vime_self')
    # Freeze the encoder for the semi-supervised training phase.
    vime_self.encoder.trainable = False
    vime_semi = train_vime_semi(
        encoder = vime_self.encoder,
        train_dataset = semi_train_dataset,
        val_dataset = semi_val_dataset,
        train_params = train_params,
        vime_params = vime_params
    )

    # fine-tune
    vime_semi = tf.keras.models.load_model('./vime')
    # NOTE(review): the next line is a bare attribute access with no effect.
    # Given the "fine-tune" comment it presumably should be an assignment
    # (e.g. `vime_semi.encoder.trainable = True`) — confirm the intent.
    vime_semi.encoder.trainable

    vime_tuned_preds = vime_semi.predict(test_ds)
    pr = average_precision_score(test['isFraud'], vime_tuned_preds)
    vime_prs.append(pr)
    print('VIME Train', i, "PR AUC:", pr)
```

### Compare with MLP and RF

```
mlp_prs = []

for i in range(10):
    # Plain supervised MLP baseline trained only on the labelled subset.
    base_mlp = Sequential([
        Input(shape=num_features),
        Dense(num_features),
        Dense(128),
        Dropout(0.2),
        Dense(128),
        Dropout(0.2),
        Dense(1, activation='sigmoid')
    ])
    base_mlp.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
        loss='binary_crossentropy'
    )

    # Early stopping based on validation loss
    cbs = [tf.keras.callbacks.EarlyStopping(
        monitor="val_loss",
        patience=20,
        restore_best_weights=True
    )]

    base_mlp.fit(
        x=X_train_labelled.values,
        y=y_train_labelled,
        validation_data=(X_val_labelled.values, y_val_labelled),
        epochs=1000,
        callbacks=cbs
    )

    base_mlp_preds = base_mlp.predict(test_ds)
mlp_prs.append(average_precision_score(test['isFraud'], base_mlp_preds)) from lightgbm import LGBMClassifier train_tree_X = pd.concat([X_train_labelled, X_val_labelled]) train_tree_y = pd.concat([y_train_labelled, y_val_labelled]) rf_prs = [] for i in tqdm(range(10)): rf = RandomForestClassifier(max_depth=4) rf.fit(train_tree_X.values, train_tree_y) rf_preds = rf.predict_proba(test[X_train_labelled.columns]) rf_prs.append(average_precision_score(test['isFraud'], rf_preds[:, 1])) metrics_df = pd.DataFrame({"MLP": mlp_prs, "VIME": vime_prs, "RF": rf_prs}) metrics_df.boxplot() plt.ylabel("PR AUC") plt.show() metrics_df.describe() ```
github_jupyter
``` from google.colab import drive drive.mount('/content/gdrive') %cd gdrive/My\ Drive/Colab\ Notebooks/neural\ project\ new/ # import functions import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable import os import pickle import random from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import cv2 ``` # **Dataset Generation** ``` train_objSize = 9800 test_objSize = 200 imSize = 75 # size of image objSize = 5 #size of objects in image qSize = 11 #6 for one-hot vector of color, 2 for question type, 3 for question subtype nQuestion = 10 # Answer : [yes, no, rectangle, circle, r, g, b, o, k, y] datadir = './data' # directory to store training data colors = [(0,0,255),(0,255,0),(255,0,0),(0,156,255),(128,128,128),(0,255,255)]#r,g,b,o,k,y try: os.makedirs(datadir) except: print('Data directory already exists.') def createCenter(objects): while True: pas = True center = np.random.randint(objSize, imSize - objSize, 2) if len(objects) > 0:# if an object is already present, then find center that is 2X away for all old centers for name,c,shape in objects: if ((center - c) ** 2).sum() < ((objSize * 2) ** 2): pas = False if pas:# if no old object is present, then no need to check anything return center def putObjs():# function create 1 image and list of location,color,shape of 6 objects objects = [] img = np.ones((imSize,imSize,3)) * 255 #plain white BG image for color_id,color in enumerate(colors): # put object of every color center = createCenter(objects) # take 50% of objects as circles and 50% as rectangles if random.random()<0.5: start = (center[0]-objSize, center[1]-objSize)#xmin,ymin end = (center[0]+objSize, center[1]+objSize)#xmax,ymax cv2.rectangle(img, start, end, color, -1) objects.append((color_id,center,'r')) else: center_ = (center[0], center[1]) cv2.circle(img, center_, objSize, color, -1) objects.append((color_id,center,'c')) return 
objects,img def genOneVQA():# function will create 20 QA for 1 image objects,img = putObjs() rel_questions = [] norel_questions = [] rel_answers = [] norel_answers = [] # 10 Non-relational questions for idx in range(nQuestion): question = np.zeros((qSize)) color = random.randint(0,5) question[color] = 1 question[6] = 1 subtype = random.randint(0,2) question[subtype+8] = 1 norel_questions.append(question) if subtype == 0: """query shape->rectangle/circle""" if objects[color][2] == 'r': answer = 2 else: answer = 3 elif subtype == 1: """query horizontal position->yes/no""" if objects[color][1][0] < imSize / 2: answer = 0 else: answer = 1 elif subtype == 2: """query vertical position->yes/no""" if objects[color][1][1] < imSize / 2: answer = 0 else: answer = 1 norel_answers.append(answer) # 10 Relational questions for idx in range(nQuestion): question = np.zeros((qSize)) color = random.randint(0,5) question[color] = 1 question[7] = 1 subtype = random.randint(0,2) question[subtype+8] = 1 rel_questions.append(question) if subtype == 0: """closest-to->rectangle/circle""" my_obj = objects[color][1] dist_list = [((my_obj - obj[1]) ** 2).sum() for obj in objects] dist_list[dist_list.index(0)] = 999 closest = dist_list.index(min(dist_list)) if objects[closest][2] == 'r': answer = 2 else: answer = 3 elif subtype == 1: """furthest-from->rectangle/circle""" my_obj = objects[color][1] dist_list = [((my_obj - obj[1]) ** 2).sum() for obj in objects] furthest = dist_list.index(max(dist_list)) if objects[furthest][2] == 'r': answer = 2 else: answer = 3 elif subtype == 2: """count->1~6""" my_obj = objects[color][2] count = -1 for obj in objects: if obj[2] == my_obj: count +=1 answer = count+4 rel_answers.append(answer) relations = (rel_questions, rel_answers) norelations = (norel_questions, norel_answers) img = img/255. 
#normalize image dataset = (img, relations, norelations) return dataset print('Building train and test datasets.') test_datasets = [genOneVQA() for _ in range(test_size)] train_datasets = [genOneVQA() for _ in range(train_size)] filename = os.path.join(datadir,'sort-of-clevr.pickle') with open(filename, 'wb') as f: pickle.dump((train_datasets, test_datasets), f) print('Dataset saved.') # !rm -rf data ``` # **Standard Functions** ``` def tensor_data(data, i): img = torch.from_numpy(np.asarray(data[0][bs*i:bs*(i+1)])) qst = torch.from_numpy(np.asarray(data[1][bs*i:bs*(i+1)])) ans = torch.from_numpy(np.asarray(data[2][bs*i:bs*(i+1)])) input_img.data.resize_(img.size()).copy_(img) input_qst.data.resize_(qst.size()).copy_(qst) label.data.resize_(ans.size()).copy_(ans) def cvt_data_axis(data): img = [e[0] for e in data] qst = [e[1] for e in data] ans = [e[2] for e in data] return (img,qst,ans) def train(epoch, rel, norel): model.train() random.shuffle(rel) random.shuffle(norel) rel = cvt_data_axis(rel) norel = cvt_data_axis(norel) for batch_idx in range(len(rel[0]) // bs): tensor_data(rel, batch_idx) accuracy_rel = model.train_(input_img, input_qst, label) tensor_data(norel, batch_idx) accuracy_norel = model.train_(input_img, input_qst, label) if batch_idx % args.log_interval == 0: print('Train Epoch '+ str(epoch) +' : Relations accuracy: ' + str(accuracy_rel.numpy())+'% : Non Relations accuracy: ' + str(accuracy_norel.numpy())+'%') def test(testFlag, rel, norel): model.eval() rel = cvt_data_axis(rel) norel = cvt_data_axis(norel) accuracy_rels = [] accuracy_norels = [] for batch_idx in range(len(rel[0]) // bs): tensor_data(rel, batch_idx) accuracy_rels.append(model.test_(input_img, input_qst, label)[1]) tensor_data(norel, batch_idx) accuracy_norels.append(model.test_(input_img, input_qst, label)[1]) accuracy_rel = sum(accuracy_rels) / len(accuracy_rels) accuracy_norel = sum(accuracy_norels) / len(accuracy_norels) print('\n '+ testFlag + ' : Relations accuracy: ' + 
str(accuracy_rel.numpy())+'% : Non Relations accuracy: ' + str(accuracy_norel.numpy())+'%') ``` # **RN Model** ``` # function for finding coordinates of visual words def findCoords(i): if i>=0 and i<=4: tmp=[0,i] elif i>=5 and i<=9: tmp=[1,i%5] elif i>=10 and i<=14: tmp=[2,i%5] elif i>=15 and i<=19: tmp=[3,i%5] elif i>=20 and i<=24: tmp=[4,i%5] return list((np.array(tmp)/2)-1) # cnn model class ConvInputModel(nn.Module): def __init__(self): super(ConvInputModel, self).__init__() self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=2, padding=1)#input channel,output channel,kernel size self.batchNorm1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1) self.batchNorm2 = nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1) self.batchNorm3 = nn.BatchNorm2d(128) self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1) self.batchNorm4 = nn.BatchNorm2d(256) def forward(self, in_img): x = self.conv1(in_img) x = F.relu(x) x = self.batchNorm1(x) x = self.conv2(x) x = F.relu(x) x = self.batchNorm2(x) x = self.conv3(x) x = F.relu(x) x = self.batchNorm3(x) x = self.conv4(x) x = F.relu(x) x = self.batchNorm4(x) return x # f_phi model class FCOutputModel(nn.Module): def __init__(self): super(FCOutputModel, self).__init__() # self.fc = nn.Linear(2000, 1000) self.fc2 = nn.Linear(1000, 500) self.fc3 = nn.Linear(500, 10) def forward(self, x): # x = self.fc(x) # x = F.relu(x) x = self.fc2(x) x = F.relu(x) x = F.dropout(x) x = self.fc3(x) return F.log_softmax(x,dim=1) # base model class BasicModel(nn.Module): def __init__(self, args, name): super(BasicModel, self).__init__() self.name=name def train_(self, input_img, input_qst, label): #model.train() is predefined, so i created train_ self.optimizer.zero_grad() output = self(input_img, input_qst) loss = F.nll_loss(output, label) loss.backward() 
self.optimizer.step() pred = output.data.max(1)[1] correct = pred.eq(label.data).cpu().sum() accuracy = correct * 100. / len(label) return accuracy def test_(self, input_img, input_qst, label): output = self(input_img, input_qst) pred = output.data.max(1)[1] correct = pred.eq(label.data).cpu().sum() accuracy = correct * 100. / len(label) return pred,accuracy def save_model(self, epoch): torch.save(self.state_dict(), 'model/epoch_{}_{:02d}.pth'.format(self.name, epoch)) # RN model class RN(BasicModel): def __init__(self, args): super(RN, self).__init__(args, 'RN') self.conv = ConvInputModel() # g_theta ##(number of filters per object+coordinate of object)*2+question vector self.g_fc1 = nn.Linear((256+2)*2+11, 2000) self.g_fc2 = nn.Linear(2000, 2000) self.g_fc3 = nn.Linear(2000, 2000) self.g_fc4 = nn.Linear(2000, 2000) self.coordinates = torch.FloatTensor(args.batch_size, 25, 2) if args.cuda: self.coordinates = self.coordinates.cuda() self.coordinates = Variable(self.coordinates) np_coordinates = np.zeros((args.batch_size, 25, 2)) for i in range(25): np_coordinates[:,i,:] = np.array( findCoords(i) ) self.coordinates.data.copy_(torch.from_numpy(np_coordinates)) # f_phi self.f_fc1 = nn.Linear(2000, 1000) self.fcout = FCOutputModel() # optimiser self.optimizer = optim.Adam(self.parameters(), lr=args.lr) def forward(self, img, q): x = self.conv(img) ## x = (bs x 256 x 5 x 5) bs = x.size()[0] c = x.size()[1] d = x.size()[2] x = x.view(bs,c,d*d).permute(0,2,1)#(bs x 25 x 256) # add coordinates to all the visual words x = torch.cat([x, self.coordinates],2)#(bs x 25 x 258) # repeat question as many times as the no. 
of objects q = torch.unsqueeze(q, 1) q = q.repeat(1,25,1) q = torch.unsqueeze(q, 2) # combine all object pairs with questions o1 = torch.unsqueeze(x,1) # (bsx1x25x258) o1 = o1.repeat(1,25,1,1) # (bsx25x25x258) o2 = torch.unsqueeze(x,2) # (bsx25x1x258) o2 = torch.cat([o2,q],3)# (bsx25x1x(258+11)) o2 = o2.repeat(1,1,25,1) # (bsx25x25x(258+11)) # concatenate all together x = torch.cat([o1,o2],3) # (bsx25x25x(258+258+11)) # reshape for passing through network x = x.view(bs*d*d*d*d,527) #527=258X2+11 x = F.relu(self.g_fc1(x)) x = F.relu(self.g_fc2(x)) x = F.relu(self.g_fc3(x)) x = F.relu(self.g_fc4(x)) # sum: polling to introduce order invariance amoung objects x = x.view(bs,d*d*d*d,2000) x = x.sum(1).squeeze() # f_phi x = F.relu(self.f_fc1(x)) return self.fcout(x) ``` # **Training** ``` # Load data def load_data(): dirs = './data' filename = os.path.join(dirs,'sort-of-clevr.pickle') with open(filename, 'rb') as f: train_datasets, test_datasets = pickle.load(f) rel_tmp = [] rel_test = [] norel_tmp = [] norel_test = [] for img, relations, norelations in train_datasets: img = np.swapaxes(img,0,2) for qst,ans in zip(relations[0], relations[1]): rel_tmp.append((img,qst,ans)) for qst,ans in zip(norelations[0], norelations[1]): norel_tmp.append((img,qst,ans)) for img, relations, norelations in test_datasets: img = np.swapaxes(img,0,2) for qst,ans in zip(relations[0], relations[1]): rel_test.append((img,qst,ans)) for qst,ans in zip(norelations[0], norelations[1]): norel_test.append((img,qst,ans)) #use 10% of training data as validation rel_train, rel_val = train_test_split(rel_tmp, test_size=0.10, random_state=42) norel_train, norel_val = train_test_split(norel_tmp, test_size=0.10, random_state=42) return (rel_train, rel_test, rel_val, norel_val, norel_train, norel_test) rel_train, rel_test, rel_val, norel_val, norel_train, norel_test = load_data() print('Data loaded.') # create model object from argparse import Namespace # resumeFlag=None resumeFlag='final_epoch_RN_03.pth' 
bs=64 mydict={'batch_size':bs,'cuda':True,'epochs':20,'log_interval':10,'lr':0.0001,'model':'RN','no_cuda':False,'resume':resumeFlag,'seed':1} args = Namespace(**mydict) model=RN(args) # Detect if we have a GPU available device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model = model.to(device) model # Training the model and validation in batches model_dirs = './model' bs = args.batch_size input_img = torch.FloatTensor(bs, 3, 75, 75) input_qst = torch.FloatTensor(bs, 11) label = torch.LongTensor(bs) if args.cuda: model.cuda() input_img = input_img.cuda() input_qst = input_qst.cuda() label = label.cuda() input_img = Variable(input_img) input_qst = Variable(input_qst) label = Variable(label) try: os.makedirs(model_dirs) except: print('Model directory already exists.') if args.resume: filename = os.path.join(model_dirs, args.resume) if os.path.isfile(filename): checkpoint = torch.load(filename) model.load_state_dict(checkpoint) print('Checkpoint '+filename+' loaded.') for epoch in range(1, args.epochs + 1): train(epoch, rel_train, norel_train) test('Validation', rel_val, norel_val) model.save_model(epoch) ``` # **Testing** ``` from argparse import Namespace model_dirs = './model' # resumeFlag=None resumeFlag='final_epoch_RN_03.pth'#has 92% test acc. 
bs=64 # bs=80 mydict={'batch_size':bs,'cuda':True,'epochs':20,'log_interval':10,'lr':0.0001,'model':'RN','no_cuda':False,'resume':resumeFlag,'seed':1} args = Namespace(**mydict) model=RN(args) # Detect if we have a GPU available device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model = model.to(device) if args.resume: filename = os.path.join(model_dirs, args.resume) if os.path.isfile(filename): checkpoint = torch.load(filename) model.load_state_dict(checkpoint) print('Checkpoint '+filename+' loaded.') test('Test', rel_test, norel_test) ``` # **Result Visualization** ``` #Load the data dirs = './data' filename = os.path.join(dirs,'sort-of-clevr.pickle') with open(filename, 'rb') as f: train_datasets, test_datasets = pickle.load(f) rel_train = [] rel_test = [] norel_train = [] norel_test = [] for img, relations, norelations in train_datasets: img = np.swapaxes(img,0,2) for qst,ans in zip(relations[0], relations[1]): rel_train.append((img,qst,ans)) for qst,ans in zip(norelations[0], norelations[1]): norel_train.append((img,qst,ans)) for img, relations, norelations in test_datasets: img = np.swapaxes(img,0,2) for qst,ans in zip(relations[0], relations[1]): rel_test.append((img,qst,ans)) for qst,ans in zip(norelations[0], norelations[1]): norel_test.append((img,qst,ans)) from argparse import Namespace resumeFlag='final_epoch_RN_03.pth'#has 92% test acc. 
mydict={'batch_size':64,'cuda':True,'epochs':20,'log_interval':10,'lr':0.0001,'model':'RN','no_cuda':False,'resume':resumeFlag,'seed':1} args = Namespace(**mydict) model=RN(args) model.load_state_dict(torch.load('model/final_epoch_RN_03.pth')) model.eval(); # function plotting and visualizing results colors = ['red ', 'green ', 'blue ', 'orange ', 'gray ', 'yellow '] answer_sheet = ['yes', 'no', 'rectangle', 'circle', '1', '2', '3', '4', '5', '6'] def plot_RN_result(idx): img2 = np.swapaxes(input_img[idx].cpu().detach().numpy(),0,2) plt.imshow(np.dstack((img2[:,:,2],img2[:,:,1],img2[:,:,0]))); plt.grid(False) question=input_qst[idx] if question[6] == 1: query = 'Q: The object with color ' query += colors[question.tolist()[0:6].index(1)]+', ' if question[8] == 1: query += 'has what shape?' if question[9] == 1: query += 'is towards the left?' if question[10] == 1: query += 'is towards the top?' if question[7] == 1: query = 'Q: For the object with color ' query += colors[question.tolist()[0:6].index(1)]+', ' if question[8] == 1: query += 'the shape of the closest object is?' if question[9] == 1: query += 'the shape of the furthest object is??' if question[10] == 1: query += 'the number of objects having the same shape is?' 
print(query) print('A: ',answer_sheet[accuracy_rels[0][idx]],' (Predicted)') print('A:' ,answer_sheet[label[idx]],' (Desired)') ``` ## *Relational* ``` epoch=0 bs=64 input_img = torch.FloatTensor(bs, 3, 75, 75) input_qst = torch.FloatTensor(bs, 11) label = torch.LongTensor(bs) model.cuda() input_img = input_img.cuda() input_qst = input_qst.cuda() label = label.cuda() accuracy_rels = [] rel_test2 = cvt_data_axis(rel_test) batch_idx=1 # for batch_idx in range(len(rel_test2[0]) // bs): tensor_data(rel_test2, batch_idx); accuracy_rels.append(model.test_(input_img, input_qst, label)[0]); idx=4#index of img,question,answer plot_RN_result(idx) idx=30#index of img,question,answer plot_RN_result(idx) idx=45#index of img,question,answer plot_RN_result(idx) idx=63#index of img,question,answer plot_RN_result(idx) #XXXXXXX idx=34#index of img,question,answer plot_RN_result(idx) ``` ## *Non-Relational* ``` epoch=0 bs=64 input_img = torch.FloatTensor(bs, 3, 75, 75) input_qst = torch.FloatTensor(bs, 11) label = torch.LongTensor(bs) model.cuda() input_img = input_img.cuda() input_qst = input_qst.cuda() label = label.cuda() accuracy_rels = [] norel_test2 = cvt_data_axis(norel_test) batch_idx=1 # for batch_idx in range(len(rel_test2[0]) // bs): tensor_data(norel_test2, batch_idx); accuracy_rels.append(model.test_(input_img, input_qst, label)[0]); idx=4#index of img,question,answer plot_RN_result(idx) idx=20#index of img,question,answer plot_RN_result(idx) idx=52#index of img,question,answer plot_RN_result(idx) idx=33#index of img,question,answer plot_RN_result(idx) idx=40#index of img,question,answer plot_RN_result(idx) ```
github_jupyter
``` JSON_PATH = 'by-article-train_attn-data.json' from json import JSONDecoder data = JSONDecoder().decode(open(JSON_PATH).read()) word = 'Sponsored' hyper_count = dict() main_count = dict() for i, article in enumerate(data): if word in article['normalizedText'][-1]: energies = [e for w, e in article['activations'][-1][0] if w == word] if article['hyperpartisan'] == 'true': hyper_count[i] = { 'energies': energies, 'truth': article['hyperpartisan'], 'prediction': article['prediction'], 'pred_value': article['pred_value'], 'last_sent_e': article['activations'][-1][-1], } elif article['hyperpartisan'] == 'false': main_count[i] = { 'energies': energies, 'truth': article['hyperpartisan'], 'prediction': article['prediction'], 'pred_value': article['pred_value'], 'last_sent_e': article['activations'][-1][-1], } else: raise RuntimeError('json format invalid') # Average word energy of 1st 'Sponsored' tag avg_final_e = [el['energies'][0] * el['last_sent_e'] for el in hyper_count.values()] print('AVG:', sum(avg_final_e) / len(avg_final_e)) avg_final_e # Average final energy of 1st 'Sponsored' tag (word_e * sentence_e) avg_final_e = [el['energies'][0] * el['last_sent_e'] for el in hyper_count.values()] print('AVG:', sum(avg_final_e) / len(avg_final_e)) avg_final_e ### ### ### hyper_articles = [el for el in data if el['hyperpartisan'] == 'true'] main_articles = [el for el in data if el['hyperpartisan'] == 'false'] assert len(hyper_articles) + len(main_articles) == len(data) hyper_sent_att = [activ[-1] for a in hyper_articles for activ in a['activations']] main_sent_att = [activ[-1] for a in main_articles for activ in a['activations']] import seaborn as sns import matplotlib.pyplot as plt sns.distplot(hyper_sent_att, hist=False, rug=False, label="hyperpartisan") sns.distplot(main_sent_att, hist=False, rug=False, label="mainstream") plt.gcf().savefig('imgs/sentence_energy_distribution.png', dpi=400) plt.show() ## Describe distribution from scipy import stats print('Hyperpartisan 
Sentence Energy distribution:') print(stats.describe(hyper_sent_att), end='\n\n') print('Mainstream Sentence Energy distribution:') print(stats.describe(main_sent_att), end='\n\n') ## Average attention on most important sentence hyper_most_imp_sent = [max(activ[-1] for activ in a['activations']) for a in hyper_articles] main_most_imp_sent = [max(activ[-1] for activ in a['activations']) for a in main_articles] print('Avg Hyperpartisan:', sum(hyper_most_imp_sent) / len(hyper_most_imp_sent)) print('Avg Mainstream:', sum(main_most_imp_sent) / len(main_most_imp_sent)) sns.distplot(hyper_most_imp_sent, hist=False, rug=False, label="hyperpartisan") sns.distplot(main_most_imp_sent, hist=False, rug=False, label="mainstream") plt.gcf().savefig('imgs/most_important_sentence_energy_distribution.png', dpi=400) plt.show() ## Number of sentences with attention above a given threshold of importance THRESHOLD = 0.3 hyper_important_sentences = [sum(1 for activ in a['activations'] if activ[-1] > THRESHOLD) for a in hyper_articles] main_important_sentences = [sum(1 for activ in a['activations'] if activ[-1] > THRESHOLD) for a in main_articles] print('Average number of sentences above {}:'.format(THRESHOLD)) print('\thyperpartisan: {}'.format(sum(hyper_important_sentences) / len(hyper_important_sentences))) print('\tmainstream: {}'.format(sum(main_important_sentences) / len(main_important_sentences))) ### ### ### ## Calculating statistical significance that the two distributions are distinct ## Welch's t-test: https://en.wikipedia.org/wiki/Welch%27s_t-test t_val, p_val = stats.ttest_ind(hyper_sent_att, main_sent_att, equal_var=False) print('p-value for the hypothesis that the two distributions have equal mean:', p_val) ## Statistical significance of hypothesis: ## attention of most important sentence of a mainstream article is larger than that of a hyperpartisan article from statsmodels.stats import weightstats as stests _, p_val = stests.ztest(hyper_most_imp_sent, main_most_imp_sent, 
value=0) print(p_val) ```
github_jupyter
``` import detectron2 from detectron2.utils.logger import setup_logger setup_logger() import numpy as np import random from detectron2.engine import DefaultPredictor from detectron2.config import get_cfg from detectron2.utils.visualizer import Visualizer from detectron2.data import MetadataCatalog from detectron2.modeling import build_model from detectron2.evaluation import COCOEvaluator,PascalVOCDetectionEvaluator import matplotlib.pyplot as plt import torch.tensor as tensor from detectron2.data import build_detection_test_loader from detectron2.evaluation import inference_on_dataset import torch from detectron2.structures.instances import Instances from detectron2.modeling import build_model from detectron2.modeling.meta_arch.tracker import Tracker from detectron2.modeling.meta_arch.soft_tracker import SoftTracker %matplotlib inline ``` ## Loading Weights ``` cfg = get_cfg() cfg.merge_from_file("../configs/COCO-Detection/faster_rcnn_R_50_FPN_3x_Video.yaml") cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.4 # set threshold for this model cfg.MODEL.WEIGHTS = '/media/DATA/Users/Issa/models_pub/kitti_jde.pth' #cfg.MODEL.WEIGHTS = "../models_pub/kitti_jde.pth" print(cfg.MODEL) ``` ## functions to validate annotated data using devkit_tracking from KITTI ``` from contextlib import contextmanager import sys, os @contextmanager def suppress_stdout(): with open(os.devnull, "w") as devnull: old_stdout = sys.stdout sys.stdout = devnull try: yield finally: sys.stdout = old_stdout def print_val_results(results_name): with suppress_stdout(): print("Now you don't") os.system('python2 /home/issa/devkit_tracking/python/validate_tracking.py val') labels = {1:'MOTA',2:'MOTP',3:'MOTAL',4:'MODA',5:'MODP',7:'R',8:'P',12:'MT',13:'PT',14:'ML',18:'FP',19:'FN',22:'IDs'} summary_heading = 'Metric\t' for label in labels.keys(): summary_heading+=labels[label] + '\t' summary_cars = 'Cars\t' summary_peds = 'Peds\t' with 
open('/home/issa/devkit_tracking/python/results/'+results_name+'/summary_car.txt') as f: i=0 for line in f: if(i==0): i+=1 continue if(i in labels.keys()): summary_cars+= str(round(float(line[len(line)-9:len(line)-1].strip()),2))+'\t' i+=1 print(summary_heading) print(summary_cars) def print_test_results(results_name): #with suppress_stdout(): print("Now you don't") os.system('python2 ../devkit_tracking/python/evaluate_tracking.py test') labels = {1:'MOTA',2:'MOTP',3:'MOTAL',4:'MODA',5:'MODP',7:'R',8:'P',12:'MT',13:'PT',14:'ML',18:'FP',19:'FN',22:'IDs'} summary_heading = 'Metric\t' for label in labels.keys(): summary_heading+=labels[label] + '\t' summary_cars = 'Cars\t' summary_peds = 'Peds\t' with open('../devkit_tracking/python/results/'+results_name+'/summary_car.txt') as f: i=0 for line in f: if(i==0): i+=1 continue if(i in labels.keys()): summary_cars+= str(round(float(line[len(line)-9:len(line)-1].strip()),2))+'\t' i+=1 print(summary_heading) print(summary_cars) ``` ## Inference : Joint Detection and Tracking ``` import json import os import cv2 as cv2 import time from tqdm.notebook import tqdm colors = [[0,0,128],[0,255,0],[0,0,255],[255,0,0],[0,128,128],[128,0,128],[128,128,0],[255,255,0],[0,255,255],[255,255,0],[128,0,0],[0,128,0] ,[0,128,255],[0,255,128],[255,0,128],[128,255,0],[255,128,0],[128,255,255],[128,0,255],[128,128,128],[128,255,128]] #dirC = '/../datasets/KITTI/tracking/data_tracking_image_2/training/image_02/' dirC = '/media/DATA/Datasets/KITTI/tracking/data_tracking_image_2/training/image_02/' #dirDets = '../datasets/KITTI/tracking/data_tracking_det_2_lsvm/training/det_02/' names = [] arr = {2:'Car'} if(not os.path.exists("../results")): os.mkdir('../results') os.mkdir('../results/KITTI') else: if(not os.path.exists("../results/KITTI")): os.mkdir('../results/KITTI') output_path = '/home/issa/devkit_tracking/python/results' settings = [ dict(props=20, #number of proposals to use by rpn st=1.05, #acceptance distance percentage for soft tracker 
sup_fp = True, # fp suppression based on Intersection over Union for new detections alpha = 0.6, # the percentage of the new embedding in track embedding update (emb = alpha * emb(t) +(1-alpha) emb(t-1)) fp_thresh=0.95, # iou threshold above which the new detection is considered a fp T=True, #use past tracks as proposals D='cosine', # distance metric for embeddings Re=True, #use the embedding head A=True, # use appearance information K=True, # use kalman for motion prediction E=False, #use raw FPN features as appearance descriptors measurement=0.001, #measruement noise for the kalman filter process=1, #process noise for the kalman filter dist_thresh=1.5, # the normalization factor for the appearance distance track_life=7, #frames for which a track is kept in memory without an update track_vis=2, #frames for which a track is displayed without an update ), ] train_folders = ['0000','0002','0003','0004','0005','0009','0011','0017','0020'] val_folders = ['0001','0006','0008','0016','0018','0019'] test_folders = ['0014','0015','0016','0018','0019','0001','0006','0008','0010','0012','0013'] submission_folders = ['0000','0001','0002','0003','0004','0005','0006','0007', '0008','0009','0010','0011','0012','0013','0014','0015','0016','0017', '0018','0019','0020','0021','0022','0023','0024','0025','0026','0027','0028'] final_test_folders = ['0014'] for setting in settings: test_name = 'val' exp_name = output_path+ '/'+test_name if(not os.path.exists(exp_name)): os.mkdir(exp_name) os.mkdir(exp_name+'/data') avg=0 for folder_name in val_folders: dets = {} public_det=False if public_det==True: with open(dirDets+folder_name+'.txt') as det_file: for line in det_file: parts = line.split(' ') if(parts[0] not in dets): dets[parts[0]] = [] if(parts[2] =='Car' and float(parts[17])>-1): dets[parts[0]].append([float(parts[6]) ,float(parts[7]),float(parts[8]) ,float(parts[9]),float(parts[6]),float(parts[17])]) predictor = DefaultPredictor(cfg,True) predictor.model.tracker = Tracker() 
predictor.model.tracking_proposals = setting['T'] predictor.model.tracker.track_life = setting['track_life'] predictor.model.tracker.track_visibility = setting['track_vis'] predictor.model.tracker.use_appearance = setting['A'] predictor.model.tracker.use_kalman = setting['K'] predictor.model.tracker.embed = setting['E'] predictor.model.tracker.reid = setting['Re'] predictor.model.tracker.dist = setting['D'] predictor.model.tracker.measurement_noise=setting['measurement'] predictor.model.tracker.process_noise = setting['process'] predictor.model.tracker.dist_thresh = setting['dist_thresh'] predictor.model.use_reid = setting['Re'] predictor.model.tracker.soft_thresh = setting['st'] predictor.model.tracker.suppress_fp = setting['sup_fp'] predictor.model.tracker.fp_thresh = setting['fp_thresh'] predictor.model.tracker.embed_alpha = setting['alpha'] max_distance = 0.2 output_file = open('%s/data/%s.txt'%(exp_name,folder_name),'w') frames = {} frame_counter = 0 prev_path = 0 elapsed = 0 predictor.model.prev_path = 0 for photo_name in sorted(os.listdir(dirC+folder_name+'/')): frames[frame_counter] = {} img_path = dirC+folder_name+'/'+photo_name img = cv2.imread(img_path) inp = {} inp['width'] = img.shape[1] inp['height'] = img.shape[0] inp['file_name'] = photo_name inp['image_id'] = photo_name predictor.model.photo_name = img_path start = time.time() outputs = predictor(img,setting['props']) end = time.time() elapsed +=(end-start) for i in outputs: if(i.pred_class in arr): output_file.write("%d %d %s 0 0 -0.20 %d %d %d %d 1.89 0.48 1.20 1.84 1.47 8.41 0.01 %f\n"%(frame_counter ,i.track_id,arr[i.pred_class],i.xmin,i.ymin,i.xmax,i.ymax,i.conf)) frame_counter +=1 predictor.model.prev_path = img_path avg += (frame_counter/elapsed) output_file.close() print(setting) print('avg_time :',avg/len(val_folders)) print_val_results(test_name) ```
github_jupyter
# setup importing stuff and such ``` import spotipy import matplotlib import numpy as np %matplotlib notebook from matplotlib import pylab as plt from matplotlib import mlab sp = spotipy.Spotify() ``` # fetch all the playlist details that have 'punk' in the name (note that this doesn't get the track lists, we'll do that a bit later) ``` results = sp.search(type='playlist', q='punk', limit=50)['playlists'] print "gathering details about", results['total'], "playlists" punk_playlists = results['items'] while results['next']: results = sp.next(results)['playlists'] punk_playlists += results['items'] ``` # basic stats to get a feel for the dataset, let's to do some basic stats before we plow ahead with the track analysis ## title length we expect a peak a 4 characters (the minimal 'Punk'), what else happens? ``` print "number of results:", len(punk_playlists) print title_lengths = filter(lambda c:c<100, map(lambda pl:len(pl['name']), punk_playlists)) n, bins, patches = plt.hist(title_lengths, 50, normed=1, facecolor='green', alpha=0.75) mu = np.mean(title_lengths) sigma = np.std(title_lengths) # add a 'best fit' line y = mlab.normpdf( bins, mu, sigma) l = plt.plot(bins, y, 'r--', linewidth=1) plt.xlabel('Number of Characters') plt.ylabel('Probability') plt.title(r'$\mathrm{Histogram\ of\ Punk playlist title lengths:}\ \mu='+str(mu)+',\ \sigma='+str(sigma)+'$') # plt.axis([40, 160, 0, 0.03]) plt.grid(True) ``` ok, so _a_ peak where expected, but the vast majority are longer, mean is just over 16 characters. 
--- ## word counts in the titles So picking that apart a little more, let's take a look at some lightly cleaned word counts across all the titles ``` from collections import Counter from string import punctuation stopwords = "and of the or in".split() print "top words in titles" word_count = Counter() for pl in punk_playlists: word_count.update([w.strip(punctuation) for w in pl['name'].lower().split() if w not in stopwords and len(w) > 2]) word_count.most_common(10) ``` remember friends: Daft Punk may be playing in your house, your house, but it's a pretty good guess that when 'punk' is proceeded by 'daft' its probably not actually a punk playlist... the other results here are basically the expected neighbouring genres (e.g. 'pop', 'rock', 'metal') and of course some self labelling ('playlist') small aside, this seems to indicate that some of the playlists don't mention 'punk' in the name. Is that a problem? (this makes me wonder how the search algorithm works...). Let's see how many there are and what they look like. ### That's not punk. ``` print len([pl['name'] for pl in punk_playlists if "punk" not in pl['name'].lower()]), "of the search results don't say punk.\n here they are:" print '\n'.join([pl['name'] for pl in punk_playlists if "punk" not in pl['name'].lower()]) ``` ok so there's 87 and many of which mention neighbouring genres, and a handful are only not matching 'punk' because they use some latin-1 misspellings (e.g. 'Pünk', 'PØP PÛNK') I'm going to just go with the full search results, as the base dataset, filtering
github_jupyter
# Machine learning for medicine ## Linear measures of non-linear things ## Overview In this notebook we're going to address a major limitation of correlations and linear regressions in data analysis. ## Code Setup ``` import numpy as np import scipy import matplotlib.pyplot as plt from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets import scipy.stats as stats ``` <a id='test'></a> ## What is a nonlinear relationship? Linear relationships between variables are really nice. It's easy to draw a line, it's easy to explain. There are many things around us in our daily lives that *don't* behave linearly. Think about this: is there anything you do that wouldn't just be doubly-good if you doubled the effort you put it? For example, would you get from home->work in half the time if you pressed the gas pedal twice as hard? A nonlinear relationship is what it sounds like: you can't *reasonable* draw a line between two variables. ``` x = np.linspace(-10,10,1000) lin_f = -2 * x nonlin_f = (x-3)*(x+3)*x plt.figure() #plt.plot(x,lin_f,'--') plt.plot(x,nonlin_f,color='orange') plt.ylim((-50,50)); ``` The orange line represents a nonlinear relationship that's more complicated. You can't just multiply x by a number and get that curve. The actual equation for the orange curve is $y = (x-3)(x+3)x = x^3 - 9x$. Another example is $y = (x-3)(x+3)x^2 = x^4 - 9x^2$. ``` nonlin_f = (x-3)*(x+3)*x*x plt.figure() #plt.plot(x,lin_f,'--') plt.plot(x,nonlin_f,color='orange') plt.ylim((-50,50)); ``` A last example is something that we find very useful in science/engineering: a $\sin$ function. $y = \sin(x)$ ``` nonlin_f = 45*np.sin(x) plt.figure() #plt.plot(x,lin_f,'--') plt.plot(x,nonlin_f,color='orange') plt.ylim((-50,50)); ``` All of these relationships are *nonlinear* but we're lucky because we can *see that clearly*. Things can get more complicated when we look at this from a simulated experiment. 
```
def nonlin(noise,samples,do_stats=False):
    """Simulated experiment: draw `samples` noisy observations from a known
    nonlinear (quartic) ground truth, scatter-plot them over the true curve
    and, if do_stats is True, report the Pearson correlation/p-value in the
    title and overlay the line implied by the Pearson coefficient."""
    # Ground truth: y = 3*(x-2)*(x+2)*x*x = 3x^4 - 12x^2
    truth = lambda x: 3 * (x-2) * (x+2) * x * x
    # x locations sampled uniformly; y = true curve + gaussian noise
    x = np.random.uniform(-5,5,size=(samples,))
    y = np.random.normal(truth(x),noise,size=x.shape)
    x_c = np.linspace(-5,5,100)  # dense grid for drawing the smooth curve
    plt.figure()
    plt.scatter(x,y)
    plt.xlim((-5,5))
    plt.ylim((-30,30))
    plt.plot(x_c,truth(x_c),'--',alpha=0.5)  # dashed = ground-truth curve
    if do_stats:
        # Linear (Pearson) and rank (Spearman) correlation of the sample
        pears = stats.pearsonr(x,y)
        spears = stats.spearmanr(x,y)  # NOTE(review): computed but never displayed
        plt.title('Correlation: ' + str(pears[0]) + ' p-val: ' + str(pears[1]))
        # Straight line implied by the Pearson coefficient
        plt.plot(x_c,pears[0] * x_c)

interact(nonlin,noise=(0.0,10.0,1.0),samples=(0,200,10),do_stats=fixed(False));
```
## Correlation in a non-linear relationship Let's take a look at what happens if we just find the correlation between two variables that are non-linearly related. ```
# NOTE(review): samples is fixed at 0.0 here, which draws no data points,
# and do_stats is fixed(False) even though the text below discusses the
# p-value readout -- confirm the intended widget arguments.
interact(nonlin,noise=fixed(0.0),samples=fixed(0.0),do_stats=fixed(False));
```
This shows us a major, major problem: the p-value is not significant. In other words, the probability of us seeing this data given x and y are *not* related is about ~80%. But then you re-run the code and it changes drastically. ```
# NOTE(review): do_stats is omitted, so it defaults to False and no
# correlation readout is shown -- verify whether do_stats=True was intended.
interact(nonlin,noise=(0.0,10.0,0.5),samples=fixed(100));
```
## Where linear is "good enough" To finish out this discussion we're going to demonstrate that even if the "line" is wrong, it may be *useful*. Let's revisit the example from our [first section](#test).
github_jupyter
``` from anomaly import io, tmm, adm from sklearn.metrics import f1_score import scipy import pandas as pd import numpy as np import anomaly.utils.modelselect_utils as mu import anomaly.utils.statsutils as su import matplotlib.pyplot as plt import seaborn as sns ``` ## The pipeline We demonstrate below how the anomaly detection pipeline is used ``` predictor = tmm.ARMA() detector = adm.KSigma() bench = io.BenchmarkDataset(2) df = bench.read(8) df.head() ts = df.value predictor.fit(np.array(ts)) ts_predicted = predictor.predict() residuals = ts_predicted - ts detector.fit(ts, ts_predicted) is_anomaly = detector.detect() fig, axs = plt.subplots(1, 2, figsize=(15, 5)) plt.sca(axs[0]) plt.plot(ts, label="Original data") plt.plot(ts_predicted, label="Predicted data") plt.legend() plt.sca(axs[1]) plt.plot(residuals, label="Residuals") plt.plot(residuals[df.is_anomaly == 1], linestyle="", marker="x", color="green", label="True anomaly") plt.plot(residuals[is_anomaly], linestyle="", marker="+", color="red", label="Detected anomaly") plt.legend() plt.show() ``` Here we can observe a failure of the method. ARMA fits the data too closely, and almost becomes a naive predictor which predicts $\hat{s}_t = s_{t-1}$. As a consequence, each down peak in the residual is followed by an up peak because the predictor has lagged the outlier. Of course, this can be fixed by choosing a predictors which handles well trend and seasonality. ### Do the residuals follow a gaussian distribution ? ``` plt.hist(residuals, bins=50) plt.show() ``` Here, the gaussian assumption seems ok from far away. However, this is not always the case, and more importantly, it is not gaussian in the statistical sense. 
``` _, pvalue = scipy.stats.normaltest(residuals) print(pvalue) ``` ## Experiments ``` predictor_dict = { "naive_predictor" : tmm.NaivePredictor(), "ar5_predictor" : tmm.AR(order=5), "ma5_predictor" : tmm.MA(order=5), "arma55_predictor" : tmm.ARMA(order_ar=5, order_ma=5), "arima525_predictor" : tmm.ARIMA.ARIMA(order=[5,2,5]), "poly5_predictor" : tmm.Polynomial(degree=5), "trigonometric": tmm.Trigonometric(), "poly+arma": tmm.Sequential(predictors=[tmm.Polynomial(), tmm.ARMA()]), "poly+trigo":tmm.Sequential(predictors=[tmm.Polynomial(), tmm.Trigonometric()]), "poly+trigo+arma": tmm.Sequential(predictors=[tmm.Polynomial(), tmm.Trigonometric(), tmm.ARMA()]), } ``` ### Compute the features and the scores of the models for each time series in the benchmark ⚠️ Don't run those cells, the results are already saved ! ⚠️ Compute features: ``` %%capture --no-stdout assert False, "Don't run this cell unless you want to recompute all features" for benchmark_index in range(1,3): bench = io.BenchmarkDataset(benchmark_index) features = mu.compute_benchmark_features(bench) features.to_csv(f"saved_data/features_{benchmark_index}.csv", index_label="ts_index") ``` Compute scores: ``` %%capture --no-stdout assert False, "Don't run this cell unless you want to recompute all scores (long!)" for benchmark_index in range(1,3): bench = io.BenchmarkDataset(benchmark_index) score_dict = mu.compute_predictor_scores(predictor_dict, bench, detector=adm.KSigma()) score_df = pd.concat([score_dict[model_name].assign(model_name=model_name) for model_name in score_dict.keys()]) score_df.to_csv(f"saved_data/score_df_{benchmark_index}.csv", index_label="ts_index") ``` ### Analyse the results #### Read the saved data ``` benchmark_index = 1 score_df = pd.read_csv(f"saved_data/score_df_{benchmark_index}.csv").set_index("ts_index") features = pd.read_csv(f"saved_data/features_{benchmark_index}.csv").set_index("ts_index") features_normalized = (features - features.mean()) / features.std() 
features_normalized
score_df

def get_features(benchmark_index):
    """Load the pre-computed time-series features for one benchmark and
    return them z-score normalized, as a numpy array (one row per series)."""
    features = pd.read_csv(f"saved_data/features_{benchmark_index}.csv").set_index("ts_index")
    # Column-wise z-score normalization so features are comparable in scale
    features_normalized = (features - features.mean()) / features.std()
    return features_normalized.to_numpy()

def get_best_model(benchmark_index):
    """For each time series of a benchmark, return the index (into the F1
    pivot's column order) of the model with the highest F1 score."""
    score_df = pd.read_csv(f"saved_data/score_df_{benchmark_index}.csv").set_index("ts_index")
    pivot_init = score_df[["f1", "model_name"]]
    # rows = time series, columns = models, values = F1 score
    df = pd.pivot_table(pivot_init, index="ts_index", columns="model_name", values="f1")
    df_np = df.to_numpy()
    # argmax over the model axis -> best model index per time series
    return df_np.argmax(axis=1)
```
#### Plot the results depending on the features using PCA ```
from sklearn import decomposition

features_np = get_features(benchmark_index=1)
best_model = get_best_model(benchmark_index=1)
# Project the feature vectors onto their first two principal components
pca = decomposition.PCA(n_components=2)
XY = pca.fit_transform(features_np)
plot_df = pd.DataFrame(XY, columns=["X", "Y"])
# NOTE(review): this assumes pivot_table's (alphabetical) column order
# matches the insertion order of predictor_dict -- confirm, otherwise the
# category labels may be permuted.
plot_df["category"] = np.array(list(predictor_dict.keys()))[best_model]
groups = plot_df.groupby("category")
for name, group in groups:
    plt.plot(group["X"], group["Y"], marker="o", linestyle="", label=name)
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title("Best model depending on the time-series features, as shown using PCA")
plt.savefig("figs/best_model_PCA.png", bbox_inches = 'tight')
```
### See how each feature affects the fscore using a linear regression ```
import statsmodels.formula.api as smf

for predictor_name in ["poly5_predictor", "ma5_predictor"]:
    print(f"------------------------- {predictor_name} --------------------------------")
    # Keep only this predictor's scores and align them with the features
    score_model_df = score_df[score_df["model_name"] == predictor_name].reset_index(drop=True)
    df = features_normalized.join(score_model_df, on="ts_index")
    # OLS of F1 against the normalized features, with robust covariance
    smresults = smf.ols('f1 ~ trend_score + seasonality_score + nonlinearity + skew + kurtosis + hurst + lyapunov', df).fit()
    smresults_robust = smresults.get_robustcov_results()
    print(smresults_robust.summary())
```
### Compare fscores of the pipeline ``` keep = [ "naive_predictor",
"ar5_predictor", "poly+trigo", "poly+trigo+arma", ] fig, axes = plt.subplots(2,2, sharex=False, sharey=False, figsize=(20,7)) fig.suptitle('Distributions of F1-scores and recall using three different predictors on the four datasets') scores = ["f1", "recall"] benchmarks = [f"benchmark_{i}" for i in range(1,3)] for j in range(len(benchmarks)): benchmark_index = j + 1 score_df = pd.read_csv(f"saved_data/score_df_{benchmark_index}.csv").set_index("ts_index") for i in range(len(scores)): for model_name in keep: score_df_to_plot = score_df[score_df.model_name == model_name].reset_index() sns.kdeplot(data=score_df_to_plot, x=scores[i], bw_adjust=.8, cut=0, ax=axes[i,j]) axes[i,j].set_ylabel("") axes[i,j].set_xlabel(f"{scores[i]} on benchmark {benchmark_index}") from matplotlib.lines import Line2D custom_lines = [Line2D([0], [0], color="blue", lw=4), Line2D([0], [0], color="orange", lw=4), Line2D([0], [0], color="green", lw=4), Line2D([0], [0], color="red", lw=4)] fig.legend(custom_lines, keep, loc="center left") plt.savefig("figs/F1_recall.png", bbox_inches = 'tight') ```
github_jupyter
# Transpose of a Matrix In this set of exercises, you will work with the transpose of a matrix. Your first task is to write a function that takes the transpose of a matrix. Think about how to use nested for loops efficiently. The second task will be to write a new matrix multiplication function that takes advantage of your matrix transposition function. ``` ### TODO: Write a function called transpose() that ### takes in a matrix and outputs the transpose of the matrix def transpose(matrix): matrix_transpose = [] for i in range(len(matrix[0])): row = [] for j in range(len(matrix)): row.append(matrix[j][i]) matrix_transpose.append(row) return matrix_transpose ### TODO: Run the code in the cell below. If there is no ### output, then your answers were as expected assert transpose([[5, 4, 1, 7], [2, 1, 3, 5]]) == [[5, 2], [4, 1], [1, 3], [7, 5]] assert transpose([[5]]) == [[5]] assert transpose([[5, 3, 2], [7, 1, 4], [1, 1, 2], [8, 9, 1]]) == [[5, 7, 1, 8], [3, 1, 1, 9], [2, 4, 2, 1]] ``` ### Matrix Multiplication Now that you have your transpose function working, write a matrix multiplication function that takes advantage of the transpose. As part of the matrix multiplication code, you might want to re-use your dot product function from the matrix multiplication exercises. But you won't need your get_row and get_column functions anymore because the tranpose essentially takes care of turning columns into row vectors. Remember that if matrix A is mxn and matrix B is nxp, then the resulting product will be mxp. 
```
### TODO: Write a function called matrix_multiplication() that
### takes in two matrices and outputs the product of the two
### matrices

### TODO: Copy your dot_product() function here so that you can
### use it in your matrix_multiplication function
def dot_product(vectorA, vectorB):
    """Return the dot product of two equal-length vectors (plain lists)."""
    result = 0
    for i in range(len(vectorA)):
        result += vectorA[i] * vectorB[i]
    return result

# Takes in two matrices and outputs the product of the two matrices
def matrix_multiplication(matrixA, matrixB):
    """Multiply matrixA (m x n) by matrixB (n x p) and return the m x p
    product, using transpose() so each column of matrixB can be fed to
    dot_product() as a row vector."""
    product = []
    ## TODO: Take the transpose of matrixB and store the result
    ##       in a new variable
    ## TODO: Use a nested for loop to iterate through the rows
    ##       of matrix A and the rows of the transpose of matrix B
    ## TODO: Calculate the dot product between each row of matrix A
    ##       with each row in the transpose of matrix B
    ## TODO: As you calculate the results inside your for loops,
    ##       store the results in the product variable

    # Take the transpose of matrixB and store the result
    transposeB = transpose(matrixB)

    # Use a nested for loop to iterate through the rows
    # of matrix A and the rows of the transpose of matrix B
    for r1 in range(len(matrixA)):
        new_row = []
        for r2 in range(len(transposeB)):
            # Calculate the dot product between each row of matrix A
            # with each row in the transpose of matrix B
            dp = dot_product(matrixA[r1], transposeB[r2])
            new_row.append(dp)
        # Store the results in the product variable
        product.append(new_row)

    ## TODO:
    return product

### TODO: Run the code in the cell below. If there is no
### output, then your answers were as expected
assert matrix_multiplication([[5, 3, 1], [6, 2, 7]], [[4, 2], [8, 1], [7, 4]]) == [[51, 17], [89, 42]]
assert matrix_multiplication([[5]], [[4]]) == [[20]]
assert matrix_multiplication([[2, 8, 1, 2, 9], [7, 9, 1, 10, 5], [8, 4, 11, 98, 2], [5, 5, 4, 4, 1]], [[4], [2], [17], [80], [2]]) == [[219], [873], [8071], [420]]
assert matrix_multiplication([[2, 8, 1, 2, 9], [7, 9, 1, 10, 5], [8, 4, 11, 98, 2], [5, 5, 4, 4, 1]], [[4, 1, 2], [2, 3, 1], [17, 8, 1], [1, 3, 0], [2, 1, 4]]) == [[61, 49, 49], [83, 77, 44], [329, 404, 39], [104, 65, 23]]
```
github_jupyter
# Introducing `git` version control system * A [version control](https://en.wikipedia.org/wiki/Version_control) system helps keeping track of changes in software source code. * With a version control system, trying and testing possibly risky attempts can be easier. * Currently in the late 2010s, [`git`](https://en.wikipedia.org/wiki/List_of_version_control_software) is one of the [available version control softwares](https://en.wikipedia.org/wiki/List_of_version_control_software), * Linus Torvalds created `git` in 2005 to maintain the Linux kernel. * `git` is an [open source](https://github.com/git/git) distributed version control system. A repository may have remote versions and local versions that are (practically) identical. [![Git Data Transport Commands](https://images.osteele.com/2008/git-transport.png)](https://blog.osteele.com/2008/05/my-git-workflow/) [[ref0](https://git-scm.com/book/en/v2), [ref1](https://github.com/progit)] | command | expected behavior | example | |:-------:|:-----------------:|:-------:| | `init` | initialize a git repository | `git init` | | `clone` | clone a git repository | `git clone <repo url>`<br>`git clone file://<path>` | | `log` | list the commit history | `git log`<br>`git log --help`<br>`git log --stat`<br>`git log --oneline --graph --all` | | `status` | current status of a git repository | `git status` | | `diff` | visualize changes after last commit and/or staging | `git diff`<br>`git diff HEAD`<br>`git diff HEAD^` | | `config` | list or adjust configuration | `git config --list`<br>`git config --global --unset credential.helper` | | `config user.name` | specify the user's name | `git config user.name <your name>` | | `config user.email` | specify the user's email address | `git config user.email <your email>` | | `remote` | manage remote repositories | `git remote add origin <remote repo>` | | `add` | stage some change to commit | `git add <path to a changed file>`<br>`git add -p` | | `commit` | create an entry of change 
| `git commit`<br>`git commit -m <message>` | | `push` | upload the changes to a remote repository | `git push`<br>`git push -u origin <branch name>` | | `checkout ` | switch workspace to a certain commit | `git checkout <commit hash>`<br>`git checkout -b <new branch>`<br>`git checkout -- <file to undo>` | | [`branch`](https://git-scm.com/docs/git-branch) | manage branches | `git branch`<br>`git branch -r` | | `blame` | relates each line of code with commits | `git blame <file path>`| | [`rebase`](https://git-scm.com/docs/git-rebase) | move current branch on top of another branch | `git rebase <branch>`<br>`git rebase -i <commit>` | | [`merge`](https://git-scm.com/book/en/v2/Git-Branching-Basic-Branching-and-Merging) | merge another branch to the current branch | `git merge --no-ff <other branch>`| # Practice 1. Go to the github website and log in. 1. Go to one of the repositories of your interest.<br>For this example, this page would use Wes McKinney's [Python for Data Analysis](https://github.com/wesm/pydata-book).<br> Its repository addres is : https://github.com/wesm/pydata-book 1. Let's try cloning the repository.<br> `git clone https://github.com/wesm/pydata-book`<br> 1. Now try `cd pydata-book` and `ls -a` commands. <br>Note if a folder `.git` is visible. 1. Enter `pwd` to check the current full path.<br> Let's assume the folder is : `/home/user/Documents/pydata-book` 1. `git remote` would list of available remote repository names. 1. `git remote get-url origin` would show the link to the `origin` repository.<br>If developers contribute to the [Python for Data Analysis](https://github.com/wesm/pydata-book), you would be able to update this repository using `git pull origin`. 1. If your name and email address are "ABC" and abc@naver.com respectively, enter `git config user.name ABC` and `git config user.email abc@naver.com`. 1. `git config --list` would show configurations of this repository. 1. Try `echo "test" > test.txt` to create a sample text file. 1. 
`git status` would show: <br>The current branch <br>Sync with the branch of the remote repository <br>One file that `git` is not tracking 1. Enter `git add test.txt` 1. `git status` would show: <br>Branch and sync information would not change <br>One file added to the stage (or index) to be committed 1. Enter `git commit -m "Added test.txt"`<br> `git` would show messages. 1. Check `git status`. 1. Now `git log --stat` would show the hash value, date & time, your name & email, commit message, and the file change of the commits. 1. Open the new file using an editor : `vi test.txt` or `nano test.txt`. 1. Add one more line, save, and exit the editor. 1. `git status` would show one file is changed. 1. `git diff` would show the changes in the files. 1. `git add test.txt` and `git commit -m "Changed test.txt"` would commit the file. 1. Check `git status` and `git log --stat`. 1. `git log --oneline --graph --all` would show the commit tree. 1. `git branch` would list local branch names.<br>Other repositories may have different branch names. 1. `git branch --all` would show both local and remote branches. 1. `ls` 1. `git checkout 1st-edition` will activate branch `1st-edition`.<br>If it was not one of the local branches, `git` will create a new local branch. 1. `ls` again and compare the content of the folder. 1. `git checkout 2nd-edition` will switch to branch `2nd-edition`.<br>`ls` again to check. 1. Enter `cd ..` to move up one level. 1. Enter `git clone /home/user/Documents/pydata-book temp`.<br>`git` would clone another repository in folder `temp`. (This example is just to show cloning a local repository is possible) 1. Enter `cd temp` and `git log`. 1. Enter `git remote`. 1. Enter `git remote get-url origin`. You would be able to see the remote repository location. 1. Try `git config --list` 1. Try `git remote add upstream https://github.com/wesm/pydata-book` 1.
Now try `git remote` and/or `git remote get-url upstream`.<br>`git pull upstream` would update this local repository. ## Creating a `github` account [![github](https://avatars1.githubusercontent.com/u/9919?s=200&v=4)](https://www.github.com) * [`github`](https://www.github.com) is one of `git` remote [repository hosting services](https://en.wikipedia.org/wiki/Comparison_of_source_code_hosting_facilities#Version_control_systems). * [`dev.naver.com`](https://developers.naver.com) used to provide such service until recent years. * `github` also has an [education](https://education.github.com) service. * May require to verify email address. * A free user account can generate indefinite number of Public repositories. * Usually a github repository address has following form:<br>`https://github.com/<github id>/<repository name>(.git)`<br> ex : [`https://github.com/tensorflow/tensorflow.git`](https://github.com/tensorflow/tensorflow) * A user can `fork` a public repository.<br>ex : `https://github.com/<github id>/tensorflow.git`<br>This is equivalent to having a clone with a link. * If planning to use only one user account for a specific repository, following command is possible.<br>`git remote add <remote name> https://<github id>@github.com/<github id>/<repository name>(.git)` * With an academic email address and a school ID card image, an instructor (or a [student](https://education.github.com/pack)) may upgrade to an education account; possible to create private repositories. * Depending on the situation, an instructor may create an organization on the github; then a repository may have following form :<br>`https://(<github id>@)github.com/<organization id>/<repository name>(.git)` ### Authentication * To avoid unauthorized source code change, a remote repository may require id+password authentication. * To improve productivity during frequent pushes, `git` may utilize credential helper. 
* A credential helper stores the authentication information possibly after encryption. * Following command shows current (global) credential helper:<br>`git config (--global) credential.helper` * However, credential information might be sensitive so please use with caution. ## Creating branches and switching between them * Assume you want to test a *radical* new feature; if successful, it would be great new addition. * However, you want the existing code base intact until success is certain. * Then you can start a new branch.<br>Only when the new feature is successful, you would merge into the existing code base. [![git branch](https://git-scm.com/book/en/v2/images/advance-master.png)](https://git-scm.com/book/en/v2/Git-Branching-Branches-in-a-Nutshell) 1. `git branch (--list)` would list branches of the local repository.<br>`git branch -r` to list remote branches. 1. `git branch <new branch name>` would start a new branch. 1. `git checkout <new branch name>` would switch to the new branch. 1. `git checkout -b <new branch name>` would do both steps above. 1. From now on, this new branch would accumulate commits. 1. After a few commits, when `git status` shows no uncommitted changes, try `git checkout <previous branch name>`.<br>Then check the files that you changed after previous step. 1. And then try `git checkout <new branch name>` again. What happened to your changes? ## Synchronizing after fork or distribution * Click following to start a video tutorial. [![sync upstream playlist](https://i.ytimg.com/vi/P39pzSQx5rY/hqdefault.jpg)](https://www.youtube.com/watch?v=P39pzSQx5rY&list=PLA6B0Lmr9oJNDafh3ndnmbXv0I9wddv63) * When you click on the `fork` button of a repository, you can duplicate it so that you can make changes. * However, the developers may continue to the original (or *upstream*) repository; fix bugs and add more features. * At some point of time, you may want to update your duplicate repository. 
* [Github](https://help.github.com/articles/syncing-a-fork/) described the procedure to synchronize a fork repository with the upstream repository. 1. If not done yet, clone your remote fork repository to a local repository. 1. `git remote` will list names of remote repositories. Let's assume `origin` points to your fork repository. 1. Add the *upstream* repository address as `upstream`. <br>`git remote add upstream <upstream repository address>` 1. `git fetch upstream` would download updates from the upstream repository. However, this alone would not change your workspace yet. 1. Try `git log --oneline --graph --all`. This would show you all the histories of local and remote branches. 1. Choose one of the local branches that you want to update. Let's assume its name is `first_branch`. 1. Try `git rebase first_branch upstream/first_branch`. This would apply new commits in `upstream/first_branch` after fork to your local branch.<br>Depending on the situation, collsion may occur; then we should manually [resolve](https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging) the conflict. 1. Now `git push origin first_branch` to apply the new commits to the remote fork repository. 1. Repeat from `git log --oneline --graph -all` for all branches of interest. ## Travis-CI and Continuous Integration * In short, if you have an open source software project, [Travis-CI](https://www.travis-ci.org) would be able to build, run test software, and reply reports as specified. * Please refer to the [Travis-CI documentation](https://docs.travis-ci.com/) for details. ## Exercises ### 00 : Your first commit 1. Clone your repository to your PC 1. Configure your name and email address 1. Make a new text file with a plain text <br>What would you want to write to the file? 1. Add the new file 1. Commit the changes to your local repository with an appropriate message 1. Push your changes to your repository ### 01 : Sync with upstream 1. Clone your repository to your PC 1. 
Add the upstream repository url as remote 1. Fetch from the upstream repository <br>Try `git log --oneline --all --graph` 1. Merge with the upstream branch <br>How can we use `git merge` command? <br>Did you see a message of "CONFLICT"? 1. Push your changes to your repository ### 02* : ipynb * This is an optional assignment * Please use a separate file : .txt, .ipynb, or .md 1. Propose a possible procedure to version-control .ipynb files 1. Propose a possible procedure to resolve conflict of an .ipynb file ## `git` and `github` on Harvard CS50 Twitch * Following is a video tutorial (2hr) on `git` and `github` by [Harvard CS50](https://www.youtube.com/channel/UCcabW7890RKJzL968QWEykA). [![CS50 on Twitch - EP. 4 - git and GitHub](https://i.ytimg.com/vi/dAHgwd2U0Jg/hqdefault.jpg)](https://www.youtube.com/watch?v=dAHgwd2U0Jg)
github_jupyter
# Amazon SageMaker로 다중 노드들 간 분산 RL을 이용해 Roboschool 에이전트 훈련 --- 이 노트북은 `rl_roboschool_ray.ipynb` 의 확장으로, Ray와 TensorFlow를 사용한 강화 학습의 수평(horizontal) 스케일링을 보여줍니다. ## 해결해야 할 Roboschool 문제 선택 Roboschool은 가상 로봇 시스템에 대한 RL 정책을 훈련시키는 데 주로 사용되는 [오픈 소스](https://github.com/openai/roboschool/tree/master/roboschool) 물리 시뮬레이터입니다. Roboschool은 다양한 로봇 문제에 해당하는 [다양한](https://github.com/openai/roboschool/blob/master/roboschool/__init__.py) gym 환경을 정의합니다. 아래는 다양한 난이도 중 몇 가지를 보여줍니다. - **Reacher (쉬움)** - 2개의 조인트만 있는 매우 간단한 로봇이 목표물에 도달합니다. - **호퍼 (중간)** - 한쪽 다리와 발이 달린 간단한 로봇이 트랙을 뛰어 내리는 법을 배웁니다. - **휴머노이드 (어려움)** - 두 개의 팔, 두 개의 다리 등이 있는 복잡한 3D 로봇은 넘어지지 않고 균형을 잡은 다음 트랙에서 달리는 법을 배웁니다. 간단한 문제들은 적은 계산 리소스 상에서 더 빨리 훈련됩니다. 물론 더 복잡한 문제들은 훈련이 느리지만 더 재미있습니다. ``` # Uncomment the problem to work on #roboschool_problem = 'reacher' #roboschool_problem = 'hopper' roboschool_problem = 'humanoid' ``` ## 전제 조건(Pre-requisites) ### 라이브러리 임포트 시작하기 위해, 필요한 Python 라이브러리를 가져와서 권한 및 구성을 위한 몇 가지 전제 조건으로 환경을 설정합니다. ``` import sagemaker import boto3 import sys import os import glob import re import subprocess from IPython.display import HTML, Markdown import time from time import gmtime, strftime sys.path.append("common") from misc import get_execution_role, wait_for_s3_object from docker_utils import build_and_push_docker_image from sagemaker.rl import RLEstimator, RLToolkit, RLFramework from markdown_helper import generate_help_for_s3_endpoint_permissions, create_s3_endpoint_manually ``` ### S3 버킷 설정 체크포인트(checkpoint) 및 메타데이터에 사용하려는 S3 버킷에 대한 연결 및 인증을 설정합니다. ``` sage_session = sagemaker.session.Session() s3_bucket = sage_session.default_bucket() s3_output_path = 's3://{}/'.format(s3_bucket) print("S3 bucket path: {}".format(s3_output_path)) ``` ### 변수 설정 훈련 작업의 작업 접두사(job prefix)와 *컨테이너의 이미지 경로(BYOC 인 경우에만)와 같은 변수*를 정의합니다. 
``` # create a descriptive job name job_name_prefix = 'rl-roboschool-distributed-' + roboschool_problem aws_region = boto3.Session().region_name ``` ### 훈련이 진행되는 위치 구성 SageMaker 노트북 인스턴스 또는 로컬 노트북 인스턴스를 사용하여 RL 훈련 작업을 훈련할 수 있습니다. 로컬 모드는 SageMaker Python SDK를 사용하여 SageMaker에 배포하기 전에 로컬 컨테이너에서 코드를 실행합니다. 이렇게 하면 , 익숙한 Python SDK 인터페이스를 사용하면서 반복 테스트 및 디버깅 속도를 높일 수 있습니다. 여러분은 `local_mode = True` 만 설정하면 됩니다. ``` # run in local_mode on this machine, or as a SageMaker TrainingJob? local_mode = False if local_mode: instance_type = 'local' else: # If on SageMaker, pick the instance type instance_type = "ml.c5.2xlarge" train_instance_count = 3 ``` ### IAM 역할 생성 SageMaker 노트북 `role = sagemaker.get_execution_role()`을 실행할 때 실행 역할(execution role)을 얻거나 로컬 시스템에서 실행할 때 utils 메소드 `role = get_execution_role()`을 사용하여 실행 역할을 작성하세요. ``` try: role = sagemaker.get_execution_role() except: role = get_execution_role() print("Using IAM role arn: {}".format(role)) ``` ### `로컬` 모드용 도커 설치 로컬 모드에서 작업하려면 도커(docker)가 설치되어 있어야 합니다. 로컬 머신에서 실행할 때는 docker 또는 docker-compose(로컬 CPU 머신의 경우) 및 nvidia-docker(로컬 GPU 머신의 경우)가 설치되어 있는지 확인하세요. 또는, SageMaker 노트북 인스턴스에서 실행할 때 다음 스크립트를 실행하여 관련 패키지들을 설치할 수 있습니다. 참고로, 한 번에 하나의 로컬 노트북만 실행할 수 있습니다. ``` # only run from SageMaker notebook instance if local_mode: !/bin/bash ./common/setup.sh ``` ## 도커 컨테이너 빌드 Roboschool이 설치된 사용자 정의 도커 컨테이너를 빌드해야 합니다. 컨테이너 빌드 작업은 아래 과정을 거쳐 처리됩니다. 1. 기본 컨테이너 이미지 가져오기 2. Roboschool 및 의존성 패키지 설치 3. 새 컨테이너 이미지를 ECR에 업로드 인터넷 연결이 느린 컴퓨터에서 실행 중인 경우, 이 단계에서 시간이 오래 걸릴 수 있습니다. 노트북 인스턴스가 SageMaker 또는 EC2 인 경우 인스턴스 유형에 따라 3-10 분이 걸립니다. 
``` %%time cpu_or_gpu = 'gpu' if instance_type.startswith('ml.p') else 'cpu' repository_short_name = "sagemaker-roboschool-ray-%s" % cpu_or_gpu docker_build_args = { 'CPU_OR_GPU': cpu_or_gpu, 'AWS_REGION': boto3.Session().region_name, } custom_image_name = build_and_push_docker_image(repository_short_name, build_args=docker_build_args) print("Using ECR image %s" % custom_image_name) ``` ## 훈련 코드 작성 훈련 코드는 `/src` 디렉토리에 업로드된 `“train-{roboschool_problem}.py”` 파일에 작성됩니다. 먼저 환경 파일과 사전 설정 파일을 가져온 다음, `main()` 함수를 정의하세요. ``` !pygmentize src/train-{roboschool_problem}.py ``` ## Ray 동종 스케일링 - train_instance_count > 1 지정 동종(Homogeneous) 스케일링을 통해 동일한 유형의 여러 인스턴스를 사용할 수 있습니다. ``` metric_definitions = RLEstimator.default_metric_definitions(RLToolkit.RAY) estimator = RLEstimator(entry_point="train-%s.py" % roboschool_problem, source_dir='src', dependencies=["common/sagemaker_rl"], image_name=custom_image_name, role=role, train_instance_type=instance_type, train_instance_count=train_instance_count, output_path=s3_output_path, base_job_name=job_name_prefix, metric_definitions=metric_definitions, hyperparameters={ # Attention scientists! You can override any Ray algorithm parameter here: # 3 m4.2xl with 8 cores each. We have to leave 1 core for ray scheduler. # Don't forget to change this on the basis of instance type. "rl.training.config.num_workers": (8 * train_instance_count) - 1 #"rl.training.config.horizon": 5000, #"rl.training.config.num_sgd_iter": 10, } ) estimator.fit(wait=local_mode) job_name = estimator.latest_training_job.job_name print("Training job: %s" % job_name) ``` ## 시각화 RL 훈련에는 시간이 오래 걸릴 수 있습니다. 따라서 훈련 작업이 동작하는 동안 훈련 작업의 진행 상황을 추적할 수 있는 다양한 방법들이 있습니다. 훈련 도중 일부 중간 출력이 S3에 저장되므로, 이를 캡처하도록 설정합니다. 
``` print("Job name: {}".format(job_name)) s3_url = "s3://{}/{}".format(s3_bucket,job_name) if local_mode: output_tar_key = "{}/output.tar.gz".format(job_name) else: output_tar_key = "{}/output/output.tar.gz".format(job_name) intermediate_folder_key = "{}/output/intermediate/".format(job_name) output_url = "s3://{}/{}".format(s3_bucket, output_tar_key) intermediate_url = "s3://{}/{}".format(s3_bucket, intermediate_folder_key) print("S3 job path: {}".format(s3_url)) print("Output.tar.gz location: {}".format(output_url)) print("Intermediate folder path: {}".format(intermediate_url)) tmp_dir = "/tmp/{}".format(job_name) os.system("mkdir {}".format(tmp_dir)) print("Create local folder {}".format(tmp_dir)) ``` ### 훈련 롤아웃 비디오 가져오기 특정 롤아웃의 비디오는 훈련 중 S3에 기록됩니다. 여기에서는 S3에서 마지막 10개의 비디오 클립을 가져 와서 마지막 비디오를 렌더링합니다. ``` recent_videos = wait_for_s3_object(s3_bucket, intermediate_folder_key, tmp_dir, fetch_only=(lambda obj: obj.key.endswith(".mp4") and obj.size>0), limit=10) last_video = sorted(recent_videos)[-1] # Pick which video to watch os.system("mkdir -p ./src/tmp_render_homogeneous/ && cp {} ./src/tmp_render_homogeneous/last_video.mp4".format(last_video)) HTML('<video src="./src/tmp_render_homogeneous/last_video.mp4" controls autoplay></video>') ``` ### 훈련 작업에 대한 지표 plot CloudWatch 지표에 기록된 알고리즘 지표를 사용하여 실행 중인 훈련의 보상 지표를 볼 수 있습니다. 시간이 지남에 따라, 모델의 성능을 볼 수 있도록 이를 plot할 수 있습니다. ``` %matplotlib inline from sagemaker.analytics import TrainingJobAnalytics df = TrainingJobAnalytics(job_name, ['episode_reward_mean']).dataframe() num_metrics = len(df) if num_metrics == 0: print("No algorithm metrics found in CloudWatch") else: plt = df.plot(x='timestamp', y='value', figsize=(12,5), legend=True, style='b-') plt.set_ylabel('Mean reward per episode') plt.set_xlabel('Training time (s)') ``` ### 훈련 진행 상황 모니터링 위의 시각화 셀을 반복해서 실행하여 최신 비디오를 얻거나, 훈련 작업이 진행됨에 따라 최신 지표를 볼 수 있습니다. ## Ray 이기종(heterogeneous) 스케일링 RL 훈련을 확장하기 위해 롤아웃 작업자 수를 늘릴 수 있습니다. 그러나, 롤아웃이 많을수록 훈련 중 종종 병목 현상이 발생할 수 있습니다. 
이를 방지하기 위해 하나 이상의 GPU가 있는 인스턴스를 훈련용으로 사용하고 여러 개의 CPU 인스턴스들을 롤아웃에 사용할 수 있습니다. SageMaker는 훈련 작업에서 단일 유형의 인스턴스를 지원하므로, 두 개의 SageMaker 작업을 서로 통신하도록 함으로써 위의 목표를 달성할 수 있습니다. 이름 지정을 위해 `기본 클러스터(Primary cluster)`를 사용하여 하나 이상의 GPU 인스턴스를 참조하고 `보조 클러스터(Secondary cluster)`를 사용하여 CPU 인스턴스 클러스터를 참조합니다. > local_mode는 이 유형의 스케일링을 테스트하는 데 사용할 수 없습니다. SageMaker 작업을 구성하기 전에 먼저 VPC 모드에서 SageMaker를 실행해야 합니다. VPC 모드에서는 두 SageMaker 작업이 네트워크를 통해 통신할 수 있습니다. 작업 시작 스크립트에 서브넷(subnet)과 보안 그룹(security group)을 제공하면 됩니다. 이 예에서는 기본 VPC 구성을 사용합니다. ``` ec2 = boto3.client('ec2') default_vpc = [vpc['VpcId'] for vpc in ec2.describe_vpcs()['Vpcs'] if vpc["IsDefault"] == True][0] default_security_groups = [group["GroupId"] for group in ec2.describe_security_groups()['SecurityGroups'] \ if group["GroupName"] == "default" and group["VpcId"] == default_vpc] default_subnets = [subnet["SubnetId"] for subnet in ec2.describe_subnets()["Subnets"] \ if subnet["VpcId"] == default_vpc and subnet['DefaultForAz']==True] print("Using default VPC:", default_vpc) print("Using default security group:", default_security_groups) print("Using default subnets:", default_subnets) ``` VPC 모드에서 실행 중인 SageMaker 작업은 S3 리소스에 액세스할 수 없습니다. 따라서, SageMaker 컨테이너에서 S3에 액세스할 수 있도록 VPC S3 엔드포인트를 생성해야 합니다. VPC 모드에 대한 자세한 내용을 보려면 [이 링크](https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)를 방문하세요. ``` try: route_tables = [route_table["RouteTableId"] for route_table in ec2.describe_route_tables()['RouteTables']\ if route_table['VpcId'] == default_vpc] except Exception as e: if "UnauthorizedOperation" in str(e): display(Markdown(generate_help_for_s3_endpoint_permissions(role))) else: display(Markdown(create_s3_endpoint_manually(aws_region, default_vpc))) raise e print("Trying to attach S3 endpoints to the following route tables:", route_tables) assert len(route_tables) >= 1, "No route tables were found. Please follow the VPC S3 endpoint creation "\ "guide by clicking the above link." 
try: ec2.create_vpc_endpoint(DryRun=False, VpcEndpointType="Gateway", VpcId=default_vpc, ServiceName="com.amazonaws.{}.s3".format(aws_region), RouteTableIds=route_tables) print("S3 endpoint created successfully!") except Exception as e: if "RouteAlreadyExists" in str(e): print("S3 endpoint already exists.") elif "UnauthorizedOperation" in str(e): display(Markdown(generate_help_for_s3_endpoint_permissions(role))) raise e else: display(Markdown(create_s3_endpoint_manually(aws_region, default_vpc))) raise e ``` ### 인스턴스 유형 구성 1 Volta (V100) GPU와 40개의 CPU 코어로 클러스터를 구성해 보겠습니다. ml.p3.2xlarge에는 8개의 CPU 코어가 있고 ml.c5.4xlarge에는 16개의 CPU 코어가 있으므로 1개의 ml.p3.2xlarge 인스턴스와 2개의 ml.c5.4xlarge 인스턴스를 사용하여 이 작업을 수행할 수 있습니다. ``` %%time # Build CPU image cpu_repository_short_name = "sagemaker-roboschool-ray-%s" % "cpu" docker_build_args = { 'CPU_OR_GPU': "cpu", 'AWS_REGION': boto3.Session().region_name, } cpu_image_name = build_and_push_docker_image(repository_short_name, build_args=docker_build_args) print("Using CPU ECR image %s" % cpu_image_name) # Build GPU image gpu_repository_short_name = "sagemaker-roboschool-ray-%s" % "gpu" docker_build_args = { 'CPU_OR_GPU': "gpu", 'AWS_REGION': boto3.Session().region_name, } gpu_image_name = build_and_push_docker_image(repository_short_name, build_args=docker_build_args) print("Using GPU ECR image %s" % gpu_image_name) primary_cluster_instance_type = "ml.p3.2xlarge" primary_cluster_instance_count = 1 secondary_cluster_instance_type = "ml.c5.4xlarge" secondary_cluster_instance_count = 2 total_cpus = 40 - 1 # Leave one for ray scheduler total_gpus = 1 primary_cluster_instance_type = "ml.p3.16xlarge" primary_cluster_instance_count = 1 secondary_cluster_instance_type = "ml.c5.4xlarge" secondary_cluster_instance_count = 2 total_cpus = 40 - 1 # Leave one for ray scheduler total_gpus = 8 ``` 다음으로, 훈련하려는 roboschool 에이전트를 선택합니다. 이기종(heterogeneous) 훈련의 경우 인스턴스 간 동기화를 지원하는 몇 가지 추가 파라메터들을 훈련 작업에 전달합니다. 
- s3_bucket, s3_prefix: 마스터 IP 주소와 같은 메타데이터 저장에 사용 - rl_cluster_type: "기본" 또는 "보조" - aws_region: VPC 모드에서 S3에 연결하는 데 필요 - rl_num_instances_secondary: 보조 클러스터의 노드 수 - subnets, security_group_ids: VPC 모드에 필요 ``` roboschool_problem = 'reacher' job_name_prefix = 'rl-roboschool-distributed-'+ roboschool_problem s3_output_path = 's3://{}/'.format(s3_bucket) # SDK appends the job name and output folder # We explicitly need to specify these params so that the two jobs can synchronize using the metadata stored here s3_bucket = sage_session.default_bucket() s3_prefix = "dist-ray-%s-1GPU-40CPUs" % (roboschool_problem) # Make sure that the prefix is empty !aws s3 rm --recursive s3://{s3_bucket}/{s3_prefix} ``` ### 기본 클러스터 시작 (1 GPU 훈련 인스턴스) ``` primary_cluster_estimator = RLEstimator(entry_point="train-%s.py" % roboschool_problem, source_dir='src', dependencies=["common/sagemaker_rl"], image_name=gpu_image_name, role=role, train_instance_type=primary_cluster_instance_type, train_instance_count=primary_cluster_instance_count, output_path=s3_output_path, base_job_name=job_name_prefix, metric_definitions=metric_definitions, train_max_run=int(3600 * .5), # Maximum runtime in seconds hyperparameters={ "s3_prefix": s3_prefix, # Important for syncing "s3_bucket": s3_bucket, # Important for syncing "aws_region": boto3.Session().region_name, # Important for S3 connection "rl_cluster_type": "primary", # Important for syncing "rl_num_instances_secondary": secondary_cluster_instance_count, # Important for syncing "rl.training.config.num_workers": total_cpus, "rl.training.config.train_batch_size": 20000, "rl.training.config.num_gpus": total_gpus, }, subnets=default_subnets, # Required for VPC mode security_group_ids=default_security_groups # Required for VPC mode ) primary_cluster_estimator.fit(wait=False) primary_job_name = primary_cluster_estimator.latest_training_job.job_name print("Primary Training job: %s" % primary_job_name) ``` ### 보조 클러스터 시작 (2 CPU 인스턴스) ``` 
secondary_cluster_estimator = RLEstimator(entry_point="train-%s.py" % roboschool_problem, source_dir='src', dependencies=["common/sagemaker_rl"], image_name=cpu_image_name, role=role, train_instance_type=secondary_cluster_instance_type, train_instance_count=secondary_cluster_instance_count, output_path=s3_output_path, base_job_name=job_name_prefix, metric_definitions=metric_definitions, train_max_run=3600, # Maximum runtime in seconds hyperparameters={ "s3_prefix": s3_prefix, # Important for syncing "s3_bucket": s3_bucket, # Important for syncing "aws_region": boto3.Session().region_name, # Important for S3 connection "rl_cluster_type": "secondary", # Important for syncing }, subnets=default_subnets, # Required for VPC mode security_group_ids=default_security_groups # Required for VPC mode ) secondary_cluster_estimator.fit(wait=False) secondary_job_name = secondary_cluster_estimator.latest_training_job.job_name print("Secondary Training job: %s" % secondary_job_name) ``` ### 시각화 ``` print("Job name: {}".format(primary_job_name)) s3_url = "s3://{}/{}".format(s3_bucket,primary_job_name) if local_mode: output_tar_key = "{}/output.tar.gz".format(primary_job_name) else: output_tar_key = "{}/output/output.tar.gz".format(primary_job_name) intermediate_folder_key = "{}/output/intermediate/".format(primary_job_name) output_url = "s3://{}/{}".format(s3_bucket, output_tar_key) intermediate_url = "s3://{}/{}".format(s3_bucket, intermediate_folder_key) print("S3 job path: {}".format(s3_url)) print("Output.tar.gz location: {}".format(output_url)) print("Intermediate folder path: {}".format(intermediate_url)) tmp_dir = "/tmp/{}".format(primary_job_name) os.system("mkdir {}".format(tmp_dir)) print("Create local folder {}".format(tmp_dir)) ``` ### 훈련 롤아웃 비디오 가져오기 특정 롤아웃의 비디오는 훈련 중 S3에 기록됩니다. 여기에서는 S3에서 마지막 10개의 비디오 클립을 가져 와서 마지막 비디오를 렌더링합니다. 
``` recent_videos = wait_for_s3_object(s3_bucket, intermediate_folder_key, tmp_dir, fetch_only=(lambda obj: obj.key.endswith(".mp4") and obj.size>0), limit=10) last_video = sorted(recent_videos)[-1] # Pick which video to watch os.system("mkdir -p ./src/tmp_render_heterogeneous/ && cp {} ./src/tmp_render_heterogeneous/last_video.mp4".format(last_video)) HTML('<video src="./src/tmp_render_heterogeneous/last_video.mp4" controls autoplay></video>') ``` ### 훈련 작업에 대한 지표 plot CloudWatch 지표에 기록된 알고리즘 지표를 사용하여 실행 중인 훈련의 보상 지표를 볼 수 있습니다. 시간이 지남에 따라, 모델의 성능을 볼 수 있도록 이를 plot할 수 있습니다. ``` %matplotlib inline from sagemaker.analytics import TrainingJobAnalytics df = TrainingJobAnalytics(primary_job_name, ['episode_reward_mean']).dataframe() num_metrics = len(df) if num_metrics == 0: print("No algorithm metrics found in CloudWatch") else: plt = df.plot(x='timestamp', y='value', figsize=(12,5), legend=True, style='b-') plt.set_ylabel('Mean reward per episode') plt.set_xlabel('Training time (s)') ``` 위의 시각화 셀을 반복해서 실행하여 최신 비디오를 얻거나, 훈련 작업이 진행됨에 따라 최신 지표를 볼 수 있습니다.
github_jupyter
``` import numpy as np import pandas as pd %matplotlib inline import matplotlib.pyplot as plt import turbofats ``` ## Create a lightcurve ``` n_samples = 400 n_days = 100 n_components = 7 period = 7.4 std = 0.5 time = np.random.rand(n_samples) * n_days time.sort() time = time.reshape(-1, 1) cosine_components = np.random.randn(1, n_components) * np.exp(-np.linspace(0, 4, n_components)) sine_components = np.random.randn(1, n_components) * np.exp(-np.linspace(0, 4, n_components)) bias = np.random.randn(1) * 5 time_arg = 2*np.pi*time/(period/np.arange(1, n_components+1).reshape(1, n_components)) magnitude = np.sum(cosine_components * np.cos(time_arg) + sine_components * np.sin(time_arg), axis=1) + bias error = np.ones(n_samples)*std + np.random.rand(n_samples)*std*3 magnitude += error print(time.shape, magnitude.shape) plt.subplot(2, 1, 1) plt.errorbar(time, magnitude, yerr=error, fmt='*') plt.subplot(2, 1, 2) plt.errorbar(time % period, magnitude, yerr=error, fmt='*') feature_space = turbofats.NewFeatureSpace(feature_list=['PeriodLS_v2', 'Period_fit_v2', 'Harmonics']) detections_data = np.stack( [ time.flatten(), magnitude.flatten(), error ], axis=-1 ) detections = pd.DataFrame( data=detections_data, columns=['mjd', 'magpsf_corr', 'sigmapsf_corr'], index=['asdf'] * len(detections_data) ) feature_values = feature_space.calculate_features(detections) print(cosine_components) print(sine_components) print(np.sqrt(cosine_components**2 + sine_components**2)) feature_values reconstructed_period = feature_values['PeriodLS_v2'].values[0] reconstructed_time_arg = 2*np.pi*time/(reconstructed_period/np.arange(1, n_components+1).reshape(1, n_components)) print(reconstructed_time_arg.shape) reconstructed_harmonics_mag = np.array([feature_values['Harmonics_mag_%d' % i].values[0] for i in range(1, 8)]) reconstructed_harmonics_phase = np.array([0.0] + [feature_values['Harmonics_phase_%d' % i].values[0] for i in range(2, 8)]) reconstructed_mag = reconstructed_harmonics_mag.reshape(1, 
-1)*np.cos(reconstructed_time_arg - reconstructed_harmonics_phase.reshape(1, -1)) reconstructed_mag = np.real(np.sum(reconstructed_mag, axis=1) + np.mean(magnitude)) plt.subplot(2, 1, 1) plt.scatter(time % period, magnitude) plt.scatter((time - 3.0) % period, reconstructed_mag) plt.title(f'periodo original {period} dias') plt.subplot(2, 1, 2) plt.scatter(time % reconstructed_period, magnitude) plt.scatter((time-0.5) % reconstructed_period, reconstructed_mag) plt.title(f'periodo reconstruido {reconstructed_period} dias') plt.tight_layout() tt = np.linspace(0, 10, 1000) a = 1.2 b = -1.5 f = 0.2 y = a*np.cos(2*np.pi*f*tt) + b*np.sin(2*np.pi*f*tt) plt.plot(tt, y) m = np.sqrt(a**2 + b**2) phi = np.arctan2(b, a) y2 = m*np.cos(2*np.pi*f*tt-phi) plt.plot(tt, y2) lc = pd.read_pickle('~/alerce/GP-Augmentation/results_paula/augmented_lightcurves.pkl') lc.head() detections = lc[lc.detected] oids = detections.index.unique() for oid in oids: one_lc = detections.loc[oid] feature_values = feature_space.calculate_features(one_lc) print(oid,) ```
github_jupyter
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). # Challenge Notebook ## Problem: Add two numbers whose digits are stored in a linked list in reverse order. * [Constraints](#Constraints) * [Test Cases](#Test-Cases) * [Algorithm](#Algorithm) * [Code](#Code) * [Unit Test](#Unit-Test) * [Solution Notebook](#Solution-Notebook) ## Constraints * Can we assume this is a non-circular, singly linked list? * Yes * Do we expect the return to be in reverse order too? * Yes * What if one of the inputs is None? * Return None for an invalid operation * How large are these numbers--can they fit in memory? * Yes * Can we assume we already have a linked list class that can be used for this problem? * Yes * Can we assume this fits in memory? * Yes ## Test Cases * Empty list(s) -> None * Add values of different lengths * Input 1: 6->5->None * Input 2: 9->8->7 * Result: 5->4->8 * Add values of same lengths * Exercised from values of different lengths * Done here for completeness ## Algorithm Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/linked_lists/add_reverse/add_reverse_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. 
## Code ``` # %load ../linked_list/linked_list.py class Node(object): def __init__(self, data, next=None): self.next = next self.data = data def __str__(self): return self.data class LinkedList(object): def __init__(self, head=None): self.head = head def __len__(self): curr = self.head counter = 0 while curr is not None: counter += 1 curr = curr.next return counter def insert_to_front(self, data): if data is None: return None node = Node(data, self.head) self.head = node return node def append(self, data): if data is None: return None node = Node(data) if self.head is None: self.head = node return node curr_node = self.head while curr_node.next is not None: curr_node = curr_node.next curr_node.next = node return node def find(self, data): if data is None: return None curr_node = self.head while curr_node is not None: if curr_node.data == data: return curr_node curr_node = curr_node.next return None def delete(self, data): if data is None: return if self.head is None: return if self.head.data == data: self.head = self.head.next return prev_node = self.head curr_node = self.head.next while curr_node is not None: if curr_node.data == data: prev_node.next = curr_node.next return prev_node = curr_node curr_node = curr_node.next def delete_alt(self, data): if data is None: return if self.head is None: return curr_node = self.head if curr_node.data == data: curr_node = curr_node.next return while curr_node.next is not None: if curr_node.next.data == data: curr_node.next = curr_node.next.next return curr_node = curr_node.next def print_list(self): curr_node = self.head while curr_node is not None: print(curr_node.data) curr_node = curr_node.next def get_all_data(self): data = [] curr_node = self.head while curr_node is not None: data.append(curr_node.data) curr_node = curr_node.next return data class MyLinkedList(LinkedList): def add_reverse(self, first_list, second_list): # TODO: Implement me pass ``` ## Unit Test **The following unit test is expected to fail until you 
solve the challenge.** ``` # %load test_add_reverse.py from nose.tools import assert_equal class TestAddReverse(object): def test_add_reverse(self): print('Test: Empty list(s)') assert_equal(MyLinkedList().add_reverse(None, None), None) assert_equal(MyLinkedList().add_reverse(Node(5), None), None) assert_equal(MyLinkedList().add_reverse(None, Node(10)), None) print('Test: Add values of different lengths') # Input 1: 6->5->None # Input 2: 9->8->7 # Result: 5->4->8 first_list = MyLinkedList(Node(6)) first_list.append(5) second_list = MyLinkedList(Node(9)) second_list.append(8) second_list.append(7) result = MyLinkedList().add_reverse(first_list, second_list) assert_equal(result.get_all_data(), [5, 4, 8]) print('Test: Add values of same lengths') # Input 1: 6->5->4 # Input 2: 9->8->7 # Result: 5->4->2->1 first_head = Node(6) first_list = MyLinkedList(first_head) first_list.append(5) first_list.append(4) second_head = Node(9) second_list = MyLinkedList(second_head) second_list.append(8) second_list.append(7) result = MyLinkedList().add_reverse(first_list, second_list) assert_equal(result.get_all_data(), [5, 4, 2, 1]) print('Success: test_add_reverse') def main(): test = TestAddReverse() test.test_add_reverse() if __name__ == '__main__': main() ``` ## Solution Notebook Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/linked_lists/add_reverse/add_reverse_solution.ipynb) for a discussion on algorithms and code solutions.
github_jupyter
``` import pickle import os import numpy as np from tqdm.notebook import tqdm from quchem_ibm.exp_analysis import * def dict_of_M_to_list(M_dict, PauliOP): P_Qubit_list, _ = zip(*(list(*PauliOP.terms.keys()))) list_of_M_bitstrings=None for bit_string, N_obtained in M_dict.items(): M_string = np.take(list(bit_string[::-1]), P_Qubit_list) # only take terms measured! Note bitstring reversed! array_meas = np.repeat(''.join(M_string), N_obtained) if list_of_M_bitstrings is None: list_of_M_bitstrings=array_meas else: list_of_M_bitstrings=np.hstack((list_of_M_bitstrings,array_meas)) # randomly shuffle (seed means outcome will always be the SAME!) # np.random.seed(42) np.random.shuffle(list_of_M_bitstrings) return list_of_M_bitstrings # # input for exp base_dir = os.getcwd() input_file = os.path.join(base_dir, 'LiH_simulation_RESULTS_time=2020Oct07-163210198971.pickle') with open(input_file, 'rb') as handle: LiH_data = pickle.load(handle) experimental_data_STANDARD = LiH_data['experiment_data'].copy() del LiH_data STANDARD_data = experimental_data_STANDARD[101852100]['standard'].copy() del experimental_data_STANDARD len(STANDARD_data) STANDARD_Hist_data_sim={} for exp_instance in STANDARD_data: #each exp repeated 10 times! 
for exp_dict_key in exp_instance: exp_dict= exp_instance[exp_dict_key] P=exp_dict['qubitOp'] coeff = exp_dict['coeff'] measured_dict_sim = exp_dict['measurement_dict'] M_list_sim = dict_of_M_to_list(measured_dict_sim, P) if exp_dict_key in STANDARD_Hist_data_sim.keys(): STANDARD_Hist_data_sim[exp_dict_key]={'P':list(P.terms.items())[0] ,'coeff': coeff.real, 'Measurements': np.hstack((STANDARD_Hist_data_sim[exp_dict_key]['Measurements'],M_list_sim))} else: STANDARD_Hist_data_sim[exp_dict_key]={'P':list(P.terms.items())[0] ,'coeff': coeff.real, 'Measurements': M_list_sim} del exp_dict del STANDARD_data # for key in STANDARD_Hist_data_sim: # STANDARD_Hist_data_sim[key]['Measurements']=STANDARD_Hist_data_sim[key]['Measurements'].tolist() # STANDARD_Hist_data_sim[key]['P']=(STANDARD_Hist_data_sim[key]['P'][0], STANDARD_Hist_data_sim[key]['P'][1].real) # import json # with open("STANDARD_Hist_data_sim", "w") as write_file: # json.dump(STANDARD_Hist_data_sim, write_file) STANDARD_Hist_data_sim[0]['Measurements'].shape # ### save output # np.save('Standard_hist_data', STANDARD_Hist_data_sim) import matplotlib.pyplot as plt fci_energy= -7.971184315565538 ``` # Histogram ``` def Get_Hist_data(Histogram_data, I_term): E_list=[] for m_index in tqdm(range(Histogram_data[0]['Measurements'].shape[0])): E=I_term for M_dict_key in Histogram_data: coeff = Histogram_data[M_dict_key]['coeff'] parity = 1 if sum(map(int, Histogram_data[M_dict_key]['Measurements'][m_index])) % 2 == 0 else -1 E+=coeff*parity E_list.append(E) return E_list I_term = -4.142299396835105 E_list_STANDARD_sim=Get_Hist_data(STANDARD_Hist_data_sim, I_term) import json with open("E_list_STANDARD_sim.json", "w") as write_file: json.dump(E_list_STANDARD_sim, write_file) E_list_STANDARD_sim=np.array(E_list_STANDARD_sim) def gaussian(x, mean, amplitude, standard_deviation): return amplitude * np.exp( - ((x - mean)**2 / (2*standard_deviation**2))) from scipy.optimize import curve_fit # from matplotlib import pyplot # 
%matplotlib inline # # bins_standard = len(set(E_list_STANDARD_sim)) # bins_standard = 1000 # bin_heights_STANDARD, bin_borders_STANDARD, _=pyplot.hist(E_list_STANDARD_sim, # bins_standard, alpha=0.7, # label='$E$ standard VQE - sim', # color='g', # density=False) # bin_centers_STANDARD = bin_borders_STANDARD[:-1] + np.diff(bin_borders_STANDARD) / 2 # popt, _ = curve_fit(gaussian, bin_centers_STANDARD, bin_heights_STANDARD, p0=[fci_energy, 0., 1.], **{'maxfev':10000}) # mean_STANDARD, amplitude_STANDARD, standard_deviation_STANDARD= popt # x_interval_for_fit = np.linspace(bin_borders_STANDARD[0], bin_borders_STANDARD[-1], 10000) # pyplot.plot(x_interval_for_fit, gaussian(x_interval_for_fit, *popt), label='Gaussian fit', color='g') # pyplot.axvline(mean_STANDARD, color='g', linestyle='dashed', linewidth=1, # label='$E_{average}$ standard VQE - sim') # mean of GAUSSIAN FIT # # pyplot.axvline(E_list_STANDARD_sim.mean(), color='g', linestyle='dashed', linewidth=1, # # label='$E_{average}$ standard VQE - sim') # mean of DATA # pyplot.errorbar(mean_STANDARD,65_000, # xerr=standard_deviation_STANDARD, linestyle="None", color='g', # uplims=True, lolims=True, label='$\sigma_{E_{av}}$standard VQE - sim') # pyplot.axvline(fci_energy, color='k', linestyle='solid', linewidth=2, # label='$E_{FCI}$', alpha=0.4) # pyplot.legend(loc='upper right') # # pyplot.legend(bbox_to_anchor=(0.865,1.9), loc="upper left") # pyplot.ylabel('Frequency') # pyplot.xlabel('Energy') # pyplot.tight_layout() # file_name = 'LiH_Histogram_STANDARD_sim_Gaussian.jpeg' # pyplot.savefig(file_name, dpi=300,transparent=True,) # edgecolor='black', facecolor='white') # pyplot.show() def normal_dist(x, mean, standard_deviation): return (1/(np.sqrt(2*np.pi)*standard_deviation)) * np.exp( - ((x - mean)**2 / (2*standard_deviation**2))) plt.plot(x, normal_dist(x, av, sig)) # from scipy.stats import norm # x=np.linspace(-10, 10, 1000) # av=2 # sig=1 # plt.plot(x, norm.pdf(x, av, sig)) 
len(set(np.around(E_list_STANDARD_sim, 5))) E_list_STANDARD_sim.shape E_list_STANDARD_sim.shape[0]**(1/3) # https://stats.stackexchange.com/questions/798/calculating-optimal-number-of-bins-in-a-histogram from scipy.stats import iqr bin_width = 2 * iqr(E_list_STANDARD_sim) / E_list_STANDARD_sim.shape[0]**(1/3) np.ceil((max(E_list_STANDARD_sim)-min(E_list_STANDARD_sim))/bin_width) from matplotlib import pyplot %matplotlib inline # bins = len(set(E_list_SEQ_ROT_sim)) # bins_standard = len(set(E_list_STANDARD_sim)) # bins_standard = 150_000 bins_standard = 2500 bin_heights_STANDARD, bin_borders_STANDARD, _=pyplot.hist(E_list_STANDARD_sim, bins_standard, alpha=0.7, label='$E$ standard VQE - sim', color='g', density=True) #### ,hatch='-') ###### Gaussian fit bin_centers_STANDARD = bin_borders_STANDARD[:-1] + np.diff(bin_borders_STANDARD) / 2 popt, _ = curve_fit(gaussian, bin_centers_STANDARD, bin_heights_STANDARD, p0=[fci_energy, 0., 1.])#, **{'maxfev':10000}) mean_STANDARD, amplitude_STANDARD, standard_deviation_STANDARD= popt x_interval_for_fit = np.linspace(bin_borders_STANDARD[0], bin_borders_STANDARD[-1], 10000) pyplot.plot(x_interval_for_fit, gaussian(x_interval_for_fit, *popt), label='Gaussian fit', color='olive', linewidth=3) ### normal fit # popt_norm, _ = curve_fit(normal_dist, bin_centers_STANDARD, bin_heights_STANDARD, p0=[fci_energy, standard_deviation_STANDARD])#, **{'maxfev':10000}) # mean_norm, standard_deviation_norm= popt_norm # pyplot.plot(x_interval_for_fit, normal_dist(x_interval_for_fit, *popt_norm), label='Normal fit', color='b', # linestyle='--') # pyplot.plot(x_interval_for_fit, normal_dist(x_interval_for_fit, mean_STANDARD, standard_deviation_STANDARD), # label='Normal fit', color='b', linestyle='--') #### Average energy from data pyplot.axvline(E_list_STANDARD_sim.mean(), color='g', linestyle='--', linewidth=2, label='$E_{average}$ standard VQE - sim') # mean of DATA ############## # chemical accuracy pyplot.axvline(fci_energy, color='k', 
linestyle='solid', linewidth=3, label='$E_{FCI}$', alpha=0.3) # # chemical accuracy # pyplot.fill_between([fci_energy-1.6e-3, fci_energy+1.6e-3], # [0, np.ceil(max(bin_heights_STANDARD))] , # color='k', # label='chemical accuracy', # alpha=0.5) pyplot.rcParams["font.family"] = "Times New Roman" # pyplot.legend(loc='upper right') # # pyplot.legend(bbox_to_anchor=(0.865,1.9), loc="upper left") pyplot.ylabel('Probability Density', fontsize=20) pyplot.xlabel('Energy / Hartree', fontsize=20) pyplot.xticks(np.arange(-9.5,-5.5,0.5), fontsize=20) pyplot.yticks(np.arange(0,2.5,0.5), fontsize=20) # pyplot.xlim(np.floor(min(bin_borders_STANDARD)), np.ceil(max(bin_borders_STANDARD))) pyplot.xlim(-9.5, -6.5) pyplot.tight_layout() file_name = 'LiH_Histogram_STANDARD_sim_Gaussian.jpeg' pyplot.savefig(file_name, dpi=300,transparent=True,) # edgecolor='black', facecolor='white') pyplot.show() from matplotlib import pyplot %matplotlib inline # bins = len(set(E_list_SEQ_ROT_sim)) # bins_standard = len(set(E_list_STANDARD_sim)) # bins_standard = 5000 bins_standard = 150_000 bin_heights_STANDARD, bin_borders_STANDARD, _=pyplot.hist(E_list_STANDARD_sim, bins_standard, alpha=0.7, label='$E$ standard VQE - sim', color='g', density=True) ############## pyplot.rcParams["font.family"] = "Times New Roman" # pyplot.legend(loc='upper right') # # pyplot.legend(bbox_to_anchor=(0.865,1.9), loc="upper left") pyplot.ylabel('Probability Density', fontsize=20) pyplot.xlabel('Energy / Hartree', fontsize=20) pyplot.xticks(np.arange(-9.5,-5.5,0.5), fontsize=20) pyplot.yticks(np.arange(0,3,0.5), fontsize=20) # pyplot.xlim(np.floor(min(bin_borders_STANDARD)), np.ceil(max(bin_borders_STANDARD))) pyplot.xlim(-9.5, -6.5) pyplot.tight_layout() # file_name = 'LiH_Histogram_STANDARD_sim_Gaussian.jpeg' # pyplot.savefig(file_name, dpi=300,transparent=True,) # edgecolor='black', facecolor='white') pyplot.show() from scipy import stats print(stats.shapiro(E_list_STANDARD_sim)) print(stats.kstest(E_list_STANDARD_sim, 
'norm')) ``` # XY Z comparison ``` i_list_XY=[] STANDARD_Hist_data_XY={} i_list_Z=[] STANDARD_Hist_data_Z={} amplitude_min=0.00 XY_terms=[] Z_amp_sum=0 for key in STANDARD_Hist_data_sim: Pword, const = STANDARD_Hist_data_sim[key]['P'] coeff=STANDARD_Hist_data_sim[key]['coeff'] if np.abs(coeff)>amplitude_min: qubitNos, qubitPstrs = zip(*(list(Pword))) # XY terms only! if ('X' in qubitPstrs) or ('Y' in qubitPstrs): i_list_XY.append(key) STANDARD_Hist_data_XY[key]=STANDARD_Hist_data_sim[key] XY_terms.append(STANDARD_Hist_data_sim[key]['P']) else: i_list_Z.append(key) STANDARD_Hist_data_Z[key]=STANDARD_Hist_data_sim[key] Z_amp_sum+=coeff Z_amp_sum def Get_Hist_data(Histogram_data, I_term): E_list=[] for m_index in tqdm(range(Histogram_data[list(Histogram_data.keys())[0]]['Measurements'].shape[0])): E=I_term for M_dict_key in Histogram_data: coeff = Histogram_data[M_dict_key]['coeff'] parity = 1 if sum(map(int, Histogram_data[M_dict_key]['Measurements'][m_index])) % 2 == 0 else -1 E+=coeff*parity E_list.append(E) return E_list I_term = -4.142299396835105 E_list_STANDARD_XY=Get_Hist_data(STANDARD_Hist_data_XY, 0) E_list_STANDARD_Z=Get_Hist_data(STANDARD_Hist_data_Z, 0) print(len(set(np.around(E_list_STANDARD_XY, 5)))) print(len(set(np.around(E_list_STANDARD_Z, 5)))) from matplotlib import pyplot %matplotlib inline # bins_standard = len(set(E_list_STANDARD_sim)) # bins_standard = 1000 bins_standard=8_000 # bin_heights_XY, bin_borders_XY, _=pyplot.hist(E_list_STANDARD_XY, # bins_standard, alpha=0.7, # label='$XY$ terms', # color='b', # density=False) bin_heights_Z, bin_borders_Z, _=pyplot.hist(E_list_STANDARD_Z, bins_standard, alpha=0.7, label='$Z$ terms', color='g', density=True) pyplot.rcParams["font.family"] = "Times New Roman" pyplot.ylabel('Probability Density', fontsize=20) pyplot.xlabel('Energy / Hartree', fontsize=20) pyplot.xticks(np.arange(-4.2,-3.0,0.2), fontsize=20) pyplot.xlim((-4.2, -3.2)) pyplot.yticks(np.arange(0,1200,200), fontsize=20) pyplot.ylim((0, 
1000)) pyplot.tight_layout() file_name = 'LiH_standard_Z.jpeg' pyplot.savefig(file_name, dpi=300,transparent=True,) # edgecolor='black', facecolor='white') pyplot.show() np.where(bin_heights_Z==max(bin_heights_Z))[0] print(bin_heights_Z[2334]) print('left sum:',sum(bin_heights_Z[:2334])) print('right sum:', sum(bin_heights_Z[2335:])) # therefore slighlt more likely to get more +ve energy!!! bin_borders_Z[583] print(len(np.where(np.array(E_list_STANDARD_Z)>-3.8)[0])) print(len(np.where(np.array(E_list_STANDARD_Z)<-3.89)[0])) len(E_list_STANDARD_Z) from matplotlib import pyplot %matplotlib inline # bins_standard = len(set(E_list_STANDARD_sim)) # bins_standard = 1000 bins_standard = 5000 bin_heights_XY, bin_borders_XY, _=pyplot.hist(E_list_STANDARD_XY, bins_standard, alpha=0.7, label='$XY$ terms', color='g', density=True) pyplot.rcParams["font.family"] = "Times New Roman" pyplot.ylabel('Probability Density', fontsize=20) pyplot.xlabel('Energy / Hartree', fontsize=20) pyplot.xticks(np.arange(-0.8,0.9,0.2), fontsize=20) pyplot.xlim((-0.8, 0.8)) pyplot.yticks(np.arange(0,3,0.5), fontsize=20) pyplot.tight_layout() file_name = 'LiH_standard_XY.jpeg' pyplot.savefig(file_name, dpi=300,transparent=True,) # edgecolor='black', facecolor='white') pyplot.show() ```
github_jupyter
L'obiettivo di questa esercitazione è quello di arrivare ad implementare un sistema completo di classificazione dei sopravvissuti al disastro del Titanic. Per farlo, partiremo dall'omonimo dataset, faremo un'analisi completa dello stesso, e cercheremo di raggiungere il miglior risultato possibile in termini di accuracy. ``` import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') from sklearn.compose import ColumnTransformer from sklearn.datasets import fetch_openml from sklearn.feature_selection import VarianceThreshold from sklearn.model_selection import GridSearchCV from sklearn.neural_network import MLPClassifier from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.tree import DecisionTreeClassifier ``` ## Parte 1: estrazione dei dati Finora, abbiamo sempre usato la funzione `read_csv` di Pandas per la lettura di un dataset. Scikit Learn, però, offre la funzione [`fetch_openml`](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_openml.html), che permette di estrarre un dataframe da [OpenML](https://www.openml.org/), nota repository online dalla quale è possibile reperire numerosi dataset. Proviamo quindi ad estrarre i dati usando proprio questa funzione. > **Suggerimento**: la funzione `fetch_openml` restituisce un oggetto. Esploriamolo, assieme alla documentazione, per estrarre il dataframe. ``` data = fetch_openml("titanic", version=1, as_frame=True) df = data.frame df.head() ``` ## Parte 2: Exploratory data analysis Come abbiamo visto, è sempre opportuno "esplorare" i dati a nostra disposizione. ### Parte 2.1: Tipologia di feature e preprocessing Per prima cosa, quindi, osserviamoli, guardando i primi cinque campioni, e valutiamo il tipo delle feature che stiamo utilizzando. ``` df.head() df.dtypes ``` Notiamo subito che ci sono dei `NaN` e dei `None` relativi a diverse feature. 
> **Suggerimento**: `NaN` e `None` *non* sono analoghi. Entrambi indicano la mancanza di dati, ma `None` implica la presenza di un oggetto, mentre `NaN` quella di un valore numerico. In tal senso, a [questo indirizzo](https://stackoverflow.com/questions/17534106/what-is-the-difference-between-nan-and-none) potete trovare un'interessante disquisizione. Abbiamo due possibilità: la prima è quella di eliminare i campioni che presentano dati mancanti, la seconda è quella di eliminare le feature che presentano tali valori. Scriviamo la funzione `drop_nan` che elimini una feature qualora il numero di dati mancanti sia superiore al 25% del totale e che, una volta terminata questa operazione, provveda ad eliminare i campioni che presentano una o più feature con dati mancanti. ```
def drop_nan(df):
    """Drop sparse columns in place, then drop rows that still contain NaN.

    NOTE(review): `thresh` in `DataFrame.dropna` is the minimum number of
    NON-missing values a column needs in order to be KEPT, so
    `round(len(df) / 4)` keeps any column that is at least 25% populated —
    i.e. columns are dropped only when MORE THAN ~75% of their values are
    missing, not 25% as the text above states. Later cells rely on this
    lenient behaviour (e.g. `home.dest`, ~43% missing, must survive for the
    ColumnTransformer), so the code is deliberately left unchanged here.
    """
    threshold = round(len(df) / 4)
    # Drop columns with fewer than `threshold` non-missing values.
    df.dropna(axis=1, inplace=True, thresh=threshold)
    # Then drop any remaining rows that still contain missing values.
    df.dropna(axis=0, inplace=True)
``` Prima di applicare la funzione `drop_nan`, però, eliminiamo le feature che non reputiamo significative ai fini della nostra analisi. In particolare, potremmo eliminare feature come il nome o il numero di ticket. Per quello che riguarda i valori a `None`, questi sono indicativi del fatto che il passeggero non è stato imbarcato su alcuna scialuppa di salvataggio. Assegnamo uno zero a tutti i dati che assumono valore `None` mediante la funzione [`apply`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.apply.html). > **Nota** > > La funzione `apply` prevede l'utilizzo delle [*lambda functions*](https://docs.python.org/3/tutorial/controlflow.html). Per brevità, queste funzioni hanno una sintassi di questo tipo: > ```python > lambda x: f(x) > ``` > Ciò significa che sarà applicata `f(x)` ad ogni valore di `x`.
Così, ad esempio: > ```python > df = pd.DataFrame([1, 2, 3]) > df = df.apply(lambda x: x**2) > # Risultato: pd.DataFrame([1, 4, 9]) > ``` ``` df.drop(['name', 'ticket'], axis=1, inplace=True) df['boat'] = df['boat'].apply(lambda x: '0' if x is None else x) drop_nan(df) ``` Vediamo adesso il dataframe risultante. ``` df.head() df.dtypes ``` Nel dataframe, sono rimaste dieci feature, rispetto alle iniziali 14. Notiamo anche che abbiamo alcune feature che possiamo contrassegnare come *categorical*, ovvero: * `sex` * `embarked` * `boat` * `home.dest` > **Nota**: `boat` è una feature categorica, in quanto alcune delle scialuppe di salvataggio erano contrassegnate da valori alfanumerici (ad esempio, `D`) e non da semplici cifre. ### Parte 2.2: Esplorazione dei dati Usiamo adesso congiuntamente gli strumenti degli *istogrammi* e degli *scatter plot* per esplorare visivamente le singole feature. Partiamo dall'età. ``` df.hist( column='age', grid=False ) plt.show() ``` Come possiamo vedere, l'età ricorda una distribuzione di Rayleigh. Ci attendiamo lo stesso per un'altra dimensione che possiamo esplorare, ovvero quella delle tariffe pagate dai singoli passeggeri. ``` df.hist( column='fare', grid=False, bins=100 ) plt.show() ``` Vediamo come si dispongono le due feature usando uno scatter plot. ``` df.boxplot(['age', 'fare']) plt.show() df.plot.scatter(x='survived', y='age', c='survived', cmap='inferno') plt.show() ``` Notiamo come la variabilità dell'età è sufficiente, mentre quella del ticket è bassa. Potremmo quindi provare ad usare una tecnica di feature selection basata su `VarianceThreshold`. In ultimo, valutiamo la matrice di correlazione. Usiamo l'indice di correlazione di Kendall, che risulta essere maggiormente robusto rispetto a quello di Pearson ed a quello di Spearman. Per approfondire, ecco un [eccellente punto di partenza](https://datascience.stackexchange.com/a/64261) su Stack Exchange. 
``` df.corr(method='kendall') df['survived'] = df['survived'].apply(lambda x: int(x)) df.head() ``` E' facile vedere come i risultati ci conducano alla conclusione che non vi sono feature fortemente correlate od anticorrelate; la correlazione maggiore che è possibile riscontrare è infatti tra classe del passeggero e tariffa (ed è negativa, come prevedibile: ciò significa che passeggeri con classe numerica più alta, ovvero terza, hanno pagato meno rispetto a passeggeri con classe numerica più bassa, ovvero seconda e prima). ## Parte 3: pipeline di machine learning Possiamo adesso passare a creare due pipeline per il machine learning. Useremo in tal senso due classificatori: il primo sarà basato su alberi decisionali, mentre il secondo sarà un *multi-layer perceptron*, modellato grazie alla classe [`MPLClassifier`](http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html). Prima di continuare, però, isoliamo le label del database. Per farlo, usiamo la funzione [`pop`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.pop.html): ``` y = df.pop('survived') df.head() ``` Definiamo ora un `ColumnTransformer` che codifichi le feature in precedenza indicate come categorical: ``` ct = ColumnTransformer( [('sex_tr', OneHotEncoder(handle_unknown='ignore'), ['sex']), ('embarked_tr', OneHotEncoder(handle_unknown='ignore'), ['embarked']), ('boat_tr', OneHotEncoder(handle_unknown='ignore'), ['boat']), ('home.dest_tr', OneHotEncoder(handle_unknown='ignore'), ['home.dest'])], remainder='passthrough') X = ct.fit_transform(df) print(X[0, :]) ``` Possiamo ora definire le nostre due processing pipeline, una per ognuno dei possibili classificatori. 
``` dt_pipeline = Pipeline([ ('feat_sel', VarianceThreshold(.8 * 1 - .8)), ('dt', DecisionTreeClassifier(random_state=42)) ]) mlp_pipeline = Pipeline([ ('feat_sel', VarianceThreshold(.8 * 1 - .8)), ('mlp', MLPClassifier(random_state=42)) ]) ``` Applichiamo la `GridSearchCV` su ognuna delle due pipeline. Dato che queste offrono un'interfaccia analoga a quella dei classici stimatori, potremo usare la stessa modalità vista in precedenza sui regressori; l'unica accortezza starà nello specificare a quale step della pipeline si riferiscono i parametri indicati nella griglia, usando una notazione: ```python step_name__param_name ``` ``` dt_params = { 'dt__max_depth': list(range(1, 11)), 'dt__criterion': ['gini', 'entropy'], } mlp_params = { 'mlp__hidden_layer_sizes': [50, 100, 150, 200], 'mlp__activation': ['logistic', 'tanh', 'relu'], 'mlp__solver': ['sgd', 'adam'], 'mlp__learning_rate': ['constant', 'adaptive'], } dt_search = GridSearchCV(dt_pipeline, dt_params) dt_search = dt_search.fit(X, y) mlp_search = GridSearchCV(mlp_pipeline, mlp_params) mlp_search = mlp_search.fit(X, y) ``` Vediamo quali sono i migliori punteggi ottenuti da entrambe le pipeline. ``` print('Accuracy per la pipeline con albero decisionale: ~{}%'.format( round(dt_search.best_score_ * 100))) print('Accuracy per la pipeline con MLP: ~{}%'.format( round(mlp_search.best_score_ * 100))) ``` Ovviamente, potremo usare in inferenza la pipeline addestrata esattamente come uno stimatore mediante il metodo `predict`. ## Note finali Scegliere tra un gran numero di stimatori può essere un'operazione abbastanza onerosa. Per questo, esiste un'intera branca del machine learning, chiamata *AutoML*, che si occupa di automatizzare la scelta, rendendo il processo trasparente all'utente. In tal senso, un tool per l'AutoML basato su Scikit Learn è [AutoSKLearn](https://github.com/automl/auto-sklearn). 
Questo è, al momento, disponibile soltanto per macchine non Windows; tuttavia, il consiglio è quello di darci un'occhiata, se possibile. Un altro tool molto interessante (ma purtroppo meno "aggiornato" di AutoSKLearn) è [LazyPredict](https://lazypredict.readthedocs.io/en/latest/readme.html).
github_jupyter
##### Copyright 2018 The TensorFlow Authors. Licensed under the Apache License, Version 2.0 (the "License"); ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # AutoGraph: Easy control flow for graphs <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/versions/master/guide/autograph"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/guide/autograph.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/models/blob/master/samples/core/guide/autograph.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> [AutoGraph](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/autograph/) helps you write complicated graph code using normal Python. Behind the scenes, AutoGraph automatically transforms your code into the equivalent [TensorFlow graph code](https://www.tensorflow.org/guide/graphs). AutoGraph already supports much of the Python language, and that coverage continues to grow. 
For a list of supported Python language features, see the [Autograph capabilities and limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/autograph/LIMITATIONS.md). ## Setup To use AutoGraph, install the latest version of TensorFlow: ``` ! pip install -U tf-nightly ``` Import TensorFlow, AutoGraph, and any supporting modules: ``` from __future__ import division, print_function, absolute_import import tensorflow as tf import tensorflow.keras.layers as layers from tensorflow.contrib import autograph import numpy as np import matplotlib.pyplot as plt ``` We'll enable [eager execution](https://www.tensorflow.org/guide/eager) for demonstration purposes, but AutoGraph works in both eager and [graph execution](https://www.tensorflow.org/guide/graphs) environments: ``` tf.enable_eager_execution() ``` Note: AutoGraph converted code is designed to run during graph execution. When eager exectuon is enabled, use explicit graphs (as this example shows) or `tf.contrib.eager.defun`. ## Automatically convert Python control flow AutoGraph will convert much of the Python language into the equivalent TensorFlow graph building code. Note: In real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, you must index and batch the examples to maintain performance while applying the control flow logic. 
def square_if_positive(x):
  """Return x squared when x is positive, otherwise 0.0.

  Written with a plain ``if`` statement (not a conditional
  expression) so AutoGraph can convert it to graph control flow.
  """
  result = 0.0
  if x > 0:
    result = x * x
  return result
# You can inspect the graph using tf.get_default_graph().as_graph_def() with tf.Session() as sess: sess.run(final_i) ``` ## Examples Let's demonstrate some useful Python language features. ### Assert AutoGraph automatically converts the Python `assert` statement into the equivalent `tf.Assert` code: ``` @autograph.convert() def inverse(x): assert x != 0.0, 'Do not pass zero!' return 1.0 / x with tf.Graph().as_default(), tf.Session() as sess: try: print(sess.run(inverse(tf.constant(0.0)))) except tf.errors.InvalidArgumentError as e: print('Got error message:\n %s' % e.message) ``` ### Print Use the Python `print` function in-graph: ``` @autograph.convert() def count(n): i=0 while i < n: print(i) i += 1 return n with tf.Graph().as_default(), tf.Session() as sess: sess.run(count(tf.constant(5))) ``` ### Lists Append to lists in loops (tensor list ops are automatically created): ``` @autograph.convert() def arange(n): z = [] # We ask you to tell us the element dtype of the list autograph.set_element_type(z, tf.int32) for i in range(n): z.append(i) # when you're done with the list, stack it # (this is just like np.stack) return autograph.stack(z) with tf.Graph().as_default(), tf.Session() as sess: sess.run(arange(tf.constant(10))) ``` ### Nested control flow ``` @autograph.convert() def nearest_odd_square(x): if x > 0: x = x * x if x % 2 == 0: x = x + 1 return x with tf.Graph().as_default(): with tf.Session() as sess: print(sess.run(nearest_odd_square(tf.constant(4)))) print(sess.run(nearest_odd_square(tf.constant(5)))) print(sess.run(nearest_odd_square(tf.constant(6)))) ``` ### While loop ``` @autograph.convert() def square_until_stop(x, y): while x < y: x = x * x return x with tf.Graph().as_default(): with tf.Session() as sess: print(sess.run(square_until_stop(tf.constant(4), tf.constant(100)))) ``` ### For loop ``` @autograph.convert() def fizzbuzz_each(nums): result = [] autograph.set_element_type(result, tf.string) for num in nums: result.append(fizzbuzz(num)) 
return autograph.stack(result) with tf.Graph().as_default(): with tf.Session() as sess: print(sess.run(fizzbuzz_each(tf.constant(np.arange(10))))) ``` ### Break ``` @autograph.convert() def argwhere_cumsum(x, threshold): current_sum = 0.0 idx = 0 for i in range(len(x)): idx = i if current_sum >= threshold: break current_sum += x[i] return idx N = 10 with tf.Graph().as_default(): with tf.Session() as sess: idx = argwhere_cumsum(tf.ones(N), tf.constant(float(N/2))) print(sess.run(idx)) ``` ## Interoperation with `tf.Keras` Now that you've seen the basics, let's build some model components with autograph. It's relatively simple to integrate `autograph` with `tf.keras`. ### Stateless functions For stateless functions, like `collatz` shown below, the easiest way to include them in a keras model is to wrap them up as a layer uisng `tf.keras.layers.Lambda`. ``` import numpy as np @autograph.convert() def collatz(x): x=tf.reshape(x,()) assert x>0 n = tf.convert_to_tensor((0,)) while not tf.equal(x,1): n+=1 if tf.equal(x%2, 0): x = x//2 else: x = 3*x+1 return n with tf.Graph().as_default(): model = tf.keras.Sequential([ tf.keras.layers.Lambda(collatz, input_shape=(1,), output_shape=(), ) ]) result = model.predict(np.array([6171])) #261 result ``` ### Custom Layers and Models <!--TODO(markdaoust) link to full examples or these referenced models.--> The easiest way to use AutoGraph with Keras layers and models is to `@autograph.convert()` the `call` method. See the [TensorFlow Keras guide](https://tensorflow.org/guide/keras#build_advanced_models) for details on how to build on these classes. Here is a simple example of the [stocastic network depth](https://arxiv.org/abs/1603.09382) technique : ``` # `K` is used to check if we're in train or test mode. 
import tensorflow.keras.backend as K class StocasticNetworkDepth(tf.keras.Sequential): def __init__(self, pfirst=1.0, plast=0.5, *args,**kwargs): self.pfirst = pfirst self.plast = plast super().__init__(*args,**kwargs) def build(self,input_shape): super().build(input_shape.as_list()) self.depth = len(self.layers) self.plims = np.linspace(self.pfirst, self.plast, self.depth+1)[:-1] @autograph.convert() def call(self, inputs): training = tf.cast(K.learning_phase(), dtype=bool) if not training: count = self.depth return super(StocasticNetworkDepth, self).call(inputs), count p = tf.random_uniform((self.depth,)) keeps = p<=self.plims x = inputs count = tf.reduce_sum(tf.cast(keeps, tf.int32)) for i in range(self.depth): if keeps[i]: x = self.layers[i](x) # return both the final-layer output and the number of layers executed. return x, count ``` Let's try it on mnist-shaped data: ``` train_batch = np.random.randn(64, 28,28,1).astype(np.float32) ``` Build a simple stack of `conv` layers, in the stocastic depth model: ``` with tf.Graph().as_default() as g: model = StocasticNetworkDepth( pfirst=1.0, plast=0.5) for n in range(20): model.add( layers.Conv2D(filters=16, activation=tf.nn.relu, kernel_size=(3,3), padding='same')) model.build(tf.TensorShape((None, None, None,1))) init = tf.global_variables_initializer() ``` Now test it to ensure it behaves as expected in train and test modes: ``` # Use an explicit session here so we can set the train/test switch, and # inspect the layer count returned by `call` with tf.Session(graph=g) as sess: init.run() for phase, name in enumerate(['test','train']): K.set_learning_phase(phase) result, count = model(tf.convert_to_tensor(train_batch, dtype=tf.float32)) result1, count1 = sess.run((result, count)) result2, count2 = sess.run((result, count)) delta = (result1 - result2) print(name, "sum abs delta: ", abs(delta).mean()) print(" layers 1st call: ", count1) print(" layers 2nd call: ", count2) print() ``` ## Advanced example: An in-graph 
training loop The previous section showed that AutoGraph can be used inside Keras layers and models. Keras models can also be used in AutoGraph code. Since writing control flow in AutoGraph is easy, running a training loop in a TensorFlow graph should also be easy. This example shows how to train a simple Keras model on MNIST with the entire training process—loading batches, calculating gradients, updating parameters, calculating validation accuracy, and repeating until convergence—is performed in-graph. ### Download data ``` (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data() ``` ### Define the model ``` def mlp_model(input_shape): model = tf.keras.Sequential(( tf.keras.layers.Flatten(), tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape), tf.keras.layers.Dense(100, activation='relu'), tf.keras.layers.Dense(10, activation='softmax'))) model.build() return model def predict(m, x, y): y_p = m(x) losses = tf.keras.losses.categorical_crossentropy(y, y_p) l = tf.reduce_mean(losses) accuracies = tf.keras.metrics.categorical_accuracy(y, y_p) accuracy = tf.reduce_mean(accuracies) return l, accuracy def fit(m, x, y, opt): l, accuracy = predict(m, x, y) # Autograph automatically adds the necessary `tf.control_dependencies` here. # (Without them nothing depends on `opt.minimize`, so it doesn't run.) # This makes it much more like eager-code. 
opt.minimize(l) return l, accuracy def setup_mnist_data(is_training, batch_size): if is_training: ds = tf.data.Dataset.from_tensor_slices((train_images, train_labels)) ds = ds.shuffle(batch_size * 10) else: ds = tf.data.Dataset.from_tensor_slices((test_images, test_labels)) ds = ds.repeat() ds = ds.batch(batch_size) return ds def get_next_batch(ds): itr = ds.make_one_shot_iterator() image, label = itr.get_next() x = tf.to_float(image)/255.0 y = tf.one_hot(tf.squeeze(label), 10) return x, y ``` ### Define the training loop ``` # Use `recursive = True` to recursively convert functions called by this one. @autograph.convert(recursive=True) def train(train_ds, test_ds, hp): m = mlp_model((28 * 28,)) opt = tf.train.AdamOptimizer(hp.learning_rate) # We'd like to save our losses to a list. In order for AutoGraph # to convert these lists into their graph equivalent, # we need to specify the element type of the lists. train_losses = [] autograph.set_element_type(train_losses, tf.float32) test_losses = [] autograph.set_element_type(test_losses, tf.float32) train_accuracies = [] autograph.set_element_type(train_accuracies, tf.float32) test_accuracies = [] autograph.set_element_type(test_accuracies, tf.float32) # This entire training loop will be run in-graph. i = tf.constant(0) while i < hp.max_steps: train_x, train_y = get_next_batch(train_ds) test_x, test_y = get_next_batch(test_ds) step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt) step_test_loss, step_test_accuracy = predict(m, test_x, test_y) if i % (hp.max_steps // 10) == 0: print('Step', i, 'train loss:', step_train_loss, 'test loss:', step_test_loss, 'train accuracy:', step_train_accuracy, 'test accuracy:', step_test_accuracy) train_losses.append(step_train_loss) test_losses.append(step_test_loss) train_accuracies.append(step_train_accuracy) test_accuracies.append(step_test_accuracy) i += 1 # We've recorded our loss values and accuracies # to a list in a graph with AutoGraph's help. 
# In order to return the values as a Tensor, # we need to stack them before returning them. return (autograph.stack(train_losses), autograph.stack(test_losses), autograph.stack(train_accuracies), autograph.stack(test_accuracies)) ``` Now build the graph and run the training loop: ``` with tf.Graph().as_default() as g: hp = tf.contrib.training.HParams( learning_rate=0.005, max_steps=500, ) train_ds = setup_mnist_data(True, 50) test_ds = setup_mnist_data(False, 1000) (train_losses, test_losses, train_accuracies, test_accuracies) = train(train_ds, test_ds, hp) init = tf.global_variables_initializer() with tf.Session(graph=g) as sess: sess.run(init) (train_losses, test_losses, train_accuracies, test_accuracies) = sess.run([train_losses, test_losses, train_accuracies, test_accuracies]) plt.title('MNIST train/test losses') plt.plot(train_losses, label='train loss') plt.plot(test_losses, label='test loss') plt.legend() plt.xlabel('Training step') plt.ylabel('Loss') plt.show() plt.title('MNIST train/test accuracies') plt.plot(train_accuracies, label='train accuracy') plt.plot(test_accuracies, label='test accuracy') plt.legend(loc='lower right') plt.xlabel('Training step') plt.ylabel('Accuracy') plt.show() ```
github_jupyter
# Exponential Model This model is not working! It attempts to fit an exponential curve to the data in order to predict the number of new cases. The example used here just takes in one feature and needs to be able to take an `*args` value and unpack it to be able to expand the function to take more arguments. Might be easier/more efficient to just transform the output, make the model and untransform it back. Yeo-johnson or power transform??? just trying to predict `x` for `tomorrow_cases = today_cases**x` instead of predicting `tomorrow cases directly` ``` import pickle import os import urllib.request from sklearn.linear_model import Lasso from sklearn.model_selection import train_test_split import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns ``` ## Validation Data? ``` path_to_ips_file="validation/data/2020-09-30_historical_ip.csv" input_file = pd.read_csv(path_to_ips_file, low_memory=False) # input_file[input_file['CountryName'] == 'United States'] input_file[input_file['RegionName'] == 'California'] ``` ## Importing the Training Data ``` # Main source for the training data DATA_URL = 'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv' # Local files data_path = 'examples/predictors/ryan_predictor/data' DATA_FILE = data_path + '/OxCGRT_latest.csv' if not os.path.exists(data_path): os.mkdir(data_path) urllib.request.urlretrieve(DATA_URL, DATA_FILE) df = pd.read_csv(DATA_FILE, parse_dates=['Date'], encoding="ISO-8859-1", dtype={"RegionName": str, "RegionCode": str}, error_bad_lines=False) # df[cases_df['RegionName'] == 'California'] df.columns HYPOTHETICAL_SUBMISSION_DATE = np.datetime64("2020-07-31") df = df[df.Date <= HYPOTHETICAL_SUBMISSION_DATE] # Add RegionID column that combines CountryName and RegionName for easier manipulation of data df['GeoID'] = df['CountryName'] + '__' + df['RegionName'].astype(str) # Add new cases column df['NewCases'] = 
df.groupby('GeoID').ConfirmedCases.diff().fillna(0) # import sys # NewCases = [] # for val in df['NewCases']: # if val != 0: # NewCases.append(val) # else: # NewCases.append(sys.float_info.epsilon) # sys.float_info.epsilon # df['NewCasesPercent'] = df.groupby('GeoID').NewCases.diff().fillna(0)/NewCases df['NewCasesPercent'] = df.groupby('GeoID').NewCases.diff().fillna(0)/df['NewCases'] # NewCasesList = df['NewCasesPercent'].tolist() df = df.replace([np.inf, -np.inf, np.nan], 0) NewCasesList = df['NewCasesPercent'].tolist() NewCasesList # Keep only columns of interest id_cols = ['CountryName', 'RegionName', 'GeoID', 'Date'] #cases_col = ['NewCases', 'NewCasesPercent', 'ConfirmedCases'] cases_col = ['NewCasesPercent'] npi_cols = ['C1_School closing', 'C2_Workplace closing', 'C3_Cancel public events', 'C4_Restrictions on gatherings', 'C5_Close public transport', 'C6_Stay at home requirements', 'C7_Restrictions on internal movement', 'C8_International travel controls', 'H1_Public information campaigns', 'H2_Testing policy', 'H3_Contact tracing', 'H6_Facial Coverings'] df = df[id_cols + cases_col + npi_cols] # Fill any missing case values by interpolation and setting NaNs to 0 df.update(df.groupby('GeoID').NewCasesPercent.apply( lambda group: group.interpolate()).fillna(0)) # Fill any missing NPIs by assuming they are the same as previous day for npi_col in npi_cols: df.update(df.groupby('GeoID')[npi_col].ffill().fillna(0)) df ``` ## Making the Model ``` # Set number of past days to use to make predictions nb_lookback_days = 30 # Create training data across all countries for predicting one day ahead X_cols = cases_col + npi_cols y_col = cases_col X_samples = [] y_samples = [] geo_ids = df.GeoID.unique() for g in geo_ids: gdf = df[df.GeoID == g] all_case_data = np.array(gdf[cases_col]) all_npi_data = np.array(gdf[npi_cols]) # Create one sample for each day where we have enough data # Each sample consists of cases and npis for previous nb_lookback_days nb_total_days = 
# Helpful function to compute mae
def mae(pred, true):
    """Return the mean absolute error between *pred* and *true*."""
    absolute_error = np.abs(pred - true)
    return np.mean(absolute_error)
def func_exp(x, a, b, c):
    """Exponential model ``a * exp(b * x) + c`` fitted by curve_fit."""
    growth = np.exp(b * x)
    return a * growth + c
# Give names to the features x_col_names = [] for d in range(-nb_lookback_days, 0): x_col_names.append('Day ' + str(d) + ' ' + cases_col[0]) for d in range(-nb_lookback_days, 1): for col_name in npi_cols: x_col_names.append('Day ' + str(d) + ' ' + col_name) # View non-zero coefficients for (col, coeff) in zip(x_col_names, list(model.coef_)): if coeff != 0.: print(col, coeff) print('Intercept', model.intercept_) # Save model to file model_path = 'examples/predictors/ryan_predictor/model' if not os.path.exists(model_path): os.mkdir(model_path) with open(model_path + '/model.pkl', 'wb') as model_file: pickle.dump(model, model_file) ``` ## Evaluating the Model ``` # Reload the module to get the latest changes from examples.predictors.linear import predict from importlib import reload reload(predict) from examples.predictors.linear.predict import predict_df %%time path_to_ips_file="validation/data/2020-09-30_historical_ip.csv" preds_df = predict_df("2020-08-01", "2020-08-31", path_to_ips_file, verbose=True) # Check the predictions preds_df.head() ``` ## Validation This is how the predictor is going to be called during the competition. !!! PLEASE DO NOT CHANGE THE API !!! ``` !python examples/predictors/linear/ryan_predict.py -s 2020-08-01 -e 2020-08-04 -ip validation/data/2020-09-30_historical_ip.csv -o examples/predictors/ryan_predictor/predictions/2020-08-01_2020-08-04.csv !head predictions/2020-08-01_2020-08-04.csv ``` ## Test Cases We can generate a prediction file. Let's validate a few cases... 
``` import sys from validation.predictor_validation import validate_submission def validate(start_date, end_date, ip_file, output_file): # First, delete any potential old file try: os.remove(output_file) except OSError: pass # Then generate the prediction, calling the official API !python examples/predictors/linear/predict.py -s {start_date} -e {end_date} -ip {ip_file} -o {output_file} # And validate it errors = validate_submission(start_date, end_date, ip_file, output_file) if errors: for error in errors: print(error) else: print("All good!") ``` ### 4 days, no gap - All countries and regions - Official number of cases is known up to start_date - Intervention Plans are the official ones ``` validate(start_date="2020-08-01", end_date="2020-08-04", ip_file="validation/data/2020-09-30_historical_ip.csv", output_file="examples/predictors/ryan_predictor/predictions/val_4_days.csv") ``` ### 1 month in the future - 2 countries only - there's a gap between date of last known number of cases and start_date - For future dates, Intervention Plans contains scenarios for which predictions are requested to answer the question: what will happen if we apply these plans? ``` %%time validate(start_date="2021-01-01", end_date="2021-01-31", ip_file="validation/data/future_ip.csv", output_file="examples/predictors/linear/predictions/val_1_month_future.csv") ``` ### 180 days, from a future date, all countries and regions - Prediction start date is 1 week from now. (i.e. assuming submission date is 1 week from now) - Prediction end date is 6 months after start date. - Prediction is requested for all available countries and regions. - Intervention plan scenario: freeze last known intervention plans for each country and region. As the number of cases is not known yet between today and start date, but the model relies on them, the model has to predict them in order to use them. This test is the most demanding test. It should take less than 1 hour to generate the prediction file. 
```
from datetime import datetime, timedelta

# Prediction window: starts one week from "now" and runs for 180 days.
start_date = datetime.now() + timedelta(days=7)
start_date_str = start_date.strftime('%Y-%m-%d')
end_date = start_date + timedelta(days=180)
end_date_str = end_date.strftime('%Y-%m-%d')
print(f"Start date: {start_date_str}")
print(f"End date: {end_date_str}")

from validation.scenario_generator import get_raw_data, generate_scenario, NPI_COLUMNS
DATA_FILE = 'examples/predictors/linear/data/OxCGRT_latest.csv'
# NOTE(review): get_raw_data/generate_scenario are defined in validation/ —
# presumably latest=True keeps only the most recent data per region; confirm.
latest_df = get_raw_data(DATA_FILE, latest=True)
# "Freeze" scenario: hold each region's last known intervention plan constant
# over the whole window; countries=None requests all countries and regions.
scenario_df = generate_scenario(start_date_str, end_date_str, latest_df, countries=None, scenario="Freeze")
scenario_file = "examples/predictors/linear/predictions/180_days_future_scenario.csv"
scenario_df.to_csv(scenario_file, index=False)
print(f"Saved scenario to {scenario_file}")
```

### Check it

```
%%time
validate(start_date=start_date_str,
         end_date=end_date_str,
         ip_file=scenario_file,
         output_file="examples/predictors/linear/predictions/val_6_month_future.csv")
```
github_jupyter
在本练习中,您将实现正则化的线性回归,并使用它来研究具有不同偏差-方差属性的模型

## 1 Regularized Linear Regression 正则线性回归

在前半部分的练习中,你将实现正则化线性回归,以预测水库中的水位变化,从而预测大坝流出的水量。在下半部分中,您将通过一些调试学习算法的诊断,并检查偏差 v.s. 方差的影响。

### 1.1 Visualizing the dataset

我们将从可视化数据集开始,其中包含水位变化的历史记录,x,以及从大坝流出的水量,y。

这个数据集分为了三个部分:

- training set 训练集:训练模型
- cross validation set 交叉验证集:选择正则化参数
- test set 测试集:评估性能,模型训练中不曾用过的样本

```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
import scipy.optimize as opt
```

读取数据

```
path = 'ex5data1.mat'
data = loadmat(path)
# Training set
X, y = data['X'], data['y']
# Cross validation set
Xval, yval = data['Xval'], data['yval']
# Test set
Xtest, ytest = data['Xtest'], data['ytest']

# Insert a column of 1's to all of the X's, as usual (bias/intercept column 0)
X = np.insert(X, 0, 1, axis=1)
Xval = np.insert(Xval, 0, 1, axis=1)
Xtest = np.insert(Xtest, 0, 1, axis=1)
print('X={},y={}'.format(X.shape, y.shape))
print('Xval={},yval={}'.format(Xval.shape, yval.shape))
print('Xtest={},ytest={}'.format(Xtest.shape, ytest.shape))

def plotData():
    """Scatter-plot the training data: water-level change vs. dam outflow."""
    plt.figure(figsize=(8,5))
    plt.scatter(X[:,1:], y, c='r', marker='x')
    plt.xlabel('Change in water level (x)')
    plt.ylabel('Water flowing out of the dam (y)')
    plt.grid(True)

plotData()
```

### 1.2 Regularized linear regression cost function

![image.png](../img/5_1.png)

```
def costReg(theta, X, y, l):
    '''Regularized linear-regression cost; theta[0] (bias) is NOT regularized.

    theta is a 1-d array with shape (n+1,)
    X is a matrix with shape (m, n+1)
    y is a matrix with shape (m, 1)
    l is the regularization constant lambda
    '''
    # Sum of squared residuals; y is (m, 1) so flatten it to match X @ theta.
    cost = ((X @ theta - y.flatten()) ** 2).sum()
    # theta[1:] @ theta[1:] is the sum of squares of the non-bias parameters.
    regterm = theta[1:] @ theta[1:]
    return (cost + l * regterm) / (2 * len(X))
```

Using theta initialized at [1, 1], and lambda = 1, you should expect to see an output of 303.993192

```
theta = np.ones(X.shape[1])
print(costReg(theta, X, y, 1))
```

### 1.3 Regularized linear regression gradient

![image.png](../img/5_2.png)

```
def gradientReg(theta, X, y, l):
    """Gradient of the regularized cost with respect to theta.

    theta: 1-d array with shape (2,)
    X: 2-d array with shape (12, 2)
    y: 2-d array with shape (12, 1)
    l: lambda constant
    grad has same shape as theta (2,)
    """
    grad = (X @ theta - y.flatten()) @ X
    regterm = l * theta
    regterm[0] = 0  # don't regularize the bias term
    return (grad + regterm) / len(X)

# Using theta initialized at [1; 1] you should expect to see a
# gradient of [-15.303016; 598.250744] (with lambda=1)
print(gradientReg(theta, X, y, 1))
```

### 1.4 Fitting linear regression 拟合线性回归

```
def trainLinearReg(X, y, l):
    """Minimize the regularized cost with TNC (using the analytic gradient)
    and return the optimal theta."""
    theta = np.zeros(X.shape[1])
    res = opt.minimize(fun=costReg, x0=theta, args=(X, y ,l), method='TNC', jac=gradientReg)
    return res.x

fit_theta = trainLinearReg(X, y, 0)
plotData()
plt.plot(X[:,1], X @ fit_theta)
```

这里我们把$\lambda$ = 0,因为我们现在实现的线性回归只有两个参数,这么低的维度,正则化并没有用。

从图中可以看到,拟合最好的这条直线告诉我们这个模型并不适合这个数据。

在下一节中,您将实现一个函数来生成学习曲线,它可以帮助您调试学习算法,即使可视化数据不那么容易。

## 2 Bias-variance

机器学习中一个重要的概念是偏差(bias)和方差(variance)的权衡。高偏差意味着欠拟合,高方差意味着过拟合。

在这部分练习中,您将在学习曲线上绘制训练误差和验证误差,以诊断bias-variance问题。

### 2.1 Learning curves 学习曲线

![image.png](../img/5_3.png)

训练样本X从1开始逐渐增加,训练出不同的参数向量θ。接着通过交叉验证样本Xval计算验证误差。

1. 使用训练集的子集来训练模型,得到不同的theta。
2. 通过theta计算训练代价和交叉验证代价,切记此时**不要使用正则化**,将 $\lambda = 0$。
3. 
计算交叉验证代价时记得整个交叉验证集来计算,无需分为子集。 ``` def plot_learning_curve(X, y, Xval, yval, l): """画出学习曲线,即交叉验证误差和训练误差随样本数量的变化的变化""" xx = range(1, len(X) + 1) # at least has one example training_cost, cv_cost = [], [] for i in xx: res = trainLinearReg(X[:i], y[:i], l) training_cost_i = costReg(res, X[:i], y[:i], 0) cv_cost_i = costReg(res, Xval, yval, 0) training_cost.append(training_cost_i) cv_cost.append(cv_cost_i) plt.figure(figsize=(8,5)) plt.plot(xx, training_cost, label='training cost') plt.plot(xx, cv_cost, label='cv cost') plt.legend() plt.xlabel('Number of training examples') plt.ylabel('Error') plt.title('Learning curve for linear regression') plt.grid(True) plot_learning_curve(X, y, Xval, yval, 0) ``` 从图中看出来,随着样本数量的增加,训练误差和交叉验证误差都很高,这属于高偏差,欠拟合。 ## 3 Polynomial regression 多项式回归 我们的线性模型对于数据来说太简单了,导致了欠拟合(高偏差)。在这一部分的练习中,您将通过添加更多的特性来解决这个问题。 使用多项式回归,假设函数形式如下: ![image.png](../img/5_4.png) ### 3.1 Learning Polynomial Regression 数据预处理 1. X,Xval,Xtest都需要添加多项式特征,这里我们选择增加到6次方,因为若选8次方无法达到作业pdf上的效果图,这是因为scipy和octave版本的优化算法不同。 2. 
不要忘了标准化。 ``` def genPolyFeatures(X, power): """添加多项式特征 每次在array的最后一列插入第二列的i+2次方(第一列为偏置) 从二次方开始开始插入(因为本身含有一列一次方) """ Xpoly = X.copy() for i in range(2, power + 1): Xpoly = np.insert(Xpoly, Xpoly.shape[1], np.power(Xpoly[:,1], i), axis=1) return Xpoly def get_means_std(X): """获取训练集的均值和误差,用来标准化所有数据。""" means = np.mean(X,axis=0) stds = np.std(X,axis=0,ddof=1) # ddof=1 means 样本标准差 return means, stds def featureNormalize(myX, means, stds): """标准化""" X_norm = myX.copy() X_norm[:,1:] = X_norm[:,1:] - means[1:] X_norm[:,1:] = X_norm[:,1:] / stds[1:] return X_norm ``` 关于归一化,所有数据集应该都用**训练集的均值和样本标准差**处理。切记。所以要将训练集的均值和样本标准差存储起来,对后面的数据进行处理。 而且注意这里是**样本标准差而不是总体标准差**,使用np.std()时,将ddof=1则是样本标准差,默认=0是总体标准差。而pandas默认计算样本标准差。 获取添加多项式特征以及 标准化之后的数据。 ``` power = 6 # 扩展到x的6次方 train_means, train_stds = get_means_std(genPolyFeatures(X,power)) X_norm = featureNormalize(genPolyFeatures(X,power), train_means, train_stds) Xval_norm = featureNormalize(genPolyFeatures(Xval,power), train_means, train_stds) Xtest_norm = featureNormalize(genPolyFeatures(Xtest,power), train_means, train_stds) def plot_fit(means, stds, l): theta = trainLinearReg(X_norm,y, l) x = np.linspace(-75,55,50) xmat = x.reshape(-1, 1) # Reshape your data using array.reshape(-1, 1) if your data has a single feature xmat = np.insert(xmat,0,1,axis=1) Xmat = genPolyFeatures(xmat, power) Xmat_norm = featureNormalize(Xmat, means, stds) plotData() plt.plot(x, Xmat_norm@theta,'b--') plot_fit(train_means, train_stds, 0) plot_learning_curve(X_norm, y, Xval_norm, yval, 0) ``` ### 3.2 Adjusting the regularization parameter 上图可以看到 $\lambda$ = 0时,训练误差太小了,明显过拟合了。 我们继续调整$\lambda$ = 1 时: ``` plot_fit(train_means, train_stds, 1) plot_learning_curve(X_norm, y, Xval_norm, yval, 1) ``` 我们继续调整$\lambda$ = 100 时,很明显惩罚过多,欠拟合了 ``` plot_fit(train_means, train_stds, 100) plot_learning_curve(X_norm, y, Xval_norm, yval, 100) ``` ### 3.3 Selecting λ using a cross validation set ``` lambdas = [0., 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1., 3., 10.] 
# lambdas = np.linspace(0,5,20) errors_train, errors_val = [], [] for l in lambdas: theta = trainLinearReg(X_norm, y, l) errors_train.append(costReg(theta,X_norm,y,0)) # 记得把lambda = 0 errors_val.append(costReg(theta,Xval_norm,yval,0)) plt.figure(figsize=(8,5)) plt.plot(lambdas,errors_train,label='Train') plt.plot(lambdas,errors_val,label='Cross Validation') plt.legend() plt.xlabel('lambda') plt.ylabel('Error') plt.grid(True) # 可以看到时交叉验证代价最小的是 lambda = 3 lambdas[np.argmin(errors_val)] ``` ### 3.4 Computing test set error In our cross validation, we obtained a test error of 3.8599 for λ = 3. 实际上我在上面调整了power=6来匹配作业里面的图,所以得不到3.8599。但是调整power=8时(同作业里一样),就可以得到上述数据。 ``` theta = trainLinearReg(X_norm, y, 3) print('test cost(l={}) = {}'.format(3, costReg(theta, Xtest_norm, ytest, 0))) # for l in lambdas: # theta = trainLinearReg(X_norm, y, l) # print('test cost(l={}) = {}'.format(l, costReg(theta, Xtest_norm, ytest, 0))) ```
github_jupyter