text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
# 02_hsn_v1_lean-voc2012 ``` import time import skimage.io as imgio import pandas as pd import numpy.matlib from adp_cues import ADPCues from utilities import * from dataset import Dataset MODEL_CNN_ROOT = '../database/models_cnn' MODEL_WSSS_ROOT = '../database/models_wsss' dataset = 'VOC2012' model_type = 'VGG16' batch_size = 16 sess_id = dataset + '_' + model_type if model_type in ['VGG16', 'VGG16bg']: size = 321 else: size = 224 should_saveimg = False is_verbose = True if model_type in ['VGG16', 'VGG16bg']: img_size = 321 else: img_size = 224 sess_id = dataset + '_' + model_type model_dir = os.path.join(MODEL_CNN_ROOT, sess_id) if is_verbose: print('Predict: dataset=' + dataset + ', model=' + model_type) database_dir = os.path.join(os.path.dirname(os.getcwd()), 'database') if dataset == 'VOC2012': devkit_dir = os.path.join(database_dir, 'VOCdevkit', 'VOC2012') fgbg_modes = ['fg', 'bg'] OVERLAY_R = 0.75 elif 'DeepGlobe' in dataset: devkit_dir = os.path.join(database_dir, 'DGdevkit') fgbg_modes = ['fg'] OVERLAY_R = 0.25 img_dir = os.path.join(devkit_dir, 'JPEGImages') gt_dir = os.path.join(devkit_dir, 'SegmentationClassAug') out_dir = os.path.join('./out', sess_id) if not os.path.exists(out_dir): os.makedirs(out_dir) eval_dir = os.path.join('./eval', sess_id) if not os.path.exists(eval_dir): os.makedirs(eval_dir) ``` ## Load network and data ``` # Load network and thresholds mdl = {} thresholds = {} alpha = {} final_layer = {} for fgbg_mode in fgbg_modes: mdl[fgbg_mode] = build_model(model_dir, sess_id) thresholds[fgbg_mode] = load_thresholds(model_dir, sess_id) thresholds[fgbg_mode] = np.maximum(np.minimum(thresholds[fgbg_mode], 0), 1 / 3) alpha[fgbg_mode], final_layer[fgbg_mode] = get_grad_cam_weights(mdl[fgbg_mode], np.zeros((1, img_size, img_size, 3))) # Load data and classes ds = Dataset(data_type=dataset, size=img_size, batch_size=batch_size) class_names, seg_class_names = load_classes(dataset) colours = get_colours(dataset) if 'DeepGlobe' in dataset: 
colours = colours[:-1] gen_curr = ds.set_gens[ds.sets[ds.is_evals.index(True)]] ``` ## Generate segmentations for single batch ``` # Process images in batches intersects = np.zeros((len(colours))) unions = np.zeros((len(colours))) confusion_matrix = np.zeros((len(colours), len(colours))) gt_count = np.zeros((len(colours))) n_batches = 1 for iter_batch in range(n_batches): batch_start_time = time.time() if is_verbose: print('\tBatch #%d of %d' % (iter_batch + 1, n_batches)) start_idx = iter_batch * batch_size end_idx = min(start_idx + batch_size - 1, len(gen_curr.filenames) - 1) cur_batch_sz = end_idx - start_idx + 1 # Image reading start_time = time.time() img_batch_norm, img_batch = read_batch(gen_curr.directory, gen_curr.filenames[start_idx:end_idx + 1], cur_batch_sz, (img_size, img_size), dataset) if is_verbose: print('\t\tImage read time: %0.5f seconds (%0.5f seconds / image)' % (time.time() - start_time, (time.time() - start_time) / cur_batch_sz)) # Generate patch confidence scores start_time = time.time() predicted_scores = {} is_pass_threshold = {} for fgbg_mode in fgbg_modes: predicted_scores[fgbg_mode] = mdl[fgbg_mode].predict(img_batch_norm) is_pass_threshold[fgbg_mode] = np.greater_equal(predicted_scores[fgbg_mode], thresholds[fgbg_mode]) if is_verbose: print('\t\tGenerating patch confidence scores time: %0.5f seconds (%0.5f seconds / image)' % (time.time() - start_time, (time.time() - start_time) / cur_batch_sz)) # Generate Grad-CAM start_time = time.time() H = {} for fgbg_mode in fgbg_modes: H[fgbg_mode] = grad_cam(mdl[fgbg_mode], alpha[fgbg_mode], img_batch_norm, is_pass_threshold[fgbg_mode], final_layer[fgbg_mode], predicted_scores[fgbg_mode], orig_sz=[img_size, img_size], should_upsample=True) H[fgbg_mode] = np.transpose(H[fgbg_mode], (0, 3, 1, 2)) if is_verbose: print('\t\tGenerating Grad-CAM time: %0.5f seconds (%0.5f seconds / image)' % (time.time() - start_time, (time.time() - start_time) / cur_batch_sz)) # Modify fg Grad-CAM with bg activation 
start_time = time.time() if dataset == 'VOC2012': Y_gradcam = np.zeros((cur_batch_sz, len(seg_class_names), img_size, img_size)) mode = 'mult' if mode == 'mult': X_bg = np.sum(H['bg'], axis=1) Y_gradcam[:, 0] = 0.15 * scipy.special.expit(np.max(X_bg) - X_bg) Y_gradcam[:, 1:] = H['fg'] elif 'DeepGlobe' in dataset: Y_gradcam = H['fg'][:, :-1, :, :] if is_verbose: print('\t\tFg/Bg modifications time: %0.5f seconds (%0.5f seconds / image)' % (time.time() - start_time, (time.time() - start_time) / cur_batch_sz)) # FC-CRF start_time = time.time() if dataset == 'VOC2012': dcrf_config = np.array([3 / 4, 3, 80 / 4, 13, 10, 10]) # test (since 2448 / 500 = 4.896 ~= 4) elif 'DeepGlobe' in dataset: dcrf_config = np.array([3, 3, 80, 13, 10, 10]) # test Y_crf = dcrf_process(Y_gradcam, img_batch, dcrf_config) if is_verbose: print('\t\tCRF time: %0.5f seconds (%0.5f seconds / image)' % (time.time() - start_time, (time.time() - start_time) / cur_batch_sz)) elapsed_time = time.time() - batch_start_time if is_verbose: print('\t\tElapsed time: %0.5f seconds (%0.5f seconds / image)' % (elapsed_time, elapsed_time / cur_batch_sz)) if dataset == 'VOC2012': for iter_file, filename in enumerate(gen_curr.filenames[start_idx:end_idx + 1]): # Load GT segmentation gt_filepath = os.path.join(gt_dir, filename.replace('.jpg', '.png')) gt_idx = cv2.cvtColor(cv2.imread(gt_filepath), cv2.COLOR_BGR2RGB)[:, :, 0] # Load predicted segmentation pred_idx = cv2.resize(np.uint8(Y_crf[iter_file]), (gt_idx.shape[1], gt_idx.shape[0]), interpolation=cv2.INTER_NEAREST) pred_segmask = np.zeros((gt_idx.shape[0], gt_idx.shape[1], 3)) # Evaluate predicted segmentation for k in range(len(colours)): intersects[k] += np.sum((gt_idx == k) & (pred_idx == k)) unions[k] += np.sum((gt_idx == k) | (pred_idx == k)) confusion_matrix[k, :] += np.bincount(pred_idx[gt_idx == k], minlength=len(colours)) pred_segmask += np.expand_dims(pred_idx == k, axis=2) * \ np.expand_dims(np.expand_dims(colours[k], axis=0), axis=0) gt_count[k] 
+= np.sum(gt_idx == k) # Save outputted segmentation to file if should_saveimg: orig_filepath = os.path.join(img_dir, filename) orig_img = cv2.cvtColor(cv2.imread(orig_filepath), cv2.COLOR_BGR2RGB) imgio.imsave(os.path.join(out_dir, filename.replace('.jpg', '') + '.png'), pred_segmask / 256.0) imgio.imsave(os.path.join(out_dir, filename.replace('.jpg', '') + '_overlay.png'), (1 - OVERLAY_R) * orig_img / 256.0 + OVERLAY_R * pred_segmask / 256.0) elif 'DeepGlobe' in dataset: for iter_file, filename in enumerate(gen_curr.filenames[start_idx:end_idx + 1]): # Load GT segmentation gt_filepath = os.path.join(gt_dir, filename.replace('.jpg', '.png')) gt_curr = cv2.cvtColor(cv2.imread(gt_filepath), cv2.COLOR_BGR2RGB) gt_r = gt_curr[:, :, 0] gt_g = gt_curr[:, :, 1] gt_b = gt_curr[:, :, 2] # Load predicted segmentation pred_idx = cv2.resize(np.uint8(Y_crf[iter_file]), (gt_curr.shape[1], gt_curr.shape[0]), interpolation=cv2.INTER_NEAREST) pred_segmask = np.zeros((gt_curr.shape[0], gt_curr.shape[1], 3)) # Evaluate predicted segmentation for k, gt_colour in enumerate(colours): gt_mask = (gt_r == gt_colour[0]) & (gt_g == gt_colour[1]) & (gt_b == gt_colour[2]) pred_mask = pred_idx == k intersects[k] += np.sum(gt_mask & pred_mask) unions[k] += np.sum(gt_mask | pred_mask) confusion_matrix[k, :] += np.bincount(pred_idx[gt_mask], minlength=len(colours)) pred_segmask += np.expand_dims(pred_mask, axis=2) * \ np.expand_dims(np.expand_dims(colours[k], axis=0), axis=0) gt_count[k] += np.sum(gt_mask) # Save outputted segmentation to file if should_saveimg: orig_filepath = os.path.join(img_dir, filename) orig_img = cv2.cvtColor(cv2.imread(orig_filepath), cv2.COLOR_BGR2RGB) orig_img = cv2.resize(orig_img, (orig_img.shape[0] // 4, orig_img.shape[1] // 4)) pred_segmask = cv2.resize(pred_segmask, (pred_segmask.shape[0] // 4, pred_segmask.shape[1] // 4), interpolation=cv2.INTER_NEAREST) imgio.imsave(os.path.join(out_dir, filename.replace('.jpg', '') + '.png'), pred_segmask / 256.0) 
imgio.imsave(os.path.join(out_dir, filename.replace('.jpg', '') + '_overlay.png'), (1 - OVERLAY_R) * orig_img / 256.0 + OVERLAY_R * pred_segmask / 256.0) ``` ## Show sample segmentations ``` img_filepath = os.path.join(gen_curr.directory, gen_curr.filenames[0]) I = cv2.cvtColor(cv2.imread(img_filepath), cv2.COLOR_BGR2RGB) gt_filepath = os.path.join(gt_dir, gen_curr.filenames[0].replace('.jpg', '.png')) gt_idx = np.expand_dims(cv2.cvtColor(cv2.imread(gt_filepath), cv2.COLOR_BGR2RGB)[:, :, 0], axis=0) G = maxconf_class_as_colour(gt_idx, colours, gt_idx.shape[1:3]) plt.figure plt.subplot(121) plt.imshow(I.astype('uint8')) plt.title('Original image') plt.subplot(122) plt.imshow(G[0].astype('uint8')) plt.title('Ground truth\n segmentation') # Load predicted segmentation pred_idx = cv2.resize(np.uint8(Y_crf[0]), (gt_idx.shape[2], gt_idx.shape[1]), interpolation=cv2.INTER_NEAREST) Y = np.zeros((gt_idx.shape[1], gt_idx.shape[2], 3)) # Evaluate predicted segmentation for k in range(len(colours)): Y += np.expand_dims(pred_idx == k, axis=2) * np.expand_dims(np.expand_dims(colours[k], axis=0), axis=0) # Obtain overlay Y_overlay = (1 - OVERLAY_R) * I.astype('uint8') + OVERLAY_R * Y plt.figure plt.subplot(121) plt.imshow(Y.astype('uint8')) plt.title('Predicted\n Segmentation') plt.subplot(122) plt.imshow(Y_overlay.astype('uint8')) plt.title('Overlaid\n Segmentation') ```
github_jupyter
``` #importing nevesary libraries import pandas as pd import matplotlib.pyplot as plt import seaborn as sns print("Done") df_cust = pd.read_csv("Customer List.csv") df_cust.head() df_cust.tail() df_cust.info() df_cust.isnull().sum() from sklearn.preprocessing import LabelEncoder lb = LabelEncoder() df_cust['Gender'] = lb.fit_transform(df_cust['Gender']) df_cust X = df_cust.drop(columns='Purchased') y = df_cust['Purchased'] from sklearn.model_selection import train_test_split ``` # Feature Scaling ``` from sklearn.preprocessing import MinMaxScaler min_max = MinMaxScaler() X = min_max.fit_transform(X) print(X) X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.25, random_state =42) X_train #using knn from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier() knn.fit(X_train, y_train) knn_pred = knn.predict(X_test) knn_pred from sklearn.metrics import accuracy_score acc_knn = accuracy_score(y_test, knn_pred) acc_knn knn_prob = knn.predict_proba(X_test) knn_prob = pd.DataFrame(knn_prob, columns=['yes', 'no']) knn_prob sns.pairplot(knn_prob) plt.show() ``` # GridSearch for hyperparameter tuning ``` from sklearn.model_selection import GridSearchCV knn_param = [{'n_neighbors': [3,5,7,9], 'weights': ['uniform', 'distance'], 'algorithm': ['auto', 'brute', 'kd_tree'], 'leaf_size': [5,15,20]}] gs_knn = GridSearchCV(knn, param_grid=knn_param, scoring= 'recall', cv=5) gs_knn.fit(X_train, y_train) # gs_knn.best_params_ gs_knn.score(X_test, y_test) from sklearn.svm import SVC svm_ = SVC(kernel='linear') svm_.fit(X_train, y_train) svm_pred = svm_.predict(X_test) svm_pred svm_acc = accuracy_score(y_test, svm_pred) svm_acc from imblearn.over_sampling import SMOTE sm = SMOTE(random_state=42) X_res, y_res = sm.fit_resample(X,y) X_res = min_max.fit_transform(X_res) X_train1, X_test1, y_train1, y_test1 = train_test_split(X_res,y_res, test_size = 0.25, random_state =42) y_res.value_counts() knn.fit(X_train1, y_train1) knn_pred2 = 
knn.predict(X_test1) knn_pred2 knn_acc2 = accuracy_score(y_test1, knn_pred2) knn_acc2 svm_.fit(X_train1, y_train1) svm_pred2 = svm_.predict(X_test1) svm_pred2 svm_acc2 = accuracy_score(y_test1, svm_pred2) svm_acc2 from sklearn.metrics import f1_score, confusion_matrix, recall_score, precision_score, precision_recall_fscore_support #modelling using XGB classifier with thread import xgboost as xg xg_ = xg.XGBClassifier(nthreads = -1) xg_.fit(X_train, y_train) pred_xgb = xg_.predict(X_test) pred_xgb # Check the accuracy of the model on train and test dataset. score_xgb = accuracy_score(y_test, pred_xgb) score_xgb #prec = precision_score(y_test, knn_pred2) #prec xg_.fit(X_train1, y_train1) pred_xgb2 = xg_.predict(X_test1) pred_xgb2 # Check the accuracy of the model on train and test dataset. score_xgb2 = accuracy_score(y_test1, pred_xgb2) score_xgb2 cm_bal= confusion_matrix(y_test1, knn_pred2) sns.heatmap(cm_bal) plt.show() from sklearn.metrics import classification_report cr_bal = classification_report(y_test1, knn_pred2) print(cr_bal) cr_ubal = classification_report(y_test, knn_pred) print(cr_ubal) plt.figure(figsize=(5,4)) sns.countplot(svm_pred2) plt.show() from sklearn.metrics import roc_auc_score roc_acc = roc_auc_score(y_test1, knn_pred2) roc_acc from sklearn.metrics import roc_curve fpr, tpr, thresholds = roc_curve(y_test1, knn_pred2) fpr thresholds tpr def plot_roc_curve(fpr, tpr, label=None): plt.plot(fpr, tpr, linewidth=2, label=label) plt.plot([0, 1], [0, 1], 'k--') # dashed diagonal [...] # Add axis labels and grid plot_roc_curve(fpr, tpr) plt.show() ```
github_jupyter
# Jupyter lab on Sunbird using port forwarding An excellent manual for Classic Jupyter: https://github.com/McWilliamsCenter/slurm_jupyter I've tailored this manual for Swansea Sunbird for both Jupyter and Jupyter lab. What is Swansea Sunbird ? https://portal.supercomputing.wales/index.php/about-sunbird/ # Installing Jupyter lab * Create a conda environment or load an existing one as `source activate ml` * By default a package named `six` is missing. ```sh (pytorch) [s.1915438@sl2 ~]$ jupyter notebook --generate-config Traceback (most recent call last): File "/apps/languages/anaconda3/bin/jupyter-notebook", line 7, in <module> from notebook.notebookapp import main File "/apps/languages/anaconda3/lib/python3.6/site-packages/notebook/__init__.py", line 25, in <module> from .nbextensions import install_nbextension File "/apps/languages/anaconda3/lib/python3.6/site-packages/notebook/nbextensions.py", line 31, in <module> from .config_manager import BaseJSONConfigManager File "/apps/languages/anaconda3/lib/python3.6/site-packages/notebook/config_manager.py", line 14, in <module> from six import PY3 ModuleNotFoundError: No module named 'six' ``` So, it is better to install new Jupyter lab or Jupyter as follows: * Jupyter lab: `pip install jupyterlab` * Jupyter: `pip install notebook` In the root i.e. `/home/s.1915438` run this command to generate the config file and store password for secure port forwarding. Apperently this command does not work. 
```sh (ml) [s.1915438@sl2 ~]$ pwd /home/s.1915438 (ml) [s.1915438@sl2 ~]$ jupyter-lab --generate-config (ml) [s.1915438@sl2 ~]$ jupyter-lab password Enter password: Verify password: Traceback (most recent call last): File "/home/s.1915438/.conda/envs/ml/bin/jupyter-lab", line 8, in <module> sys.exit(main()) File "/home/s.1915438/.conda/envs/ml/lib/python3.9/site-packages/jupyter_server/extension/application.py", line 602, in launch_instance serverapp.start() File "/home/s.1915438/.conda/envs/ml/lib/python3.9/site-packages/jupyter_server/serverapp.py", line 2760, in start self.start_app() File "/home/s.1915438/.conda/envs/ml/lib/python3.9/site-packages/jupyter_server/serverapp.py", line 2658, in start_app super(ServerApp, self).start() File "/home/s.1915438/.conda/envs/ml/lib/python3.9/site-packages/jupyter_core/application.py", line 253, in start self.subapp.start() File "/home/s.1915438/.conda/envs/ml/lib/python3.9/site-packages/jupyter_server/serverapp.py", line 492, in start set_password(config_file=self.config_file) File "/home/s.1915438/.conda/envs/ml/lib/python3.9/site-packages/jupyter_server/auth/security.py", line 172, in set_password hashed_password = passwd(password) File "/home/s.1915438/.conda/envs/ml/lib/python3.9/site-packages/jupyter_server/auth/security.py", line 63, in passwd import argon2 File "/home/s.1915438/.conda/envs/ml/lib/python3.9/site-packages/argon2/__init__.py", line 7, in <module> from . import exceptions, low_level, profiles File "/home/s.1915438/.conda/envs/ml/lib/python3.9/site-packages/argon2/low_level.py", line 15, in <module> from _argon2_cffi_bindings import ffi, lib File "/home/s.1915438/.conda/envs/ml/lib/python3.9/site-packages/_argon2_cffi_bindings/__init__.py", line 3, in <module> from ._ffi import ffi, lib ImportError: libffi.so.7: cannot open shared object file: No such file or directory ``` # Tinkering configuration file So, we will go without the password. 
But there is a Jupyter server token for security also the public IP is also hidden in ssh-tunnel. **Update**: When I was using ssh through VS Code's extention I was able to run this command `jupyter-lab password` and was able to setup the password. If you have setup the password, Jupyter server will not generate a token. Now head on to `/lustrehome/home/s.1915438/.jupyter` and open the file, un-comment and change the following lines. * Jupyter lab: * `c.LabApp.open_browser = False` * `c.ServerApp.port = 8888` * Jupyter: * `c.NotebookApp.open_browser = False` * `c.NotebookApp.port = 8888` # (You can set this to any four-digit integer) Obviously, we don't want to the Notebook to open in the browser as there is no browser on Sunbird. Lol. Also, we want to set a specific port on server (Sunbird) for port forwarding. # Port forwarding This command is used to run commands on the server (Sunbird) while sshing. `ssh -L <port>:localhost:<port> -t <user>@<server> "jupyter notebook"` where `<port>` is the port you set earlier, `<user>` is your cluster user id, and `<server>` is the address of the login server. The `-L` flag tells ssh to tunnel the `localhost:<port>` of the remote server to that of your local machine. The `-t` flag opens up the connection as an interactive session, allowing you to pass `SIGINT` (Ctrl-C) to end the jupyter notebook before killing the ssh connection. To open your jupyter notebook within a specific conda environment (e.g. `<env>`), replace the command in quotations with `source activate <env>; jupyter notebook`. # Running Jupyter lab on login node Here is out turn to become creative with the command. ```sh (base) hell@Dell-Precision-T1600:~/Desktop/repos/Sunbird/Jupyter_lab_port_forwarding$ ssh -L 8888:localhost:8888 -t s.1915438@sunbird.swansea.ac.uk "module load anaconda/3;source activate ml;jupyter-lab" ``` Here, `8888` on left, in green colour is my server (Sunbird) port and `8888` on the right is my local port and `-L` tunnel both the ports. 
`-t` opens an interactive session in which we can any command that falls inside those inverted commas `""`. Now, we can run our Jupyter lab as follows: * Load Anaconda: `module load anaconda/3` the module name might change time to time. Check the module name using `module avail`. * Activate the conda environment where you installed the Jupyter Lab. In my case it was `ml` conda environment. So, I would type `source activate ml`. * `source activate base` and then `conda activate ml` is equivalent to above command. * Now run `jupyter-lab` as you were using `anaconda prompt` in Windows. All these things can be passed to ssh as `"module load anaconda/3;source activate ml;jupyter-lab"`. As you run ssh command you will see Jupyter lab is starting. ```sh (base) hell@Dell-Precision-T1600:~/Desktop/repos/Sunbird/Jupyter_lab_port_forwarding$ ssh -L 8888:localhost:8888 -t s.1915438@sunbird.swansea.ac.uk "module load anaconda/3;source activate ml;jupyter-lab" [I 2022-03-19 22:39:59.593 ServerApp] jupyterlab | extension was successfully linked. [I 2022-03-19 22:39:59.602 ServerApp] nbclassic | extension was successfully linked. [I 2022-03-19 22:40:00.377 ServerApp] notebook_shim | extension was successfully linked. [I 2022-03-19 22:40:00.432 ServerApp] notebook_shim | extension was successfully loaded. [I 2022-03-19 22:40:00.434 LabApp] JupyterLab extension loaded from /home/s.1915438/.conda/envs/ml/lib/python3.9/site-packages/jupyterlab [I 2022-03-19 22:40:00.434 LabApp] JupyterLab application directory is /lustrehome/home/s.1915438/.conda/envs/ml/share/jupyter/lab [I 2022-03-19 22:40:00.438 ServerApp] jupyterlab | extension was successfully loaded. [I 2022-03-19 22:40:00.452 ServerApp] nbclassic | extension was successfully loaded. 
[I 2022-03-19 22:40:00.452 ServerApp] Serving notebooks from local directory: /lustrehome/home/s.1915438 [I 2022-03-19 22:40:00.453 ServerApp] Jupyter Server 1.15.6 is running at: [I 2022-03-19 22:40:00.453 ServerApp] http://localhost:8888/lab?token=d9d5dd555ef63d682ec2b68232f493a4818db4bb71f1f6a1 [I 2022-03-19 22:40:00.453 ServerApp] or http://127.0.0.1:8888/lab?token=d9d5dd555ef63d682ec2b68232f493a4818db4bb71f1f6a1 [I 2022-03-19 22:40:00.453 ServerApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation). [C 2022-03-19 22:40:00.459 ServerApp] To access the server, open this file in a browser: file:///lustrehome/home/s.1915438/.local/share/jupyter/runtime/jpserver-89059-open.html Or copy and paste one of these URLs: http://localhost:8888/lab?token=d9d5dd555ef63d682ec2b68232f493a4818db4bb71f1f6a1 or http://127.0.0.1:8888/lab?token=d9d5dd555ef63d682ec2b68232f493a4818db4bb71f1f6a1 [W 2022-03-19 22:40:30.098 LabApp] Could not determine jupyterlab build status without nodejs [I 2022-03-19 22:42:43.725 ServerApp] New terminal with automatic name: 1 TermSocket.open: 1 TermSocket.open: Opened 1 [I 2022-03-19 22:43:22.523 ServerApp] Writing notebook-signing key to /lustrehome/home/s.1915438/.local/share/jupyter/notebook_secret [W 2022-03-19 22:43:22.526 ServerApp] Notebook ipynb_try/2D heat conduction.ipynb is not trusted [I 2022-03-19 22:43:24.066 ServerApp] Kernel started: 45842dc9-891f-4ce4-94d1-12e6aa528f09 ``` Copy and paste the one the link the `localhost` one or the `127.0.0.1:8888` one in your favourite browser. Bingo. It is working. Remember, if you managed to setup a password, then you won't get the token. Simply open `http://localhost:<local_por>/` or `http://localhost:8888/`. A screenshot: ![image.png](attachment:5cc409b0-0a5f-4dc6-827b-ee644a0f73db.png) To close the session. Go to File -> Shut Down. 
```sh [I 2022-03-19 23:33:38.361 ServerApp] Terminal 1 closed Websocket closed [I 2022-03-19 23:33:39.321 ServerApp] Shutting down on /api/shutdown request. [I 2022-03-19 23:33:39.322 ServerApp] Shutting down 3 extensions [I 2022-03-19 23:33:39.322 ServerApp] Shutting down 0 kernels [I 2022-03-19 23:33:39.322 ServerApp] Shutting down 0 terminals [I 2022-03-19 23:47:36.316 ServerApp] Shutting down 0 terminals Connection to sunbird.swansea.ac.uk closed. ``` The Sunbird has `htop` preinstalled so you can see how much memory are you using. Also, you can double check if the Jupyter Lab server is still running or not. # Wrapping ssh command in Bash file Instead of typing this `ssh -L 8888:localhost:8888 -t s.1915438@sunbird.swansea.ac.uk "module load anaconda/3;source activate ml;jupyter-lab"` every single time, we can write a bash script. Just create a new text file using `gedit` or `nano` and paste the above command. Now where do you create this bash file. On Sunbird? Obviously not. If you have this question in your mind `rm -rf /` your PC.
github_jupyter
# Transfer Learning with TensorFlow Hub for TFLite ## Set up library versions for TF2 ``` # !pip uninstall tensorflow --yes !pip install -U --pre -q tensorflow-gpu==2.0.0-beta1 # !pip install -U --pre -q tf-nightly-gpu-2.0-preview==2.0.0.dev20190715 # Last tested version: 2.0.0-dev20190704 # !pip install -U --pre -q tf-estimator-nightly==1.14.0.dev2019071001 # !pip uninstall tensorflow-hub --yes # !pip install -U --pre -q tf-hub-nightly==0.6.0.dev201907150002 # Last tested version: Hub version: 0.6.0.dev201907160002 from __future__ import absolute_import, division, print_function import os import matplotlib.pylab as plt import numpy as np import tensorflow as tf import tensorflow_hub as hub print("Version: ", tf.__version__) print("Eager mode: ", tf.executing_eagerly()) print("Hub version: ", hub.__version__) print("GPU is", "available" if tf.test.is_gpu_available() else "NOT AVAILABLE") ``` ## Select the Hub/TF2 module to use Hub modules for TF 1.x won't work here, please use one of the selections provided. ``` module_selection = ("mobilenet_v2", 224, 1280) #@param ["(\"mobilenet_v2\", 224, 1280)", "(\"inception_v3\", 299, 2048)"] {type:"raw", allow-input: true} handle_base, pixels, FV_SIZE = module_selection MODULE_HANDLE ="https://tfhub.dev/google/tf2-preview/{}/feature_vector/4".format(handle_base) IMAGE_SIZE = (pixels, pixels) print("Using {} with input size {} and output dimension {}".format( MODULE_HANDLE, IMAGE_SIZE, FV_SIZE)) ``` ## Data preprocessing Use [TensorFlow Datasets](http://tensorflow.org/datasets) to load the cats and dogs dataset. This `tfds` package is the easiest way to load pre-defined data. If you have your own data, and are interested in importing using it with TensorFlow see [loading image data](../load_data/images.ipynb) ``` import tensorflow_datasets as tfds tfds.disable_progress_bar() ``` The `tfds.load` method downloads and caches the data, and returns a `tf.data.Dataset` object. 
These objects provide powerful, efficient methods for manipulating data and piping it into your model. Since `"cats_vs_dog"` doesn't define standard splits, use the subsplit feature to divide it into (train, validation, test) with 80%, 10%, 10% of the data respectively. ``` splits = tfds.Split.ALL.subsplit(weighted=(80, 10, 10)) splits, info = tfds.load('cats_vs_dogs', with_info=True, as_supervised=True, split = splits) (train_examples, validation_examples, test_examples) = splits num_examples = info.splits['train'].num_examples num_classes = info.features['label'].num_classes ``` ### Format the Data Use the `tf.image` module to format the images for the task. Resize the images to a fixes input size, and rescale the input channels ``` def format_image(image, label): image = tf.image.resize(image, IMAGE_SIZE) / 255.0 return image, label ``` Now shuffle and batch the data ``` BATCH_SIZE = 32 #@param {type:"integer"} train_batches = train_examples.shuffle(num_examples // 4).map(format_image).batch(BATCH_SIZE).prefetch(1) validation_batches = validation_examples.map(format_image).batch(BATCH_SIZE).prefetch(1) test_batches = test_examples.map(format_image).batch(1) ``` Inspect a batch ``` for image_batch, label_batch in train_batches.take(1): pass image_batch.shape ``` ## Defining the model All it takes is to put a linear classifier on top of the `feature_extractor_layer` with the Hub module. For speed, we start out with a non-trainable `feature_extractor_layer`, but you can also enable fine-tuning for greater accuracy. 
``` do_fine_tuning = False #@param {type:"boolean"} ``` Load TFHub Module ``` feature_extractor = hub.KerasLayer(MODULE_HANDLE, input_shape=IMAGE_SIZE + (3,), output_shape=[FV_SIZE], trainable=do_fine_tuning) print("Building model with", MODULE_HANDLE) model = tf.keras.Sequential([ feature_extractor, tf.keras.layers.Dense(num_classes, activation='softmax') ]) model.summary() #@title (Optional) Unfreeze some layers NUM_LAYERS = 10 #@param {type:"slider", min:1, max:50, step:1} if do_fine_tuning: feature_extractor.trainable = True for layer in model.layers[-NUM_LAYERS:]: layer.trainable = True else: feature_extractor.trainable = False ``` ## Training the model ``` if do_fine_tuning: model.compile( optimizer=tf.keras.optimizers.SGD(lr=0.002, momentum=0.9), loss=tf.keras.losses.SparseCategoricalCrossentropy(), metrics=['accuracy']) else: model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) EPOCHS = 5 hist = model.fit(train_batches, epochs=EPOCHS, validation_data=validation_batches) ``` ## Export the model ``` CATS_VS_DOGS_SAVED_MODEL = "exp_saved_model" ``` Export the SavedModel ``` tf.saved_model.save(model, CATS_VS_DOGS_SAVED_MODEL) %%bash -s $CATS_VS_DOGS_SAVED_MODEL saved_model_cli show --dir $1 --tag_set serve --signature_def serving_default loaded = tf.saved_model.load(CATS_VS_DOGS_SAVED_MODEL) print(list(loaded.signatures.keys())) infer = loaded.signatures["serving_default"] print(infer.structured_input_signature) print(infer.structured_outputs) ``` ## Convert using TFLite's Converter ``` ``` Load the TFLiteConverter with the SavedModel ``` converter = tf.lite.TFLiteConverter.from_saved_model(CATS_VS_DOGS_SAVED_MODEL) ``` ### Post-training quantization The simplest form of post-training quantization quantizes weights from floating point to 8-bits of precision. This technique is enabled as an option in the TensorFlow Lite converter. 
At inference, weights are converted from 8-bits of precision to floating point and computed using floating-point kernels. This conversion is done once and cached to reduce latency. To further improve latency, hybrid operators dynamically quantize activations to 8-bits and perform computations with 8-bit weights and activations. This optimization provides latencies close to fully fixed-point inference. However, the outputs are still stored using floating point, so that the speedup with hybrid ops is less than a full fixed-point computation. ``` converter.optimizations = [tf.lite.Optimize.DEFAULT] ``` ### Post-training integer quantization We can get further latency improvements, reductions in peak memory usage, and access to integer only hardware accelerators by making sure all model math is quantized. To do this, we need to measure the dynamic range of activations and inputs with a representative data set. You can simply create an input data generator and provide it to our converter. ``` def representative_data_gen(): for input_value, _ in test_batches.take(100): yield [input_value] converter.representative_dataset = representative_data_gen ``` The resulting model will be fully quantized but still take float input and output for convenience. Ops that do not have quantized implementations will automatically be left in floating point. This allows conversion to occur smoothly but may restrict deployment to accelerators that support float. ### Full integer quantization To require the converter to only output integer operations, one can specify: ``` converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] ``` ### Finally convert the model ``` tflite_model = converter.convert() tflite_model_file = 'converted_model.tflite' with open(tflite_model_file, "wb") as f: f.write(tflite_model) ``` ##Test the TFLite model using the Python Interpreter ``` # Load TFLite model and allocate tensors. 
interpreter = tf.lite.Interpreter(model_path=tflite_model_file) interpreter.allocate_tensors() input_index = interpreter.get_input_details()[0]["index"] output_index = interpreter.get_output_details()[0]["index"] from tqdm import tqdm # Gather results for the randomly sampled test images predictions = [] test_labels, test_imgs = [], [] for img, label in tqdm(test_batches.take(10)): interpreter.set_tensor(input_index, img) interpreter.invoke() predictions.append(interpreter.get_tensor(output_index)) test_labels.append(label.numpy()[0]) test_imgs.append(img) #@title Utility functions for plotting # Utilities for plotting class_names = ['cat', 'dog'] def plot_image(i, predictions_array, true_label, img): predictions_array, true_label, img = predictions_array[i], true_label[i], img[i] plt.grid(False) plt.xticks([]) plt.yticks([]) img = np.squeeze(img) plt.imshow(img, cmap=plt.cm.binary) predicted_label = np.argmax(predictions_array) if predicted_label == true_label: color = 'green' else: color = 'red' plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label], 100*np.max(predictions_array), class_names[true_label]), color=color) ``` NOTE: Colab runs on server CPUs. At the time of writing this, TensorFlow Lite doesn't have super optimized server CPU kernels. For this reason post-training full-integer quantized models may be slower here than the other kinds of optimized models. But for mobile CPUs, considerable speedup can be observed. 
``` #@title Visualize the outputs { run: "auto" } index = 1 #@param {type:"slider", min:0, max:9, step:1} plt.figure(figsize=(6,3)) plt.subplot(1,2,1) plot_image(index, predictions, test_labels, test_imgs) plt.show() ``` Download the model ``` from google.colab import files files.download('converted_model.tflite') labels = ['cat', 'dog'] with open('labels.txt', 'w') as f: f.write('\n'.join(labels)) files.download('labels.txt') ``` # Prepare the test images for download (Optional) This part involves downloading additional test images for the Mobile Apps only in case you need to try out more samples ``` !mkdir -p test_images from PIL import Image for index, (image, label) in enumerate(test_batches.take(50)): image = tf.cast(image * 255.0, tf.uint8) image = tf.squeeze(image).numpy() pil_image = Image.fromarray(image) pil_image.save('test_images/{}_{}.jpg'.format(class_names[label[0]], index)) !ls test_images !zip -qq cats_vs_dogs_test_images.zip -r test_images/ files.download('cats_vs_dogs_test_images.zip') ```
github_jupyter
<a id="title_ID"></a>
# JWST Pipeline Validation Notebook: calwebb_detector1, firstframe unit tests

<span style="color:red"> **Instruments Affected**</span>: MIRI

### Table of Contents
<div style="text-align: left">

<br> [Introduction](#intro)
<br> [JWST Unit Tests](#unit)
<br> [Defining Terms](#terms)
<br> [Test Description](#description)
<br> [Data Description](#data_descr)
<br> [Imports](#imports)
<br> [Convenience Functions](#functions)
<br> [Perform Tests](#testing)
<br> [About This Notebook](#about)
<br>

</div>

<a id="intro"></a>
# Introduction

This is the validation notebook that displays the unit tests for the Firstframe step in calwebb_detector1. This notebook runs and displays the unit tests that are performed as a part of the normal software continuous integration process. For more information on the pipeline visit the links below.

* Pipeline description: https://jwst-pipeline.readthedocs.io/en/latest/jwst/firstframe/index.html

* Pipeline code: https://github.com/spacetelescope/jwst/tree/master/jwst/

[Top of Page](#title_ID)

<a id="unit"></a>
# JWST Unit Tests

JWST unit tests are located in the "tests" folder for each pipeline step within the [GitHub repository](https://github.com/spacetelescope/jwst/tree/master/jwst/), e.g., ```jwst/firstframe/tests```.

* Unit test README: https://github.com/spacetelescope/jwst#unit-tests

[Top of Page](#title_ID)

<a id="terms"></a>
# Defining Terms

These are terms or acronyms used in this notebook that may not be known to a general audience.

* JWST: James Webb Space Telescope

* MIRI: Mid-Infrared Instrument

* NIRCam: Near-Infrared Camera

[Top of Page](#title_ID)

<a id="description"></a>
# Test Description

Unit testing is a software testing method by which individual units of source code are tested to determine whether they are working sufficiently well. Unit tests do not require a separate data file; the test creates the necessary test data and parameters as a part of the test code.
[Top of Page](#title_ID) <a id="data_descr"></a> # Data Description Data used for unit tests is created on the fly within the test itself, and is typically an array in the expected format of JWST data with added metadata needed to run through the pipeline. [Top of Page](#title_ID) <a id="imports"></a> # Imports * tempfile for creating temporary output products * pytest for unit test functions * jwst for the JWST Pipeline * IPython.display for display pytest reports [Top of Page](#title_ID) ``` import tempfile import pytest import jwst from IPython.display import IFrame ``` <a id="functions"></a> # Convenience Functions Here we define any convenience functions to help with running the unit tests. [Top of Page](#title_ID) ``` def display_report(fname): '''Convenience function to display pytest report.''' return IFrame(src=fname, width=700, height=600) ``` <a id="testing"></a> # Perform Tests Below we run the unit tests for the Firstframe step. [Top of Page](#title_ID) ``` with tempfile.TemporaryDirectory() as tmpdir: !pytest jwst/firstframe -v --ignore=jwst/associations --ignore=jwst/datamodels --ignore=jwst/stpipe --ignore=jwst/regtest --html=tmpdir/unit_report.html --self-contained-html report = display_report('tmpdir/unit_report.html') report ``` <a id="about"></a> ## About This Notebook **Author:** Alicia Canipe, Staff Scientist, NIRCam <br>**Updated On:** 01/07/2021 [Top of Page](#title_ID) <img style="float: right;" src="./stsci_pri_combo_mark_horizonal_white_bkgd.png" alt="stsci_pri_combo_mark_horizonal_white_bkgd" width="200px"/>
github_jupyter
#タンパク質折りたたみ問題 量子アニーリングを用いた創薬関連のタンパク質折りたたみ問題がハーバード大学の先生によって2012年に発表されていました。そのタンパク質折りたたみ問題の論文を元に実際にwildqatで解いてみたいと思います。 ##参考にする論文 natureに掲載されているこちらの論文をベースにします。 Finding low-energy conformations of lattice protein models by quantum annealing Alejandro Perdomo-Ortiz, Neil Dickson, Marshall Drew-Brook, Geordie Rose & Alán Aspuru-Guzik Scientific Reports volume 2, Article number: 571 (2012) https://www.nature.com/articles/srep00571 ##問題の概要とHPモデル、MJモデル タンパク質を単純にモデル化をしてそれをイジングモデルモデルに落とし込むという試みです。 まずは、HPモデルというモデルを使用しています。 HPモデル • アミノ酸をH(疎水性、非極性アミノ酸)とP(親水性、極性アミノ酸)のいずれかに分ける。 • Hは、水を嫌い、互いに引き付けあう 参考:HPモデルhttp://www.iba.t.u-tokyo.ac.jp/iba/AI/HP.pdf Mijazawa-Jernigan (MJ) model 今回は単純化されたMJモデルを利用します。 ##MJモデルのQUBOへの適用 用意された塩基列を特定の方向に回転させる操作をイジングモデルに対応させています。 <img src="https://github.com/Blueqat/Wildqat/blob/master/examples_ja/img/024_5.png?raw=1"> 引用:https://www.nature.com/articles/srep00571 今回使用する塩基列は、PSVKMAの配列で、 下記のように特定の塩基列が隣接すると安定状態になり、エネルギーがへります。このエネルギーの安定化を使っってコスト関数を最小化させることを狙います。 <img src="https://github.com/Blueqat/Wildqat/blob/master/examples_ja/img/024_0.png?raw=1"> また、今回塩基列を全て一度に処理するのは難しいのでいくつかのパターンに分けます。 <img src="https://github.com/Blueqat/Wildqat/blob/master/examples_ja/img/024_1.png?raw=1"> 引用:https://www.nature.com/articles/srep00571 上記のようにすでにいくつかの折りたたまれたパターンから出発して安定状態を求めます。数が多くないので書き出すことができ、もっとも低いエネルギー安定状態は下記のようになります。それぞれのパターンに対して安定状態に到達できる形状が異なるようなので、どれか1つのschemeを取り上げて一番エネルギーの低い状態を立式から導き出したいと思います。 <img src="https://github.com/Blueqat/Wildqat/blob/master/examples_ja/img/024_2.png?raw=1"> 引用:https://www.nature.com/articles/srep00571 ##コスト関数 今回のコスト関数は、 $$E_p = E_{onsite} + E_{px} + E_{ext}$$ となります。1項目はタンパク質の塩基列が重ならないという条件、2項目は塩基列同士の近接の相互作用のエネルギー関数、3項目は外部からの影響です。今回3項目は使用しないということなので、 $$E_p = E_{onsite} + E_{pw}$$ となります。 ##モデル 今回、論文中にも触れられている実験3をやって見たいと思います。塩基列は、 <img src="https://github.com/Blueqat/Wildqat/blob/master/examples_ja/img/024_3.png?raw=1"> この順番ですが、今回はある程度折りたたまれた状態で始まります。 <img 
src="https://github.com/Blueqat/Wildqat/blob/master/examples_ja/img/024_4.png?raw=1"> この場合、PSKVまでは決まっていて、Mは下か左が決まっていますので、回転方向はPから順番に書いてみると、 $$010010q_10q_2q_3$$ となり、3量子ビットの式を解けばよいことになります。$01$は右、$00$は下、$10$は左、Mは下か左しかないので、$00$か$10$なので、$0$は決まっていて、残りの$q_1$から$q_3$をイジング問題で求めます。 コスト関数は、 $$E = -1-4q_3+9q_1q_3+9q_2q_3-16q_1q_2q_3$$ ##3体問題の2体問題への分解 ここで、QUBOではそのままでは解けない3体問題がでてきます。この際には数学変換で2体問題へと分解をします。詳しくは他のチュートリアルを参照ください。 新しい量子ビット$q_4$を導入して、 $$q_4=q_1q_2$$ これにペナルティ項を導入するとコスト関数は、 $$E = -1-4q_3+9q_1q_3+9q_2q_3-16q_3q_4+\delta(3q_4+q_1q_2-2q_1q_4-2q_2q_4)$$ こちらをWildqatに入れて計算をして見ます。 ##Wildqatへ実装 ここで上記のQUBOを実装します。 デルタの値を10として見てとくと、 ``` !pip install blueqat import blueqat.wq as wq a = wq.Opt() d = 10; a.qubo = [[0,d,9,-2*d],[0,0,9,-2*d],[0,0,-4,-16],[0,0,0,3*d]] a.sa() ``` 答えは$0010$となりました、QUBOで表現されたタンパク質の折りたたみは、 $0100100010$ という回転を表現することになり、下記のようなものがもっとも安定なものとなります。 <img src="https://github.com/Blueqat/Wildqat/blob/master/examples_ja/img/024_7.png?raw=1">
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/geemap/tree/master/examples/notebooks/geemap_and_earthengine.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/geemap/blob/master/examples/notebooks/geemap_and_earthengine.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/geemap/blob/master/examples/notebooks/geemap_and_earthengine.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). 
Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving). ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('geemap package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) # Checks whether this notebook is running on Google Colab try: import google.colab import geemap.eefolium as emap except: import geemap as emap # Authenticates and initializes Earth Engine import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map ``` Map = emap.Map(center=(40, -100), zoom=4) Map ``` ## Add Earth Engine Python script ``` # Add Earth Engine dataset image = ee.Image('USGS/SRTMGL1_003') # Set visualization parameters. vis_params = { 'min': 0, 'max': 4000, 'palette': ['006633', 'E5FFCC', '662A00', 'D8D8D8', 'F5F5F5']} # Print the elevation of Mount Everest. 
xy = ee.Geometry.Point([86.9250, 27.9881]) elev = image.sample(xy, 30).first().get('elevation').getInfo() print('Mount Everest elevation (m):', elev) # Add Earth Engine layers to Map Map.addLayer(image, vis_params, 'SRTM DEM', True, 0.5) Map.addLayer(xy, {'color': 'red'}, 'Mount Everest') ``` ## Change map positions For example, center the map on an Earth Engine object: ``` Map.centerObject(ee_object=xy, zoom=13) ``` Set the map center using coordinates (longitude, latitude) ``` Map.setCenter(lon=-100, lat=40, zoom=4) ``` ## Extract information from Earth Engine data based on user inputs ``` import ee import geemap from ipyleaflet import * from ipywidgets import Label try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() Map = geemap.Map(center=(40, -100), zoom=4) Map.default_style = {'cursor': 'crosshair'} # Add Earth Engine dataset image = ee.Image('USGS/SRTMGL1_003') # Set visualization parameters. vis_params = { 'min': 0, 'max': 4000, 'palette': ['006633', 'E5FFCC', '662A00', 'D8D8D8', 'F5F5F5']} # Add Earth Eninge layers to Map Map.addLayer(image, vis_params, 'STRM DEM', True, 0.5) latlon_label = Label() elev_label = Label() display(latlon_label) display(elev_label) coordinates = [] markers = [] marker_cluster = MarkerCluster(name="Marker Cluster") Map.add_layer(marker_cluster) def handle_interaction(**kwargs): latlon = kwargs.get('coordinates') if kwargs.get('type') == 'mousemove': latlon_label.value = "Coordinates: {}".format(str(latlon)) elif kwargs.get('type') == 'click': coordinates.append(latlon) # Map.add_layer(Marker(location=latlon)) markers.append(Marker(location=latlon)) marker_cluster.markers = markers xy = ee.Geometry.Point(latlon[::-1]) elev = image.sample(xy, 30).first().get('elevation').getInfo() elev_label.value = "Elevation of {}: {} m".format(latlon, elev) Map.on_interaction(handle_interaction) Map import ee import geemap from ipyleaflet import * from bqplot import pyplot as plt try: ee.Initialize() except Exception 
as e: ee.Authenticate() ee.Initialize() Map = geemap.Map(center=(40, -100), zoom=4) Map.default_style = {'cursor': 'crosshair'} # Compute the trend of nighttime lights from DMSP. # Add a band containing image date as years since 1990. def createTimeBand(img): year = img.date().difference(ee.Date('1991-01-01'), 'year') return ee.Image(year).float().addBands(img) NTL = ee.ImageCollection('NOAA/DMSP-OLS/NIGHTTIME_LIGHTS') \ .select('stable_lights') # Fit a linear trend to the nighttime lights collection. collection = NTL.map(createTimeBand) fit = collection.reduce(ee.Reducer.linearFit()) image = NTL.toBands() figure = plt.figure(1, title='Nighttime Light Trend', layout={'max_height': '250px', 'max_width': '400px'}) count = collection.size().getInfo() start_year = 1992 end_year = 2013 x = range(1, count+1) coordinates = [] markers = [] marker_cluster = MarkerCluster(name="Marker Cluster") Map.add_layer(marker_cluster) def handle_interaction(**kwargs): latlon = kwargs.get('coordinates') if kwargs.get('type') == 'click': coordinates.append(latlon) markers.append(Marker(location=latlon)) marker_cluster.markers = markers xy = ee.Geometry.Point(latlon[::-1]) y = image.sample(xy, 500).first().toDictionary().values().getInfo() plt.clear() plt.plot(x, y) # plt.xticks(range(start_year, end_year, 5)) Map.on_interaction(handle_interaction) # Display a single image Map.addLayer(ee.Image(collection.select('stable_lights').first()), {'min': 0, 'max': 63}, 'First image') # Display trend in red/blue, brightness in green. Map.setCenter(30, 45, 4) Map.addLayer(fit, {'min': 0, 'max': [0.18, 20, -0.18], 'bands': ['scale', 'offset', 'scale']}, 'stable lights trend') fig_control = WidgetControl(widget=figure, position='bottomright') Map.add_control(fig_control) Map ```
github_jupyter
#### Copyright 2017 Google LLC. ``` # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Sparsity and L1 Regularization **Learning Objectives:** * Calculate the size of a model * Apply L1 regularization to reduce the size of a model by increasing sparsity One way to reduce complexity is to use a regularization function that encourages weights to be exactly zero. For linear models such as regression, a zero weight is equivalent to not using the corresponding feature at all. In addition to avoiding overfitting, the resulting model will be more efficient. L1 regularization is a good way to increase sparsity. ## Setup Run the cells below to load the data and create feature definitions. 
```
from __future__ import print_function

import math

from IPython import display #for displaying multiple tables using the same code block
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset

# Silence TF's per-step logging so only our own progress prints show.
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format

california_housing_dataframe = pd.read_csv("https://dl.google.com/mlcc/mledu-datasets/california_housing_train.csv", sep=",")

#reindex the indices for random draws
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index))

def preprocess_features(california_housing_dataframe):
  """Prepares input features from California housing data set.

  Selects the eight raw input columns and adds one synthetic feature
  (rooms_per_person). The input frame is not modified.

  Args:
    california_housing_dataframe: A Pandas DataFrame expected to contain data
      from the California housing data set.
  Returns:
    A DataFrame that contains the features to be used for the model,
    including synthetic features.
  """
  selected_features = california_housing_dataframe[
    ["latitude",
     "longitude",
     "housing_median_age",
     "total_rooms",
     "total_bedrooms",
     "population",
     "households",
     "median_income"]]
  processed_features = selected_features.copy() #copy() is a pandas data frame method
  # Create a synthetic feature.
  processed_features["rooms_per_person"] = (
    california_housing_dataframe["total_rooms"] /
    california_housing_dataframe["population"])
  return processed_features

def preprocess_targets(california_housing_dataframe):
  """Prepares target features (i.e., labels) from California housing data set.

  Produces a single binary (0.0/1.0) column indicating whether
  median_house_value exceeds the 265000 threshold.

  Args:
    california_housing_dataframe: A Pandas DataFrame expected to contain data
      from the California housing data set.
  Returns:
    A DataFrame that contains the target feature.
  """
  output_targets = pd.DataFrame()
  # Create a boolean categorical feature representing whether the
  # median_house_value is above a set threshold.
  output_targets["median_house_value_is_high"] = (
    california_housing_dataframe["median_house_value"] > 265000).astype(float)
  return output_targets

#for clarity (california_housing_dataframe["median_house_value"] > 265000).astype(float)

# Choose the first 12000 (out of 17000) examples for training.
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_targets = preprocess_targets(california_housing_dataframe.head(12000))

# Choose the last 5000 (out of 17000) examples for validation.
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))

# Double-check that we've done the right thing.
print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())
print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())

def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
  """Constructs a TensorFlow input function for the estimator.

  Args:
    features: pandas DataFrame of features
    targets: pandas DataFrame of targets
    batch_size: Size of batches to be passed to the model
    shuffle: True or False. Whether to shuffle the data.
    num_epochs: Number of epochs for which data should be repeated.
      None = repeat indefinitely
  Returns:
    Tuple of (features, labels) for next data batch
  """
  # Convert pandas data into a dict of np arrays.
  features = {key:np.array(value) for key,value in dict(features).items()}

  # Construct a dataset, and configure batching/repeating.
  ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
  ds = ds.batch(batch_size).repeat(num_epochs)

  # Shuffle the data, if specified.
  if shuffle:
    ds = ds.shuffle(10000)

  # Return the next batch of data.
  features, labels = ds.make_one_shot_iterator().get_next()
  return features, labels

# NOTE(review): stray exploratory expression (likely a leftover scratch
# cell); its result is discarded and it has no side effects.
{key:np.array(value) for key,value in dict(training_examples).items()}

def get_quantile_based_buckets(feature_values, num_buckets):
  """Returns `num_buckets` evenly spaced quantile boundaries for a Series."""
  quantiles = feature_values.quantile(
    [(i+1.)/(num_buckets + 1.) for i in range(num_buckets)])
  return [quantiles[q] for q in quantiles.keys()]

def construct_feature_columns():
  """Construct the TensorFlow Feature Columns.

  Each numeric input is bucketized on quantile boundaries, and a
  longitude x latitude crossed column is added.

  Returns:
    A set of feature columns
  """
  bucketized_households = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("households"),
    boundaries=get_quantile_based_buckets(training_examples["households"], 10))
  bucketized_longitude = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("longitude"),
    boundaries=get_quantile_based_buckets(training_examples["longitude"], 50))
  bucketized_latitude = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("latitude"),
    boundaries=get_quantile_based_buckets(training_examples["latitude"], 50))
  bucketized_housing_median_age = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("housing_median_age"),
    boundaries=get_quantile_based_buckets(
      training_examples["housing_median_age"], 10))
  bucketized_total_rooms = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("total_rooms"),
    boundaries=get_quantile_based_buckets(training_examples["total_rooms"], 10))
  bucketized_total_bedrooms = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("total_bedrooms"),
    boundaries=get_quantile_based_buckets(training_examples["total_bedrooms"], 10))
  bucketized_population = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("population"),
    boundaries=get_quantile_based_buckets(training_examples["population"], 10))
  bucketized_median_income = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("median_income"),
    boundaries=get_quantile_based_buckets(training_examples["median_income"], 10))
  bucketized_rooms_per_person = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("rooms_per_person"),
    boundaries=get_quantile_based_buckets(
      training_examples["rooms_per_person"], 10))

  long_x_lat = tf.feature_column.crossed_column(
    set([bucketized_longitude, bucketized_latitude]), hash_bucket_size=1000)

  feature_columns = set([
    long_x_lat,
    bucketized_longitude,
    bucketized_latitude,
    bucketized_housing_median_age,
    bucketized_total_rooms,
    bucketized_total_bedrooms,
    bucketized_population,
    bucketized_households,
    bucketized_median_income,
    bucketized_rooms_per_person])

  return feature_columns
```

## Calculate the Model Size

To calculate the model size, we simply count the number of parameters that are non-zero. We provide a helper function below to do that. The function uses intimate knowledge of the Estimators API - don't worry about understanding how it works.

```
def model_size(estimator):
  """Counts the non-zero parameters of a trained estimator, skipping
  bookkeeping variables (global step, bias, Ftrl optimizer slots)."""
  variables = estimator.get_variable_names()
  size = 0
  for variable in variables:
    if not any(x in variable
               for x in ['global_step',
                         'centered_bias_weight',
                         'bias_weight',
                         'Ftrl']
              ):
      size += np.count_nonzero(estimator.get_variable_value(variable))
  return size
```

## Reduce the Model Size

Your team needs to build a highly accurate Logistic Regression model on the *SmartRing*, a ring that is so smart it can sense the demographics of a city block ('median_income', 'avg_rooms', 'households', ..., etc.) and tell you whether the given city block is high cost city block or not.

Since the SmartRing is small, the engineering team has determined that it can only handle a model that has **no more than 600 parameters**.
On the other hand, the product management team has determined that the model is not launchable unless the **LogLoss is less than 0.35** on the holdout test set. Can you use your secret weapon—L1 regularization—to tune the model to satisfy both the size and accuracy constraints? ### Task 1: Find a good regularization coefficient. **Find an L1 regularization strength parameter which satisfies both constraints — model size is less than 600 and log-loss is less than 0.35 on validation set.** The following code will help you get started. There are many ways to apply regularization to your model. Here, we chose to do it using `FtrlOptimizer`, which is designed to give better results with L1 regularization than standard gradient descent. Again, the model will train on the entire data set, so expect it to run slower than normal. ``` def train_linear_classifier_model( learning_rate, regularization_strength, steps, batch_size, feature_columns, training_examples, training_targets, validation_examples, validation_targets): """Trains a linear regression model. In addition to training, this function also prints training progress information, as well as a plot of the training and validation loss over time. Args: learning_rate: A `float`, the learning rate. regularization_strength: A `float` that indicates the strength of the L1 regularization. A value of `0.0` means no regularization. steps: A non-zero `int`, the total number of training steps. A training step consists of a forward and backward pass using a single batch. feature_columns: A `set` specifying the input feature columns to use. training_examples: A `DataFrame` containing one or more columns from `california_housing_dataframe` to use as input features for training. training_targets: A `DataFrame` containing exactly one column from `california_housing_dataframe` to use as target for training. 
validation_examples: A `DataFrame` containing one or more columns from `california_housing_dataframe` to use as input features for validation. validation_targets: A `DataFrame` containing exactly one column from `california_housing_dataframe` to use as target for validation. Returns: A `LinearClassifier` object trained on the training data. """ periods = 7 steps_per_period = steps / periods # Create a linear classifier object. my_optimizer = tf.train.FtrlOptimizer(learning_rate=learning_rate, l1_regularization_strength=regularization_strength) my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0) linear_classifier = tf.estimator.LinearClassifier( feature_columns=feature_columns, optimizer=my_optimizer ) # Create input functions. training_input_fn = lambda: my_input_fn(training_examples, training_targets["median_house_value_is_high"], batch_size=batch_size) predict_training_input_fn = lambda: my_input_fn(training_examples, training_targets["median_house_value_is_high"], num_epochs=1, shuffle=False) #num_epochs=1 ensures data repeats for only 1 cycle predict_validation_input_fn = lambda: my_input_fn(validation_examples, validation_targets["median_house_value_is_high"], num_epochs=1, shuffle=False) #num_epochs=1 ensures data repeats for only 1 cycle # Train the model, but do so inside a loop so that we can periodically assess # loss metrics. print("Training model...") print("LogLoss (on validation data):") training_log_losses = [] validation_log_losses = [] for period in range (0, periods): # Train the model, starting from the prior state. linear_classifier.train( input_fn=training_input_fn, steps=steps_per_period ) # Take a break and compute predictions. 
training_probabilities = linear_classifier.predict(input_fn=predict_training_input_fn) training_probabilities = np.array([item['probabilities'] for item in training_probabilities]) validation_probabilities = linear_classifier.predict(input_fn=predict_validation_input_fn) validation_probabilities = np.array([item['probabilities'] for item in validation_probabilities]) # Compute training and validation loss. training_log_loss = metrics.log_loss(training_targets, training_probabilities) validation_log_loss = metrics.log_loss(validation_targets, validation_probabilities) # Occasionally print the current loss. print(" period %02d : %0.2f" % (period, validation_log_loss)) # Add the loss metrics from this period to our list. training_log_losses.append(training_log_loss) validation_log_losses.append(validation_log_loss) print("Model training finished.") # Output a graph of loss metrics over periods. plt.ylabel("LogLoss") plt.xlabel("Periods") plt.title("LogLoss vs. Periods") plt.tight_layout() plt.plot(training_log_losses, label="training") plt.plot(validation_log_losses, label="validation") plt.legend() return linear_classifier linear_classifier = train_linear_classifier_model( learning_rate=0.1, # TWEAK THE REGULARIZATION VALUE BELOW regularization_strength=0.1, steps=300, batch_size=100, feature_columns=construct_feature_columns(), training_examples=training_examples, training_targets=training_targets, validation_examples=validation_examples, validation_targets=validation_targets) print("Model size:", model_size(linear_classifier)) linear_classifier = train_linear_classifier_model( learning_rate=0.1, # TWEAK THE REGULARIZATION VALUE BELOW regularization_strength=0.6, steps=300, batch_size=100, feature_columns=construct_feature_columns(), training_examples=training_examples, training_targets=training_targets, validation_examples=validation_examples, validation_targets=validation_targets) print("Model size:", model_size(linear_classifier)) variables= 
linear_classifier.get_variable_names() size= [] for variable in variables: if not any(x in variable for x in ['global_step', 'centered_bias_weight', 'bias_weight', 'Ftrl'] ): size.append(linear_classifier.get_variable_value(variable)) tot= 0 for i in xrange(len(size)): tot+= len(size[i]) tot ```
github_jupyter
``` # Ensure the scenepic library will auto reload %load_ext autoreload # Imports import json import math import os import numpy as np import scenepic as sp %autoreload # Seed random number generator for consistency np.random.seed(0) ASSET_DIR = os.path.join("..", "ci", "assets") def asset_path(filename): return os.path.join(ASSET_DIR, filename) ``` # ScenePic Python Tutorials These tutorials provide practical examples that highlight most of the functionality supported by ScenePic. While by no means exhaustive, they should give you a solid start towards building useful and insightful 3D visualizations of your own. If there is something you feel is missing from this tutorial, or if there is something you would like to contribute, please contact the maintainers via GitHub Issues. ``` # Tutorial 1 - Scene and Canvas basics # Create a Scene, the top level container in ScenePic scene = sp.Scene() # A Scene can contain many Canvases # For correct operation, you should create these using scene1.create_canvas() (rather than constructing directly using sp.Canvas(...)) canvas_1 = scene.create_canvas_3d(width = 300, height = 300) canvas_2 = scene.create_canvas_3d(width = 100, height = 300) # ScenePic has told Jupyter how to display scene objects scene # Tutorial 2 - Meshes and Frames # Create a scene scene = sp.Scene() # A Mesh is a vertex/triangle/line buffer with convenience methods # Meshes "belong to" the Scene, so should be created using create_mesh() # Meshes can be re-used across multiple frames/canvases my_first_mesh = scene.create_mesh(shared_color = sp.Color(1.0, 0.0, 1.0)) # If shared_color is not provided, you can use per-vertex coloring my_first_mesh.add_cube(transform = sp.Transforms.Scale(0.1)) # Adds a unit cube centered at the origin my_first_mesh.add_cube(transform = np.dot(sp.Transforms.Translate([-1.0, 1.0, -1.0]), sp.Transforms.Scale(0.5))) my_first_mesh.add_sphere(transform = sp.Transforms.Translate([1.0, 1.0, 1.0])) # A Canvas is a 3D rendering panel 
canvas = scene.create_canvas_3d(width = 300, height = 300) # Create an animation with multiple Frames # A Frame references a set of Meshes # Frames are created from the Canvas not the Scene for i in range(10): frame = canvas.create_frame() frame.add_mesh(my_first_mesh, transform = sp.Transforms.Translate([i / 10.0, 0.0, 0.0])) # An arbitrary rigid transform can optionally be specified. mesh2 = scene.create_mesh(shared_color = sp.Color(1.0,0.0,0.0),camera_space=True) mesh2.add_cube(transform = np.dot(sp.Transforms.Translate([0.0, 0.0, -5.0]), sp.Transforms.Scale(0.5))) frame.add_mesh(mesh2) label = scene.create_label(text = "Hi", color = sp.Colors.White, size_in_pixels = 80, offset_distance = 0.6, camera_space = True) frame.add_label(label = label, position = [0.0, 0.0, -5.0]) # Display the Scene in Jupyter scene # Tutorial 3 - Point clouds 1 # Create a scene scene = sp.Scene() # Create a mesh that we'll turn in to a point-cloud using enable_instancing() mesh = scene.create_mesh(shared_color = sp.Color(0,1,0)) mesh.add_cube() # Unit diameter cube that will act as primitive mesh.apply_transform(sp.Transforms.Scale(0.01)) # Scale the primitive mesh.enable_instancing(positions = 2 * np.random.rand(10000, 3) - 1) # Cause the mesh to be replicated across many instances with the provided translations. You can optionally also provide per-instance colors and quaternion rotations. # Create Canvas and Frame, and add Mesh to Frame canvas = scene.create_canvas_3d(width = 300, height = 300, shading=sp.Shading(bg_color=sp.Colors.White)) frame = canvas.create_frame() frame.add_mesh(mesh) scene # Tutorial 4 - Points clouds 2 # Note that the point cloud primitive can be arbitrarily complex. # The primitive geometry will only be stored once for efficiency. 
# Some parameters disc_thickness = 0.2 normal_length = 1.5 point_size = 0.1 # A helper Mesh which we won't actually use for rendering - just to find the points and normals on a sphere to be used in mesh2 below # NB this is created using the sp.Mesh() constructor directly so it doesn't get added automatically to the Scene sphere_mesh = sp.Mesh() sphere_mesh.add_sphere(transform = sp.Transforms.Scale(2.0), color = sp.Color(1.0, 0.0, 0.0)) N = sphere_mesh.count_vertices() points = sphere_mesh.vertex_buffer['pos'] normals = sphere_mesh.vertex_buffer['norm'] # Convert the normals into quaternion rotations rotations = np.zeros((N, 4)) for i in range(0, N): rotations[i, :] = sp.Transforms.QuaternionToRotateXAxisToAlignWithAxis(normals[i, :]) # Generate some random colors colors = np.random.rand(N,3) # Create a scene scene = sp.Scene() # Create a mesh that we'll turn in to a point-cloud using enable_instancing() mesh = scene.create_mesh(shared_color = sp.Color(0,1,0), double_sided = True) # shared_color will be overridden in a moment # Add the primitive to the Mesh - a disc and a thickline showing the normal mesh.add_disc(segment_count = 20, transform = sp.Transforms.Scale([disc_thickness, 1.0, 1.0])) mesh.add_thickline(start_point = np.array([disc_thickness * 0.5, 0.0, 0.0]), end_point = np.array([normal_length, 0.0, 0.0]), start_thickness = 0.2, end_thickness = 0.1) mesh.apply_transform(sp.Transforms.Scale(point_size)) # Now turn the mesh into a point-cloud mesh.enable_instancing(positions = points, rotations = rotations, colors = colors) # Both rotations and colors are optional # Create Canvas and Frame, and add Mesh to Frame canvas = scene.create_canvas_3d(width = 300, height = 300) frame = canvas.create_frame() frame.add_mesh(mesh) scene # Tutorial 5 - Misc Meshes # Scene is the top level container in ScenePic scene = sp.Scene() # Ok - let's start by creating some Mesh objects # Mesh 1 - contains a cube and a sphere # Mesh objects can contain arbitrary triangle mesh 
and line geometry # Meshes can belong to "layers" which can be controlled by the user interactively mesh1 = scene.create_mesh(layer_id = "Sphere+") # No shared_color provided, so per-vertex coloring enabled mesh1.add_cylinder(color = sp.Color(1.0, 0.0, 0.0), transform = sp.Transforms.Translate([-2.0, 0.0, -2.0])) mesh1.add_uv_sphere(color = sp.Color(0.0, 0.0, 1.0), transform = np.dot(sp.Transforms.Translate([-1.0, 1.0, 0.0]), sp.Transforms.Scale(1.8)), fill_triangles = False, add_wireframe = True) mesh1.add_icosphere(color = sp.Color(0.0, 1.0, 1.0), transform = np.dot(sp.Transforms.Translate([2.0, 1.0, 0.0]), sp.Transforms.Scale(1.8)), fill_triangles = False, add_wireframe = True, steps = 2) # Mesh 2 - coordinate axes mesh2 = scene.create_mesh(layer_id = "Coords") mesh2.add_coordinate_axes(transform = sp.Transforms.Translate([0.0, 0.0, 0.0])) # Mesh 3 - example of Loop Subdivision on a cube cube_verts = np.array([[-0.5, -0.5, -0.5], [+0.5, -0.5, -0.5], [-0.5, +0.5, -0.5], [+0.5, +0.5, -0.5], [-0.5, -0.5, +0.5], [+0.5, -0.5, +0.5], [-0.5, +0.5, +0.5], [+0.5, +0.5, +0.5]]) cube_tris = np.array([[0, 2, 3], [0, 3, 1], [1, 3, 7], [1, 7, 5], [4, 5, 7], [4, 7, 6], [4, 6, 2], [4, 2, 0], [2, 6, 7], [2, 7, 3], [4, 0, 1], [4, 1, 5]]) cube_verts_a, cube_tris_a = sp.LoopSubdivStencil(cube_tris, 2, False).apply(cube_verts) # Two steps of subdivision, no projection to limit surface. Stencils could be reused for efficiency for other meshes with same triangle topology. cube_verts_b, cube_tris_b = sp.LoopSubdivStencil(cube_tris, 2, True).apply(cube_verts) # Two steps of subdivision, projection to limit surface. Stencils could be reused for efficiency for other meshes with same triangle topology. 
mesh3 = scene.create_mesh(shared_color = sp.Color(1.0, 0.8, 0.8)) mesh3.add_mesh_without_normals(cube_verts, cube_tris, transform = sp.Transforms.Translate([-1.0, 0.0, 0.0])) # Add non-subdivided cube mesh3.add_mesh_without_normals(cube_verts_a, cube_tris_a) mesh3.add_mesh_without_normals(cube_verts_b, cube_tris_b, transform = sp.Transforms.Translate([+1.0, 0.0, 0.0])) # Mesh 4 - line example mesh4 = scene.create_mesh() Nsegs = 7000 positions = np.cumsum(np.random.rand(Nsegs, 3) * 0.2, axis = 0) colored_points = np.concatenate((positions, np.random.rand(Nsegs, 3)), axis = 1) mesh4.add_lines(colored_points[0:-1, :], colored_points[1:, :]) mesh4.add_camera_frustum(color = sp.Color(1.0,1.0,0.0)) # Let's create two Canvases this time canvas1 = scene.create_canvas_3d(width = 300, height = 300) canvas2 = scene.create_canvas_3d(width = 300, height = 300) # We can link their keyboard/mouse/etc. input events to keep the views in sync scene.link_canvas_events(canvas1, canvas2) # And we can specify that certain named "mesh collections" should have user-controlled visibility and opacity # Meshs without mesh_collection set, or without specified visibilities will always be visible and opaque canvas1.set_layer_settings({"Coords" : { "opacity" : 0 }, "Sphere+" : { "opacity" : 1 }}) # A Frame contains an array of meshes frame11 = canvas1.create_frame(meshes = [mesh1, mesh2]) # Note that Frames are created from the Canvas not the Scene frame21 = canvas2.create_frame(meshes = [mesh2, mesh3]) frame22 = canvas2.create_frame(meshes = [mesh4, mesh1]) # ScenePic has told Jupyter how to display scene objects scene # Tutorial 6 - Images and Textures # Scene is the top level container in ScenePic scene = sp.Scene() # Create and populate an Image object image1 = scene.create_image(image_id = "PolarBear") image1.load(asset_path("PolarBear.png")) # This will preserve the image data in compressed PNG format # Create a texture map texture = scene.create_image(image_id = "texture") 
texture.load(asset_path("uv.png")) # we can use this image to skin meshes # Example of a mesh that is defined in camera space not world space # This will not move as the virtual camera is moved with the mouse cam_space_mesh = scene.create_mesh(shared_color = sp.Color(1.0, 0.0, 0.0), camera_space = True) cam_space_mesh.add_sphere(transform = np.dot(sp.Transforms.Translate([10, -10, -20.0]), sp.Transforms.Scale(1.0))) # Some textured primitives sphere = scene.create_mesh(texture_id=texture.image_id, nn_texture = False) sphere.add_icosphere(steps=4) cube = scene.create_mesh(texture_id=texture.image_id) transform = sp.Transforms.translate([-1, 0, 0]) @ sp.Transforms.scale(0.5) cube.add_cube(transform=transform) # Show images in 3D canvas canvas = scene.create_canvas_3d(shading=sp.Shading(bg_color=sp.Colors.White)) mesh1 = scene.create_mesh(texture_id = "PolarBear") mesh1.add_image() # Adds image in canonical position # Add an animation that rigidly transforms each image n_frames = 20 for i in range(n_frames): angle = 2 * math.pi * i / n_frames c, s = math.cos(angle), math.sin(angle) # Create a focus point that allows you to "lock" the camera's translation and optionally orientation by pressing the "l" key axis = np.array([1.0, 0.0, 1.0]) axis /= np.linalg.norm(axis) focus_point = sp.FocusPoint([c,s,0], orientation_axis_angle = axis * angle) mesh = scene.create_mesh() mesh.add_coordinate_axes(transform = np.dot(sp.Transforms.Translate(focus_point.position), sp.Transforms.RotationMatrixFromAxisAngle(axis, angle))) im_size = 15 im_data = np.random.rand(im_size, im_size, 4) im_data[:,:,3] = 0.5 + 0.5 * im_data[:,:,3] imageB = scene.create_image() imageB.from_numpy(im_data) # Converts data to PNG format meshB = scene.create_mesh(texture_id = imageB, is_billboard = True, use_texture_alpha=True) meshB.add_image(transform = np.dot(sp.Transforms.Scale(2.0), sp.Transforms.Translate([0,0,-1]))) frame = canvas.create_frame(focus_point = focus_point) frame.add_mesh(mesh1, transform 
= sp.Transforms.Translate([c,s,0])) frame.add_mesh(meshB, transform = np.dot(sp.Transforms.Scale(i * 1.0 / n_frames), sp.Transforms.Translate([-c,-s,0]))) frame.add_mesh(cam_space_mesh) frame.add_mesh(sphere, transform=sp.Transforms.rotation_about_y(np.pi * 2 * i / n_frames)) frame.add_mesh(cube, transform=sp.Transforms.rotation_about_y(-np.pi * 2 * i / n_frames)) frame.add_mesh(mesh) # Show Scene scene # Tutorial 7 - 2D canvases # Scene is the top level container in ScenePic scene = sp.Scene() # Load an image image1 = scene.create_image(image_id = "PolarBear") image1.load(asset_path("PolarBear.png")) # This will preserve the image data in compressed PNG format # Create and populate an Image object image2 = scene.create_image(image_id = "Random") image2.from_numpy(np.random.rand(20, 30, 3) * 128 / 255.0) # Converts data to PNG format # Create a 2D canvas demonstrating different image positioning options canvas1 = scene.create_canvas_2d(width = 400, height = 300, background_color = sp.Colors.White) canvas1.create_frame().add_image(image1, "fit") canvas1.create_frame().add_image(image1, "fill") canvas1.create_frame().add_image(image1, "stretch") canvas1.create_frame().add_image(image1, "manual", x = 50, y= 50, scale = 0.3) # You can composite images and primitives too canvas2 = scene.create_canvas_2d(width = 300, height = 300) f = canvas2.create_frame() f.add_image(image2, "fit") f.add_image(image1, "manual", x = 30, y= 30, scale = 0.2) f.add_circle(200, 200, 40, fill_color = sp.Colors.Black, line_width = 10, line_color = sp.Colors.Blue) f.add_rectangle(200, 100, 50, 25, fill_color = sp.Colors.Green, line_width = 0) f.add_text("Hello World", 30, 100, sp.Colors.White, 100, "segoe ui light") scene.framerate = 2 scene # Tutorial 8 - a mix of transparent and opaque objects, with labels np.random.seed(55) scene = sp.Scene() canvas = scene.create_canvas_3d(width = 700, height = 700) frame = canvas.create_frame() # Create a mesh that we'll turn in to a point-cloud using 
enable_instancing() layer_settings = { "Labels" : { "opacity" : 1.0 }} N = 20 for i in range(N): # Sample object geotype = np.random.randint(2) color = np.random.rand(3) size = 0.3 * np.random.rand() + 0.2 position = 3.0 * np.random.rand(3) - 1.5 opacity = 1.0 if np.random.randint(2) == 0 else np.random.uniform(0.45, 0.55) # Generate geometry layer_id = "Layer" + str(i) mesh = scene.create_mesh(shared_color = color, layer_id = layer_id) layer_settings[layer_id] = { "opacity" : opacity } if geotype == 0: mesh.add_cube() elif geotype == 1: mesh.add_sphere() mesh.apply_transform(sp.Transforms.Scale(size)) # Scale the primitive mesh.apply_transform(sp.Transforms.Translate(position)) frame.add_mesh(mesh) # Add label text = "{0:0.2f} {1:0.2f} {2:0.2f} {3:0.2f}".format(color[0], color[1], color[2], opacity) horizontal_align = ["left", "center", "right"][np.random.randint(3)] vertical_align = ["top", "middle", "bottom"][np.random.randint(3)] if geotype == 0: if horizontal_align != "center" and vertical_align != "middle": offset_distance = size * 0.7 else: offset_distance = size * 0.9 else: if horizontal_align != "center" and vertical_align != "middle": offset_distance = size * 0.5 * 0.8 else: offset_distance = size * 0.6 label = scene.create_label(text = text, color = color, layer_id = "Labels", font_family = "consolas", size_in_pixels = 80 * size, offset_distance = offset_distance, vertical_align = vertical_align, horizontal_align = horizontal_align) frame.add_label(label = label, position = position) canvas.set_layer_settings(layer_settings) scene # Tutorial 9 - mesh animation # let's create our mesh to get started scene = sp.Scene() canvas = scene.create_canvas_3d(width=700, height=700) # Load a mesh to animate jelly_mesh = sp.load_obj(asset_path("jelly.obj")) texture = scene.create_image("texture") texture.load(asset_path("jelly.png")) # create a base mesh for the animation. 
The animation # will only change the vertex positions, so this mesh # is used to set everything else, e.g. textures. base_mesh = scene.create_mesh("jelly_base") base_mesh.texture_id = texture.image_id base_mesh.use_texture_alpha = True base_mesh.add_mesh(jelly_mesh) def random_linspace(min_val, max_val, num_samples): vals = np.linspace(min_val, max_val, num_samples) np.random.shuffle(vals) return vals # this base mesh will be instanced, so we can animate each # instance individual using rigid transforms, in this case # just translation. marbles = scene.create_mesh("marbles_base") num_marbles = 10 marbles.add_sphere(sp.Colors.White, transform=sp.Transforms.Scale(0.2)) marble_positions = np.zeros((num_marbles, 3), np.float32) marble_positions[:, 0] = random_linspace(-0.6, 0.6, num_marbles) marble_positions[:, 2] = random_linspace(-1, 0.7, num_marbles) marble_offsets = np.random.uniform(0, 2*np.pi, size=num_marbles).astype(np.float32) marble_colors_start = np.random.uniform(0, 1, size=(num_marbles, 3)).astype(np.float32) marble_colors_end = np.random.uniform(0, 1, size=(num_marbles, 3)).astype(np.float32) marbles.enable_instancing(marble_positions, colors=marble_colors_start) for i in range(60): # animate the wave mesh by updating the vertex positions positions = jelly_mesh.positions.copy() delta_x = (positions[:, 0] + 0.0838 * i) * 10 delta_z = (positions[:, 2] + 0.0419 * i) * 10 positions[:, 1] = positions[:, 1] + 0.1 * (np.cos(delta_x) + np.sin(delta_z)) # we create a mesh update with the new posiitons. We can use this mesh update # just like a new mesh, because it essentially is one, as ScenePic will create # a new mesh from the old one using these new positions. 
jelly_update = scene.update_mesh_positions("jelly_base", positions) frame = canvas.create_frame(meshes=[jelly_update]) # this is a simpler form of animation in which we will change the position # and colors of the marbles marble_y = np.sin(0.105 * i + marble_offsets) positions = np.stack([marble_positions[:, 0], marble_y, marble_positions[:, 2]], -1) alpha = ((np.sin(marble_y) + 1) * 0.5).reshape(-1, 1) beta = 1 - alpha colors = alpha * marble_colors_start + beta * marble_colors_end marbles_update = scene.update_instanced_mesh("marbles_base", positions, colors=colors) frame.add_mesh(marbles_update) scene.quantize_updates() scene # Tutorial 10 - Instanced Animation # In this tutorial we will explore how we can use mesh updates on # instanced meshes as well. We will begin by creating a simple primitive # and use instancing to create a cloud of stylized butterflies. We will # then using mesh updates on the instances to make the butterflies # fly. scene = sp.Scene() butterflies = scene.create_mesh("butterflies", double_sided=True) # the primitive will be a single wing, and we'll use instancing to create # all the butterflies butterflies.add_quad(sp.Colors.Blue, [0, 0, 0], [0.1, 0, 0.04], [0.08, 0, -0.06], [0.015, 0, -0.03]) rotate_back = sp.Transforms.quaternion_from_axis_angle([1, 0, 0], -np.pi / 6) num_butterflies = 100 num_anim_frames = 20 # this will make them flap their wings independently start_frames = np.random.randint(0, num_anim_frames, num_butterflies) rot_angles = np.random.uniform(-1, 1, num_butterflies) rotations = np.zeros((num_butterflies * 2, 4), np.float32) positions = np.random.uniform(-1, 1, (num_butterflies * 2, 3)) colors = np.random.random((num_butterflies * 2, 3)) for b, angle in enumerate(rot_angles): rot = sp.Transforms.quaternion_from_axis_angle([0, 1, 0], angle) rotations[2 * b] = rotations[2 * b + 1] = rot # we will use the second position per butterfly as a destination dx = np.sin(angle) * 0.1 dy = positions[2 * b + 1, 1] - positions[2 * 
b, 1] dy = np.sign(angle) * min(abs(angle), 0.1) dz = np.cos(angle) * 0.1 positions[2 * b + 1] = positions[2 * b] + [dx, dy, dz] butterflies.enable_instancing(positions, rotations, colors) canvas = scene.create_canvas_3d("main", 700, 700) canvas.shading = sp.Shading(sp.Colors.White) start = -np.pi / 6 end = np.pi / 2 delta = (end - start) / (num_anim_frames // 2 - 1) # let's construct the animation frame by frame animation = [] for i in range(num_anim_frames): frame_positions = np.zeros_like(positions) frame_rotations = np.zeros_like(rotations) frame_colors = np.zeros_like(colors) for b, start_frame in enumerate(start_frames): frame = (i + start_frame) % num_anim_frames if frame < num_anim_frames // 2: angle = start + delta * frame else: angle = end + delta * (frame - num_anim_frames // 2) right = sp.Transforms.quaternion_from_axis_angle([0, 0, 1], angle) right = sp.Transforms.quaternion_multiply(rotate_back, right) right = sp.Transforms.quaternion_multiply(rotations[2 * b], right) left = sp.Transforms.quaternion_from_axis_angle([0, 0, 1], np.pi - angle) left = sp.Transforms.quaternion_multiply(rotate_back, left) left = sp.Transforms.quaternion_multiply(rotations[2 * b + 1], left) frame_rotations[2 * b] = right frame_rotations[2 * b + 1] = left progress = np.sin((frame * 2 * np.pi) / num_anim_frames) progress = (progress + 1) * 0.5 # we move the butterfly along its path pos = (1 - progress) * positions[2 * b] + progress * positions[2 * b + 1] pos[1] -= np.sin(angle) * 0.02 frame_positions[2 * b : 2 * b + 2, :] = pos # finally we alter the color color = (1 - progress) * colors[2 * b] + progress * colors[2 * b + 1] frame_colors[2 * b : 2 * b + 2, :] = color # now we create the update. Here we update position, rotation, # and color, but you can update them separately as well by passing # the `*None()` versions of the buffers to this function. 
update = scene.update_instanced_mesh("butterflies", frame_positions, frame_rotations, frame_colors) animation.append(update) # now we create the encapsulating animation which will move the camera # around the butterflies. The inner animation will loop as the camera moves. num_frames = 300 cameras = sp.Camera.orbit(num_frames, 3, 2) for i, camera in enumerate(cameras): frame = canvas.create_frame() frame.add_mesh(animation[i % num_anim_frames]) frame.camera = camera scene # Tutorial 11 - camera movement # in this tutorial we will show how to create per-frame camera movement. # while the user can always choose to override this behavior, having a # camera track specified can be helpful for demonstrating particular # items in 3D. We will also show off the flexible GLCamera class. scene = sp.Scene() spin_canvas = scene.create_canvas_3d("spin") spiral_canvas = scene.create_canvas_3d("spiral") # let's create some items in the scene so we have a frame of reference polar_bear = scene.create_image(image_id="polar_bear") polar_bear.load(asset_path("PolarBear.png")) uv_texture = scene.create_image(image_id = "texture") uv_texture.load(asset_path("uv.png")) cube = scene.create_mesh("cube", texture_id=polar_bear.image_id) cube.add_cube() sphere = scene.create_mesh("sphere", texture_id=uv_texture.image_id) sphere.add_icosphere(steps=4, transform=sp.Transforms.translate([0, 1, 0])) num_frames = 60 for i in range(num_frames): angle = i*np.pi*2/num_frames # for the first camera we will spin in place on the Z axis rotation = sp.Transforms.rotation_about_z(angle) spin_camera = sp.Camera(center=[0, 0, 4], rotation=rotation, fov_y_degrees=30.0) # for the second camera, we will spin the camera in a spiral around the scene # we can do this using the look-at initialization, which provides a straightforward # "look at" interface for camera placement. 
camera_center = [4*np.cos(angle), i*4/num_frames - 2, 4*np.sin(angle)] spiral_camera = sp.Camera(camera_center, look_at=[0, 0.5, 0]) # we can add frustums directly using the ScenePic camera objects frustums = scene.create_mesh() frustums.add_camera_frustum(spin_camera, sp.Colors.Red) frustums.add_camera_frustum(spiral_camera, sp.Colors.Green) spin_frame = spin_canvas.create_frame() spin_frame.camera = spin_camera # each frame can have its own camera object spin_frame.add_meshes([cube, sphere, frustums]) spiral_frame = spiral_canvas.create_frame() spiral_frame.camera = spiral_camera spiral_frame.add_meshes([cube, sphere, frustums]) scene.link_canvas_events(spin_canvas, spiral_canvas) scene # Tutorial 12 - audio tracks # in this tutorial we'll show how to attach audio tracks to canvases. ScenePic # supports any audio file format supported by the browser. def _set_audio(scene, canvas, path): audio = scene.create_audio() audio.load(path) canvas.media_id = audio.audio_id scene = sp.Scene() names = ["red", "green", "blue"] colors = [sp.Colors.Red, sp.Colors.Green, sp.Colors.Blue] frequencies = [0, 1, 0.5] graph = scene.create_graph("graph", width=900, height=150) for name, color, frequency in zip(names, colors, frequencies): mesh = scene.create_mesh() mesh.add_cube(color) canvas = scene.create_canvas_3d(name, width=300, height=300) _set_audio(scene, canvas, asset_path(name + ".ogg")) values = [] for j in range(60): frame = canvas.create_frame() scale = math.sin(j * 2 * math.pi * frequency / 30) frame.add_mesh(mesh, sp.Transforms.scale((scale + 1) / 2 + 0.5)) values.append(scale) graph.add_sparkline(name, values, color) graph.media_id = canvas.media_id names.append("graph") scene.grid("600px", "1fr auto", "1fr 1fr 1fr") scene.place("graph", "2", "1 / span 3") scene.link_canvas_events(*names) scene # Tutorial 13 - video # It is also possible to attach videos to ScenePic scenes. 
Once attached, you can draw the # frames of those videos to canvases in the same way as images, and can draw the same # video to multiple frames. Once a media file (video or audio) has been attached to a # canvas, that file will be used to drive playback. In practical terms, this means that # ScenePic will display frames such that they line up with the timestamps of the video # working on the assumption that ScenePic frames are displayed at the framerate of the video. def _angle_to_pos(angle, radius): return np.cos(angle) * radius + 200, np.sin(angle) * radius + 200 scene = sp.Scene() video = scene.create_video() video.load(asset_path("circles.mp4")) tracking = scene.create_canvas_2d("tracking", background_color=sp.Colors.White) tracking.media_id = video.video_id multi = scene.create_canvas_2d("multi", background_color=sp.Colors.White) multi.media_id = video.video_id angles = np.linspace(0, 2 * np.pi, 360, endpoint=False) for angle in angles: # if a 2D canvas has an associated video # then a frame of that video can be added # via the add_video method. 
frame = tracking.create_frame() frame.add_video(layer_id="video") red_pos = _angle_to_pos(angle, 160) frame.add_rectangle(red_pos[0] - 11, red_pos[1] - 11, 22, 22, [255, 0, 0], 2, layer_id="rect") frame.add_circle(red_pos[0], red_pos[1], 10, fill_color=[255, 0, 0], layer_id="dot") green_pos = _angle_to_pos(-2*angle, 80) frame.add_rectangle(green_pos[0] - 11, green_pos[1] - 11, 22, 22, [0, 255, 0], 2, layer_id="rect") frame.add_circle(green_pos[0], green_pos[1], 10, fill_color=[0, 255, 0], layer_id="dot") blue_pos = _angle_to_pos(4*angle, 40) frame.add_rectangle(blue_pos[0] - 11, blue_pos[1] - 11, 22, 22, [0, 0, 255], 2, layer_id="rect") frame.add_circle(blue_pos[0], blue_pos[1], 10, fill_color=[0, 0, 255], layer_id="dot") frame = multi.create_frame() frame.add_video("manual", red_pos[0] - 40, red_pos[1] - 40, 0.2, layer_id="red") frame.add_video("manual", green_pos[0] - 25, green_pos[1] - 25, 0.125, layer_id="green") frame.add_video("manual", 160, 160, 0.2, layer_id="blue") tracking.set_layer_settings({ "rect": {"render_order": 0}, "video": {"render_order": 1}, "dot": {"render_order": 2} }) scene.link_canvas_events("tracking", "multi") scene # Tutorial 14 - Multiview Visualization # One common and useful scenario for ScenePic is to visualize the result of multiview 3D reconstruction. # In this tutorial we'll show how to load some geometry, assocaited camera calibration # information, and images to create a visualization depicting the results. 
def _load_camera(camera_info): # this function loads an "OpenCV"-style camera representation # and converts it to a GL style for use in ScenePic location = np.array(camera_info["location"], np.float32) euler_angles = np.array(camera_info["rotation"], np.float32) rotation = sp.Transforms.euler_angles_to_matrix(euler_angles, "XYZ") translation = sp.Transforms.translate(location) extrinsics = translation @ rotation world_to_camera = sp.Transforms.gl_world_to_camera(extrinsics) aspect_ratio = camera_info["width"] / camera_info["height"] projection = sp.Transforms.gl_projection(camera_info["fov"], aspect_ratio, 0.01, 100) return sp.Camera(world_to_camera, projection) def _load_cameras(): with open(asset_path("cameras.json")) as file: cameras = json.load(file) return [_load_camera(cameras[key]) for key in cameras] scene = sp.Scene() # load the fitted cameras cameras = _load_cameras() # this textured cube will stand in for a reconstructed mesh texture = scene.create_image("texture") texture.load(asset_path("PolarBear.png")) cube = scene.create_mesh("cube") cube.texture_id = texture.image_id cube.add_cube(transform=sp.Transforms.scale(2)) # construct all of the frustums # and camera images frustums = scene.create_mesh("frustums", layer_id="frustums") colors = [sp.Colors.Red, sp.Colors.Green, sp.Colors.Blue] paths = [asset_path(name) for name in ["render0.png", "render1.png", "render2.png"]] camera_images = [] images = [] for i, (color, path, camera) in enumerate(zip(colors, paths, cameras)): image = scene.create_image(path) image.load(path) frustums.add_camera_frustum(camera, color) image_mesh = scene.create_mesh("image{}".format(i), layer_id="images", shared_color=sp.Colors.Gray, double_sided=True, texture_id=image.image_id) image_mesh.add_camera_image(camera) images.append(image) camera_images.append(image_mesh) # create one canvas for each camera to show the scene from # that camera's viewpoint width = 640 for i, camera in enumerate(cameras): height = width / 
camera.aspect_ratio canvas = scene.create_canvas_3d("hand{}".format(i), width, height, camera=camera) frame = canvas.create_frame() frame.add_mesh(cube) frame.add_mesh(frustums) frame.camera = camera for cam_mesh in camera_images: frame.add_mesh(cam_mesh) scene # Tutorial 15 - Frame Layer Settings # It is possible to use the per-frame layer settings to automatically # change various layer properties, for example to fade meshes in and # out of view. The user can still override this manually using the # controls, of course, but this feature can help guide the user through # more complex animations. scene = sp.Scene() # In this tutorial we will fade out one mesh (the cube) and fade # another in (the sphere). cube = scene.create_mesh(layer_id="cube") cube.add_cube(sp.Colors.Green) sphere = scene.create_mesh(layer_id="sphere") sphere.add_sphere(sp.Colors.Red) canvas = scene.create_canvas_3d() for i in range(60): sphere_opacity = i / 59 cube_opacity = 1 - sphere_opacity frame = canvas.create_frame() frame.add_mesh(cube) frame.add_mesh(sphere) # the interface here is the same as with how layer settings # usually works at the canvas level. frame.set_layer_settings({ "cube": {"opacity": cube_opacity}, "sphere": {"opacity": sphere_opacity} }) scene ```
github_jupyter
# Under the Hood *Modeling and Simulation in Python* Copyright 2021 Allen Downey License: [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/) ``` # download modsim.py if necessary from os.path import basename, exists def download(url): filename = basename(url) if not exists(filename): from urllib.request import urlretrieve local, _ = urlretrieve(url, filename) print('Downloaded ' + local) download('https://raw.githubusercontent.com/AllenDowney/' + 'ModSimPy/master/modsim.py') # import functions from modsim from modsim import * ``` In this chapter we "open the hood," looking more closely at how some of the tools we have used---`run_solve_ivp`, `root_scalar`, and `maximize_scalar`---work. Most of the time you don't need to know, which is why I left this chapter for last. But you might be curious. And if nothing else, I have found that I can remember how to use these tools more easily because I know something about how they work. ## How run_solve_ivp Works `run_solve_ivp` is a function in the ModSimPy library that checks for common errors in the parameters and then calls `solve_ip`, which is the function in the SciPy library that does the actual work. By default, `solve_ivp` uses the Dormand-Prince method, which is a kind of Runge-Kutta method. You can read about it at <https://en.wikipedia.org/wiki/Dormand-Prince_method>, but I'll give you a sense of it here. The key idea behind all Runge-Kutta methods is to evaluate the slope function several times at each time step and use a weighted average of the computed slopes to estimate the value at the next time step. Different methods evaluate the slope function in different places and compute the average with different weights. So let's see if we can figure out how `solve_ivp` works. 
As an example, we'll solve the following differential equation: $$\frac{dy}{dt}(t) = y \sin t$$ Here's the slope function we'll use: ``` import numpy as np def slope_func(t, state, system): y, = state dydt = y * np.sin(t) return dydt ``` I'll create a `State` object with the initial state and a `System` object with the end time. ``` init = State(y=1) system = System(init=init, t_end=3) ``` Now we can call `run_solve_ivp`. ``` results, details = run_solve_ivp(system, slope_func) details ``` One of the variables in `details` is `nfev`, which stands for "number of function evaluations", that is, the number of times `solve_ivp` called the slope function. This example took 50 evaluations. Keep that in mind. Here are the first few time steps in `results`: ``` results.head() ``` And here is the number of time steps. ``` len(results) ``` `results` contains 101 points that are equally spaced in time. Now you might wonder, if `solve_ivp` ran the slope function 50 times, how did we get 101 time steps? To answer that question, we need to know more how the solver works. There are actually three steps: 1. For each time step, `solve_ivp` evaluates the slope function seven times, with different values of `t` and `y`. 2. Using the results, it computes the best estimate for the value `y` at the next time step. 3. After computing all of the time steps, it uses interpolation to compute equally spaced points that connect the estimates from the previous step. So we can see what's happening, I will run `run_solve_ivp` with the keyword argument `dense_output=False`, which skips the interpolation step and returns time steps that are not equally spaced (that is, not "dense"). While we're at it, I'll modify the slope function so that every time it runs, it adds the values of `t`, `y`, and `dydt` to a list called `evals`. 
``` def slope_func(t, state, system): y, = state dydt = y * np.sin(t) evals.append((t, y, dydt)) return dydt ``` Now, before we call `run_solve_ivp` again, I'll initialize `evals` with an empty list. ``` evals = [] results2, details = run_solve_ivp(system, slope_func, dense_output=False) ``` Here are the results: ``` results2 ``` It turns out there are only eight time steps, and the first five of them only cover 0.11 seconds. The time steps are not equal because the Dormand-Prince method is *adaptive*. At each time step, it actually computes two estimates of the next value. By comparing them, it can estimate the magnitude of the error, which it uses to adjust the time step. If the error is too big, it uses a smaller time step; if the error is small enough, it uses a bigger time step. By adjusting the time step in this way, it minimizes the number of times it calls the slope function to achieve a given level of accuracy. Because we saved the values of `y` and `t`, we can plot the locations where the slope function was evaluated. I'll need to use a couple of features we have not seen before, if you don't mind. First we'll unpack the values from `evals` using `np.transpose`. Then we can use trigonometry to convert the slope, `dydt`, to components called `u` and `v`. ``` t, y, slope = np.transpose(evals) theta = np.arctan(slope) u = np.cos(theta) v = np.sin(theta) ``` Using these values, we can generate a *quiver plot* that shows an arrow for each time the slope function ran. The location of the each arrow represents the values of `t` and `y`; the orientation of the arrow shows the slope that was computed. 
``` import matplotlib.pyplot as plt plt.quiver(t, y, u, v, pivot='middle', color='C1', alpha=0.4, label='evaluation points') results2['y'].plot(style='o', color='C0', label='solution points') results['y'].plot(lw=1, label='interpolation') decorate(xlabel='Time (t)', ylabel='Quantity (y)') ``` In this figure, the arrows show where the slope function was executed; the dots show the best estimate of `y` for each time step; and the line shows the interpolation that connects the estimates. Notice that many of the arrows do not fall on the line; `solve_ivp` evaluated the slope function at these locations in order to compute the solution, but as it turned out, they are not part of the solution. This is good to know when you are writing a slope function; you should not assume that the time and state you get as input variables are correct. ## How root_scalar Works `root_scalar` in the ModSim library is a wrapper for a function in the SciPy library with the same name. Like `run_solve_ivp`, it checks for common errors and changes some of the parameters in a way that makes the SciPy function easier to use (I hope). According to the documentation, `root_scalar` uses "a combination of bisection, secant, and inverse quadratic interpolation methods." (See <https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root_scalar.html>) To understand what that means, suppose we're trying to find a root of a function of one variable, $f(x)$, and assume we have evaluated the function at two places, $x_1$ and $x_2$, and found that the results have opposite signs. Specifically, assume $f(x_1) > 0$ and $f(x_2) < 0$, as shown in the following diagram: ![Initial state of a root-finding search](https://github.com/AllenDowney/ModSim/raw/main/figs/secant.png) If $f$ is a continuous function, there must be at least one root in this interval. In this case we would say that $x_1$ and $x_2$ *bracket* a root. If this were all you knew about $f$, where would you go looking for a root? 
If you said "halfway between $x_1$ and $x_2$," congratulations! You just invented a numerical method called *bisection*! If you said, "I would connect the dots with a straight line and compute the zero of the line," congratulations! You just invented the *secant method*! And if you said, "I would evaluate $f$ at a third point, find the parabola that passes through all three points, and compute the zeros of the parabola," congratulations, you just invented *inverse quadratic interpolation*! That's most of how `root_scalar` works. The details of how these methods are combined are interesting, but beyond the scope of this book. You can read more at <https://en.wikipedia.org/wiki/Brent%27s_method>. ## How maximize_scalar Works `maximize_scalar` in the ModSim library is a wrapper for a function in the SciPy library called `minimize_scalar`. You can read about it at <https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize_scalar.html>. By default, it uses Brent's method, which is related to the method I described in the previous section for root-finding. Brent's method for finding a maximum or minimum is based on a simpler algorithm: the *golden-section search*, which I will explain. Suppose we're trying to find the minimum of a function of a single variable, $f(x)$. As a starting place, assume that we have evaluated the function at three places, $x_1$, $x_2$, and $x_3$, and found that $x_2$ yields the lowest value. The following diagram shows this initial state. ![Initial state of a golden-section search](https://github.com/AllenDowney/ModSim/raw/main/figs/golden1.png) We will assume that $f(x)$ is continuous and *unimodal* in this range, which means that there is exactly one minimum between $x_1$ and $x_3$. The next step is to choose a fourth point, $x_4$, and evaluate $f(x_4)$. There are two possible outcomes, depending on whether $f(x_4)$ is greater than $f(x_2)$ or not. The following figure shows the two possible states. 
![Possible states of a golden-section search after evaluating $f(x_4)$](https://github.com/AllenDowney/ModSim/raw/main/figs/golden2.png) If $f(x_4)$ is less than $f(x_2)$ (shown on the left), the minimum must be between $x_2$ and $x_3$, so we would discard $x_1$ and proceed with the new bracket $(x_2, x_4, x_3)$. If $f(x_4)$ is greater than $f(x_2)$ (shown on the right), the local minimum must be between $x_1$ and $x_4$, so we would discard $x_3$ and proceed with the new bracket $(x_1, x_2, x_4)$. Either way, the range gets smaller and our estimate of the optimal value of $x$ gets better. This method works for almost any value of $x_4$, but some choices are better than others. You might be tempted to bisect the interval between $x_2$ and $x_3$, but that turns out not to be optimal. You can read about a better option at <https://greenteapress.com/matlab/golden>. ## Chapter Review The information in this chapter is not strictly necessary; you can use these methods without knowing much about how they work. But there are two reasons you might want to know. One reason is pure curiosity. If you use these methods, and especially if you come to rely on them, you might find it unsatisfying to treat them as "black boxes." At the risk of mixing metaphors, I hope you enjoyed opening the hood. The other reason is that these methods are not infallible; sometimes things go wrong. If you know how they work, at least in a general sense, you might find it easier to debug them. With that, you have reached the end of the book, so congratulations! I hope you enjoyed it and learned a lot. I think the tools in this book are useful, and the ways of thinking are important, not just in engineering and science, but in practically every field of inquiry. Models are the tools we use to understand the world: if you build good models, you are more likely to get things right. Good luck!
github_jupyter
# What you will learn from this notebook This notebook is supposed to demonstrate a simplified version of an actual analysis you might want to run. In the real world the steps would probably be the same but the dataset itself would be much, much noisier (meaning it would take some effort to put it into the required shape) and much bigger (I mean, nowadays in the industry we are dealing with more than ~30 samples!). ``` # general packages import pandas as pd import numpy as np # specialized stats packages from lifelines import KaplanMeierFitter # plotting import matplotlib.pyplot as plt import seaborn as sns # preferences %matplotlib inline import warnings warnings.filterwarnings('ignore') ``` # Data I will use one of the default datasets from the lifelines library. I don't know much about it and would prefer to avoid jumping to conclusions so I will pretend this data comes actually from a survey among 26 vampires collected 100 years ago. In that survey scientists collected information about how many years ago the vampire became un-dead (in other words was bitten by another vampire and turned into one), how old they were at the time of their transformation, whether they identified themselves as binary or non-binary and whether they have experienced depression symptoms yet. ``` # data from lifelines.datasets import load_psychiatric_patients df = load_psychiatric_patients() df.head() ``` Alright, so we have vampires at different age when they transformed (`Age` column), they reported how many years have passed since transformation (`T` column), whether they have experienced depression symptoms (`C` column) and what gender they identify with (`sex` column, I'm gonna assume `1` is binary and `2` is non-binary because why not). 
# Plotting lifetimes and very basic data exploration There aren't many variables to work with and I will first show you how to plot lifetimes (assuming *now* is at 25, change `current_time` to see how the plot changes): ``` current_time = 25 observed_lifetimes = df['T'].values observed = observed_lifetimes < current_time # I'm using slightly modified function from lifetimes library. See the end of this notebook for details. # If you are running this notebook yourself first execute the cell with function definition at the bottom # of this notebook plot_lifetimes(observed_lifetimes, event_observed=observed, block=True) ``` Next I will see whether experiencing depression symptoms is somehow related to age at which the transformation into a vampire took place: ``` sns.catplot(x="C", y="Age", kind="boxen", data=df.sort_values("C")); plt.xlabel('Vampire experienced depression or not', size=18) plt.ylabel('Age', size=18) plt.title('Vampire survival as a function of age', size=18); ``` Looks like it does! Appears that vampires who have experienced depressive symptoms were on average older when they were bitten and consequently turned into vampires. This is very interesting! Let's look at Kaplan-Meier curves, and hazard curves to check whether gender has anything to do with depressive symptoms. # Kaplan-Meier curve ``` kmf = KaplanMeierFitter() T = df["T"] # time since vampire transformation C = df["C"] # whether they experienced depression symptoms kmf.fit(T,C); kmf.survival_function_ kmf.median_ kmf.plot(figsize=[10,6]) ``` ## Kaplan-Meier curve plotted separately for vampires who define themselves as binary and non-binary ``` # plot both genders on the same plot plt.figure(figsize=[10,6]) groups = df['sex'] ix = (groups == 1) kmf.fit(T[~ix], C[~ix], label='binary vampires') ax = kmf.plot(figsize=[10,10]); kmf.fit(T[ix], C[ix], label='non-binary vampires') kmf.plot(ax=ax); ``` Our sample size is small so error bars are relatively large. 
It looks like in the early years after vampire tranformation more binary (blue line) than non-binary (orange line) vampires experienced depressive symptoms. Maybe non-binary vampires were in a honeymoon stage with vampirism? However, the error bars are pretty much overlapping starting at 20 years past transformation so likely the differences are not statistically significant. But let's look at the hazard rate first. # Hazard rate using Nelson-Aalen estimator ``` from lifelines import NelsonAalenFitter naf = NelsonAalenFitter() naf.fit(T,event_observed=C); naf.plot(figsize=[10,6]); naf.fit(T[~ix], C[~ix], label='binary vampires') ax = naf.plot(figsize=[10,10]) naf.fit(T[ix], C[ix], label='non-binary vampires') naf.plot(ax=ax); ``` Okay, so it looks like hazard rate increases with time for both groups which we could already deduce from survival curves. Interestingly, it seems that the hazard rate for non-binary vampires increases rapidly around 35 years compared to previous period (I'm ignoring error bars for the moment). # Statistical analysis of differences Is there a difference between hazard rate for binary and non-binary vampires? Let's run a log rank test. It will look at random combinations of samples from the two distributions and calculate how many times one had a higher value than the other. A very important point to remember is that this analysis will not tell us anything about the hazard rates themselves but rather whether one is different from the other - so it signals only relative differences. 
``` from lifelines.statistics import logrank_test results = logrank_test(T[ix], T[~ix], event_observed_A=C[ix], event_observed_B=C[~ix]) results.print_summary() ``` Looks like indeed there are no significant differences between binary and non-binary vampires but for the sake of exercise let's see how to get from the test statistic to difference in hazard rate: $$ \log{\lambda} = Z \sqrt{ \frac{4}{D} } $$ ``` Z = results.test_statistic D = C.count() # the fraction under the square root is 4/D, per the formula above (was inverted as D/4) log_lambda = Z * np.sqrt(4 / D) log_lambda ``` Okay, so if the test was significant we could conclude that the hazard rate for binary versus non-binary vampires is roughly 4 times higher which means they are more likely to suffer from depressive symptoms ## What factors influence vampire's survival? Cox Proportional Hazards Model Alright, and let's say now we want to look at how age and gender identity shape vampire's future. We want to train the model on one set of samples and then use it to predict relative hazard increases (it's always relative to other vampires, never absolute hazard!) during vampire's lifetime. ``` from lifelines import CoxPHFitter cph = CoxPHFitter() cph.fit(df, duration_col='T', event_col='C', show_progress=True) cph.print_summary() ``` It looks like age is significantly related to the occurrence of depressive symptoms, just like our EDA indicated at the beginning. If we had some new data we could use the beta values calculated in by fitting method in the previous step to predict relative changes in hazard rates of new vampires (using `cph.predict_cumulative_hazard(new_df)`). This is a semi-parametric model which means that it assumes the same constant rate of change during lifetime for all vampires. There are also models which take into account time covariates but they are beyond the scope of this short notebook. Thanks for reading and good luck with your own explorations! ## Helper function ``` # the function below is a modified version of plotting function from the lifetimes library. 
All credit should go to # them and all faults are mine. def plot_lifetimes(lifetimes, event_observed=None, birthtimes=None, order=False, block=True): """ Parameters: lifetimes: an (n,) numpy array of lifetimes. event_observed: an (n,) numpy array of booleans: True if event observed, else False. birthtimes: an (n,) numpy array offsetting the births away from t=0. Creates a lifetime plot, see examples: """ from matplotlib import pyplot as plt N = lifetimes.shape[0] if N > 100: print("warning: you may want to subsample to less than 100 individuals.") if event_observed is None: event_observed = np.ones(N, dtype=bool) if birthtimes is None: birthtimes = np.zeros(N) if order: """order by length of lifetimes; probably not very informative.""" ix = np.argsort(lifetimes, 0) lifetimes = lifetimes[ix, 0] event_observed = event_observed[ix, 0] birthtimes = birthtimes[ix] fig, ax = plt.subplots(figsize=[15,5], frameon=False) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) for i in range(N): c = "#663366" if event_observed[i] else "green" l = 'burned by the sun rays or an angry mob' if event_observed[i] else "alive" plt.hlines(N - 1 - i, birthtimes[i], birthtimes[i] + lifetimes[i], color=c, lw=3, label=l if (i == 0) or (i==40) else "") m = "|" if not event_observed[i] else 'o' plt.scatter((birthtimes[i]) + lifetimes[i], N - 1 - i, color=c, s=30, marker=m) plt.legend(fontsize=16) plt.xlabel("Number of years since becoming a vampire", size=18) plt.ylabel("Individual vampires", size=20) plt.vlines(current_time, 0, N, lw=2, linestyles='--', alpha=0.5) plt.xticks(fontsize=18) plt.ylim(-0.5, N) return ```
github_jupyter
<br><br><br><br><br> # Awkward datasets <br><br><br><br><br> <br><br><br><br><br> It's not uncommon for data to be non-rectangular. Jagged ("ragged") arrays, cross-references, trees, and graphs are frequently encountered, but difficult to cast as Numpy arrays or Pandas DataFrames. <br> **Let's start with NASA's exoplanet database:** each star can have an arbitrary number of planets (jagged array). <br><br><br><br><br> ``` import pandas # NASA provides this dataset as a CSV file, which suggests a rectangular table: one row per planet. exoplanets = pandas.read_csv("data/nasa-exoplanets.csv") exoplanets # Quite a few planets in this table have the same star ("host") name. numplanets = exoplanets.groupby("pl_hostname").size() numplanets[numplanets > 1] # Use Pandas's MultiIndex to represent a sparse, 2D index (stars × planets without missing values). exoplanets.index = pandas.MultiIndex.from_arrays([exoplanets["pl_hostname"], exoplanets["pl_letter"]]) exoplanets.index.names = ["star", "planet"] exoplanets # Simplify the table to show 5 star attributes and 5 planet attributes. Star attributes are repeated. df = exoplanets[["ra", "dec", "st_dist", "st_mass", "st_rad", "pl_orbsmax", "pl_orbeccen", "pl_orbper", "pl_bmassj", "pl_radj"]] df.columns = pandas.MultiIndex.from_arrays([["star"] * 5 + ["planet"] * 5, ["right asc. (deg)", "declination (deg)", "distance (pc)", "mass (solar)", "radius (solar)", "orbit (AU)", "eccen.", "period (days)", "mass (Jupiter)", "radius (Jupiter)"]]) df # DataFrame.unstack moves the sparse planet index into a dense set of columns. # Every column (reduced to 2: orbit and mass) is duplicated 8 times because one star has 8 planets. df[[("planet", "orbit (AU)"), ("planet", "mass (Jupiter)")]].unstack("planet") # We can also select a cross-section (xs) of the index by planet letter to focus on one at a time. 
df.xs("b", level="planet") # try "c", "d", "e", "f", "g", "h", "i" ``` <br><br><br><br><br> ### Alternative: stars and planets as nested objects <br><br><br><br><br> ``` # Despite the nice tools Pandas provides, it's easier to think of stars and planets as objects. stardicts = [] for (starname, planetname), row in df.iterrows(): if len(stardicts) == 0 or stardicts[-1]["name"] != starname: stardicts.append({"name": starname, "ra": row["star", "right asc. (deg)"], "dec": row["star", "declination (deg)"], "dist": row["star", "distance (pc)"], "mass": row["star", "mass (solar)"], "radius": row["star", "radius (solar)"], "planets": []}) stardicts[-1]["planets"].append({"name": planetname, "orbit": row["planet", "orbit (AU)"], "eccen": row["planet", "eccen."], "period": row["planet", "period (days)"], "mass": row["planet", "mass (Jupiter)"], "radius": row["planet", "radius (Jupiter)"]}) stardicts[:30] # But this destroys Numpy's array-at-a-time performance and (in some cases) convenience. # Here's a way to get both (disclosure: I'm the author). import awkward stars = awkward.fromiter(stardicts) stars # The data are logically a collection of nested lists and dicts... stars[:30].tolist() # ...but they have been entirely converted into arrays. for starattr in "name", "ra", "dec", "dist", "mass", "radius": print("{:15s} =".format("stars[{!r:}]".format(starattr)), stars[starattr]) print() for planetattr in "name", "orbit", "eccen", "period", "mass", "radius": print("{:26s} =".format("stars['planets'][{!r:}]".format(planetattr)), stars["planets"][planetattr]) # The object structure is a façade, built on Numpy arrays. 
planet_masses = stars["planets"]["mass"] # It appears to be a list of lists; print("\nplanet_masses =", planet_masses) # but it is a JaggedArray class instance; print("\ntype(planet_masses) =", type(planet_masses)) # whose numerical data are in a content array; print("\nplanet_masses.content =", planet_masses.content) # and divisions between stars are encoded in an offsets array. print("\nplanet_masses.offsets =", planet_masses.offsets) # Pandas's unstack becomes... stars["planets"][["orbit", "mass"]].pad(8).tolist() # ...which can be used to produce regular Numpy arrays. maxplanets = stars["planets"].counts.max() stars["planets"]["mass"].pad(maxplanets).fillna(float("nan")).regular() # Pandas's cross-section becomes... stars["planets"][:, 0].tolist() # ...though the first dimension must be selected for >= n subelements to ask for the nth subelement. print("stars['planets'].counts =", stars["planets"].counts) atleast3 = (stars["planets"].counts >= 3) print("atleast3 =", atleast3) stars["planets"][atleast3, 2].tolist() # Motivated by particle physics analyses, which have particularly complex events. import uproot # Open a simplified file (for tutorials). lhc_data = uproot.open("data/HZZ.root")["events"] # Read columns of data for particle energies. particle_energies = lhc_data.arrays(["*_E"], namedecode="utf-8") # There's a different number of particles for each particle type in each event. for name, array in particle_energies.items(): print("\nparticle_energies['{}'] = {}".format(name, array)) ``` <br><br> ### Overview of Awkward Arrays Awkward Array (`import awkward`) has been designed to resemble a generalization of Numpy to * jagged arrays * non-rectangular tables * nullable types * heterogeneous lists * cross-references and cyclic references * non-contiguous arrays * virtual data and objects <br><br> ``` # Generate simple data or convert from JSON using fromiter. 
a = awkward.fromiter([[1.1, 2.2, 3.3], [], [4.4, 5.5]]) # Columnar structure is built into the resulting object. print("\na =", a) print("\ntype(a) =", type(a)) print("\na.content =", a.content) print("\na.offsets =", a.offsets) # Numpy ufuncs pass through the structure for array-at-a-time calculations. # (Uses the same __array_ufunc__ trick as CuPy and Dask...) import numpy a = awkward.fromiter([[1.1, 2.2, 3.3], [], [4.4, 5.5]]) print(numpy.sqrt(a)) # Array-at-a-time calculations are only possible if all arguments have the same structure. a = awkward.fromiter([[1.1, 2.2, 3.3], [], [4.4, 5.5]]) b = awkward.fromiter([[100, 200, 300], [], [400, 500]]) print("a + b =", a + b) # In Numpy, scalars can be "broadcasted" to be used in calculations with arrays. # Generalizing this, Numpy arrays can be "broadcasted" to fit jagged arrays. a = awkward.fromiter([[1.1, 2.2, 3.3], [], [4.4, 5.5]]) b = numpy.array([100, 200, 300]) print("a + b =", a + b) # Slicing works like Numpy. a = awkward.fromiter([[1.1, 2.2, 3.3, 4.4], [5.5, 6.6], [7.7, 8.8, 9.9]]) # Take the first two outer lists. print("\na[:2] =", a[:2]) # Take the first two of each inner list. print("\na[:, :2] =", a[:, :2]) # Masking works like Numpy, but with new capabilities for jagged masks. a = awkward.fromiter([[ 1.1, 2.2, 3.3, 4.4], [ 5.5, 6.6], [ 7.7, 8.8, 9.9]]) mask = awkward.fromiter([True, False, True]) jaggedmask = awkward.fromiter([[True, False, False, True], [False, True], [False, False, False]]) # Filter outer lists. print("\na[mask] =", a[mask]) # Filter inner lists. print("\na[jaggedmask] =", a[jaggedmask]) # Integer indexing works like Numpy, but with new capabilities for jagged indexes. a = awkward.fromiter([[1.1, 2.2, 3.3, 4.4], [5.5, 6.6], [7.7, 8.8, 9.9]]) index = awkward.fromiter([2, 1, 1, 0]) jaggedindex = awkward.fromiter([[3, 0, 0, 1, 2], [], [-1]]) # Apply an integer function to outer lists. print("\na[index] =", a[index]) # Apply an integer function to inner lists. 
print("\na[jaggedindex] =", a[jaggedindex]) # In Numpy, "reducers" turn arrays into scalars. # Generalizing this, jagged arrays can be "reduced" to Numpy arrays. a = awkward.fromiter([[1.1, 2.2, 3.3], [], [4.4, 5.5]]) print("\na.sum() =", a.sum()) print("\na.max() =", a.max()) # Like Numpy, argmax and argmin produce integer indexes appropriate for application to arrays. a = awkward.fromiter([[1.1, 2.2, 3.3], [], [4.4, 5.5]]) b = awkward.fromiter([[100, 200, 300], [], [400, 500]]) indexes = a.argmax() print("\nindexes =", indexes) print("\nb[indexes] =", b[indexes]) # Since we often deal with different numbers of objects in the same event, we need ways to # match them for comparison. a = awkward.fromiter([[1.1, 2.2, 3.3], [], [4.4, 5.5]]) b = awkward.fromiter([[10, 20], [30], [40]]) print("\na.cross(b) =", a.cross(b)) print("\na.cross(b).i0 (lefts) =", a.cross(b).i0) print("\na.cross(b).i1 (rights) =", a.cross(b).i1) ``` <br><br><br><br><br> ### Application to a realistic problem Based on a typical case in particle physics, but general enough for all sciences. <br><br><br><br><br> ``` # Suppose we have a variable number of real objects in each event. import collections T = collections.namedtuple("T", ["x", "y"]) truth = [] for i in range(10): truth.append([]) for j in range(numpy.random.poisson(2)): truth[-1].append(T(*numpy.random.randint(0, 100, 2)/100)) truth # When we try to reconstruct these objects from the signals they produce, # the measurements have error, some unlucky objects are lost, and some spurious noise is added. 
M = collections.namedtuple("M", ["x", "y"]) error = lambda: numpy.random.normal(0, 0.001) unlucky = lambda: numpy.random.uniform(0, 1) < 0.2 observed = [] for event in truth: observed.append([M(x + error(), y + error()) for x, y in event if not unlucky()]) for j in range(numpy.random.poisson(0.25)): observed[-1].append(M(*numpy.random.normal(0.5, 0.25, 2))) observed # So the simulated data look like this: data = awkward.Table(truth=awkward.fromiter(truth), observed=awkward.fromiter(observed)) data.tolist() # The measured objects were reconstructed from raw signals in our simulation by a complex process. # We want to match real and measured to learn what the simulation is telling us about measurement # errors, missing fraction, and spurious fraction. pairs = data["truth"].cross(data["observed"], nested=True) # pairs for all combinations distances = numpy.sqrt((pairs.i0["x"] - pairs.i1["x"])**2 + # compute distance for all (pairs.i0["y"] - pairs.i1["y"])**2) best = distances.argmin() # pick smallest distance print("\nbest =", best) good_enough = (distances[best] < 0.005) # exclude if the distance is too large print("\ngood_enough =", good_enough) good_pairs = pairs[best][good_enough].flatten(axis=1) # select best and good enough; reduce print("\ngood_pairs[0] =", good_pairs[0]) ``` #### **Explode:** create deeper structures by combining the ones we have <center><img src="img/explode.png" width="25%"></center> #### **Flat:** compute something in a vectorized way <center><img src="img/flat.png" width="25%"></center> #### **Reduce:** use the new values to eliminate structure (max, sum, mean...) <center><img src="img/reduce.png" width="25%"></center> ``` # Other awkward types: nullable, heterogeneous lists, nested records... 
a = awkward.fromiter([[1.1, 2.2, None, 3.3, None], [4.4, [5.5]], [{"x": 6, "y": {"z": 7}}, None, {"x": 8, "y": {"z": 9}}] ]) # Array type as a function signature print(a.type) print() # Vectorized operations all the way down (a + 100).tolist() # Cross-references data = awkward.fromiter([ {"tracks": [{"phi": 1.0}, {"phi": 2.0}], "hits": [{"id": 100, "pos": 3.7}, {"id": 50, "pos": 2.1}, {"id": 75, "pos": 2.5}]}, {"tracks": [{"phi": 1.5}], "hits": [{"id": 100, "pos": 1.4}, {"id": 50, "pos": 0.7}, {"id": 75, "pos": 3.0}]}]) data["tracks"]["hits-on-track"] = \ awkward.JaggedArray.fromcounts([2, 1], awkward.JaggedArray.fromcounts([2, 2, 1, 1], awkward.IndexedArray([0, 1, 1, 2, 3, 5], data["hits"].content))) data.tolist() # Cyclic references tree = awkward.fromiter([ {"value": 1.23, "left": 1, "right": 2}, # node 0 {"value": 3.21, "left": 3, "right": 4}, # node 1 {"value": 9.99, "left": 5, "right": 6}, # node 2 {"value": 3.14, "left": 7, "right": None}, # node 3 {"value": 2.71, "left": None, "right": 8}, # node 4 {"value": 5.55, "left": None, "right": None}, # node 5 {"value": 8.00, "left": None, "right": None}, # node 6 {"value": 9.00, "left": None, "right": None}, # node 7 {"value": 0.00, "left": None, "right": None}, # node 8 ]) left = tree.contents["left"].content right = tree.contents["right"].content left[(left < 0) | (left > 8)] = 0 # satisfy overzealous validity checks right[(right < 0) | (right > 8)] = 0 tree.contents["left"].content = awkward.IndexedArray(left, tree) tree.contents["right"].content = awkward.IndexedArray(right, tree) tree[0].tolist() ``` | Array type | Purpose | Members | Usage | |:-----------|:--------|:--------|:------| | JaggedArray | variable-sized data structures | starts, stops, content | ubiquitous | | Table | struct-like objects in columns | contents _(dict)_ | ubiquitous | | ObjectArray | arbitrary Python types on demand | generator, content | common | | Methods | mix-in methods and properties on any array type | _(none)_ | common | | 
MaskedArray | allow nullable values (`None`) | mask _(bytes)_, content | occasional | | BitMaskedArray | same, but with a bit-mask | mask _(bits)_, content | from Arrow | | IndexedMaskedArray | same, but with dense content | mask-index _(integers)_ content | rare | | IndexedArray | lazy integer indexing: "pointers" | index, content | rare | | SparseArray | huge array defined at a few indexes | index, content, default | rare | | UnionArray | heterogeneous types or data sources | tags, index, contents _(list)_ | rare | | StringArray | special case: jagged array of characters | starts, stops, content, string methods | common | | ChunkedArray | discontiguous array presented as a whole | counts, chunks _(lists)_ | from Parquet | | AppendableArray | chunked allocation for efficient appending | counts, chunks _(lists)_ | rare | | VirtualArray | array generated from a function when needed | generator, possible cached array | from Parquet |
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc" style="margin-top: 1em;"><ul class="toc-item"></ul></div> # Saving TF Models with SavedModel for TF Serving <a class="tocSkip"> ``` import math import os import numpy as np np.random.seed(123) print("NumPy:{}".format(np.__version__)) import tensorflow as tf tf.set_random_seed(123) print("TensorFlow:{}".format(tf.__version__)) DATASETSLIB_HOME = os.path.expanduser('~/dl-ts/datasetslib') import sys if not DATASETSLIB_HOME in sys.path: sys.path.append(DATASETSLIB_HOME) %reload_ext autoreload %autoreload 2 import datasetslib from datasetslib import util as dsu datasetslib.datasets_root = os.path.join(os.path.expanduser('~'),'datasets') models_root = os.path.join(os.path.expanduser('~'),'models') ``` # Serving Model in TensorFlow # Saving model with SavedModel ``` # Restart kernel to run the flag setting again #tf.flags.DEFINE_integer('model_version', 1, 'version number of the model.') model_name = 'mnist' model_version = '1' model_dir = os.path.join(models_root,model_name,model_version) # get the MNIST Data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets(os.path.join(datasetslib.datasets_root,'mnist'), one_hot=True) x_train = mnist.train.images x_test = mnist.test.images y_train = mnist.train.labels y_test = mnist.test.labels # parameters pixel_size = 28 num_outputs = 10 # 0-9 digits num_inputs = 784 # total pixels def mlp(x, num_inputs, num_outputs,num_layers,num_neurons): w=[] b=[] for i in range(num_layers): # weights w.append(tf.Variable(tf.random_normal( \ [num_inputs if i==0 else num_neurons[i-1], \ num_neurons[i]]), \ name="w_{0:04d}".format(i) \ ) \ ) # biases b.append(tf.Variable(tf.random_normal( \ [num_neurons[i]]), \ name="b_{0:04d}".format(i) \ ) \ ) w.append(tf.Variable(tf.random_normal( [num_neurons[num_layers-1] if num_layers > 0 else num_inputs, num_outputs]),name="w_out")) 
b.append(tf.Variable(tf.random_normal([num_outputs]),name="b_out")) # x is input layer layer = x # add hidden layers for i in range(num_layers): layer = tf.nn.relu(tf.matmul(layer, w[i]) + b[i]) # add output layer layer = tf.matmul(layer, w[num_layers]) + b[num_layers] model = layer probs = tf.nn.softmax(model) return model,probs tf.reset_default_graph() # input images serialized_tf_example = tf.placeholder(tf.string, name='tf_example') feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),} tf_example = tf.parse_example(serialized_tf_example, feature_configs) x_p = tf.identity(tf_example['x'], name='x_p') # use tf.identity() to assign name # target output y_p = tf.placeholder(dtype=tf.float32, name="y_p", shape=[None, num_outputs]) num_layers = 2 num_neurons = [] for i in range(num_layers): num_neurons.append(256) learning_rate = 0.01 n_epochs = 50 batch_size = 100 n_batches = mnist.train.num_examples//batch_size model,probs = mlp(x=x_p, num_inputs=num_inputs, num_outputs=num_outputs, num_layers=num_layers, num_neurons=num_neurons) # loss function #loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(model), axis=1)) loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=y_p)) # optimizer function optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) train_op = optimizer.minimize(loss) predictions_check = tf.equal(tf.argmax(probs,1), tf.argmax(y_p,1)) accuracy_function = tf.reduce_mean(tf.cast(predictions_check, tf.float32)) values, indices = tf.nn.top_k(probs, 10) table = tf.contrib.lookup.index_to_string_table_from_tensor( tf.constant([str(i) for i in range(10)])) prediction_classes = table.lookup(tf.to_int64(indices)) with tf.Session() as tfs: tfs.run(tf.global_variables_initializer()) for epoch in range(n_epochs): epoch_loss = 0.0 for batch in range(n_batches): x_batch, y_batch = mnist.train.next_batch(batch_size) _,batch_loss = tfs.run([train_op,loss], feed_dict={x_p: x_batch, y_p: y_batch}) 
epoch_loss += batch_loss average_loss = epoch_loss / n_batches print("epoch: {0:04d} loss = {1:0.6f}".format(epoch,average_loss)) accuracy_score = tfs.run(accuracy_function, feed_dict={x_p: x_test, y_p: y_test }) print("accuracy={0:.8f}".format(accuracy_score)) # save the model # definitions for saving the models builder = tf.saved_model.builder.SavedModelBuilder(model_dir) # build signature_def_map classification_inputs = tf.saved_model.utils.build_tensor_info( serialized_tf_example) classification_outputs_classes = tf.saved_model.utils.build_tensor_info( prediction_classes) classification_outputs_scores = tf.saved_model.utils.build_tensor_info(values) classification_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs={ tf.saved_model.signature_constants.CLASSIFY_INPUTS: classification_inputs }, outputs={ tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES: classification_outputs_classes, tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES: classification_outputs_scores }, method_name=tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME)) tensor_info_x = tf.saved_model.utils.build_tensor_info(x_p) tensor_info_y = tf.saved_model.utils.build_tensor_info(probs) prediction_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs={'inputs': tensor_info_x}, outputs={'outputs': tensor_info_y}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)) legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op') builder.add_meta_graph_and_variables( tfs, [tf.saved_model.tag_constants.SERVING], signature_def_map={ 'predict_images': prediction_signature, tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: classification_signature, }, legacy_init_op=legacy_init_op) builder.save() print('Run following command:') print('tensorflow_model_server --model_name=mnist --model_base_path={}' .format(os.path.join(models_root,model_name))) ```
github_jupyter
# Face Recognition with SphereFace Paper: https://arxiv.org/abs/1704.08063 Repo: https://github.com/wy1iu/sphereface ``` import cv2 import numpy as np import pandas as pd from tqdm import tqdm import matplotlib.pyplot as plt #We are going to use deepface to detect and align faces #Repo: https://github.com/serengil/deepface #!pip install deepface from deepface.commons import functions ``` ### Pre-trained model ``` #Structure: https://github.com/wy1iu/sphereface/blob/master/train/code/sphereface_deploy.prototxt #Weights: https://drive.google.com/open?id=0B_geeR2lTMegb2F6dmlmOXhWaVk model = cv2.dnn.readNetFromCaffe("sphereface_deploy.prototxt", "sphereface_model.caffemodel") #SphereFace input shape. You can verify this in the prototxt. input_shape = (112, 96) ``` ### Common functions ``` #Similarity metrics tutorial: https://sefiks.com/2018/08/13/cosine-similarity-in-machine-learning/ def findCosineDistance(source_representation, test_representation): a = np.matmul(np.transpose(source_representation), test_representation) b = np.sum(np.multiply(source_representation, source_representation)) c = np.sum(np.multiply(test_representation, test_representation)) return 1 - (a / (np.sqrt(b) * np.sqrt(c))) def findEuclideanDistance(source_representation, test_representation): euclidean_distance = source_representation - test_representation euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance)) euclidean_distance = np.sqrt(euclidean_distance) return euclidean_distance ``` ### Data set ``` # Master.csv: https://github.com/serengil/deepface/blob/master/tests/dataset/master.csv # Images: https://github.com/serengil/deepface/tree/master/tests/dataset df = pd.read_csv("../deepface/tests/dataset/master.csv") df.head() euclidean_distances = []; cosine_distances = [] for index, instance in tqdm(df.iterrows(), total = df.shape[0]): img1_path = instance["file_x"] img2_path = instance["file_y"] target_label = instance["Decision"] 
#---------------------------------- #detect and align img1 = functions.preprocess_face("../deepface/tests/dataset/%s" % (img1_path), target_size=input_shape)[0] img2 = functions.preprocess_face("../deepface/tests/dataset/%s" % (img2_path), target_size=input_shape)[0] #---------------------------------- #reshape images to expected shapes img1_blob = cv2.dnn.blobFromImage(img1) img2_blob = cv2.dnn.blobFromImage(img2) if img1_blob.shape != (1, 3, 96, 112): raise ValueError("img shape must be (1, 3, 96, 112) but it has a ", img1_blob.shape," shape") #---------------------------------- #representation model.setInput(img1_blob) img1_representation = model.forward()[0] model.setInput(img2_blob) img2_representation = model.forward()[0] #---------------------------------- euclidean_distance = findEuclideanDistance(img1_representation, img2_representation) euclidean_distances.append(euclidean_distance) cosine_distance = findCosineDistance(img1_representation, img2_representation) cosine_distances.append(cosine_distance) df['euclidean'] = euclidean_distances df['cosine'] = cosine_distances df.head() ``` ### Visualize distributions ``` df[df.Decision == "Yes"]['euclidean'].plot(kind='kde', title = 'euclidean', label = 'Yes', legend = True) df[df.Decision == "No"]['euclidean'].plot(kind='kde', title = 'euclidean', label = 'No', legend = True) plt.show() df[df.Decision == "Yes"]['cosine'].plot(kind='kde', title = 'cosine', label = 'Yes', legend = True) df[df.Decision == "No"]['cosine'].plot(kind='kde', title = 'cosine', label = 'No', legend = True) plt.show() ``` ### Find the best threshold ``` #Repo: https://github.com/serengil/chefboost #!pip install chefboost from chefboost import Chefboost as chef config = {'algorithm': 'C4.5'} df[['euclidean', 'Decision']].head() euclidean_tree = chef.fit(df[['euclidean', 'Decision']].copy(), config) cosine_tree = chef.fit(df[['cosine', 'Decision']].copy(), config) #stored in outputs/rules euclidean_threshold = 17.212238311767578 #euclidean 
cosine_threshold = 0.4668717384338379 #cosine ``` ### Predictions ``` df['prediction_by_euclidean'] = 'No' df['prediction_by_cosine'] = 'No' df.loc[df[df['euclidean'] <= euclidean_threshold].index, 'prediction_by_euclidean'] = 'Yes' df.loc[df[df['cosine'] <= cosine_threshold].index, 'prediction_by_cosine'] = 'Yes' df.sample(5) euclidean_positives = 0; cosine_positives = 0 for index, instance in df.iterrows(): target = instance['Decision'] prediction_by_euclidean = instance['prediction_by_euclidean'] prediction_by_cosine = instance['prediction_by_cosine'] if target == prediction_by_euclidean: euclidean_positives = euclidean_positives + 1 if target == prediction_by_cosine: cosine_positives = cosine_positives + 1 print("Accuracy (euclidean): ",round(100 * euclidean_positives/df.shape[0], 2)) print("Accuracy (cosine): ",round(100 * cosine_positives/df.shape[0], 2)) ``` ### Production ``` def verifyFaces(img1_path, img2_path): print("Verify ",img1_path," and ",img2_path) #------------------------------------ #detect and align img1 = functions.preprocess_face(img1_path, target_size=input_shape)[0] img2 = functions.preprocess_face(img2_path, target_size=input_shape)[0] img1_blob = cv2.dnn.blobFromImage(img1) img2_blob = cv2.dnn.blobFromImage(img2) #------------------------------------ #representation model.setInput(img1_blob) img1_representation = model.forward()[0] model.setInput(img2_blob) img2_representation = model.forward()[0] #------------------------------------ #verify euclidean_distance = findEuclideanDistance(img1_representation, img2_representation) print("Found euclidean distance is ",euclidean_distance," whereas required threshold is ",euclidean_threshold) fig = plt.figure() ax1 = fig.add_subplot(1,2,1) plt.imshow(img1[:,:,::-1]) plt.axis('off') ax2 = fig.add_subplot(1,2,2) plt.imshow(img2[:,:,::-1]) plt.axis('off') plt.show() if euclidean_distance <= euclidean_threshold: print("they are same person") else: print("they are not same person") ``` ### True 
positive examples ``` verifyFaces("../deepface/tests/dataset/img1.jpg", "../deepface/tests/dataset/img2.jpg") verifyFaces("../deepface/tests/dataset/img54.jpg", "../deepface/tests/dataset/img3.jpg") verifyFaces("../deepface/tests/dataset/img42.jpg", "../deepface/tests/dataset/img45.jpg") verifyFaces("../deepface/tests/dataset/img9.jpg", "../deepface/tests/dataset/img49.jpg") ``` ### True negative examples ``` verifyFaces("../deepface/tests/dataset/img1.jpg", "../deepface/tests/dataset/img3.jpg") verifyFaces("../deepface/tests/dataset/img1.jpg", "../deepface/tests/dataset/img45.jpg") verifyFaces("../deepface/tests/dataset/img1.jpg", "../deepface/tests/dataset/img49.jpg") ```
github_jupyter
Old Guestbook IP Extraction === This script processes the json guestbook in the old (2016) dataset to a CSV file containing the IP metadata. ``` %reload_ext autoreload %autoreload 2 %matplotlib inline import os import re import pandas as pd import numpy as np from collections import Counter import sqlite3 from nltk import word_tokenize from html.parser import HTMLParser from tqdm import tqdm import random import pickle import json from datetime import datetime from pprint import pprint import matplotlib.pyplot as plt import matplotlib.dates as md import matplotlib import pylab as pl from IPython.core.display import display, HTML from pathlib import Path git_root_dir = !git rev-parse --show-toplevel git_root_dir = Path(git_root_dir[0].strip()) git_root_dir import sys caringbridge_core_path = "/home/srivbane/levon003/repos/caringbridge_core" sys.path.append(caringbridge_core_path) import cbcore.data.paths as paths import cbcore.data.dates as dates raw_data_dir = paths.raw_data_2016_filepath guestbook_filepath = os.path.join(raw_data_dir, "guestbook_scrubbed.json") working_dir = "/home/srivbane/shared/caringbridge/data/projects/sna-social-support/geo_data" os.makedirs(working_dir, exist_ok=True) assert os.path.exists(working_dir) ``` ### Load and convert journal file ``` output_filepath = os.path.join(working_dir, "gb_ip_raw.csv") bad_ips = [] with open(output_filepath, 'w') as outfile: with open(guestbook_filepath, encoding='utf-8') as infile: for line in tqdm(infile, total=82980359): gb = json.loads(line) if "ip" not in gb: continue ip = gb['ip'] if not re.match(r"[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$", ip): bad_ips.append(ip) continue created_at = int(gb["createdAt"]["$date"]) updated_at = int(gb["updatedAt"]["$date"]) if "updatedAt" in gb else "" outfile.write(f"{int(gb['userId'])},{ip},{int(gb['siteId'])},{created_at},{updated_at}\n") len(bad_ips) Counter(bad_ips).most_common()[:10] !wc -l {output_filepath} ``` ### Add geo information to data This is now in a separate 
PBS script for easier execution as an independent, long-running job. ``` raw_ip_filepath = os.path.join(working_dir, "gb_ip_raw.csv") geo_info_added_filepath = os.path.join(working_dir, "gb_geo_data.csv") # read the geoip2 database import geoip2 import geoip2.database city_database_filepath = "/home/srivbane/shared/caringbridge/data/derived/geolite2/GeoLite2-City_20190813/GeoLite2-City.mmdb" reader = geoip2.database.Reader(city_database_filepath) with open(raw_ip_filepath, 'r') as infile: with open(geo_info_added_filepath, 'w') as outfile: for line in tqdm(infile, total=76810342): tokens = line.strip().split(",") if len(tokens) != 5: raise ValueError(f"Too many values to unpack: {line}") user_id, ip, site_id, created_at, updated_at = tokens try: g = reader.city(ip) except geoip2.errors.AddressNotFoundError: outfile.write(f"{user_id},{site_id},{created_at},NOT_FOUND,,,,,,\n") continue country = g.country.iso_code subdiv_count = len(g.subdivisions) state = g.subdivisions.most_specific.iso_code city = g.city.name lat = g.location.latitude long = g.location.longitude acc_radius = g.location.accuracy_radius outfile.write(f"{user_id},{site_id},{created_at},{country},{subdiv_count},{state},{city},{lat},{long},{acc_radius}\n") ``` ### Read csv file ``` header = ['user_id','site_id','created_at','country','subdiv_count','state','city','lat','long','acc_radius'] df = pd.read_csv(geo_info_added_filepath, header=None, names=header) len(df) df.head() Counter(df.country).most_common()[:15] Counter(df.state).most_common()[:15] Counter(df.city).most_common()[:15] Counter(df.acc_radius).most_common()[:15] Counter(df.subdiv_count).most_common()[:3] # multiple subdivisions is a non-US thing Counter(df[df.subdiv_count == 2].country).most_common() plt.hist([int(ar) for ar in df.acc_radius if ar is not None and ar != 'None' and not pd.isnull(ar)], log=True) plt.title("Accuracy radius for the classified points") plt.show() ``` ### Visualization by US state and location ``` us_df = 
df[(df.subdiv_count == 1)&(df.country=='US')] len(us_df), len(us_df) / len(df) us_df.head() import geopandas as gpd import geopandas.datasets import shapely #from quilt.data.ResidentMario import geoplot_data #world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres')) #world = gpd.read_file(geoplot_data.contiguous_usa()) world = gpd.read_file(geopandas.datasets.get_path('naturalearth_lowres')) fig, ax = plt.subplots(1,1, figsize=(12,12)) ax = world.plot(ax=ax, color="seagreen") marker_size_map = {'1000': 9, '500': 8, '200': 7, '100': 6, '50': 5, '20': 4, '10': 3, '5': 2, '1': 1 } marker_color_map = {'1000': plt.cm.Greys(0.7, alpha=0.01), '500': plt.cm.Greys(0.7, alpha=0.05), '200': plt.cm.Greys(0.8, alpha=0.2), '100': plt.cm.Greys(0.8, alpha=0.3), '50': plt.cm.Greys(0.9, alpha=0.4), '20': plt.cm.Greys(0.9, alpha=0.4), '10': plt.cm.Greys(1, alpha=0.5), '5': plt.cm.Greys(1, alpha=0.5), '1': plt.cm.Greys(1, alpha=0.5) } geometry = [shapely.geometry.Point(xy) for xy in zip(us_df.long.astype("float"), us_df.lat.astype("float"))] crs = {'init': 'epsg:4326'} gdf = gpd.GeoDataFrame(us_df, crs=crs, geometry=geometry) gdf = gdf.to_crs(world.crs) # just to ensure the coordinate system is the same markersizes = [marker_size_map[ar] for ar in us_df.acc_radius] colors = [marker_color_map[ar] for ar in us_df.acc_radius] ax = gdf.plot(ax=ax, color=colors, markersize=markersizes) # try to apply different colors to the response categories in order to sanity check the demo_country variable #ax = gdf[gdf["demo_country"] == "USA"].plot(ax=ax, color='blue', markersize=4) #ax = gdf[gdf["demo_country"] == "Canada"].plot(ax=ax, color='red', markersize=4) #ax = gdf[gdf["demo_country"] == "Other"].plot(ax=ax, color='white', markersize=2) # some hacky estimates of North America coords ax.set_xlim((-141,-62)) ax.set_ylim((14,72)) _ = ax.set_title("US Guestbook Locations (From IP)") ```
github_jupyter
# Combining Thompson Sampling Results ``` import pinot ds = pinot.data.moonshot() actual_best = max([d[1].item() for d in ds]) import pandas as pd best_human = pd.read_csv('best_Human.csv', index_col=0) pro_human = pd.read_csv('pro_Human.csv', index_col=0) retro_human = pd.read_csv('retro_Human.csv', index_col=0) for df in [best_human, pro_human, retro_human]: df['Type'] = 'Human' best_ei = pd.read_csv('best_ExpectedImprovement.csv', index_col=0) pro_ei = pd.read_csv('pro_ExpectedImprovement.csv', index_col=0) retro_ei = pd.read_csv('retro_ExpectedImprovement.csv', index_col=0) for df in [best_ei, pro_ei, retro_ei]: df['Type'] = 'ExpectedImprovement' best = pd.concat([best_human, best_ei]) pro = pd.concat([pro_human, pro_ei]) retro = pd.concat([retro_human, retro_ei]) def larger_font(ylabel): plt.xticks(size=20) plt.xlabel('Round', size=20) plt.yticks(size=20) plt.ylabel(ylabel, size=20) import matplotlib.pyplot as plt import seaborn as sns sns.catplot(x='Round', y='Value', hue='Type', data=retro, kind='violin', height=10, aspect=2, # split=True palette='tab10' ) larger_font('Thompson Estimates of $y_{max}$') plt.axhline(actual_best, color='black') import seaborn as sns fig, ax = plt.subplots(figsize=(20, 10)) # plt.axhline(actual_best, color='black') sns.lineplot(x='Round', y='Value', hue='Type', data=best, ax=ax) larger_font('$y_{max}^i$') import torch import numpy as np import pandas as pd import seaborn as sns improvement_list = [] for type_ in ['Human', 'ExpectedImprovement']: pro_subset = pro[pro['Type'] == type_] best_subset = best[best['Type'] == type_] for trial in pro_subset.Trial.unique(): for round_ in pro_subset.Round.unique(): round_values = pro_subset[np.logical_and(pro_subset['Round'] == round_, pro_subset['Trial'] == trial)]['Value'] round_best = best[np.logical_and(best['Round'] == round_, best['Trial'] == trial)]['Value'].iloc[0] improvement_list.append({'Acquisition Function': 'ExpectedImprovement', 'Trial': trial, 'Round': round_, 'Type': 
type_, 'ProbabilityImprovement': (round_values > round_best).mean(), 'ExpectedImprovement': (np.maximum(round_values - round_best, 0)).mean()}) improvement_df = pd.DataFrame(improvement_list) import matplotlib.pyplot as plt import pandas as pd fig, ax = plt.subplots(figsize=(10, 10)) sns.swarmplot(x='Round', y='ProbabilityImprovement', hue='Type', data=improvement_df, ax=ax) larger_font('$P$(Thompson Estimate > $y_{max}^i$)') fig, ax = plt.subplots(figsize=(10, 10)) sns.swarmplot(x='Round', y='ExpectedImprovement', hue='Type', data=improvement_df, ax=ax) plt.ylabel('Thompson Estimates of $y_{max}$') larger_font('$E$($\max$(Thompson Estimate - $y_{max}^i$, 0)') ```
github_jupyter
# Download ECG data This notebook downloads ECG data from the [MIT-BIH Arrhythmia Database Directory](https://archive.physionet.org/physiobank/database/html/mitdbdir/mitdbdir.htm) Copyright 2020 Dr. Klaus G. Paul Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ``` from IPython.display import display from ipywidgets import IntProgress import numpy as np import os import pandas as pd import wfdb import requests import zipfile, io ``` Download the zip archive and extract all the files ``` r = requests.get("https://storage.googleapis.com/mitdb-1.0.0.physionet.org/mit-bih-arrhythmia-database-1.0.0.zip") z = zipfile.ZipFile(io.BytesIO(r.content)) if not os.path.exists("./mit-data"): os.mkdir("./mit-data") z.extractall("./mit-data") ``` This is not very generic, but the example does not need a concise dataset. 
``` r = set() for f in os.listdir("./mit-data/mit-bih-arrhythmia-database-1.0.0/"): s = f.split(".")[0][:3] if s.isdigit(): r.add(s) wIP = IntProgress(min=0,max=len(r)) display(wIP) allAbnormalities = [] allData = [] for rec in r: record = wfdb.rdrecord('./mit-data/mit-bih-arrhythmia-database-1.0.0/{}'.format(rec)) dfHB = pd.DataFrame(record.p_signal) dfHB.rename(columns={0:record.sig_name[0],1:record.sig_name[1]},inplace=True) dfHB["record"] = rec # this is the reference between time series and markup data dfHB["sample"] = dfHB.index # this is known freq = 360 period = '{}N'.format(int(1e9 / freq)) index = pd.date_range(pd.to_datetime("2020-01-01 12:00"), periods=len(dfHB), freq=period) dfHB["Timestamp"] = index # need to reduce the amount of data dfHB = dfHB[dfHB.Timestamp < pd.to_datetime("2020-01-01 12:02:30")] dfHB.index = dfHB["Timestamp"] # else bokeh may complain about identical names del dfHB.index.name dfHB.to_parquet("../data/{}.parquet".format(rec), use_deprecated_int96_timestamps=True) allData.append(dfHB) ann = wfdb.rdann('./mit-data/mit-bih-arrhythmia-database-1.0.0/{}'.format(rec), extension='atr',return_label_elements=['symbol', 'label_store', 'description']) ann.create_label_map() dfAnn = pd.DataFrame({"annotation":ann.description,"sample":ann.sample,"symbol":ann.symbol}) dfAnn = dfAnn[dfAnn["sample"] <= len(dfHB)] dfAnn = pd.merge(dfAnn,dfHB,on="sample") dfAnn["record"] = rec # else bokeh may complain about identical names del dfAnn.index.name # uncomment this if you think you need the individual files #dfAnn.to_csv("../data/ann.{}.csv".format(rec)) #dfAnn[dfAnn.symbol != "N"].to_csv("../data/ann.abnormalities.{}.csv".format(rec)) allAbnormalities.append(dfAnn[dfAnn.symbol != "N"][["Timestamp","annotation","symbol","record"]]) wIP.value += 1 pd.DataFrame().append(allAbnormalities,sort=False).to_parquet("../data/abnormalities.parquet", use_deprecated_int96_timestamps=True) 
#pd.DataFrame().append(allData,sort=False).to_parquet("../data/ecg.parquet", use_deprecated_int96_timestamps=True) ```
github_jupyter
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import scipy.stats as st import probability_kernels as pk ``` #### Note to users This Jupyter Notebook is for creating the figures in the paper. It also demonstrates how percentile transition matrices can be calculatd using the python file `probability_kernels`. ``` save = True ``` ### Figure of the Peason data ``` # Load the data frame (female) -> dff dff = pd.read_csv('data/pearson-lee-mother-daughter.csv') # x values (mothers) xf = dff.Parent.to_numpy() # y values (daughters) yf = dff.Child.to_numpy() # Load the data frame (male) -> dfm dfm = pd.read_csv('data/pearson-lee-father-son.csv') # x values (fathers) xm = dfm.Parent.to_numpy() # y values (sons) ym = dfm.Child.to_numpy() %%time # Create an empty list of size three, that will store matrices_p = [None] * 3 matrices_p[0] = pk.get_matrix_data(xf, yf) matrices_p[1] = pk.get_matrix_data(xm, ym) matrices_p[2] = pk.get_matrix(r=0.54, rs=0.96, num_iters=1_000_000, trim_score=6) # Pearson male is exactly the same: pk.get_matrix(r=0.51, rs=0.89) fig, axes = plt.subplots(3, 1, figsize=(13*1*0.95*0.75, 8*3/0.95*0.75)) titles_p = ['Pearson data, Mother-Daughter', 'Pearson data, Father-Son', 'Pearson data, simulation of estimated parameters'] for i in range(3): pk.plot_ax(ax=axes.ravel()[i], matrix=matrices_p[i], i=0, title=titles_p[i], title_loc='center', child=True) plt.tight_layout() legend = ['Descendant in\nTop Quintile', 'Fourth Quintile', 'Third Quintile', 'Second Quintile', 'Bottom Quintile'] fig.legend(legend, bbox_to_anchor=(1.27, 0.9805), fontsize=15) if save: plt.savefig('latex/figures/quintile-pearson.png', dpi=300) plt.show() ``` ### Figure for multigenerational mobility, standard parameters ``` r = 0.5 rs = pk.stable_rs(r) num_steps = 6 matrices = [None] * num_steps print('r_s =', round(rs, 5)) %%time for i in range(num_steps): matrices[i] = pk.get_matrix(r=r, rs=rs, n=i+1, num_iters=1_000_000, trim_score=6) fig, axes = 
plt.subplots(3, 2, figsize=(13*2*0.95*0.75, 8*3/0.95*0.75)) for i in range(num_steps): pk.plot_ax(ax=axes.ravel()[i], matrix=matrices[i], i=i, j=i, title="$n = {}$".format(str(i+1)), title_loc='center', x_label=True, child=False) plt.tight_layout() if save: plt.savefig('latex/figures/quintile-r=0.5-stable.png', dpi=300) plt.show() ``` ### Figure for the mobility measure ``` mv = np.array([12, 6, 3, 2, 1.4, 1]) m = mv.size rv, rsv = pk.get_rv_rsv(mv) matrices_m = [None] * m %%time for i in range(m): matrices_m[i] = pk.get_matrix(r=rv[i], rs=rsv[i], n=1, num_iters=1_000_000, trim_score=6) ``` There are `num_iters` number of iterations over the simulated integral for each probability calculation. Therefore, $5\times 5 \times$ `num_iters` total for one quintile transition matrix. Here we make six matrices in 23 seconds. Therefore, about 6.5 million computations per second - due to vectorization. ``` fig, axes = plt.subplots(3, 2, figsize=(13*2*0.95*0.75, 8*3/0.95*0.75)) for i in range(m): pk.plot_ax(ax=axes.ravel()[i], matrix=matrices_m[i], i=0, j=i, title=pk.report_mobility(mv, rv, rsv, i), title_loc='center', x_label=False, child=True) plt.tight_layout() if save: plt.savefig('latex/figures/quintile-mobility.png', dpi=300) plt.show() ``` ### Figure for the Chetty data ![](misc/chetty-picture.png) ``` chetty = np.array( [[0.337, 0.242, 0.178, 0.134, 0.109], [0.28, 0.242, 0.198, 0.16, 0.119], [0.184, 0.217, 0.221, 0.209, 0.17], [0.123, 0.176, 0.22, 0.244, 0.236], [0.075, 0.123, 0.183, 0.254, 0.365]]) pk.plot_matrix(chetty, child=True, legend=False) plt.tight_layout() if save: plt.savefig('latex/figures/quintile-chetty.png', dpi=300) r_chetty = 0.31 pk.plot_matrix( pk.get_matrix(r=r_chetty, rs=pk.stable_rs(r_chetty), n=1, num_iters=100_000, trim_score=6)) pk.stable_rs(r_chetty) / r_chetty ``` ### Reference ``` r_ref = 0.5 ref = pk.get_matrix(r=r_ref, rs=pk.stable_rs(r_ref), n=3, num_iters=1_000_000, trim_score=6) fig, axes = plt.subplots(1, 1, figsize=(13*1*0.95*0.75, 
8*1/0.95*0.75)) pk.plot_ax(axes, matrix=ref, i=2, j=2, x_label=True, child=False) plt.tight_layout() if save: plt.savefig("latex/figures/quintile_reference.png", dpi=300) ``` #### Test symmetry (proof in paper) ``` def get_sigma(r, rs, n): return np.sqrt((r**2+rs**2)**n) def joint(v1, v2, r, rs, n): return st.norm.pdf(v2, scale=pk.get_sigma_tilda(1, r, rs, n), loc=pk.get_mu_tilda(v1, r, n)) * st.norm.pdf(v1) def check_vs(va, vb, r, rs, n): va_vb = joint(va, vb, r, rs, n) vb_va = joint(vb, va, r, rs, n) return va_vb, vb_va # Stable population variance r_c = 0.3 check_vs(va=0.3, vb=0.7, r=r_c, rs=pk.stable_rs(r_c), n=3) # (Not) stable population variance check_vs(va=0.3, vb=0.7, r=r_c, rs=0.7, n=3) pa = 0.17 pb = 0.64 def per_to_v1(p1): return st.norm.ppf(p1) def per_to_v2(p2, r, rs, n): return st.norm.ppf(p2, scale=get_sigma(r, rs, n)) def check_ps(pa, pb, r, rs, n): va_vb = joint(per_to_v1(pa), per_to_v2(pb, r, rs, n), r, rs, n) vb_va = joint(per_to_v1(pb), per_to_v2(pa, r, rs, n), r, rs, n) return va_vb, vb_va # (Not) stable population variance, but index by percentile check_ps(pa=0.17, pb=0.64, r=r_c, rs=0.7, n=3) ``` ### Pearson summary stats ``` rawm = pk.get_matrix_data(xm, ym, return_raw=True) rawf = pk.get_matrix_data(xf, yf, return_raw=True) raws = np.ravel((rawm + rawf) / 2) np.quantile(raws, (0.25, 0.5, 0.75)) min(np.min(rawm), np.min(rawf)) max(np.max(rawm), np.max(rawf)) np.mean(raws) ``` ### Top two quintiles ``` # Stature 100-(25+25+43+25)/2 # Income 100-(25+24+36+24)/2 ``` ### Archive ``` # r2v = np.arange(0.05, 0.6, 0.1) # rv = np.sqrt(r2v) # rsv = pk.stable_rs(rv) # mv = rsv / rv # for r in np.arange(0.2, 0.9, 0.1): # plot_matrix(get_matrix(r=r, rs=stable_rs(r))) # plt.title(str(round(r, 2)) + ', ' + str(round(stable_rs(r), 2)) + ', ' + str(round(stable_rs(r) / r, 2))) # plt.show() ```
github_jupyter
### Analyse node statistics for benchmark results In this notebook we analyse the node statistics, such as e.g. average degree, for correctly and misclassified nodes, given the benchmark results of any community detection method. First, we import the necessary packages. ``` %reload_ext autoreload %autoreload 2 import os import matplotlib.pyplot as plt import numpy as np from clusim.clustering import Clustering from src.utils.cluster_analysis import get_cluster_properties, get_node_properties from src.utils.plotting import plot_histogram, init_plot_style from src.wrappers.igraph import read_graph %matplotlib init_plot_style() color_dict = {'infomap': 'tab:blue', 'synwalk': 'tab:orange', 'walktrap': 'tab:green', 'louvain': 'tab:red', 'graphtool': 'tab:purple'} ``` First, we specify the network to be analyzed, load the network and glance at its basic properties. ``` # select network network = 'pennsylvania-roads' # assemble paths graph_file = '../data/empirical/clean/' + network + '.txt' results_dir = '../results/empirical/' + network + '/' os.makedirs(results_dir, exist_ok=True) # output directory for storing generated figures fig_dir = '../figures/' os.makedirs(fig_dir, exist_ok=True) # load network graph = read_graph(graph_file) node_degrees = graph.degree() avg_degree = np.mean(node_degrees) print(f'Network size is {len(graph.vs)} nodes, {len(graph.es)} edges') print (f'Min/Max/Average degrees are {np.min(node_degrees)}, {np.max(node_degrees)}, {avg_degree}.') ``` Here we compute single-number characteristics of the detected clusters. 
``` # methods = ['infomap', 'synwalk', 'walktrap'] methods = ['synwalk', 'louvain', 'graphtool'] colors = [color_dict[m] for m in methods] graph = read_graph(graph_file) for method in methods: clu = Clustering().load(results_dir + 'clustering_' + method + '.json') trivial_clu_sizes = [len(cluster) for cluster in clu.to_cluster_list() if len(cluster) < 3] num_trivial = len(trivial_clu_sizes) num_non_trivial = clu.n_clusters - num_trivial print ('\nCluster statistics for ' + method + ': ') print (f'Number of detected clusters: {clu.n_clusters}') # print (f'Number of trivial clusters: {clu.n_clusters - num_non_trivial}') print (f'Number of non-trivial clusters: {num_non_trivial}') print (f'Fraction of non-trivial clusters: {num_non_trivial/clu.n_clusters}') print (f'Fraction of nodes in non-trivial clusters: {1.0 - sum(trivial_clu_sizes)/clu.n_elements}') print (f'Modularity: {graph.modularity(clu.to_membership_list())}') ``` Here we plot the degree occurances of the network. ``` # plot parameters bin_size = 1 # integer bin size for aggregating degrees save_figure = False # if True, we save the figure as .pdf in ´fig_dir´ plt.close('all') graph = read_graph(graph_file) node_degrees = graph.degree() avg_degree = np.mean(node_degrees) # compute degree pmf min_deg = np.min(node_degrees) max_deg = np.max(node_degrees) bin_edges = np.array(range(min_deg - 1, max_deg+1, bin_size)) + 0.5 bin_centers = bin_edges[:-1] + 0.5 occurances,_ = np.histogram(node_degrees, bins=bin_edges, density=True) # plot the degree distribution fig, ax = plt.subplots(figsize=(12,9)) ax.plot(bin_centers, occurances, 'x', label=f'Node Degrees') ax.plot([avg_degree, avg_degree], [0, np.max(occurances)], color='crimson', label=fr'Average Degree, $\bar{{k}} = {avg_degree:.2f}$') ax.set_ylabel(r'Probability Mass, $p(k_\alpha)$') ax.set_xlabel(r'Node Degree, $k_\alpha$') ax.loglog() ax.legend(loc='upper right') plt.tight_layout() # save figure as .pdf if save_figure: fig_path = fig_dir + 'degrees_' + 
network + '.pdf' plt.savefig(fig_path, dpi=600, format='pdf') plt.close() ``` The next cell plots the histogram of cluster sizes. ``` feature = 'size' n_bins = 25 xmax = 1e3 plt.close('all') save_figure = True # if True, we save the figure as .pdf in ´fig_dir´ # compute cluster properties data = [] for method in methods: clu = Clustering().load(results_dir + 'clustering_' + method + '.json') data.append(get_cluster_properties(graph, clu, feature=feature)) # plot histogram _, ax = plt.subplots(figsize=(12,9)) plot_histogram(ax, data, methods, n_bins, normalization = 'pmf', log_scale=True, xmax=xmax, colors=colors) ax.set_xlabel(r'Cluster sizes, $|\mathcal{Y}_i|$') ax.set_ylabel(r'Bin Probability Mass, $p(|\mathcal{Y}_i|)$') ax.legend(loc='best') plt.tight_layout() # save figure as .pdf if save_figure: fig_path = fig_dir + feature + '_' + network + '.pdf' plt.savefig(fig_path, dpi=600, format='pdf') plt.close() ``` The next cell plots the histogram of cluster densities. ``` feature = 'density' xmin=1e-2 n_bins = 25 plt.close('all') save_figure = True # if True, we save the figure as .pdf in ´fig_dir´ # compute cluster properties data = [] for method in methods: clu = Clustering().load(results_dir + 'clustering_' + method + '.json') data.append(get_cluster_properties(graph, clu, feature=feature)) # plot histogram _, ax = plt.subplots(figsize=(12,9)) plot_histogram(ax, data, methods, n_bins, normalization = 'pmf', log_scale=True, xmin=xmin, colors=colors) ax.set_xlabel(r'Cluster Density, $\rho(\mathcal{Y}_i)$') ax.set_ylabel(r'Bin Probability Mass, $p(\rho(\mathcal{Y}_i))$') ax.legend(loc='best') plt.tight_layout() # save figure as .pdf if save_figure: fig_path = fig_dir + feature + '_' + network + '.pdf' plt.savefig(fig_path, dpi=600, format='pdf') plt.close() ``` The next cell plots the histogram of clustering coefficients. 
``` feature = 'clustering_coefficient' n_bins = 25 xmin = 1e-2 plt.close('all') save_figure = True # if True, we save the figure as .pdf in ´fig_dir´ # compute cluster properties data = [] for method in methods: clu = Clustering().load(results_dir + 'clustering_' + method + '.json') data.append(get_cluster_properties(graph, clu, feature=feature)) # plot histogram _, ax = plt.subplots(figsize=(12,9)) plot_histogram(ax, data, methods, n_bins, normalization = 'pmf', log_scale=True, xmin=xmin, colors=colors) ax.set_xlabel(r'Clustering coefficient, $c(\mathcal{Y}_i)$') ax.set_ylabel(r'Bin Probability Mass, $p(c(\mathcal{Y}_i))$') ax.legend(loc='best') plt.tight_layout() # save figure as .pdf if save_figure: fig_path = fig_dir + feature + '_' + network + '.pdf' plt.savefig(fig_path, dpi=600, format='pdf') plt.close() ``` The next cell plots the histogram of cluster conductances. ``` feature = 'conductance' n_bins = 25 plt.close('all') save_figure = True # if True, we save the figure as .pdf in ´fig_dir´ # compute cluster properties data = [] for method in methods: clu = Clustering().load(results_dir + 'clustering_' + method + '.json') data.append(get_cluster_properties(graph, clu, feature=feature)) # plot histogram _, ax = plt.subplots(figsize=(12,9)) plot_histogram(ax, data, methods, n_bins, normalization = 'pmf', log_scale=False, colors=colors) ax.set_xlabel(r'Conductance, $\kappa(\mathcal{Y}_i)$') ax.set_ylabel(r'Bin Probability Mass, $p(\kappa(\mathcal{Y}_i))$') ax.legend(loc='best') plt.tight_layout() # save figure as .pdf if save_figure: fig_path = fig_dir + feature + '_' + network + '.pdf' plt.savefig(fig_path, dpi=600, format='pdf') plt.close() ``` The next cell plots the histogram of cluster cut ratios. 
``` feature = 'cut_ratio' xmin = None n_bins = 25 plt.close('all') save_figure = True # if True, we save the figure as .pdf in ´fig_dir´ # compute cluster properties data = [] for method in methods: clu = Clustering().load(results_dir + 'clustering_' + method + '.json') data.append(get_cluster_properties(graph, clu, feature=feature)) # plot histogram _, ax = plt.subplots(figsize=(12,9)) plot_histogram(ax, data, methods, n_bins, normalization = 'pmf', log_scale=True, xmin=xmin, colors=colors) ax.set_xlabel(r'Cut Ratio, $\xi(\mathcal{Y}_i)$') ax.set_ylabel(r'Bin Probability Mass, $p(\xi(\mathcal{Y}_i))$') ax.legend(loc='best') plt.tight_layout() # save figure as .pdf if save_figure: fig_path = fig_dir + feature + '_' + network + '.pdf' plt.savefig(fig_path, dpi=600, format='pdf') plt.close() ``` The next cell plots the histogram of node mixing parameters. ``` feature = 'mixing_parameter' xmin = 1e-2 n_bins = 15 plt.close('all') save_figure = True # if True, we save the figure as .pdf in ´fig_dir´ # compute cluster properties data = [] for method in methods: clu = Clustering().load(results_dir + 'clustering_' + method + '.json') data.append(get_node_properties(graph, clu, feature=feature)) # plot histogram _, ax = plt.subplots(figsize=(12,9)) plot_histogram(ax, data, methods, n_bins, normalization = 'pmf', log_scale=True, xmin=xmin, colors=colors) ax.set_xlabel(r'Mixing parameter, $\mu_\alpha$') ax.set_ylabel(r'Bin Probability Mass, $p(\mu_\alpha)$') ax.legend(loc='best') plt.tight_layout() # save figure as .pdf if save_figure: fig_path = fig_dir + feature + '_' + network + '.pdf' plt.savefig(fig_path, dpi=600, format='pdf') plt.close() ``` The next cell plots the histogram of normalized local degrees. 
``` feature = 'nld' n_bins = 25 plt.close('all') save_figure = True # if True, we save the figure as .pdf in ´fig_dir´ # compute cluster properties data = [] for method in methods: clu = Clustering().load(results_dir + 'clustering_' + method + '.json') data.append(get_node_properties(graph, clu, feature=feature)) # plot histogram _, ax = plt.subplots(figsize=(12,9)) plot_histogram(ax, data, methods, n_bins, normalization = 'pmf', log_scale=True, colors=colors) ax.set_xlabel(r'Normalized local degree, $\hat{k}_\alpha$') ax.set_ylabel(r'Probability Mass, $p(\hat{k}_\alpha)$') ax.legend(loc='best') plt.tight_layout() # save figure as .pdf if save_figure: fig_path = fig_dir + feature + '_' + network + '.pdf' plt.savefig(fig_path, dpi=600, format='pdf') plt.close() ```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Text classification with TensorFlow Lite model customization with TensorFlow 2.0 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/tensorflow_examples/lite/model_customization/demo/text_classification.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/tensorflow_examples/lite/model_customization/demo/text_classification.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> </table> The TensorFlow Lite model customization library simplifies the process of adapting and converting a TensorFlow neural-network model to particular input data when deploying this model for on-device ML applications. This notebook shows an end-to-end example that utilizes this model customization library to illustrate the adaption and conversion of a commonly-used text classification model to classify movie reviews on a mobile device. ## Prerequisites To run this example, we first need to install serveral required packages, including model customization package that in github [repo](https://github.com/tensorflow/examples). 
``` !pip uninstall -q -y tensorflow google-colab grpcio !pip install -q tf-nightly !pip install -q git+https://github.com/tensorflow/examples ``` Import the required packages. ``` from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import os import tensorflow as tf assert tf.__version__.startswith('2') from tensorflow_examples.lite.model_customization.core.data_util.text_dataloader import TextClassifierDataLoader from tensorflow_examples.lite.model_customization.core.model_export_format import ModelExportFormat from tensorflow_examples.lite.model_customization.core.task.model_spec import AverageWordVecModelSpec import tensorflow_examples.lite.model_customization.core.task.text_classifier as text_classifier ``` ## Simple End-to-End Example Let's get some texts to play with this simple end-to-end example. You could replace it with your own text folders. ``` data_path = tf.keras.utils.get_file( fname='aclImdb', origin='http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz', untar=True) ``` The example just consists of 4 lines of code as shown below, each of which representing one step of the overall process. 1. Load train and test data specific to an on-device ML app. ``` train_data = TextClassifierDataLoader.from_folder(os.path.join(data_path, 'train'), class_labels=['pos', 'neg']) test_data = TextClassifierDataLoader.from_folder(os.path.join(data_path, 'test'), shuffle=False) ``` 2. Customize the TensorFlow model. ``` model = text_classifier.create(train_data) ``` 3. Evaluate the model. ``` loss, acc = model.evaluate(test_data) ``` 4. Export to TensorFlow Lite model. ``` model.export('movie_review_classifier.tflite', 'text_label.txt', 'vocab.txt') ``` After this simple 4 steps, we could further use TensorFlow Lite model file and label file in on-device applications like in [text classification](https://github.com/tensorflow/examples/tree/master/lite/examples/text_classification) reference app. 
## Detailed Process In above, we tried the simple end-to-end example. The following walks through the example step by step to show more detail. ### Step 1: Load Input Data Specific to an On-device ML App The IMDB dataset contains 25000 movie reviews for training and 25000 movie reviews for testing from the [Internet Movie Database](https://www.imdb.com/). The dataset have two classes: positive and negative movie reviews. Download the archive version of the dataset and untar it. The IMDB dataset has the following directory structure: <pre> <b>aclImdb</b> |__ <b>train</b> |______ <b>pos</b>: [1962_10.txt, 2499_10.txt, ...] |______ <b>neg</b>: [104_3.txt, 109_2.txt, ...] |______ unsup: [12099_0.txt, 1424_0.txt, ...] |__ <b>test</b> |______ <b>pos</b>: [1384_9.txt, 191_9.txt, ...] |______ <b>neg</b>: [1629_1.txt, 21_1.txt] </pre> Note that the text data under `train/unsup` folder are unlabeled documents for unsupervised learning and such data should be ignored in this tutorial. ``` data_path = tf.keras.utils.get_file( fname='aclImdb', origin='http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz', untar=True) ``` Use `TextClassifierDataLoader` to load data. As for `from_folder()` method, it could load data from the folder. It assumes that the text data of the same class are in the same subdirectory and the subfolder name is the class name. Each text file contains one movie review sample. Parameter `class_labels` is used to specify which subfolder should be considered. As for `train` folder, this parameter is used to skip `unsup` subfolder. ``` train_data = TextClassifierDataLoader.from_folder(os.path.join(data_path, 'train'), class_labels=['pos', 'neg']) test_data = TextClassifierDataLoader.from_folder(os.path.join(data_path, 'test'), shuffle=False) train_data, validation_data = train_data.split(0.9) ``` Take a glance at 25 training data. 
``` for text, label in train_data.dataset.take(25): print ("%s: %s"%(train_data.index_to_label[label.numpy()], text.numpy())) ``` ### Step 2: Customize the TensorFlow Model Create a custom text classifier model based on the loaded data. Currently, we only supports averging word embedding method. ``` model = text_classifier.create(train_data, validation_data=validation_data) ``` Have a look at the detailed model structure. ``` model.summary() ``` ### Step 3: Evaluate the Customized Model Evaluate the result of the model, get the loss and accuracy of the model. Evaluate the loss and accuracy in `test_data`. If no data is given the results are evaluated on the data that's splitted in the `create` method. ``` loss, acc = model.evaluate(test_data) ``` ### Step 4: Export to TensorFlow Lite Model Convert the existing model to TensorFlow Lite model format that could be later used in on-device ML application. Meanwhile, save the text labels in label file and vocabulary in vocab file. ``` model.export('movie_review_classifier.tflite', 'text_label.txt', 'vocab.txt') ``` The TensorFlow Lite model file and label file could be used in [text classification](https://github.com/tensorflow/examples/tree/master/lite/examples/text_classification) reference app. In detail, we could add `movie_review_classifier.tflite`, `text_label.txt` and `vocab.txt` in [assets](https://github.com/tensorflow/examples/tree/master/lite/examples/text_classification/android/app/src/main/assets) folder. Meanwhile, change the filenames in [code](https://github.com/tensorflow/examples/blob/master/lite/examples/text_classification/android/app/src/main/java/org/tensorflow/lite/examples/textclassification/TextClassificationClient.java#L43). Here, we also demonstrate how to use the above files to run and evaluate the TensorFlow Lite model. ``` # Read TensorFlow Lite model from TensorFlow Lite file. 
with tf.io.gfile.GFile('movie_review_classifier.tflite', 'rb') as f: model_content = f.read() # Read label names from label file. with tf.io.gfile.GFile('text_label.txt', 'r') as f: label_names = f.read().split('\n') # Initialze TensorFlow Lite inpterpreter. interpreter = tf.lite.Interpreter(model_content=model_content) interpreter.allocate_tensors() input_index = interpreter.get_input_details()[0]['index'] output = interpreter.tensor(interpreter.get_output_details()[0]["index"]) # Run predictions on each test data and calculate accuracy. accurate_count = 0 for i, (text, label) in enumerate(test_data.dataset): # Pre-processing should remain the same. text, label = model.preprocess(text, label) # Add batch dimension and convert to float32 to match with the model's input # data format. text = tf.expand_dims(text, 0).numpy() text = tf.cast(text, tf.float32) # Run inference. interpreter.set_tensor(input_index, text) interpreter.invoke() # Post-processing: remove batch dimension and find the label with highest # probability. predict_label = np.argmax(output()[0]) # Get label name with label index. predict_label_name = label_names[predict_label] accurate_count += (predict_label == label.numpy()) accuracy = accurate_count * 1.0 / test_data.size print('TensorFlow Lite model accuracy = %.4f' % accuracy) ``` Note that preprocessing for inference should be the same as training. Currently, preprocessing contains split the text to tokens by '\W', encode the tokens to ids, the pad the text with `pad_id` to have the length of `sentence_length`. # Advanced Usage The `create` function is the critical part of this library in which parameter `model_spec` defines the specification of the model, currently only `AverageWordVecModelSpec` is supported. The `create` function contains the following steps: 1. Split the data into training, validation, testing data according to parameter `validation_ratio` and `test_ratio`. 
The default values of `validation_ratio` and `test_ratio` are both `0.1`.
2. Tokenize the text and select the `num_words` most frequent words to generate the vocabulary. The default value of `num_words` in the `AverageWordVecModelSpec` object is `10000`.
3. Encode the text string tokens to int ids.
4. Create the text classifier model. Currently, this library supports one model: average the word embedding of the text with RELU activation, then leverage a softmax dense layer for classification. As for the [Embedding layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding), the input dimension is the size of the vocabulary, the output dimension is the `AverageWordVecModelSpec` object's variable `wordvec_dim`, whose default value is `16`, and the input length is the `AverageWordVecModelSpec` object's variable `sentence_len`, whose default value is `256`.
5. Train the classifier model. The default number of epochs is `2` and the default batch size is `32`.

In this section, we describe several advanced topics, including adjusting the model, changing the training hyperparameters etc.

## Adjust the model

We could adjust the model infrastructure like the variables `wordvec_dim` and `sentence_len` in the `AverageWordVecModelSpec` class.

* `wordvec_dim`: Dimension of the word embedding.
* `sentence_len`: Length of a sentence.

For example, we could train with a larger `wordvec_dim`.

```
model = text_classifier.create(train_data, validation_data=validation_data, model_spec=AverageWordVecModelSpec(wordvec_dim=32))
```

## Change the training hyperparameters

We could also change the training hyperparameters like `epochs` and `batch_size` that could affect the model accuracy. For instance,

* `epochs`: more epochs could achieve better accuracy until convergence, but training for too many epochs may lead to overfitting.
* `batch_size`: number of samples to use in one training step.

For example, we could train with more epochs.
``` model = text_classifier.create(train_data, validation_data=validation_data, epochs=5) ``` Evaluate the newly retrained model with 5 training epochs. ``` loss, accuracy = model.evaluate(test_data) ```
github_jupyter
``` import numpy as np import torch import sklearn import sklearn.datasets from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt def load_data(): N = 500 gq = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.7, n_samples=N, n_features=2, n_classes=2, shuffle=True, random_state=None) return gq gaussian_quantiles = load_data() X, y = gaussian_quantiles print(X[0]) print(y) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42) plt.figure(figsize=(9,6)) plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, marker='.', s=40, cmap=plt.cm.Spectral, label='training-samples') plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, marker='X', s=40, cmap=plt.cm.Spectral, label='validation-samples') plt.legend() plt.show() class NeuralNetwork: def __init__(self, n_in, n_hidden, n_out): self.n_x = n_in self.n_h = n_hidden self.n_y = n_out self.W1 = torch.rand(self.n_h, self.n_x) * 0.01 self.b1 = torch.zeros(self.n_h, 1) self.W2 = torch.rand(self.n_y, self.n_h) * 0.01 self.b2 = torch.zeros(self.n_y, 1) def my_forward(self, torch_X): self.Z1 = self.W1.matmul(torch_X.t()) + self.b1 self.A1 = torch.tanh(self.Z1) self.Z2 = self.W2.matmul(self.A1) + self.b2 self.A2 = torch.sigmoid(self.Z2) def my_backward(self, torch_X, torch_y): m = torch_X.shape[0] self.dZ2 = self.A2 - torch_y self.dW2 = (1. / m) * torch.matmul(self.dZ2, self.A1.t()) self.db2 = (1. / m) * torch.sum(self.dZ2, dim=1, keepdim=True) self.dZ1 = torch.mul(torch.matmul(self.W2.t(), self.dZ2), 1 - torch.pow(self.A1, 2)) self.dW1 = (1. / m) * torch.matmul(self.dZ1, torch_X) self.db1 = (1. 
/ m) * torch.sum(self.dZ1, dim=1, keepdim=True) def train(self, training_X, training_y, validation_X, validation_y, epochs, learning_rate): m = training_X.shape[0] for e in range(epochs): self.my_forward(training_X) training_loss = -torch.sum(torch.mul(torch.log(self.A2), training_y) + torch.mul(torch.log(1-self.A2), (1 - training_y))) / m self.my_backward(training_X, training_y) self.W1 -= learning_rate * self.dW1 self.b1 -= learning_rate * self.db1 self.W2 -= learning_rate * self.dW2 self.b2 -= learning_rate * self.db2 if e % 250 == 0: m = validation_X.shape[0] self.my_forward(validation_X) validation_loss = -torch.sum(torch.mul(torch.log(self.A2), validation_y) + torch.mul(torch.log(1-self.A2), (1 - validation_y))) / m print("Training loss: {}".format(training_loss)) print("Validation loss: {}".format(validation_loss)) training_X = torch.from_numpy(X_train).float() training_y = torch.from_numpy(y_train).float() validation_X = torch.from_numpy(X_test).float() validation_y = torch.from_numpy(y_test).float() nn = NeuralNetwork(2, 10, 1) nn.train(training_X, training_y, validation_X, validation_y, 2500, 0.5) ```
github_jupyter
``` import read_data import pandas as pd import numpy as np from IPython import embed from sklearn.decomposition import PCA import matplotlib.pyplot as plt import seaborn import logging import six import scipy.stats from sklearn.preprocessing import Imputer df=pd.read_csv('dyl_ecoli_df.csv',usecols=['index', 'Week', 'Yearday', 'Monthday', 'Weekday', 'Month', 'Timestamp', 'Beach', 'Ecoli', 'Year'],index_col=[0],parse_dates=[6]) forecast_df=pd.read_csv('dyl_forecast_df.csv',usecols=['beach', 'time', 'precipIntensity', 'precipProbability', 'temperature', 'apparentTemperature', 'dewPoint', 'humidity', 'windSpeed', 'visibility', 'cloudCover', 'pressure', 'Breezy', 'Dry', 'Foggy', 'Humid', 'MostlyCloudy', 'PartlyCloudy', 'Overcast', 'Clear', 'Drizzle', 'DangerouslyWindy', 'Windy', 'HeavyRain', 'LightRain', 'Rain', 'windSin', 'windCos'],parse_dates=[1]) forecast_df['windCos']=forecast_df['windCos']*forecast_df['windSpeed'] forecast_df['windSin']=forecast_df['windSin']*forecast_df['windSpeed'] forecast_df=forecast_df.drop(['windSpeed'],axis=1) df=df[df.Timestamp.dt.hour==0] dfs=[] for y in range(2006,2015): dfs.append(pd.DataFrame(index=pd.DatetimeIndex(start=df.Timestamp[df.Year==y].min()-pd.Timedelta(days=10),freq='H',end=df.Timestamp[df.Year==y].max()))); timeindexed_df=pd.concat(dfs) for beach in df.Beach.unique(): b=beach.replace(' ','').strip() timeindexed_df['ecoli_'+b]=np.nan timeindexed_df.loc[df.Timestamp[df.Beach==beach],'ecoli_'+b]=df.Ecoli[df.Beach==beach].values for beach in df.Beach.unique(): b=beach.replace(' ','').strip() sub_df=forecast_df[forecast_df.beach==beach] for c in (set(forecast_df.columns)-set(['time','beach'])): timeindexed_df[c+'_'+b]=np.nan timeindexed_df.loc[sub_df.time,c+'_'+b]=sub_df[c].values predictor_pcas=pd.DataFrame(index=timeindexed_df.index) for c in (set(forecast_df.columns)-set(['time','beach'])): forecast_pivot=forecast_df.pivot(index='time',columns='beach',values=c) forecast_pivot.drop('39th',axis=1,inplace=True) 
forecast_pivot=forecast_pivot[forecast_pivot.notnull().all(axis=1)] forecast_pivot=forecast_pivot.loc[forecast_pivot.notnull().all(axis=1),:] pca=PCA(n_components=6) predictor_pcas=predictor_pcas.merge(pd.DataFrame(pca.fit_transform(forecast_pivot),index=forecast_pivot.index,columns=[c+'0',c+'1',c+'2',c+'3',c+'4',c+'5']),left_index=True,right_index=True,how='outer') c='Ecoli' ecoli_pivot=df.pivot(index='Timestamp',columns='Beach',values=c) ecoli_pivot.drop('39th',axis=1,inplace=True) ecoli_pivot=ecoli_pivot[ecoli_pivot.notnull().all(axis=1)] ecoli_pivot=ecoli_pivot.loc[ecoli_pivot.notnull().all(axis=1),:] pca=PCA(n_components=6) predictor_pcas=predictor_pcas.merge(pd.DataFrame(pca.fit_transform(ecoli_pivot),index=ecoli_pivot.index,columns=[c+'0',c+'1',c+'2',c+'3',c+'4',c+'5']),left_index=True,right_index=True,how='outer') pca_hit_means={} pca_miss_means={} pca_hit_stds={} pca_miss_stds={} pca_hit_counts={} pca_miss_counts={} # pca_hitmiss_kw={} pca_hitmiss_mwu={} pca_hitmiss_ranksum={} all_columns=predictor_pcas.columns empty_dt_df=pd.DataFrame(columns=all_columns, index=pd.timedelta_range(start='8H', end='-24H',freq='-1H').append(pd.timedelta_range(start='-2 days', end='-10 days',freq='-1D'))) for beach in df.Beach.unique(): b=beach.replace(' ','').strip() print(b) sub_df=df[df.Beach==beach] pca_hit_means[b]=empty_dt_df.copy() pca_miss_means[b]=empty_dt_df.copy() pca_hit_stds[b]=empty_dt_df.copy() pca_miss_stds[b]=empty_dt_df.copy() pca_hit_counts[b]=empty_dt_df.copy() pca_miss_counts[b]=empty_dt_df.copy() # pca_hitmiss_kw[b]=empty_dt_df.copy() pca_hitmiss_mwu[b]=empty_dt_df.copy() pca_hitmiss_ranksum[b]=empty_dt_df.copy() hit_times=sub_df.Timestamp[sub_df.Ecoli>=235] miss_times=sub_df.Timestamp[sub_df.Ecoli<235] for dt in pd.timedelta_range(start='8H', end='-24H',freq='-1H').append(pd.timedelta_range(start='-2 days', end='-10 days',freq='-1D')): shift_hit=hit_times+dt shift_miss=miss_times+dt pca_hit_means[b].loc[dt,:]=predictor_pcas.loc[shift_hit].mean(); 
pca_miss_means[b].loc[dt,:]=predictor_pcas.loc[shift_miss].mean(); pca_hit_stds[b].loc[dt,:]=predictor_pcas.loc[shift_hit].std(); pca_miss_stds[b].loc[dt,:]=predictor_pcas.loc[shift_miss].std(); pca_hit_counts[b].loc[dt,:]=predictor_pcas.loc[shift_hit].notnull().sum(); pca_miss_counts[b].loc[dt,:]=predictor_pcas.loc[shift_miss].notnull().sum(); for f in all_columns: if ~np.isnan(predictor_pcas.loc[shift_hit,f].sum()): try: pca_hitmiss_mwu[b].loc[dt,f]=scipy.stats.mstats.mannwhitneyu(predictor_pcas.loc[shift_hit,f],predictor_pcas.loc[shift_miss,f]).pvalue+scipy.stats.mstats.mannwhitneyu(predictor_pcas.loc[shift_miss,f],predictor_pcas.loc[shift_hit,f]).pvalue; pca_hitmiss_ranksum[b].loc[dt,f]=scipy.stats.ranksums(predictor_pcas.loc[shift_miss,f],predictor_pcas.loc[shift_hit,f]).pvalue; # pca_hitmiss_kw[b].loc[dt,f]=scipy.stats.mstats.kruskalwallis(predictor_pcas.loc[shift_hit,f],predictor_pcas.loc[shift_miss,f]).pvalue; except: continue predictors_df=df.copy(); for c in (set(forecast_df.columns)-set(['time','beach'])): predictors_df[c+'0']=0 predictors_df[c+'1']=0 predictors_df[c+'A']=0 predictors_df['Ecoli0']=0 predictors_df['Ecoli1']=0 predictors_df['EcoliA']=0 predictors_df.reset_index(inplace=True) for beach in predictors_df.Beach.unique(): beach_hits=predictors_df.Beach==beach beach_index=predictors_df.index[beach_hits] beach_times=predictors_df.loc[beach_hits,'Timestamp'] b=beach.replace(' ','').strip() print(b) for dt in pd.timedelta_range(start='8H', end='-24H',freq='-1H').append(pd.timedelta_range(start='-2 days', end='-10 days',freq='-1D')): shift_times=beach_times+dt for c in (set(forecast_df.columns)-set(['time','beach'])): score=(predictor_pcas.loc[beach_times+dt,c+'0']-pca_miss_means[b].loc[dt,c+'0'])*(pca_hit_means[b].loc[dt,c+'0']-pca_miss_means[b].loc[dt,c+'0'])*(1-pca_hitmiss_ranksum[b].loc[dt,c+'0'])**100; predictors_df.loc[beach_index[score.notnull()],c+'0']+=score[score.notnull()].values; 
score=(predictor_pcas.loc[beach_times+dt,c+'1']-pca_miss_means[b].loc[dt,c+'1'])*(pca_hit_means[b].loc[dt,c+'1']-pca_miss_means[b].loc[dt,c+'1'])*(1-pca_hitmiss_ranksum[b].loc[dt,c+'1'])**100; predictors_df.loc[beach_index[score.notnull()],c+'1']+=score[score.notnull()].values; score=((predictor_pcas.loc[beach_times+dt,[c+'2',c+'3',c+'4',c+'5']]-pca_miss_means[b].loc[dt,[c+'2',c+'3',c+'4',c+'5']])*(pca_hit_means[b].loc[dt,[c+'2',c+'3',c+'4',c+'5']]-pca_miss_means[b].loc[dt,[c+'2',c+'3',c+'4',c+'5']])*(1-pca_hitmiss_ranksum[b].loc[dt,[c+'2',c+'3',c+'4',c+'5']])**100).sum(axis=1); predictors_df.loc[beach_index[score.notnull()],c+'A']+=score[score.notnull()].values; c='Ecoli' if dt.days<0: score=(predictor_pcas.loc[beach_times+dt,c+'0']-pca_miss_means[b].loc[dt,c+'0'])*(pca_hit_means[b].loc[dt,c+'0']-pca_miss_means[b].loc[dt,c+'0'])*(1-pca_hitmiss_ranksum[b].loc[dt,c+'0'])**100; predictors_df.loc[beach_index[score.notnull()],c+'0']+=score[score.notnull()].values; score=(predictor_pcas.loc[beach_times+dt,c+'1']-pca_miss_means[b].loc[dt,c+'1'])*(pca_hit_means[b].loc[dt,c+'1']-pca_miss_means[b].loc[dt,c+'1'])*(1-pca_hitmiss_ranksum[b].loc[dt,c+'1'])**100; predictors_df.loc[beach_index[score.notnull()],c+'1']+=score[score.notnull()].values; score=((predictor_pcas.loc[beach_times+dt,[c+'2',c+'3',c+'4',c+'5']]-pca_miss_means[b].loc[dt,[c+'2',c+'3',c+'4',c+'5']])*(pca_hit_means[b].loc[dt,[c+'2',c+'3',c+'4',c+'5']]-pca_miss_means[b].loc[dt,[c+'2',c+'3',c+'4',c+'5']])*(1-pca_hitmiss_ranksum[b].loc[dt,[c+'2',c+'3',c+'4',c+'5']])**100).sum(axis=1); predictors_df.loc[beach_index[score.notnull()],c+'A']+=score[score.notnull()].values; from sklearn.preprocessing import Imputer import sklearn.ensemble as ens import sklearn.metrics %matplotlib inline Fresh_run=predictors_df.copy() # 
usingParams=['Year','Ecoli_geomean','precipIntensity','precipProbability','temperature','apparentTemperature','dewPoint','cloudCover','pressure','windSin','windCos','precipIntensity_pca','precipProbability_pca','temperature_pca','apparentTemperature_pca','dewPoint_pca','cloudCover_pca','pressure_pca','windCos_pca','dtemperature_pca','dapparentTemperature_pca','dvisibility_pca','dwindCos_pca'] # Fresh_run=ecoli_df.loc[:,usingParams].copy() Fresh_run=Fresh_run.drop(['index','Timestamp','Beach'],1) years=Fresh_run.Year.unique() columns=Fresh_run.columns.drop(['Ecoli']) speration=(predictors_df[predictors_df.Ecoli>=235].mean()-predictors_df[predictors_df.Ecoli<235].mean())/predictors_df[predictors_df.Ecoli<235].std() predictor_columns=speration[speration>0.15].index predictor_columns=predictor_columns.drop('Ecoli') # Fresh_run.loc[Fresh_run[np.isinf(Fresh_run.precipIntensity)].index,'precipIntensity']=100; # Fresh_run.loc[Fresh_run[np.isinf(Fresh_run.precipProbability)].index,'precipProbability']=100; # imp = Imputer(missing_values='NaN', strategy='mean', axis=1) cleaned_data = Fresh_run[columns] E_levels=Fresh_run.Ecoli.as_matrix() plt.figure(figsize=[12,12]) RF=list() count=0; predictions=list() E_test=list() legend=list() for y in years: print(y) train_ind=(cleaned_data.Year != y).as_matrix() test_ind=(cleaned_data.Year == y).as_matrix() RF.append(ens.RandomForestClassifier(n_estimators=500,criterion='entropy',class_weight={True:.8,False:.2})) RF[count]=RF[count].fit(cleaned_data.loc[train_ind,predictor_columns],E_levels[train_ind]>=235) predictions.append(RF[count].predict_proba(cleaned_data.loc[test_ind,predictor_columns])) E_test.append(E_levels[test_ind]) fpr, tpr, _ =sklearn.metrics.roc_curve(E_test[count]>=235,predictions[count][:,1]) plt.plot(fpr,tpr) plt.hold(True) count+=1 legend.append(y) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.legend(legend,loc=0) plt.axis('equal') coverage=list() for count in range(0,9): 
temp=predictions[count][:,1].copy() temp=E_test[count][temp.argsort()]>235 temp=temp[::-1] temp2=np.cumsum(temp)/np.arange(1,temp.size+1) temp3=np.argwhere(temp2>0.45).max() coverage.append(temp2[temp3]*(temp3+1)/temp.sum()) coverage predictor_columns speration[predictor_columns] Fresh_run.to_csv('Ecoli_filtered_pcas.csv') ```
github_jupyter
``` import pandas as pd import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline import numpy as np pd.options.mode.chained_assignment = None relevant_cols = ['Which round did you get your coop in?', 'Which sector was your first co-op in?'] software = class_df[relevant_cols].dropna() software.head(100) all = software ``` Software ``` software = software[software['Which round did you get your coop in?'].notna()] software.head() software['Which sector was your first co-op in?'] = software['Which sector was your first co-op in?'].str.split(';') software = (software .set_index(['Which round did you get your coop in?'])['Which sector was your first co-op in?'] .apply(pd.Series) .stack() .reset_index() .drop('level_1', axis=1) .rename(columns={0:'Which sector was your first co-op in?'})) software.head() software[software['Which sector was your first co-op in?'] != 'Unhired'] software = software[software['Which sector was your first co-op in?'] != 'Unhired'] software = software[software['Which round did you get your coop in?'] != 'Still Looking'] software.head() software = software[software['Which sector was yourfirst co-op in?'] == 'Software'] software.head() software['Number of people'] = software.groupby(['Which round did you get your coop in?'])['Which sector was your first co-op in?'].transform('count') software = software.drop_duplicates(subset=['Which round did you get your coop in?', 'Which sector was your first co-op in?', 'Number of people'], keep='first') software software['Percentage of People'] = (software['Number of people'] / 98) * 100 software ``` Other ``` other = class_df[relevant_cols] other.head() other = other[other['Which round did you get your coop in?'].notna()] other.head() other['Which sector was your first co-op in?'] = other['Which sector was your first co-op in?'].str.split(';') other = (other .set_index(['Which round did you get your coop in?'])['Which sector was your first co-op in?'] .apply(pd.Series) .stack() .reset_index() 
.drop('level_1', axis=1) .rename(columns={0:'Which sector was your first co-op in?'})) other.head() other[other['Which sector was your first co-op in?'] != 'Unhired'] other = other[other['Which sector was your first co-op in?'] != 'Unhired'] other.head() other = other[other['Which sector was your first co-op in?'] == 'Other'] other.head() other['Number of people'] = other.groupby(['Which round did you get your coop in?'])['Which sector was your first co-op in?'].transform('count') other = other.drop_duplicates(subset=['Which round did you get your coop in?', 'Which sector was your first co-op in?', 'Number of people'], keep='first') other other['Percentage of People'] = (other['Number of people'] / 98) * 100 other ``` hardware ``` hardware = class_df[relevant_cols] hardware.head() hardware = hardware[hardware['Which round did you get your coop in?'].notna()] hardware.head() hardware['Which sector was your first co-op in?'] = hardware['Which sector was your first co-op in?'].str.split(';') hardware = (hardware .set_index(['Which round did you get your coop in?'])['Which sector was your first co-op in?'] .apply(pd.Series) .stack() .reset_index() .drop('level_1', axis=1) .rename(columns={0:'Which sector was your first co-op in?'})) hardware.head() hardware[hardware['Which sector was your first co-op in?'] != 'Unhired'] hardware = hardware[hardware['Which sector was your first co-op in?'] != 'Unhired'] hardware.head() hardware = hardware[hardware['Which sector was your first co-op in?'] == 'Hardware'] hardware.head() hardware['Number of people'] = hardware.groupby(['Which round did you get your coop in?'])['Which sector was your first co-op in?'].transform('count') hardware = hardware.drop_duplicates(subset=['Which round did you get your coop in?', 'Which sector was your first co-op in?', 'Number of people'], keep='first') hardware hardware['Percentage of People'] = (hardware['Number of people'] / 98) * 100 hardware ``` Product Management ``` prodmag = 
class_df[relevant_cols] prodmag.head() prodmag = prodmag[prodmag['Which round did you get your coop in?'].notna()] prodmag.head() prodmag['Which sector was your first co-op in?'] = prodmag['Which sector was your first co-op in?'].str.split(';') prodmag = (prodmag .set_index(['Which round did you get your coop in?'])['Which sector was your first co-op in?'] .apply(pd.Series) .stack() .reset_index() .drop('level_1', axis=1) .rename(columns={0:'Which sector was your first co-op in?'})) prodmag.head() prodmag = prodmag[prodmag['Which sector was your first co-op in?'] != 'Unhired'] prodmag.head() prodmag = prodmag[prodmag['Which sector was your first co-op in?'] == 'Product Management'] prodmag.head() prodmag['Number of people'] = prodmag.groupby(['Which round did you get your coop in?'])['Which sector was your first co-op in?'].transform('count') prodmag = prodmag.drop_duplicates(subset=['Which round did you get your coop in?', 'Which sector was your first co-op in?', 'Number of people'], keep='first') prodmag prodmag['Percentage of People'] = (prodmag['Number of people'] / 98) * 100 prodmag ``` UI/UX ``` uiUx = class_df[relevant_cols] uiUx.head() uiUx = uiUx[uiUx['Which round did you get your coop in?'].notna()] uiUx.head() uiUx['Which sector was your first co-op in?'] = uiUx['Which sector was your first co-op in?'].str.split(';') uiUx = (uiUx .set_index(['Which round did you get your coop in?'])['Which sector was your first co-op in?'] .apply(pd.Series) .stack() .reset_index() .drop('level_1', axis=1) .rename(columns={0:'Which sector was your first co-op in?'})) uiUx.head() uiUx = uiUx[uiUx['Which sector was your first co-op in?'] != 'Unhired'] uiUx.head() uiUx = uiUx[uiUx['Which sector was your first co-op in?'] == 'UI/UX'] uiUx.head() uiUx['Number of people'] = uiUx.groupby(['Which round did you get your coop in?'])['Which sector was your first co-op in?'].transform('count') uiUx = uiUx.drop_duplicates(subset=['Which round did you get your coop in?', 'Which sector 
was your first co-op in?', 'Number of people'], keep='first') uiUx uiUx['Percentage of People'] = (uiUx['Number of people'] / 98) * 100 uiUx ``` Product Design ``` prodDes = class_df[relevant_cols] prodDes.head() prodDes = prodDes[prodDes['Which round did you get your coop in?'].notna()] prodDes.head() prodDes['Which sector was your first co-op in?'] = prodDes['Which sector was your first co-op in?'].str.split(';') prodDes = (prodDes .set_index(['Which round did you get your coop in?'])['Which sector was your first co-op in?'] .apply(pd.Series) .stack() .reset_index() .drop('level_1', axis=1) .rename(columns={0:'Which sector was your first co-op in?'})) prodDes.head() prodDes = prodDes[prodDes['Which sector was your first co-op in?'] != 'Unhired'] prodDes.head() prodDes = prodDes[prodDes['Which sector was your first co-op in?'] == 'Product Design'] prodDes.head() prodDes['Number of people'] = prodDes.groupby(['Which round did you get your coop in?'])['Which sector was your first co-op in?'].transform('count') prodDes = prodDes.drop_duplicates(subset=['Which round did you get your coop in?', 'Which sector was your first co-op in?', 'Number of people'], keep='first') prodDes prodDes['Percentage of People'] = (prodDes['Number of people'] / 98) * 100 prodDes all_rounds = pd.DataFrame(columns=["Sector", "First Round", "Second Round", "Continuous Round", "Direct Offer"], data=[["Software", 10.204082, 12.244898, 25.510204, 7.142857], ["Hardware", 0, 1.020408, 1.020408, 0], ["Product Management", 0, 2.040816, 2.040816, 1.020408], ["UI/UX", 3.061224, 5.102041, 4.081633, 3.061224], ["Product Design", 0, 0, 3.061224, 0], ["Other", 4.081633, 3.061224, 10.204082, 3.061224]]) sns.set(rc={'figure.figsize':(15, 8)}) ax = all_rounds.set_index('Sector').T.plot(kind='bar', stacked=True) ax.set(ylim=(0, 50)) plt.title("CO-OP Round VS Sector") plt.xlabel("Round", labelpad=15) plt.ylabel("Percentage of people (%)", labelpad=15) plt.xticks(rotation=0) ```
github_jupyter
# Springboard Logistic Regression Advanced Case Study $$ \renewcommand{\like}{{\cal L}} \renewcommand{\loglike}{{\ell}} \renewcommand{\err}{{\cal E}} \renewcommand{\dat}{{\cal D}} \renewcommand{\hyp}{{\cal H}} \renewcommand{\Ex}[2]{E_{#1}[#2]} \renewcommand{\x}{{\mathbf x}} \renewcommand{\v}[1]{{\mathbf #1}} $$ This case study delves into the math behind logistic regression in a Python environment. We've adapted this case study from [Lab 5 in the CS109](https://github.com/cs109/2015lab5) course. Please feel free to check out the original lab, both for more exercises, as well as solutions. We turn our attention to **classification**. Classification tries to predict, which of a small set of classes, an observation belongs to. Mathematically, the aim is to find $y$, a **label** based on knowing a feature vector $\x$. For instance, consider predicting gender from seeing a person's face, something we do fairly well as humans. To have a machine do this well, we would typically feed the machine a bunch of images of people which have been labelled "male" or "female" (the training set), and have it learn the gender of the person in the image from the labels and the *features* used to determine gender. Then, given a new photo, the trained algorithm returns us the gender of the person in the photo. There are different ways of making classifications. One idea is shown schematically in the image below, where we find a line that divides "things" of two different types in a 2-dimensional feature space. The classification shown in the figure below is an example of a maximum-margin classifier, where we construct a decision boundary that is as far as possible away from both classes of points. The fact that a line can be drawn to separate the two classes makes the problem *linearly separable*. Support Vector Machines (SVM) are an example of a maximum-margin classifier.
<img src="images/onelinesplit.png" width="400" height="200"> ``` %matplotlib inline import numpy as np import scipy as sp import matplotlib as mpl import matplotlib.cm as cm from matplotlib.colors import ListedColormap import matplotlib.pyplot as plt import pandas as pd pd.set_option('display.width', 500) pd.set_option('display.max_columns', 100) pd.set_option('display.notebook_repr_html', True) import seaborn as sns sns.set_style("whitegrid") sns.set_context("poster") import sklearn.model_selection import warnings # For handling error messages. # Don't worry about the following two instructions: they just suppress warnings that could occur later. warnings.simplefilter(action="ignore", category=FutureWarning) warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd") c0=sns.color_palette()[0] c1=sns.color_palette()[1] c2=sns.color_palette()[2] cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF']) cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF']) cm = plt.cm.RdBu cm_bright = ListedColormap(['#FF0000', '#0000FF']) def points_plot(ax, Xtr, Xte, ytr, yte, clf, mesh=True, colorscale=cmap_light, cdiscrete=cmap_bold, alpha=0.1, psize=10, zfunc=False, predicted=False): h = .02 X=np.concatenate((Xtr, Xte)) x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100), np.linspace(y_min, y_max, 100)) #plt.figure(figsize=(10,6)) if zfunc: p0 = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 0] p1 = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1] Z=zfunc(p0, p1) else: Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) ZZ = Z.reshape(xx.shape) if mesh: plt.pcolormesh(xx, yy, ZZ, cmap=cmap_light, alpha=alpha, axes=ax) if predicted: showtr = clf.predict(Xtr) showte = clf.predict(Xte) else: showtr = ytr showte = yte ax.scatter(Xtr[:, 0], Xtr[:, 1], c=showtr-1, cmap=cmap_bold, s=psize, alpha=alpha,edgecolor="k") # and testing points 
ax.scatter(Xte[:, 0], Xte[:, 1], c=showte-1, cmap=cmap_bold, alpha=alpha, marker="s", s=psize+10) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) return ax,xx,yy def points_plot_prob(ax, Xtr, Xte, ytr, yte, clf, colorscale=cmap_light, cdiscrete=cmap_bold, ccolor=cm, psize=10, alpha=0.1): ax,xx,yy = points_plot(ax, Xtr, Xte, ytr, yte, clf, mesh=False, colorscale=colorscale, cdiscrete=cdiscrete, psize=psize, alpha=alpha, predicted=True) Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1] Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=ccolor, alpha=.2, axes=ax) cs2 = plt.contour(xx, yy, Z, cmap=ccolor, alpha=.6, axes=ax) #plt.clabel(cs2, fmt = '%2.1f', colors = 'k', fontsize=14, axes=ax) return ax ``` ## A Motivating Example Using `sklearn`: Heights and Weights We'll use a dataset of heights and weights of males and females to hone our understanding of classifiers. We load the data into a dataframe and plot it. ``` dflog = pd.read_csv("data/01_heights_weights_genders.csv") dflog.head() ``` Remember that the form of data we will use always is <img src="images/dataform.jpg" width="400" height="200"> with the "response" or "label" $y$ as a plain array of 0s and 1s for binary classification. Sometimes we will also see -1 and +1 instead. There are also *multiclass* classifiers that can assign an observation to one of $K > 2$ classes and the labe may then be an integer, but we will not be discussing those here. `y = [1,1,0,0,0,1,0,1,0....]`. <div class="span5 alert alert-info"> <h3>Checkup Exercise Set I</h3> <ul> <li> <b>Exercise:</b> Create a scatter plot of Weight vs. Height <li> <b>Exercise:</b> Color the points differently by Gender </ul> </div> ``` _ = sns.scatterplot(x='Weight', y='Height', hue='Gender', data=dflog, linestyle = 'None', color = 'blue', alpha=0.25) ``` ### Training and Test Datasets When fitting models, we would like to ensure two things: * We have found the best model (in terms of model parameters). 
* The model is highly likely to generalize i.e. perform well on unseen data. <br/> <div class="span5 alert alert-success"> <h4>Purpose of splitting data into Training/testing sets</h4> <ul> <li> We built our model with the requirement that the model fit the data well. </li> <li> As a side-effect, the model will fit <b>THIS</b> dataset well. What about new data? </li> <ul> <li> We wanted the model for predictions, right?</li> </ul> <li> One simple solution, leave out some data (for <b>testing</b>) and <b>train</b> the model on the rest </li> <li> This also leads directly to the idea of cross-validation, next section. </li> </ul> </div> First, we try a basic Logistic Regression: * Split the data into a training and test (hold-out) set * Train on the training set, and test for accuracy on the testing set ``` from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score # Split the data into a training and test set. Xlr, Xtestlr, ylr, ytestlr = train_test_split(dflog[['Height','Weight']].values, (dflog.Gender == "Male").values,random_state=5) clf = LogisticRegression() # Fit the model on the trainng data. clf.fit(Xlr, ylr) # Print the accuracy from the testing data. print(accuracy_score(clf.predict(Xtestlr), ytestlr)) ``` ### Tuning the Model The model has some hyperparameters we can tune for hopefully better performance. For tuning the parameters of your model, you will use a mix of *cross-validation* and *grid search*. In Logistic Regression, the most important parameter to tune is the *regularization parameter* `C`. Note that the regularization parameter is not always part of the logistic regression model. The regularization parameter is used to control for unlikely high regression coefficients, and in other cases can be used when data is sparse, as a method of feature selection. 
You will now implement some code to perform model tuning and selecting the regularization parameter $C$. We use the following `cv_score` function to perform K-fold cross-validation and apply a scoring function to each test fold. In this incarnation we use accuracy score as the default scoring function. ``` from sklearn.model_selection import KFold from sklearn.metrics import accuracy_score def cv_score(clf, x, y, score_func=accuracy_score): result = 0 nfold = 5 for train, test in KFold(nfold).split(x): # split data into train/test groups, 5 times clf.fit(x[train], y[train]) # fit result += score_func(clf.predict(x[test]), y[test]) # evaluate score function on held-out data return result / nfold # average ``` Below is an example of using the `cv_score` function for a basic logistic regression model without regularization. ``` clf = LogisticRegression() score = cv_score(clf, Xlr, ylr) print(round(score,4)) ``` <div class="span5 alert alert-info"> <h3>Checkup Exercise Set II</h3> <b>Exercise:</b> Implement the following search procedure to find a good model <ul> <li> You are given a list of possible values of `C` below <li> For each C: <ol> <li> Create a logistic regression model with that value of C <li> Find the average score for this model using the `cv_score` function **only on the training set** `(Xlr, ylr)` </ol> <li> Pick the C with the highest average score </ul> Your goal is to find the best model parameters based *only* on the training set, without showing the model test set at all (which is why the test set is also called a *hold-out* set). 
</div> ``` #the grid of parameters to search over Cs = [0.001, 0.1, 1, 10, 100] highest_score = 0 best_c = 0 for C in Cs: clf = LogisticRegression(C=C) score = cv_score(clf, Xlr, ylr) if score > highest_score: highest_score = score best_c = C print("Best score is {}".format(round(highest_score,4))) ``` <div class="span5 alert alert-info"> <h3>Checkup Exercise Set III</h3> **Exercise:** Now you want to estimate how this model will predict on unseen data in the following way: <ol> <li> Use the C you obtained from the procedure earlier and train a Logistic Regression on the training data <li> Calculate the accuracy on the test data </ol> <p>You may notice that this particular value of `C` may or may not do as well as simply running the default model on a random train-test split. </p> <ul> <li> Do you think that's a problem? <li> Why do we need to do this whole cross-validation and grid search stuff anyway? </ul> </div> ``` clf = LogisticRegression(C=C) clf.fit(Xlr, ylr) ypredlr = clf.predict(Xtestlr) print(accuracy_score(ypredlr, ytestlr)) ``` ### Black Box Grid Search in `sklearn` Scikit-learn, as with many other Python packages, provides utilities to perform common operations so you do not have to do it manually. It is important to understand the mechanics of each operation, but at a certain point, you will want to use the utility instead to save time... <div class="span5 alert alert-info"> <h3>Checkup Exercise Set IV</h3> <b>Exercise:</b> Use scikit-learn's [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) tool to perform cross validation and grid search. * Instead of writing your own loops above to iterate over the model parameters, can you use GridSearchCV to find the best model over the training set? * Does it give you the same best value of `C`? 
* How does this model you've obtained perform on the test set?</div> ``` from sklearn.model_selection import GridSearchCV parameters = { 'C': [0.001, 0.1, 1, 10, 100] } grid = GridSearchCV(clf, parameters) grid.fit(Xlr, ylr) ypredlr = grid.predict(Xtestlr) print("Accuracy score is {}".format(accuracy_score(ypredlr, ytestlr))) print("Best tuned parameters are {}".format(grid.best_params_)) print("Best score is {}".format(grid.best_score_)) print("Best estimator is {}".format(grid.best_estimator_)) ``` ## A Walkthrough of the Math Behind Logistic Regression ### Setting up Some Demo Code Let's first set some code up for classification that we will need for further discussion on the math. We first set up a function `cv_optimize` which takes a classifier `clf`, a grid of hyperparameters (such as a complexity parameter or regularization parameter) implemented as a dictionary `parameters`, a training set (as a samples x features array) `Xtrain`, and a set of labels `ytrain`. The code takes the training set, splits it into `n_folds` parts, sets up `n_folds` folds, and carries out a cross-validation by splitting the training set into a training and validation section for each fold for us. It prints the best value of the parameters, and returns the best classifier to us. ``` def cv_optimize(clf, parameters, Xtrain, ytrain, n_folds=5): gs = sklearn.model_selection.GridSearchCV(clf, param_grid=parameters, cv=n_folds) gs.fit(Xtrain, ytrain) print("BEST PARAMS", gs.best_params_) best = gs.best_estimator_ return best ``` We then use this best classifier to fit the entire training set. This is done inside the `do_classify` function which takes a dataframe `indf` as input. It takes the columns in the list `featurenames` as the features used to train the classifier. The column `targetname` sets the target. The classification is done by setting those samples for which `targetname` has value `target1val` to the value 1, and all others to 0.
We split the dataframe into 80% training and 20% testing by default, standardizing the dataset if desired. (Standardizing a data set involves scaling the data so that it has 0 mean and is described in units of its standard deviation.) We then train the model on the training set using cross-validation. Having obtained the best classifier using `cv_optimize`, we retrain on the entire training set and calculate the training and testing accuracy, which we print. We return the split data and the trained classifier. ``` from sklearn.model_selection import train_test_split def do_classify(clf, parameters, indf, featurenames, targetname, target1val, standardize=False, train_size=0.8): subdf=indf[featurenames] if standardize: subdfstd=(subdf - subdf.mean())/subdf.std() else: subdfstd=subdf X=subdfstd.values y=(indf[targetname].values==target1val)*1 Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=train_size) clf = cv_optimize(clf, parameters, Xtrain, ytrain) clf=clf.fit(Xtrain, ytrain) training_accuracy = clf.score(Xtrain, ytrain) test_accuracy = clf.score(Xtest, ytest) print("Accuracy on training data: {:0.2f}".format(training_accuracy)) print("Accuracy on test data: {:0.2f}".format(test_accuracy)) return clf, Xtrain, ytrain, Xtest, ytest ``` ## Logistic Regression: The Math We could approach classification as linear regression, where the class, 0 or 1, is the target variable $y$. But this ignores the fact that our output $y$ is discrete valued, and furthermore, the $y$ predicted by linear regression will in general take on values less than 0 and greater than 1. Additionally, the residuals from the linear regression model will *not* be normally distributed. This violation means we should not use linear regression. But what if we could change the form of our hypotheses $h(x)$ instead? The idea behind logistic regression is very simple. We want to draw a line in feature space that divides the '1' samples from the '0' samples, just like in the diagram above.
In other words, we wish to find the "regression" line which divides the samples. Now, a line has the form $w_1 x_1 + w_2 x_2 + w_0 = 0$ in 2-dimensions. On one side of this line we have $$w_1 x_1 + w_2 x_2 + w_0 \ge 0,$$ and on the other side we have $$w_1 x_1 + w_2 x_2 + w_0 < 0.$$ Our classification rule then becomes: \begin{eqnarray*} y = 1 &\mbox{if}& \v{w}\cdot\v{x} \ge 0\\ y = 0 &\mbox{if}& \v{w}\cdot\v{x} < 0 \end{eqnarray*} where $\v{x}$ is the vector $\{1,x_1, x_2,...,x_n\}$ where we have also generalized to more than 2 features. What hypotheses $h$ can we use to achieve this? One way to do so is to use the **sigmoid** function: $$h(z) = \frac{1}{1 + e^{-z}}.$$ Notice that at $z=0$ this function has the value 0.5. If $z > 0$, $h > 0.5$ and as $z \to \infty$, $h \to 1$. If $z < 0$, $h < 0.5$ and as $z \to -\infty$, $h \to 0$. As long as we identify any value of $y > 0.5$ as 1, and any $y < 0.5$ as 0, we can achieve what we wished above. This function is plotted below: ``` h = lambda z: 1. / (1 + np.exp(-z)) zs=np.arange(-5, 5, 0.1) plt.plot(zs, h(zs), alpha=0.5); ``` So we then come up with our rule by identifying: $$z = \v{w}\cdot\v{x}.$$ Then $h(\v{w}\cdot\v{x}) \ge 0.5$ if $\v{w}\cdot\v{x} \ge 0$ and $h(\v{w}\cdot\v{x}) \lt 0.5$ if $\v{w}\cdot\v{x} \lt 0$, and: \begin{eqnarray*} y = 1 &if& h(\v{w}\cdot\v{x}) \ge 0.5\\ y = 0 &if& h(\v{w}\cdot\v{x}) \lt 0.5. 
\end{eqnarray*} We will show soon that this identification can be achieved by minimizing a loss in the ERM framework called the **log loss** : $$ R_{\cal{D}}(\v{w}) = - \sum_{y_i \in \cal{D}} \left ( y_i \log(h(\v{w}\cdot\v{x})) + ( 1 - y_i) \log(1 - h(\v{w}\cdot\v{x})) \right )$$ We will also add a regularization term: $$ R_{\cal{D}}(\v{w}) = - \sum_{y_i \in \cal{D}} \left ( y_i \log(h(\v{w}\cdot\v{x})) + ( 1 - y_i) \log(1 - h(\v{w}\cdot\v{x})) \right ) + \frac{1}{C} \v{w}\cdot\v{w},$$ where $C$ is the regularization strength (equivalent to $1/\alpha$ from the Ridge case), and smaller values of $C$ mean stronger regularization. As before, the regularization tries to prevent features from having terribly high weights, thus implementing a form of feature selection. How did we come up with this loss? We'll come back to that, but let us see how logistic regression works out. ``` dflog.head() clf_l, Xtrain_l, ytrain_l, Xtest_l, ytest_l = do_classify(LogisticRegression(), {"C": [0.01, 0.1, 1, 10, 100]}, dflog, ['Weight', 'Height'], 'Gender','Male') plt.figure() ax=plt.gca() points_plot(ax, Xtrain_l, Xtest_l, ytrain_l, ytest_l, clf_l, alpha=0.2); ``` In the figure here showing the results of the logistic regression, we plot the actual labels of both the training(circles) and test(squares) samples. The 0's (females) are plotted in red, the 1's (males) in blue. We also show the classification boundary, a line (to the resolution of a grid square). Every sample on the red background side of the line will be classified female, and every sample on the blue side, male. Notice that most of the samples are classified well, but there are misclassified people on both sides, as evidenced by leakage of dots or squares of one color ontothe side of the other color. Both test and traing accuracy are about 92%. ### The Probabilistic Interpretaion Remember we said earlier that if $h > 0.5$ we ought to identify the sample with $y=1$? 
One way of thinking about this is to identify $h(\v{w}\cdot\v{x})$ with the probability that the sample is a '1' ($y=1$). Then we have the intuitive notion that lets identify a sample as 1 if we find that the probabilty of being a '1' is $\ge 0.5$. So suppose we say then that the probability of $y=1$ for a given $\v{x}$ is given by $h(\v{w}\cdot\v{x})$? Then, the conditional probabilities of $y=1$ or $y=0$ given a particular sample's features $\v{x}$ are: \begin{eqnarray*} P(y=1 | \v{x}) &=& h(\v{w}\cdot\v{x}) \\ P(y=0 | \v{x}) &=& 1 - h(\v{w}\cdot\v{x}). \end{eqnarray*} These two can be written together as $$P(y|\v{x}, \v{w}) = h(\v{w}\cdot\v{x})^y \left(1 - h(\v{w}\cdot\v{x}) \right)^{(1-y)} $$ Then multiplying over the samples we get the probability of the training $y$ given $\v{w}$ and the $\v{x}$: $$P(y|\v{x},\v{w}) = P(\{y_i\} | \{\v{x}_i\}, \v{w}) = \prod_{y_i \in \cal{D}} P(y_i|\v{x_i}, \v{w}) = \prod_{y_i \in \cal{D}} h(\v{w}\cdot\v{x_i})^{y_i} \left(1 - h(\v{w}\cdot\v{x_i}) \right)^{(1-y_i)}$$ Why use probabilities? Earlier, we talked about how the regression function $f(x)$ never gives us the $y$ exactly, because of noise. This hold for classification too. Even with identical features, a different sample may be classified differently. We said that another way to think about a noisy $y$ is to imagine that our data $\dat$ was generated from a joint probability distribution $P(x,y)$. Thus we need to model $y$ at a given $x$, written as $P(y|x)$, and since $P(x)$ is also a probability distribution, we have: $$P(x,y) = P(y | x) P(x)$$ and can obtain our joint probability $P(x, y)$. Indeed its important to realize that a particular training set can be thought of as a draw from some "true" probability distribution (just as we did when showing the hairy variance diagram). If for example the probability of classifying a test sample as a '0' was 0.1, and it turns out that the test sample was a '0', it does not mean that this model was necessarily wrong. 
After all, in roughly a 10th of the draws, this new sample would be classified as a '0'! But, of-course its more unlikely than its likely, and having good probabilities means that we'll be likely right most of the time, which is what we want to achieve in classification. And furthermore, we can quantify this accuracy. Thus its desirable to have probabilistic, or at the very least, ranked models of classification where you can tell which sample is more likely to be classified as a '1'. There are business reasons for this too. Consider the example of customer "churn": you are a cell-phone company and want to know, based on some of my purchasing habit and characteristic "features" if I am a likely defector. If so, you'll offer me an incentive not to defect. In this scenario, you might want to know which customers are most likely to defect, or even more precisely, which are most likely to respond to incentives. Based on these probabilities, you could then spend a finite marketing budget wisely. ### Maximizing the Probability of the Training Set Now if we maximize $P(y|\v{x},\v{w})$, we will maximize the chance that each point is classified correctly, which is what we want to do. While this is not exactly the same thing as maximizing the 1-0 training risk, it is a principled way of obtaining the highest probability classification. This process is called **maximum likelihood** estimation since we are maximising the **likelihood of the training data y**, $$\like = P(y|\v{x},\v{w}).$$ Maximum likelihood is one of the corenerstone methods in statistics, and is used to estimate probabilities of data. We can equivalently maximize $$\loglike = \log{P(y|\v{x},\v{w})}$$ since the natural logarithm $\log$ is a monotonic function. This is known as maximizing the **log-likelihood**. 
Thus we can equivalently *minimize* a risk that is the negative of $\log(P(y|\v{x},\v{w}))$: $$R_{\cal{D}}(h(x)) = -\loglike = -\log \like = -\log{P(y|\v{x},\v{w})}.$$ Thus \begin{eqnarray*} R_{\cal{D}}(h(x)) &=& -\log\left(\prod_{y_i \in \cal{D}} h(\v{w}\cdot\v{x_i})^{y_i} \left(1 - h(\v{w}\cdot\v{x_i}) \right)^{(1-y_i)}\right)\\ &=& -\sum_{y_i \in \cal{D}} \log\left(h(\v{w}\cdot\v{x_i})^{y_i} \left(1 - h(\v{w}\cdot\v{x_i}) \right)^{(1-y_i)}\right)\\ &=& -\sum_{y_i \in \cal{D}} \log\,h(\v{w}\cdot\v{x_i})^{y_i} + \log\,\left(1 - h(\v{w}\cdot\v{x_i}) \right)^{(1-y_i)}\\ &=& - \sum_{y_i \in \cal{D}} \left ( y_i \log(h(\v{w}\cdot\v{x})) + ( 1 - y_i) \log(1 - h(\v{w}\cdot\v{x})) \right ) \end{eqnarray*} This is exactly the risk we had above, leaving out the regularization term (which we shall return to later) and was the reason we chose it over the 1-0 risk. Notice that this little process we carried out above tells us something very interesting: **Probabilistic estimation using maximum likelihood is equivalent to Empiricial Risk Minimization using the negative log-likelihood**, since all we did was to minimize the negative log-likelihood over the training samples. `sklearn` will return the probabilities for our samples, or for that matter, for any input vector set $\{\v{x}_i\}$, i.e. $P(y_i | \v{x}_i, \v{w})$: ``` clf_l.predict_proba(Xtest_l) ``` ### Discriminative vs Generative Classifier Logistic regression is what is known as a **discriminative classifier** as we learn a soft boundary between/among classes. Another paradigm is the **generative classifier** where we learn the distribution of each class. For more examples of generative classifiers, look [here](https://en.wikipedia.org/wiki/Generative_model). 
Let us plot the probabilities obtained from `predict_proba`, overlayed on the samples with their true labels: ``` plt.figure() ax = plt.gca() points_plot_prob(ax, Xtrain_l, Xtest_l, ytrain_l, ytest_l, clf_l, psize=20, alpha=0.1); ``` Notice that lines of equal probability, as might be expected are stright lines. What the classifier does is very intuitive: if the probability is greater than 0.5, it classifies the sample as type '1' (male), otherwise it classifies the sample to be class '0'. Thus in the diagram above, where we have plotted predicted values rather than actual labels of samples, there is a clear demarcation at the 0.5 probability line. Again, this notion of trying to obtain the line or boundary of demarcation is what is called a **discriminative** classifier. The algorithm tries to find a decision boundary that separates the males from the females. To classify a new sample as male or female, it checks on which side of the decision boundary the sample falls, and makes a prediction. In other words we are asking, given $\v{x}$, what is the probability of a given $y$, or, what is the likelihood $P(y|\v{x},\v{w})$?
github_jupyter
``` import os import numpy as np import matplotlib.pyplot as plt from keras.models import load_model from keras.preprocessing.image import ImageDataGenerator from panotti.datautils import build_dataset ``` # Class Distribution ``` import os import glob import pandas as pd import seaborn as sns import matplotlib.pyplot as plt def get_class_freq(root_path, y='', title='', color=sns.xkcd_rgb["denim blue"], plot=True): ''' Returns a dataframe of class frequency distribution when structured in the Keras ImageDataGenerator manner for classification ''' walker = os.walk(root_path) next(walker) class_freq = dict() for r, d, f in walker: class_freq[r.split('/')[-1]] = len(f) class_freq_df = pd.DataFrame.from_dict( class_freq, orient='index', columns=['count']) class_freq_df.reset_index(inplace=True) class_freq_df.columns = [y, 'count'] class_freq_df.sort_values('count', axis=0, ascending=False, inplace=True) if plot: sns.catplot(x="count", y=y, kind="bar", data=class_freq_df, color=color) plt.title(title) plt.show() return class_freq_df else: return class_freq_df get_class_freq('Preproc/Train/', y='Classes', title= 'Class Distribution for Training Data') get_class_freq('Preproc/Test/', y='Classes', title= 'Class Distribution for Testing Data', color=sns.xkcd_rgb["dusty purple"]) ``` # Confusion Matrix ``` weights_path = 'weights.hdf5' model = load_model(weights_path) melgram_path = glob.glob('Preproc/Test/*/*.npz') test_mel = melgram_path[0] with np.load(test_mel) as data: melgram = data['melgram'] melgram.shape X_test, Y_test, paths, class_names = build_dataset('Preproc/Test/') X_test.shape pred = model.predict(X_test) from sklearn.metrics import confusion_matrix confusion_1 = confusion_matrix(np.argmax(Y_test, axis = -1), np.argmax(pred, axis = -1)) NUM_LABELS = len(class_names) f, axes = plt.subplots(1,1, figsize = (12,12)) axes.set_xlabel('Predicted') axes.set_ylabel('Actual') axes.grid(False) axes.set_xticklabels(class_names, rotation = 90) 
axes.set_yticklabels(class_names) axes.set_yticks(list(range(NUM_LABELS))) axes.set_xticks(list(range(NUM_LABELS))) plt.imshow(confusion_1, cmap=plt.cm.Set2, interpolation='nearest') for i, cas in enumerate(confusion_1): for j, count in enumerate(cas): if count > 0: xoff = .07 * len(str(count)) plt.text(j-xoff, i+.2, int(count), fontsize=12, color='black') print(round((1-(21/1120))*100, 2)) print(round((1-(13/1120))*100, 2)) ```
github_jupyter
# TensorFlow Lattice estimators

In this tutorial, we will cover basics of TensorFlow Lattice estimators.

```
# import libraries
!pip install tensorflow_lattice
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_lattice as tfl
import tempfile
from six.moves import urllib
```

# Synthetic dataset

Here we create a synthetic dataset.

```
%matplotlib inline
# Training dataset contains one feature, "distance".
train_features = {
    'distance': np.array([1.0, 1.3, 1.5, 2.0, 2.1, 3.0, 4.0, 5.0, 1.3, 1.7,
                          2.5, 2.8, 4.7, 4.2, 3.5, 4.75, 5.2, 5.8, 5.9]) * 0.1,
}
train_labels = np.array([4.8, 4.9, 5.0, 5.0, 4.8, 3.3, 2.5, 2.0, 4.7, 4.6,
                         4.0, 3.2, 2.12, 2.1, 2.5, 2.2, 2.3, 2.34, 2.6])
plt.scatter(train_features['distance'], train_labels)
plt.xlabel('distance')
plt.ylabel('user hapiness')


# This function draws two plots.
# Firstly, we draw the scatter plot of `distance` vs. `label`.
# Secondly, we generate predictions from `estimator` distance ranges in
# [xmin, xmax].
def Plot(distance, label, estimator, xmin=0.0, xmax=10.0):
    %matplotlib inline
    test_features = {
        'distance': np.linspace(xmin, xmax, num=100)
    }
    # Estimator accepts an input in the form of input_fn (callable).
    # numpy_input_fn creates an input function that generates a dictionary where
    # the key is a feature name ('distance'), and the value is a tensor with
    # a shape [batch_size, 1].
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x=test_features, batch_size=1, num_epochs=1, shuffle=False)
    # Estimator's prediction is 1d tensor with a shape [batch_size]. Since we
    # set batch_size == 1 in the above, p['predictions'] will contain only one
    # element in each batch, and we fetch this value by p['predictions'][0].
    predictions = [p['predictions'][0]
                   for p in estimator.predict(input_fn=test_input_fn)]

    # Plot estimator's response and (distance, label) scatter plot.
    fig, ax = plt.subplots(1, 1)
    ax.plot(test_features['distance'], predictions)
    ax.scatter(distance, label)
    plt.xlabel('distance')
    plt.ylabel('user hapiness')
    plt.legend(['prediction', 'data'])
```

# DNN Estimator

Now let us define feature columns and use DNN regressor to fit a model.

```
# Specify feature.
feature_columns = [
    tf.feature_column.numeric_column('distance'),
]

# Define a neural network regressor.
# The first hidden layer contains 30 hidden units, and the second
# hidden layer contains 10 hidden units.
dnn_estimator = tf.estimator.DNNRegressor(
    feature_columns=feature_columns,
    hidden_units=[30, 10],
    optimizer=tf.train.GradientDescentOptimizer(
        learning_rate=0.01,
    ),
)

# Define training input function.
# mini-batch size is 10, and we iterate the dataset over
# 1000 times.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x=train_features, y=train_labels, batch_size=10,
    num_epochs=1000, shuffle=False)

tf.logging.set_verbosity(tf.logging.ERROR)

# Train this estimator
dnn_estimator.train(input_fn=train_input_fn)

# Response in [0.0, 1.0] range
Plot(train_features['distance'], train_labels, dnn_estimator, 0.0, 1.0)

# Now let's increase the prediction range to [0.0, 3.0]
# Note) In most machines, the prediction is going up.
# However, DNN training does not have a unique solution, so it's possible
# not to see this phenomenon.
Plot(train_features['distance'], train_labels, dnn_estimator, 0.0, 3.0)
```

# TensorFlow Lattice calibrated linear model

Let's use calibrated linear model to fit the data. Since we only have one
example, there's no reason to use a lattice.

```
# TensorFlow Lattice needs feature names to specify
# per-feature parameters.
feature_names = [fc.name for fc in feature_columns]

num_keypoints = 5
hparams = tfl.CalibratedLinearHParams(
    feature_names=feature_names,
    learning_rate=0.1,
    num_keypoints=num_keypoints)

# input keypoint initializers.
# init_fns are dict of (feature_name, callable initializer).
keypoints_init_fns = {
    'distance': lambda: tfl.uniform_keypoints_for_signal(num_keypoints,
                                                         input_min=0.0,
                                                         input_max=0.7,
                                                         output_min=-1.0,
                                                         output_max=1.0)}

non_monotnic_estimator = tfl.calibrated_linear_regressor(
    feature_columns=feature_columns,
    keypoints_initializers_fn=keypoints_init_fns,
    hparams=hparams)
non_monotnic_estimator.train(input_fn=train_input_fn)

# The prediction goes up!
Plot(train_features['distance'], train_labels, non_monotnic_estimator, 0.0, 1.0)

# Declare distance as a decreasing monotonic input.
hparams.set_feature_param('distance', 'monotonicity', -1)

monotonic_estimator = tfl.calibrated_linear_regressor(
    feature_columns=feature_columns,
    keypoints_initializers_fn=keypoints_init_fns,
    hparams=hparams)
monotonic_estimator.train(input_fn=train_input_fn)

# Now it's decreasing.
Plot(train_features['distance'], train_labels, monotonic_estimator, 0.0, 1.0)

# Even if the output range becomes larger, the prediction never goes up!
Plot(train_features['distance'], train_labels, monotonic_estimator, 0.0, 3.0)
```
github_jupyter
# find the dataset definition by name, for example dtu_yao (dtu_yao.py)
def find_dataset_def(dataset_name):
    """Resolve the MVSDataset class exported by datasets.<dataset_name>."""
    module_name = 'datasets.{}'.format(dataset_name)
    dataset_module = importlib.import_module(module_name)
    return getattr(dataset_module, "MVSDataset")


"""
Implementation of Pytorch layer primitives, such as Conv+BN+ReLU, differentiable warping layers,
and depth regression based upon expectation of an input probability distribution.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F


class ConvBnReLU(nn.Module):
    """2-D convolution followed by batch normalization and an in-place ReLU."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        pad: int = 1,
        dilation: int = 1,
    ) -> None:
        """Build the conv2d + batchnorm pair.

        Args:
            in_channels: input channel count of the convolution
            out_channels: output channel count of the convolution
            kernel_size: convolution kernel size
            stride: convolution stride
            pad: convolution padding
            dilation: convolution dilation
        """
        super().__init__()
        # bias is folded into the following BatchNorm, hence bias=False
        self.conv = nn.Conv2d(
            in_channels, out_channels, kernel_size, stride=stride, padding=pad, dilation=dilation, bias=False
        )
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply conv, then batch norm, then ReLU."""
        out = self.conv(x)
        out = self.bn(out)
        return F.relu(out, inplace=True)


class ConvBnReLU3D(nn.Module):
    """3-D convolution followed by batch normalization and an in-place ReLU."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        pad: int = 1,
        dilation: int = 1,
    ) -> None:
        """Build the conv3d + batchnorm pair.

        Args:
            in_channels: input channel count of the convolution
            out_channels: output channel count of the convolution
            kernel_size: convolution kernel size
            stride: convolution stride
            pad: convolution padding
            dilation: convolution dilation
        """
        super().__init__()
        self.conv = nn.Conv3d(
            in_channels, out_channels, kernel_size, stride=stride, padding=pad, dilation=dilation, bias=False
        )
        self.bn = nn.BatchNorm3d(out_channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply conv, then batch norm, then ReLU."""
        out = self.conv(x)
        out = self.bn(out)
        return F.relu(out, inplace=True)


class ConvBnReLU1D(nn.Module):
    """1-D convolution followed by batch normalization and an in-place ReLU."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        pad: int = 1,
        dilation: int = 1,
    ) -> None:
        """Build the conv1d + batchnorm pair.

        Args:
            in_channels: input channel count of the convolution
            out_channels: output channel count of the convolution
            kernel_size: convolution kernel size
            stride: convolution stride
            pad: convolution padding
            dilation: convolution dilation
        """
        super().__init__()
        self.conv = nn.Conv1d(
            in_channels, out_channels, kernel_size, stride=stride, padding=pad, dilation=dilation, bias=False
        )
        self.bn = nn.BatchNorm1d(out_channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply conv, then batch norm, then ReLU."""
        out = self.conv(x)
        out = self.bn(out)
        return F.relu(out, inplace=True)


class ConvBn(nn.Module):
    """2-D convolution followed by batch normalization, with no activation."""

    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, pad: int = 1
    ) -> None:
        """Build the conv2d + batchnorm pair.

        Args:
            in_channels: input channel count of the convolution
            out_channels: output channel count of the convolution
            kernel_size: convolution kernel size
            stride: convolution stride
            pad: convolution padding
        """
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply conv, then batch norm."""
        out = self.conv(x)
        return self.bn(out)
def differentiable_warping(
    src_fea: torch.Tensor, src_proj: torch.Tensor, ref_proj: torch.Tensor, depth_samples: torch.Tensor
):
    """Differentiable homography-based warping, implemented in Pytorch.

    Args:
        src_fea: [B, C, H, W] source features, for each source view in batch
        src_proj: [B, 4, 4] source camera projection matrix, for each source view in batch
        ref_proj: [B, 4, 4] reference camera projection matrix, for each ref view in batch
        depth_samples: [B, Ndepth, H, W] virtual depth layers

    Returns:
        warped_src_fea: [B, C, Ndepth, H, W] features on depths after perspective transformation
    """
    batch, channels, height, width = src_fea.shape
    num_depth = depth_samples.shape[1]

    # The sampling grid is purely geometric; gradients only need to flow
    # through grid_sample w.r.t. src_fea, so grid construction is no_grad.
    with torch.no_grad():
        # relative transform: reference camera -> source camera
        proj = torch.matmul(src_proj, torch.inverse(ref_proj))
        rot = proj[:, :3, :3]  # [B,3,3]
        trans = proj[:, :3, 3:4]  # [B,3,1]
        y, x = torch.meshgrid(
            [
                torch.arange(0, height, dtype=torch.float32, device=src_fea.device),
                torch.arange(0, width, dtype=torch.float32, device=src_fea.device),
            ]
        )
        y, x = y.contiguous(), x.contiguous()
        y, x = y.view(height * width), x.view(height * width)
        # homogeneous pixel coordinates of the reference view
        xyz = torch.stack((x, y, torch.ones_like(x)))  # [3, H*W]
        xyz = torch.unsqueeze(xyz, 0).repeat(batch, 1, 1)  # [B, 3, H*W]
        rot_xyz = torch.matmul(rot, xyz)  # [B, 3, H*W]
        # back-project every pixel at every depth hypothesis
        rot_depth_xyz = rot_xyz.unsqueeze(2).repeat(1, 1, num_depth, 1) * depth_samples.view(
            batch, 1, num_depth, height * width
        )  # [B, 3, Ndepth, H*W]
        proj_xyz = rot_depth_xyz + trans.view(batch, 3, 1, 1)  # [B, 3, Ndepth, H*W]
        # avoid negative depth: points behind the source camera are pushed to
        # an out-of-image coordinate so grid_sample pads them with zeros
        negative_depth_mask = proj_xyz[:, 2:] <= 1e-3
        proj_xyz[:, 0:1][negative_depth_mask] = float(width)
        proj_xyz[:, 1:2][negative_depth_mask] = float(height)
        proj_xyz[:, 2:3][negative_depth_mask] = 1.0
        proj_xy = proj_xyz[:, :2, :, :] / proj_xyz[:, 2:3, :, :]  # [B, 2, Ndepth, H*W]
        # normalize to [-1, 1] as required by F.grid_sample
        proj_x_normalized = proj_xy[:, 0, :, :] / ((width - 1) / 2) - 1  # [B, Ndepth, H*W]
        proj_y_normalized = proj_xy[:, 1, :, :] / ((height - 1) / 2) - 1
        proj_xy = torch.stack((proj_x_normalized, proj_y_normalized), dim=3)  # [B, Ndepth, H*W, 2]
        grid = proj_xy

    warped_src_fea = F.grid_sample(
        src_fea,
        grid.view(batch, num_depth * height, width, 2),
        mode="bilinear",
        padding_mode="zeros",
        align_corners=True,
    )

    return warped_src_fea.view(batch, channels, num_depth, height, width)


def depth_regression(p: torch.Tensor, depth_values: torch.Tensor) -> torch.Tensor:
    """Implements per-pixel depth regression based upon a probability distribution per-pixel.

    The regressed depth value D(p) at pixel p is found as the expectation w.r.t. P of the hypotheses.

    Args:
        p: probability volume [B, D, H, W]
        depth_values: discrete depth values. NOTE(review): the view below only
            matches a 1-D tensor of shape (D,) (shape[0] == D), which is how
            PatchmatchNet.forward calls this with torch.arange(num_depth);
            a (B, D) input as previously documented would fail the view.

    Returns:
        result depth: expected value, soft argmin [B, 1, H, W]
    """
    return torch.sum(p * depth_values.view(depth_values.shape[0], 1, 1), dim=1).unsqueeze(1)


def is_empty(x: torch.Tensor) -> bool:
    # convention used throughout: an empty tensor (torch.empty(0)) stands for "no value"
    return x.numel() == 0


from typing import Dict, List, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

from .module import ConvBnReLU, depth_regression
from .patchmatch import PatchMatch


class FeatureNet(nn.Module):
    """Feature Extraction Network: to extract features of original images from each view"""

    def __init__(self):
        """Initialize different layers in the network"""
        super(FeatureNet, self).__init__()

        self.conv0 = ConvBnReLU(3, 8, 3, 1, 1)
        # [B,8,H,W]
        self.conv1 = ConvBnReLU(8, 8, 3, 1, 1)
        # [B,16,H/2,W/2]
        self.conv2 = ConvBnReLU(8, 16, 5, 2, 2)
        self.conv3 = ConvBnReLU(16, 16, 3, 1, 1)
        self.conv4 = ConvBnReLU(16, 16, 3, 1, 1)
        # [B,32,H/4,W/4]
        self.conv5 = ConvBnReLU(16, 32, 5, 2, 2)
        self.conv6 = ConvBnReLU(32, 32, 3, 1, 1)
        self.conv7 = ConvBnReLU(32, 32, 3, 1, 1)
        # [B,64,H/8,W/8]
        self.conv8 = ConvBnReLU(32, 64, 5, 2, 2)
        self.conv9 = ConvBnReLU(64, 64, 3, 1, 1)
        self.conv10 = ConvBnReLU(64, 64, 3, 1, 1)

        # FPN-style lateral (inner*) and output heads for stages 3/2/1
        self.output1 = nn.Conv2d(64, 64, 1, bias=False)
        self.inner1 = nn.Conv2d(32, 64, 1, bias=True)
        self.inner2 = nn.Conv2d(16, 64, 1, bias=True)
        self.output2 = nn.Conv2d(64, 32, 1, bias=False)
        self.output3 = nn.Conv2d(64, 16, 1, bias=False)

    def forward(self, x: torch.Tensor) -> Dict[int, torch.Tensor]:
        """Forward method

        Args:
            x: images from a single view, in the shape of [B, C, H, W]. Generally, C=3

        Returns:
            output_feature: a python dictionary contains extracted features from stage 1 to stage 3
                keys are 1, 2, and 3
        """
        output_feature: Dict[int, torch.Tensor] = {}

        conv1 = self.conv1(self.conv0(x))
        conv4 = self.conv4(self.conv3(self.conv2(conv1)))

        conv7 = self.conv7(self.conv6(self.conv5(conv4)))
        conv10 = self.conv10(self.conv9(self.conv8(conv7)))

        output_feature[3] = self.output1(conv10)
        # top-down pathway: upsample coarse features and add lateral connection
        intra_feat = F.interpolate(conv10, scale_factor=2.0, mode="bilinear", align_corners=False) + self.inner1(conv7)
        del conv7
        del conv10
        output_feature[2] = self.output2(intra_feat)

        intra_feat = F.interpolate(
            intra_feat, scale_factor=2.0, mode="bilinear", align_corners=False) + self.inner2(conv4)
        del conv4
        output_feature[1] = self.output3(intra_feat)
        del intra_feat

        return output_feature
class Refinement(nn.Module):
    """Depth map refinement network"""

    def __init__(self):
        """Initialize"""
        super(Refinement, self).__init__()

        # img: [B,3,H,W]
        self.conv0 = ConvBnReLU(in_channels=3, out_channels=8)
        # depth map:[B,1,H/2,W/2]
        self.conv1 = ConvBnReLU(in_channels=1, out_channels=8)
        self.conv2 = ConvBnReLU(in_channels=8, out_channels=8)
        self.deconv = nn.ConvTranspose2d(
            in_channels=8, out_channels=8, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False
        )

        self.bn = nn.BatchNorm2d(8)
        self.conv3 = ConvBnReLU(in_channels=16, out_channels=8)
        self.res = nn.Conv2d(in_channels=8, out_channels=1, kernel_size=3, padding=1, bias=False)

    def forward(
        self, img: torch.Tensor, depth_0: torch.Tensor, depth_min: torch.Tensor, depth_max: torch.Tensor
    ) -> torch.Tensor:
        """Forward method

        Args:
            img: input reference images (B, 3, H, W)
            depth_0: current depth map (B, 1, H//2, W//2)
            depth_min: pre-defined minimum depth (B, )
            depth_max: pre-defined maximum depth (B, )

        Returns:
            depth: refined depth map (B, 1, H, W)
        """
        batch_size = depth_min.size()[0]
        # pre-scale the depth map into [0,1]
        depth = (depth_0 - depth_min.view(batch_size, 1, 1, 1)) / (depth_max - depth_min).view(batch_size, 1, 1, 1)

        conv0 = self.conv0(img)
        deconv = F.relu(self.bn(self.deconv(self.conv2(self.conv1(depth)))), inplace=True)
        # depth residual predicted from concatenated image + depth features
        res = self.res(self.conv3(torch.cat((deconv, conv0), dim=1)))
        del conv0
        del deconv

        depth = F.interpolate(depth, scale_factor=2.0, mode="nearest") + res
        # convert the normalized depth back
        return depth * (depth_max - depth_min).view(batch_size, 1, 1, 1) + depth_min.view(batch_size, 1, 1, 1)


class PatchmatchNet(nn.Module):
    """ Implementation of complete structure of PatchmatchNet"""

    def __init__(
        self,
        patchmatch_interval_scale: List[float] = [0.005, 0.0125, 0.025],
        propagation_range: List[int] = [6, 4, 2],
        patchmatch_iteration: List[int] = [1, 2, 2],
        patchmatch_num_sample: List[int] = [8, 8, 16],
        propagate_neighbors: List[int] = [0, 8, 16],
        evaluate_neighbors: List[int] = [9, 9, 9],
    ) -> None:
        """Initialize modules in PatchmatchNet

        NOTE(review): the list defaults are mutable default arguments; they are
        only read here (never mutated), so behavior is correct, but new code
        should prefer None-plus-fallback.

        Args:
            patchmatch_interval_scale: depth interval scale in patchmatch module
            propagation_range: propagation range
            patchmatch_iteration: patchmatch iteration number
            patchmatch_num_sample: patchmatch number of samples
            propagate_neighbors: number of propagation neighbors
            evaluate_neighbors: number of propagation neighbors for evaluation
        """
        super(PatchmatchNet, self).__init__()

        self.stages = 4
        self.feature = FeatureNet()
        self.patchmatch_num_sample = patchmatch_num_sample

        num_features = [16, 32, 64]

        self.propagate_neighbors = propagate_neighbors
        self.evaluate_neighbors = evaluate_neighbors
        # number of groups for group-wise correlation
        self.G = [4, 8, 8]

        # one PatchMatch module per stage 1..3 (coarse stage 3 to fine stage 1)
        for i in range(self.stages - 1):
            patchmatch = PatchMatch(
                propagation_out_range=propagation_range[i],
                patchmatch_iteration=patchmatch_iteration[i],
                patchmatch_num_sample=patchmatch_num_sample[i],
                patchmatch_interval_scale=patchmatch_interval_scale[i],
                num_feature=num_features[i],
                G=self.G[i],
                propagate_neighbors=self.propagate_neighbors[i],
                evaluate_neighbors=evaluate_neighbors[i],
                stage=i + 1,
            )
            setattr(self, f"patchmatch_{i+1}", patchmatch)

        self.upsample_net = Refinement()

    def forward(
        self,
        images: Dict[str, torch.Tensor],
        proj_matrices: Dict[str, torch.Tensor],
        depth_min: torch.Tensor,
        depth_max: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, Dict[int, List[torch.Tensor]]]:
        """Forward method for PatchMatchNet

        Args:
            images: different stages of images (B, 3, H, W) stored in the dictionary
            proj_matrices: different stages of camera projection matrices (B, 4, 4) stored in the dictionary
            depth_min: minimum virtual depth (B, )
            depth_max: maximum virtual depth (B, )

        Returns:
            output tuple of PatchMatchNet, containing refined depthmap, depth patchmatch,
                and photometric confidence.
        """
        imgs_0 = torch.unbind(images["stage_0"], 1)
        del images
        ref_image = imgs_0[0]

        proj_mtx = {
            0: torch.unbind(proj_matrices["stage_0"].float(), 1),
            1: torch.unbind(proj_matrices["stage_1"].float(), 1),
            2: torch.unbind(proj_matrices["stage_2"].float(), 1),
            3: torch.unbind(proj_matrices["stage_3"].float(), 1)
        }
        del proj_matrices

        assert len(imgs_0) == len(proj_mtx[0]), "Different number of images and projection matrices"

        # step 1. Multi-scale feature extraction
        features: List[Dict[int, torch.Tensor]] = []
        for img in imgs_0:
            output_feature = self.feature(img)
            features.append(output_feature)
        del imgs_0
        ref_feature, src_features = features[0], features[1:]

        depth_min = depth_min.float()
        depth_max = depth_max.float()

        # step 2. Learning-based patchmatch, coarse-to-fine (stage 3 -> 1)
        depth = torch.empty(0)
        depths: List[torch.Tensor] = []
        score = torch.empty(0)
        view_weights = torch.empty(0)
        depth_patchmatch: Dict[int, List[torch.Tensor]] = {}

        for stage in range(self.stages - 1, 0, -1):
            src_features_l = [src_fea[stage] for src_fea in src_features]

            ref_proj, src_projs = proj_mtx[stage][0], proj_mtx[stage][1:]

            # Need conditional since TorchScript only allows "getattr" access with string literals
            if stage == 3:
                depths, _, view_weights = self.patchmatch_3(
                    ref_feature=ref_feature[stage],
                    src_features=src_features_l,
                    ref_proj=ref_proj,
                    src_projs=src_projs,
                    depth_min=depth_min,
                    depth_max=depth_max,
                    depth=depth,
                    view_weights=view_weights,
                )
            elif stage == 2:
                depths, _, view_weights = self.patchmatch_2(
                    ref_feature=ref_feature[stage],
                    src_features=src_features_l,
                    ref_proj=ref_proj,
                    src_projs=src_projs,
                    depth_min=depth_min,
                    depth_max=depth_max,
                    depth=depth,
                    view_weights=view_weights,
                )
            elif stage == 1:
                depths, score, _ = self.patchmatch_1(
                    ref_feature=ref_feature[stage],
                    src_features=src_features_l,
                    ref_proj=ref_proj,
                    src_projs=src_projs,
                    depth_min=depth_min,
                    depth_max=depth_max,
                    depth=depth,
                    view_weights=view_weights,
                )

            depth_patchmatch[stage] = depths
            depth = depths[-1].detach()

            if stage > 1:
                # upsampling the depth map and pixel-wise view weight for next stage
                depth = F.interpolate(depth, scale_factor=2.0, mode="nearest")
                view_weights = F.interpolate(view_weights, scale_factor=2.0, mode="nearest")

        del ref_feature
        del src_features

        # step 3. Refinement
        depth = self.upsample_net(ref_image, depth, depth_min, depth_max)

        if self.training:
            return depth, torch.empty(0), depth_patchmatch
        else:
            # photometric confidence: probability mass of the 4 hypotheses
            # around the regressed depth index
            num_depth = self.patchmatch_num_sample[0]
            score_sum4 = 4 * F.avg_pool3d(
                F.pad(score.unsqueeze(1), pad=(0, 0, 0, 0, 1, 2)), (4, 1, 1), stride=1, padding=0
            ).squeeze(1)
            # [B, 1, H, W]
            depth_index = depth_regression(
                score, depth_values=torch.arange(num_depth, device=score.device, dtype=torch.float)
            ).long().clamp(0, num_depth - 1)
            photometric_confidence = torch.gather(score_sum4, 1, depth_index)
            photometric_confidence = F.interpolate(photometric_confidence, scale_factor=2.0, mode="nearest").squeeze(1)

            return depth, photometric_confidence, depth_patchmatch


def patchmatchnet_loss(
    depth_patchmatch: Dict[int, List[torch.Tensor]],
    depth_gt: Dict[str, torch.Tensor],
    mask: Dict[str, torch.Tensor],
) -> torch.Tensor:
    """Patchmatch Net loss function

    Args:
        depth_patchmatch: depth map predicted by patchmatch net
        depth_gt: ground truth depth map
        mask: mask for filter valid points

    Returns:
        loss: result loss value
    """
    loss = 0
    # NOTE(review): iterates stages 0..3, so depth_patchmatch is expected to
    # also carry the refined depth under key 0 — verify against the trainer.
    for i in range(0, 4):
        mask_i = mask[f"stage_{i}"] > 0.5
        gt_depth = depth_gt[f"stage_{i}"][mask_i]
        for depth in depth_patchmatch[i]:
            loss = loss + F.smooth_l1_loss(depth[mask_i], gt_depth, reduction="mean")

    return loss


"""
PatchmatchNet uses the following main steps:

1. Initialization: generate random hypotheses;
2. Propagation: propagate hypotheses to neighbors;
3. Evaluation: compute the matching costs for all the hypotheses and choose best solutions.
"""
""" from typing import List, Tuple import torch import torch.nn as nn import torch.nn.functional as F from .module import ConvBnReLU3D, differentiable_warping, is_empty class DepthInitialization(nn.Module): """Initialization Stage Class""" def __init__(self, patchmatch_num_sample: int = 1) -> None: """Initialize method Args: patchmatch_num_sample: number of samples used in patchmatch process """ super(DepthInitialization, self).__init__() self.patchmatch_num_sample = patchmatch_num_sample def forward( self, min_depth: torch.Tensor, max_depth: torch.Tensor, height: int, width: int, depth_interval_scale: float, device: torch.device, depth: torch.Tensor = torch.empty(0), ) -> torch.Tensor: """Forward function for depth initialization Args: min_depth: minimum virtual depth, (B, ) max_depth: maximum virtual depth, (B, ) height: height of depth map width: width of depth map depth_interval_scale: depth interval scale device: device on which to place tensor depth: current depth (B, 1, H, W) Returns: depth_sample: initialized sample depth map by randomization or local perturbation (B, Ndepth, H, W) """ batch_size = min_depth.size()[0] inverse_min_depth = 1.0 / min_depth inverse_max_depth = 1.0 / max_depth if is_empty(depth): # first iteration of Patchmatch on stage 3, sample in the inverse depth range # divide the range into several intervals and sample in each of them patchmatch_num_sample = 48 # [B,Ndepth,H,W] depth_sample = torch.rand( size=(batch_size, patchmatch_num_sample, height, width), device=device ) + torch.arange(start=0, end=patchmatch_num_sample, step=1, device=device).view( 1, patchmatch_num_sample, 1, 1 ) depth_sample = inverse_max_depth.view(batch_size, 1, 1, 1) + depth_sample / patchmatch_num_sample * ( inverse_min_depth.view(batch_size, 1, 1, 1) - inverse_max_depth.view(batch_size, 1, 1, 1) ) return 1.0 / depth_sample elif self.patchmatch_num_sample == 1: return depth.detach() else: # other Patchmatch, local perturbation is performed based on previous 
result # uniform samples in an inversed depth range depth_sample = ( torch.arange(-self.patchmatch_num_sample // 2, self.patchmatch_num_sample // 2, 1, device=device) .view(1, self.patchmatch_num_sample, 1, 1).repeat(batch_size, 1, height, width).float() ) inverse_depth_interval = (inverse_min_depth - inverse_max_depth) * depth_interval_scale inverse_depth_interval = inverse_depth_interval.view(batch_size, 1, 1, 1) depth_sample = 1.0 / depth.detach() + inverse_depth_interval * depth_sample depth_clamped = [] del depth for k in range(batch_size): depth_clamped.append( torch.clamp(depth_sample[k], min=inverse_max_depth[k], max=inverse_min_depth[k]).unsqueeze(0) ) return 1.0 / torch.cat(depth_clamped, dim=0) class Propagation(nn.Module): """ Propagation module implementation""" def __init__(self) -> None: """Initialize method""" super(Propagation, self).__init__() def forward(self, depth_sample: torch.Tensor, grid: torch.Tensor) -> torch.Tensor: # [B,D,H,W] """Forward method of adaptive propagation Args: depth_sample: sample depth map, in shape of [batch, num_depth, height, width], grid: 2D grid for bilinear gridding, in shape of [batch, neighbors*H, W, 2] Returns: propagate depth: sorted propagate depth map [batch, num_depth+num_neighbors, height, width] """ batch, num_depth, height, width = depth_sample.size() num_neighbors = grid.size()[1] // height propagate_depth_sample = F.grid_sample( depth_sample[:, num_depth // 2, :, :].unsqueeze(1), grid, mode="bilinear", padding_mode="border", align_corners=False ).view(batch, num_neighbors, height, width) return torch.sort(torch.cat((depth_sample, propagate_depth_sample), dim=1), dim=1)[0] class Evaluation(nn.Module): """Evaluation module for adaptive evaluation step in Learning-based Patchmatch Used to compute the matching costs for all the hypotheses and choose best solutions. 
""" def __init__(self, G: int = 8) -> None: """Initialize method` Args: G: the feature channels of input will be divided evenly into G groups """ super(Evaluation, self).__init__() self.G = G self.pixel_wise_net = PixelwiseNet(self.G) self.softmax = nn.LogSoftmax(dim=1) self.similarity_net = SimilarityNet(self.G) def forward( self, ref_feature: torch.Tensor, src_features: List[torch.Tensor], ref_proj: torch.Tensor, src_projs: List[torch.Tensor], depth_sample: torch.Tensor, grid: torch.Tensor, weight: torch.Tensor, view_weights: torch.Tensor = torch.empty(0), is_inverse: bool = False ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Forward method for adaptive evaluation Args: ref_feature: feature from reference view, (B, C, H, W) src_features: features from (Nview-1) source views, (Nview-1) * (B, C, H, W), where Nview is the number of input images (or views) of PatchmatchNet ref_proj: projection matrix of reference view, (B, 4, 4) src_projs: source matrices of source views, (Nview-1) * (B, 4, 4), where Nview is the number of input images (or views) of PatchmatchNet depth_sample: sample depth map, (B,Ndepth,H,W) grid: grid, (B, evaluate_neighbors*H, W, 2) weight: weight, (B,Ndepth,1,H,W) view_weights: Tensor to store weights of source views, in shape of (B,Nview-1,H,W), Nview-1 represents the number of source views is_inverse: Flag for inverse depth regression Returns: depth_sample: expectation of depth sample, (B,H,W) score: probability map, (B,Ndepth,H,W) view_weights: optional, Tensor to store weights of source views, in shape of (B,Nview-1,H,W), Nview-1 represents the number of source views """ batch, feature_channel, height, width = ref_feature.size() device = ref_feature.device num_depth = depth_sample.size()[1] assert ( len(src_features) == len(src_projs) ), "Patchmatch Evaluation: Different number of images and projection matrices" if not is_empty(view_weights): assert ( len(src_features) == view_weights.size()[1] ), "Patchmatch Evaluation: Different 
number of images and view weights" # Change to a tensor with value 1e-5 pixel_wise_weight_sum = 1e-5 * torch.ones((batch, 1, 1, height, width), dtype=torch.float32, device=device) ref_feature = ref_feature.view(batch, self.G, feature_channel // self.G, 1, height, width) similarity_sum = torch.zeros((batch, self.G, num_depth, height, width), dtype=torch.float32, device=device) i = 0 view_weights_list = [] for src_feature, src_proj in zip(src_features, src_projs): warped_feature = differentiable_warping( src_feature, src_proj, ref_proj, depth_sample ).view(batch, self.G, feature_channel // self.G, num_depth, height, width) # group-wise correlation similarity = (warped_feature * ref_feature).mean(2) # pixel-wise view weight if is_empty(view_weights): view_weight = self.pixel_wise_net(similarity) view_weights_list.append(view_weight) else: # reuse the pixel-wise view weight from first iteration of Patchmatch on stage 3 view_weight = view_weights[:, i].unsqueeze(1) # [B,1,H,W] i = i + 1 similarity_sum += similarity * view_weight.unsqueeze(1) pixel_wise_weight_sum += view_weight.unsqueeze(1) # aggregated matching cost across all the source views similarity = similarity_sum.div_(pixel_wise_weight_sum) # [B, G, Ndepth, H, W] # adaptive spatial cost aggregation score = self.similarity_net(similarity, grid, weight) # [B, G, Ndepth, H, W] # apply softmax to get probability score = torch.exp(self.softmax(score)) if is_empty(view_weights): view_weights = torch.cat(view_weights_list, dim=1) # [B,4,H,W], 4 is the number of source views if is_inverse: # depth regression: inverse depth regression depth_index = torch.arange(0, num_depth, 1, device=device).view(1, num_depth, 1, 1) depth_index = torch.sum(depth_index * score, dim=1) inverse_min_depth = 1.0 / depth_sample[:, -1, :, :] inverse_max_depth = 1.0 / depth_sample[:, 0, :, :] depth_sample = inverse_max_depth + depth_index / (num_depth - 1) * (inverse_min_depth - inverse_max_depth) depth_sample = 1.0 / depth_sample else: # 
class PatchMatch(nn.Module):
    """Patchmatch module: initialization, adaptive propagation, and adaptive evaluation."""

    def __init__(
        self,
        propagation_out_range: int = 2,
        patchmatch_iteration: int = 2,
        patchmatch_num_sample: int = 16,
        patchmatch_interval_scale: float = 0.025,
        num_feature: int = 64,
        G: int = 8,
        propagate_neighbors: int = 16,
        evaluate_neighbors: int = 9,
        stage: int = 3,
    ) -> None:
        """Initialize method

        Args:
            propagation_out_range: range of propagation out,
            patchmatch_iteration: number of iterations in patchmatch,
            patchmatch_num_sample: number of samples in patchmatch,
            patchmatch_interval_scale: interval scale,
            num_feature: number of features,
            G: the feature channels of input will be divided evenly into G groups,
            propagate_neighbors: number of neighbors to be sampled in propagation,
            stage: number of stage,
            evaluate_neighbors: number of neighbors to be sampled in evaluation,
        """
        super(PatchMatch, self).__init__()

        self.patchmatch_iteration = patchmatch_iteration
        self.patchmatch_interval_scale = patchmatch_interval_scale
        self.propa_num_feature = num_feature
        # group wise correlation
        self.G = G
        self.stage = stage
        self.dilation = propagation_out_range
        self.propagate_neighbors = propagate_neighbors
        self.evaluate_neighbors = evaluate_neighbors
        # Using dictionary instead of Enum since TorchScript cannot recognize and export it correctly
        self.grid_type = {"propagation": 1, "evaluation": 2}

        self.depth_initialization = DepthInitialization(patchmatch_num_sample)
        self.propagation = Propagation()
        self.evaluation = Evaluation(self.G)

        # adaptive propagation: last iteration on stage 1 does not have propagation,
        # but we still define this for TorchScript export compatibility
        self.propa_conv = nn.Conv2d(
            in_channels=self.propa_num_feature,
            out_channels=max(2 * self.propagate_neighbors, 1),
            kernel_size=3,
            stride=1,
            padding=self.dilation,
            dilation=self.dilation,
            bias=True,
        )
        # zero-initialized so the learned offsets start as the fixed pattern
        nn.init.constant_(self.propa_conv.weight, 0.0)
        nn.init.constant_(self.propa_conv.bias, 0.0)

        # adaptive spatial cost aggregation (adaptive evaluation)
        self.eval_conv = nn.Conv2d(
            in_channels=self.propa_num_feature,
            out_channels=2 * self.evaluate_neighbors,
            kernel_size=3,
            stride=1,
            padding=self.dilation,
            dilation=self.dilation,
            bias=True,
        )
        nn.init.constant_(self.eval_conv.weight, 0.0)
        nn.init.constant_(self.eval_conv.bias, 0.0)
        self.feature_weight_net = FeatureWeightNet(self.evaluate_neighbors, self.G)

    def get_grid(
        self, grid_type: int, batch: int, height: int, width: int, offset: torch.Tensor, device: torch.device
    ) -> torch.Tensor:
        """Compute the offset for adaptive propagation or spatial cost aggregation in adaptive evaluation

        Args:
            grid_type: type of grid - propagation (1) or evaluation (2)
            batch: batch size
            height: grid height
            width: grid width
            offset: grid offset
            device: device on which to place tensor

        Returns:
            generated grid: in the shape of [batch, propagate_neighbors*H, W, 2]
        """
        # fixed sampling pattern; the learned `offset` is added on top of it
        if grid_type == self.grid_type["propagation"]:
            if self.propagate_neighbors == 4:  # if 4 neighbors to be sampled in propagation
                original_offset = [[-self.dilation, 0], [0, -self.dilation], [0, self.dilation], [self.dilation, 0]]
            elif self.propagate_neighbors == 8:  # if 8 neighbors to be sampled in propagation
                original_offset = [
                    [-self.dilation, -self.dilation],
                    [-self.dilation, 0],
                    [-self.dilation, self.dilation],
                    [0, -self.dilation],
                    [0, self.dilation],
                    [self.dilation, -self.dilation],
                    [self.dilation, 0],
                    [self.dilation, self.dilation],
                ]
            elif self.propagate_neighbors == 16:  # if 16 neighbors to be sampled in propagation
                original_offset = [
                    [-self.dilation, -self.dilation],
                    [-self.dilation, 0],
                    [-self.dilation, self.dilation],
                    [0, -self.dilation],
                    [0, self.dilation],
                    [self.dilation, -self.dilation],
                    [self.dilation, 0],
                    [self.dilation, self.dilation],
                ]
                # 16 = the 8-neighborhood plus the same pattern at double distance
                for i in range(len(original_offset)):
                    offset_x, offset_y = original_offset[i]
                    original_offset.append([2 * offset_x, 2 * offset_y])
            else:
                raise NotImplementedError
        elif grid_type == self.grid_type["evaluation"]:
            dilation = self.dilation - 1  # dilation of evaluation is a little smaller than propagation
            if self.evaluate_neighbors == 9:  # if 9 neighbors to be sampled in evaluation
                original_offset = [
                    [-dilation, -dilation],
                    [-dilation, 0],
                    [-dilation, dilation],
                    [0, -dilation],
                    [0, 0],
                    [0, dilation],
                    [dilation, -dilation],
                    [dilation, 0],
                    [dilation, dilation],
                ]
            elif self.evaluate_neighbors == 17:  # if 17 neighbors to be sampled in evaluation
                original_offset = [
                    [-dilation, -dilation],
                    [-dilation, 0],
                    [-dilation, dilation],
                    [0, -dilation],
                    [0, 0],
                    [0, dilation],
                    [dilation, -dilation],
                    [dilation, 0],
                    [dilation, dilation],
                ]
                # 17 = the 3x3 window plus the non-center offsets at double distance
                for i in range(len(original_offset)):
                    offset_x, offset_y = original_offset[i]
                    if offset_x != 0 or offset_y != 0:
                        original_offset.append([2 * offset_x, 2 * offset_y])
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError

        with torch.no_grad():
            y_grid, x_grid = torch.meshgrid(
                [
                    torch.arange(0, height, dtype=torch.float32, device=device),
                    torch.arange(0, width, dtype=torch.float32, device=device),
                ]
            )
            y_grid, x_grid = y_grid.contiguous().view(height * width), x_grid.contiguous().view(height * width)
            xy = torch.stack((x_grid, y_grid))  # [2, H*W]
            xy = torch.unsqueeze(xy, 0).repeat(batch, 1, 1)  # [B, 2, H*W]

        xy_list = []
        for i in range(len(original_offset)):
            original_offset_y, original_offset_x = original_offset[i]
            offset_x = original_offset_x + offset[:, 2 * i, :].unsqueeze(1)
            offset_y = original_offset_y + offset[:, 2 * i + 1, :].unsqueeze(1)
            xy_list.append((xy + torch.cat((offset_x, offset_y), dim=1)).unsqueeze(2))

        xy = torch.cat(xy_list, dim=2)  # [B, 2, 9, H*W]
        del xy_list
        del x_grid
        del y_grid

        # normalize to [-1, 1] for F.grid_sample
        x_normalized = xy[:, 0, :, :] / ((width - 1) / 2) - 1
        y_normalized = xy[:, 1, :, :] / ((height - 1) / 2) - 1
        del xy
        grid = torch.stack((x_normalized, y_normalized), dim=3)  # [B, 9, H*W, 2]
        del x_normalized
        del y_normalized
        return grid.view(batch, len(original_offset) * height, width, 2)

    def forward(
        self,
        ref_feature: torch.Tensor,
        src_features: List[torch.Tensor],
        ref_proj: torch.Tensor,
        src_projs: List[torch.Tensor],
        depth_min: torch.Tensor,
        depth_max: torch.Tensor,
        depth: torch.Tensor,
        view_weights: torch.Tensor = torch.empty(0),
    ) -> Tuple[List[torch.Tensor], torch.Tensor, torch.Tensor]:
        """Forward method for PatchMatch

        Args:
            ref_feature: feature from reference view, (B, C, H, W)
            src_features: features from (Nview-1) source views, (Nview-1) * (B, C, H, W), where Nview is the
                number of input images (or views) of PatchmatchNet
            ref_proj: projection matrix of reference view, (B, 4, 4)
            src_projs: source matrices of source views, (Nview-1) * (B, 4, 4), where Nview is the number of
                input images (or views) of PatchmatchNet
            depth_min: minimum virtual depth, (B,)
            depth_max: maximum virtual depth, (B,)
            depth: current depth map, (B,1,H,W) or None
            view_weights: Tensor to store weights of source views, in shape of (B,Nview-1,H,W),
                Nview-1 represents the number of source views

        Returns:
            depth_samples: list of depth maps from each patchmatch iteration, Niter * (B,1,H,W)
            score: evaluated probabilities, (B,Ndepth,H,W)
            view_weights: Tensor to store weights of source views, in shape of (B,Nview-1,H,W),
                Nview-1 represents the number of source views
        """
        score = torch.empty(0)
        depth_samples = []
        device = ref_feature.device
        batch, _, height, width = ref_feature.size()

        # the learned additional 2D offsets for adaptive propagation
        propa_grid = torch.empty(0)
        if self.propagate_neighbors > 0 and not (self.stage == 1 and self.patchmatch_iteration == 1):
            # last iteration on stage 1 does not have propagation (photometric consistency filtering)
            propa_offset = self.propa_conv(ref_feature).view(batch, 2 * self.propagate_neighbors, height * width)
            propa_grid = self.get_grid(self.grid_type["propagation"], batch, height, width, propa_offset, device)

        # the learned additional 2D offsets for adaptive spatial cost aggregation (adaptive evaluation)
        eval_offset = self.eval_conv(ref_feature).view(batch, 2 * self.evaluate_neighbors, height * width)
        eval_grid = self.get_grid(self.grid_type["evaluation"], batch, height, width, eval_offset, device)

        # [B, evaluate_neighbors, H, W]
        feature_weight = self.feature_weight_net(ref_feature.detach(), eval_grid)

        depth_sample = depth
        del depth
        # NOTE: loop variable renamed from `iter` (shadowed the builtin); behavior unchanged
        for step in range(1, self.patchmatch_iteration + 1):
            is_inverse = self.stage == 1 and step == self.patchmatch_iteration

            # first iteration on stage 3, random initialization (depth is empty), no adaptive propagation
            # subsequent iterations, local perturbation based on previous result, [B,Ndepth,H,W]
            depth_sample = self.depth_initialization(
                min_depth=depth_min,
                max_depth=depth_max,
                height=height,
                width=width,
                depth_interval_scale=self.patchmatch_interval_scale,
                device=device,
                depth=depth_sample,
            )

            # adaptive propagation
            if self.propagate_neighbors > 0 and not (self.stage == 1 and step == self.patchmatch_iteration):
                # last iteration on stage 1 does not have propagation (photometric consistency filtering)
                depth_sample = self.propagation(depth_sample=depth_sample, grid=propa_grid)

            # weights for adaptive spatial cost aggregation in adaptive evaluation, [B,Ndepth,N_neighbors_eval,H,W]
            weight = depth_weight(
                depth_sample=depth_sample.detach(),
                depth_min=depth_min,
                depth_max=depth_max,
                grid=eval_grid.detach(),
                patchmatch_interval_scale=self.patchmatch_interval_scale,
                neighbors=self.evaluate_neighbors,
            ) * feature_weight.unsqueeze(1)
            weight = weight / torch.sum(weight, dim=2).unsqueeze(2)  # [B,Ndepth,1,H,W]

            # evaluation, outputs regressed depth map and pixel-wise view weights which will
            # be used for subsequent iterations
            depth_sample, score, view_weights = self.evaluation(
                ref_feature=ref_feature,
                src_features=src_features,
                ref_proj=ref_proj,
                src_projs=src_projs,
                depth_sample=depth_sample,
                grid=eval_grid,
                weight=weight,
                view_weights=view_weights,
                is_inverse=is_inverse,
            )

            depth_sample = depth_sample.unsqueeze(1)
            depth_samples.append(depth_sample)

        # Restored: the source dump truncated this statement after `return`;
        # the value is fixed by the declared return annotation and docstring.
        return depth_samples, score, view_weights
depth_samples, score, view_weights class SimilarityNet(nn.Module): """Similarity Net, used in Evaluation module (adaptive evaluation step) 1. Do 1x1x1 convolution on aggregated cost [B, G, Ndepth, H, W] among all the source views, where G is the number of groups 2. Perform adaptive spatial cost aggregation to get final cost (scores) """ def __init__(self, G: int) -> None: """Initialize method Args: G: the feature channels of input will be divided evenly into G groups """ super(SimilarityNet, self).__init__() self.conv0 = ConvBnReLU3D(in_channels=G, out_channels=16, kernel_size=1, stride=1, pad=0) self.conv1 = ConvBnReLU3D(in_channels=16, out_channels=8, kernel_size=1, stride=1, pad=0) self.similarity = nn.Conv3d(in_channels=8, out_channels=1, kernel_size=1, stride=1, padding=0) def forward(self, x1: torch.Tensor, grid: torch.Tensor, weight: torch.Tensor) -> torch.Tensor: """Forward method for SimilarityNet Args: x1: [B, G, Ndepth, H, W], where G is the number of groups, aggregated cost among all the source views with pixel-wise view weight grid: position of sampling points in adaptive spatial cost aggregation, (B, evaluate_neighbors*H, W, 2) weight: weight of sampling points in adaptive spatial cost aggregation, combination of feature weight and depth weight, [B,Ndepth,1,H,W] Returns: final cost: in the shape of [B,Ndepth,H,W] """ batch, G, num_depth, height, width = x1.size() num_neighbors = grid.size()[1] // height # [B,Ndepth,num_neighbors,H,W] x1 = F.grid_sample( input=self.similarity(self.conv1(self.conv0(x1))).squeeze(1), grid=grid, mode="bilinear", padding_mode="border", align_corners=False ).view(batch, num_depth, num_neighbors, height, width) return torch.sum(x1 * weight, dim=2) class FeatureWeightNet(nn.Module): """FeatureWeight Net: Called at the beginning of patchmatch, to calculate feature weights based on similarity of features of sampling points and center pixel. The feature weights is used to implement adaptive spatial cost aggregation. 
    """

    def __init__(self, neighbors: int = 9, G: int = 8) -> None:
        """Initialize method

        Args:
            neighbors: number of neighbors to be sampled
            G: the feature channels of input will be divided evenly into G groups
        """
        super(FeatureWeightNet, self).__init__()
        self.neighbors = neighbors
        self.G = G

        self.conv0 = ConvBnReLU3D(in_channels=G, out_channels=16, kernel_size=1, stride=1, pad=0)
        self.conv1 = ConvBnReLU3D(in_channels=16, out_channels=8, kernel_size=1, stride=1, pad=0)
        self.similarity = nn.Conv3d(in_channels=8, out_channels=1, kernel_size=1, stride=1, padding=0)
        self.output = nn.Sigmoid()

    def forward(self, ref_feature: torch.Tensor, grid: torch.Tensor) -> torch.Tensor:
        """Forward method for FeatureWeightNet

        Args:
            ref_feature: reference feature map, [B,C,H,W]
            grid: position of sampling points in adaptive spatial cost aggregation, (B, evaluate_neighbors*H, W, 2)

        Returns:
            weight based on similarity of features of sampling points and center pixel, [B,Neighbor,H,W]
        """
        batch, feature_channel, height, width = ref_feature.size()

        # sample the neighbor features at the (learned) grid positions, split into G groups
        weight = F.grid_sample(
            ref_feature, grid, mode="bilinear", padding_mode="border", align_corners=False
        ).view(batch, self.G, feature_channel // self.G, self.neighbors, height, width)

        # [B,G,C//G,H,W]
        ref_feature = ref_feature.view(batch, self.G, feature_channel // self.G, height, width).unsqueeze(3)
        # group-wise correlation between each neighbor and the center pixel
        # [B,G,Neighbor,H,W]
        weight = (weight * ref_feature).mean(2)
        # [B,Neighbor,H,W]
        return self.output(self.similarity(self.conv1(self.conv0(weight))).squeeze(1))


def depth_weight(
    depth_sample: torch.Tensor,
    depth_min: torch.Tensor,
    depth_max: torch.Tensor,
    grid: torch.Tensor,
    patchmatch_interval_scale: float,
    neighbors: int,
) -> torch.Tensor:
    """Calculate depth weight.

    1. Adaptive spatial cost aggregation
    2. Weight based on depth difference of sampling points and center pixel

    Args:
        depth_sample: sample depth map, (B,Ndepth,H,W)
        depth_min: minimum virtual depth, (B,)
        depth_max: maximum virtual depth, (B,)
        grid: position of sampling points in adaptive spatial cost aggregation, (B, neighbors*H, W, 2)
        patchmatch_interval_scale: patchmatch interval scale
        neighbors: number of neighbors to be sampled in evaluation

    Returns:
        depth weight
    """
    batch, num_depth, height, width = depth_sample.size()
    inverse_depth_min = 1.0 / depth_min
    inverse_depth_max = 1.0 / depth_max

    # normalization: work in normalized inverse-depth space
    x = 1.0 / depth_sample
    del depth_sample
    x = (x - inverse_depth_max.view(batch, 1, 1, 1)) / (inverse_depth_min - inverse_depth_max).view(batch, 1, 1, 1)

    x1 = F.grid_sample(
        x, grid, mode="bilinear", padding_mode="border", align_corners=False
    ).view(batch, num_depth, neighbors, height, width)
    del grid

    # [B,Ndepth,N_neighbors,H,W]: absolute inverse-depth difference in interval units
    x1 = torch.abs(x1 - x.unsqueeze(2)) / patchmatch_interval_scale
    del x

    # sigmoid output approximate to 1 when x=4
    return torch.sigmoid(4.0 - 2.0 * x1.clamp(min=0, max=4)).detach()


class PixelwiseNet(nn.Module):
    """Pixelwise Net: A simple pixel-wise view weight network, composed of 1x1x1 convolution layers and sigmoid
    nonlinearities, takes the initial set of similarities to output a number between 0 and 1 per pixel as
    estimated pixel-wise view weight.

    1. The Pixelwise Net is used in adaptive evaluation step
    2. The similarity is calculated by ref_feature and other source_features warped by differentiable_warping
    3. The learned pixel-wise view weight is estimated in the first iteration of Patchmatch and kept fixed in the
    matching cost computation.
    """

    def __init__(self, G: int) -> None:
        """Initialize method

        Args:
            G: the feature channels of input will be divided evenly into G groups
        """
        super(PixelwiseNet, self).__init__()
        self.conv0 = ConvBnReLU3D(in_channels=G, out_channels=16, kernel_size=1, stride=1, pad=0)
        self.conv1 = ConvBnReLU3D(in_channels=16, out_channels=8, kernel_size=1, stride=1, pad=0)
        self.conv2 = nn.Conv3d(in_channels=8, out_channels=1, kernel_size=1, stride=1, padding=0)
        self.output = nn.Sigmoid()

    def forward(self, x1: torch.Tensor) -> torch.Tensor:
        """Forward method for PixelwiseNet

        Args:
            x1: pixel-wise view weight, [B, G, Ndepth, H, W], where G is the number of groups

        Returns:
            max sigmoid response over the depth dimension, [B,1,H,W]
        """
        # [B,1,H,W]
        return torch.max(self.output(self.conv2(self.conv1(self.conv0(x1))).squeeze(1)), dim=1)[0].unsqueeze(1)


# --- tensorboard / metric utilities (originally a separate utils module) ---
from typing import Any, Callable, Union, Dict

import numpy as np
import torchvision.utils as vutils
import torch
import torch.utils.tensorboard as tb


def print_args(args: Any) -> None:
    """Utility to print parsed arguments

    Args:
        args: arguments to print out
    """
    print("################################  args  ################################")
    for k, v in args.__dict__.items():
        print("{0: <10}\t{1: <30}\t{2: <20}".format(k, str(v), str(type(v))))
    print("########################################################################")


def make_nograd_func(func: Callable) -> Callable:
    """Utilities to make function no gradient

    Args:
        func: input function

    Returns:
        no gradient function wrapper for input function
    """
    def wrapper(*f_args, **f_kwargs):
        with torch.no_grad():
            ret = func(*f_args, **f_kwargs)
        return ret

    return wrapper


def make_recursive_func(func: Callable) -> Callable:
    """Convert a function into recursive style to handle nested dict/list/tuple variables

    Args:
        func: input function

    Returns:
        recursive style function
    """
    # NOTE(review): parameter `vars` shadows the builtin; kept as-is.
    def wrapper(vars):
        if isinstance(vars, list):
            return [wrapper(x) for x in vars]
        elif isinstance(vars, tuple):
            return tuple([wrapper(x) for x in vars])
        elif isinstance(vars, dict):
            return {k: wrapper(v) for k, v in vars.items()}
        else:
            return func(vars)

    return wrapper


@make_recursive_func
def tensor2float(vars: Any) -> float:
    """Convert (possibly nested) tensor values to python floats"""
    if isinstance(vars, float):
        return vars
    elif isinstance(vars, torch.Tensor):
        return vars.data.item()
    else:
        raise NotImplementedError("invalid input type {} for tensor2float".format(type(vars)))


@make_recursive_func
def tensor2numpy(vars: Any) -> np.ndarray:
    """Convert (possibly nested) tensors to numpy arrays (detached copies)"""
    if isinstance(vars, np.ndarray):
        return vars
    elif isinstance(vars, torch.Tensor):
        return vars.detach().cpu().numpy().copy()
    else:
        raise NotImplementedError("invalid input type {} for tensor2numpy".format(type(vars)))


@make_recursive_func
def tocuda(vars: Any) -> Union[str, torch.Tensor]:
    """Move (possibly nested) tensors to the evaluation device.

    NOTE(review): despite the name, this implementation maps tensors to CPU
    (`.cpu()`), matching the CPU-only inference path used by `save_depth` below
    (`model.cpu()`, `map_location='cpu'`). Restore `.cuda()` for GPU inference.
    """
    if isinstance(vars, torch.Tensor):
        return vars.cpu()
    elif isinstance(vars, str):
        return vars
    else:
        raise NotImplementedError("invalid input type {} for tocuda".format(type(vars)))


def save_scalars(logger: tb.SummaryWriter, mode: str, scalar_dict: Dict[str, Any], global_step: int) -> None:
    """Log values stored in the scalar dictionary

    Args:
        logger: tensorboard summary writer
        mode: mode name used in writing summaries
        scalar_dict: python dictionary stores the key and value pairs to be recorded
        global_step: step index where the logger should write
    """
    scalar_dict = tensor2float(scalar_dict)
    for key, value in scalar_dict.items():
        if not isinstance(value, (list, tuple)):
            name = "{}/{}".format(mode, key)
            logger.add_scalar(name, value, global_step)
        else:
            for idx in range(len(value)):
                name = "{}/{}_{}".format(mode, key, idx)
                logger.add_scalar(name, value[idx], global_step)


def save_images(logger: tb.SummaryWriter, mode: str, images_dict: Dict[str, Any], global_step: int) -> None:
    """Log images stored in the image dictionary

    Args:
        logger: tensorboard summary writer
        mode: mode name used in writing summaries
        images_dict: python dictionary stores the key and image pairs to be recorded
        global_step: step index where the logger should write
    """
    images_dict = tensor2numpy(images_dict)

    def preprocess(name, img):
        if not (len(img.shape) == 3 or len(img.shape) == 4):
            raise NotImplementedError("invalid img shape {}:{} in save_images".format(name, img.shape))
        if len(img.shape) == 3:
            img = img[:, np.newaxis, :, :]
        # only log the first sample of the batch
        img = torch.from_numpy(img[:1])
        return vutils.make_grid(img, padding=0, nrow=1, normalize=True, scale_each=True)

    for key, value in images_dict.items():
        if not isinstance(value, (list, tuple)):
            name = "{}/{}".format(mode, key)
            logger.add_image(name, preprocess(name, value), global_step)
        else:
            for idx in range(len(value)):
                name = "{}/{}_{}".format(mode, key, idx)
                logger.add_image(name, preprocess(name, value[idx]), global_step)


class DictAverageMeter:
    """Wrapper class for dictionary variables that require the average value"""

    def __init__(self) -> None:
        """Initialization method"""
        # running sums per key; mean() divides by update count
        self.data: Dict[Any, float] = {}
        self.count = 0

    def update(self, new_input: Dict[Any, float]) -> None:
        """Update the stored dictionary with new input data

        Args:
            new_input: new data to update self.data
        """
        self.count += 1
        if len(self.data) == 0:
            for k, v in new_input.items():
                if not isinstance(v, float):
                    raise NotImplementedError("invalid data {}: {}".format(k, type(v)))
                self.data[k] = v
        else:
            for k, v in new_input.items():
                if not isinstance(v, float):
                    raise NotImplementedError("invalid data {}: {}".format(k, type(v)))
                self.data[k] += v

    def mean(self) -> Any:
        """Return the average value of values stored in self.data"""
        return {k: v / self.count for k, v in self.data.items()}


def compute_metrics_for_each_image(metric_func: Callable) -> Callable:
    """A wrapper to compute metrics for each image individually"""
    def wrapper(depth_est, depth_gt, mask, *args):
        batch_size = depth_gt.shape[0]
        # NOTE(review): leftover debug print; consider removing.
        print(batch_size)
        # if batch_size < BATCH_SIZE:
        #     break
        results = []
        # compute result one by one
        for idx in range(batch_size):
            ret = metric_func(depth_est[idx], depth_gt[idx], mask[idx], *args)
            results.append(ret)
        return torch.stack(results).mean()

    return wrapper


@make_nograd_func
@compute_metrics_for_each_image
def Thres_metrics(
    depth_est: torch.Tensor, depth_gt: torch.Tensor, mask: torch.Tensor, thres: Union[int, float]
) -> torch.Tensor:
    """Return error rate for where absolute error is larger than threshold.

    Args:
        depth_est: estimated depth map
        depth_gt: ground truth depth map
        mask: mask
        thres: threshold

    Returns:
        error rate: error rate of the depth map
    """
    # if thres is int or float, then True
    assert isinstance(thres, (int, float))
    depth_est, depth_gt = depth_est[mask], depth_gt[mask]
    errors = torch.abs(depth_est - depth_gt)
    err_mask = errors > thres
    return torch.mean(err_mask.float())


# NOTE: please do not use this to build up training loss
@make_nograd_func
@compute_metrics_for_each_image
def AbsDepthError_metrics(depth_est: torch.Tensor, depth_gt: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    """Calculate average absolute depth error

    Args:
        depth_est: estimated depth map
        depth_gt: ground truth depth map
        mask: mask
    """
    depth_est, depth_gt = depth_est[mask], depth_gt[mask]
    return torch.mean((depth_est - depth_gt).abs())


"""Utilities for reading and writing images, depth maps, and auxiliary data (cams, pairs) from/to disk."""
import re
import struct
import sys
from typing import Dict, List, Tuple

import cv2
import numpy as np
from PIL import Image


def scale_to_max_dim(image: np.ndarray, max_dim: int) -> Tuple[np.ndarray, int, int]:
    """Scale image to specified max dimension

    Args:
        image: the input image in original size
        max_dim: the max dimension to scale the image down to if smaller than the actual max dimension

    Returns:
        Tuple of scaled image along with original image height and width
    """
    original_height = image.shape[0]
    original_width = image.shape[1]
    scale = max_dim / max(original_height, original_width)
    # only downscale; if max_dim <= 0 or the image is already small enough, return unchanged
    if 0 < scale < 1:
        width = int(scale * original_width)
        height = int(scale * original_height)
        image = cv2.resize(image, (width, height), interpolation=cv2.INTER_LINEAR)

    return image, original_height, original_width


def read_image(filename: str, max_dim: int = -1) -> Tuple[np.ndarray, int, int]:
    """Read image and rescale to specified max dimension (if exists)

    Args:
        filename: image input file path string
        max_dim: max dimension to scale down the image; keep original size if -1

    Returns:
        Tuple of scaled image along with original image height and width
    """
    image = Image.open(filename)
    # scale 0~255 to 0~1
    np_image = np.array(image, dtype=np.float32) / 255.0
    return scale_to_max_dim(np_image, max_dim)


def save_image(filename: str, image: np.ndarray) -> None:
    """Save images including binary mask (bool), float (0<= val <= 1), or int (as-is)

    Args:
        filename: image output file path string
        image: output image array
    """
    if image.dtype == bool:
        image = image.astype(np.uint8) * 255
    elif image.dtype == np.float32 or image.dtype == np.float64:
        image = image * 255
        image = image.astype(np.uint8)
    else:
        image = image.astype(np.uint8)
    Image.fromarray(image).save(filename)


def read_image_dictionary(filename: str) -> Dict[int, str]:
    """Create image dictionary from file; useful for ETH3D dataset reading and conversion.
    Args:
        filename: input dictionary text file path

    Returns:
        Dictionary of image id (int) and corresponding image file name (string)
    """
    image_dict: Dict[int, str] = {}
    with open(filename) as f:
        num_entries = int(f.readline().strip())
        for _ in range(num_entries):
            parts = f.readline().strip().split(' ')
            image_dict[int(parts[0].strip())] = parts[1].strip()
    return image_dict


def read_cam_file(filename: str) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Read camera intrinsics, extrinsics, and depth values (min, max) from text file

    Args:
        filename: cam text file path string

    Returns:
        Tuple with intrinsics matrix (3x3), extrinsics matrix (4x4), and depth params vector
        (min and max) if exists
    """
    with open(filename) as f:
        lines = [line.rstrip() for line in f.readlines()]
    # extrinsics: line [1,5), 4x4 matrix
    extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ').reshape((4, 4))
    # intrinsics: line [7-10), 3x3 matrix
    intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ').reshape((3, 3))
    # depth min and max: line 11
    if len(lines) >= 12:
        depth_params = np.fromstring(lines[11], dtype=np.float32, sep=' ')
    else:
        depth_params = np.empty(0)

    return intrinsics, extrinsics, depth_params


def read_pair_file(filename: str) -> List[Tuple[int, List[int]]]:
    """Read image pairs from text file and output a list of tuples each containing the reference
    image ID and a list of source image IDs

    Args:
        filename: pair text file path string

    Returns:
        List of tuples with reference ID and list of source IDs
    """
    data = []
    with open(filename) as f:
        num_viewpoint = int(f.readline())
        for _ in range(num_viewpoint):
            # ref_view = int(f.readline().rstrip())
            ref_view = int(f.readline().rstrip())
            # print(ref_view)
            # src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]
            src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]
            # print(src_views)
            # NOTE(review): view_ids is computed but never used below — leftover from debugging?
            view_ids = [ref_view] + src_views[:2]
            # print(view_ids)
            if len(src_views) != 0:
                data.append((ref_view, src_views))
    return data


def read_map(path: str, max_dim: int = -1) -> np.ndarray:
    """Read a binary depth map from either PFM or Colmap (bin) format determined by the file
    extension and also scale the map to the max dim if given

    Args:
        path: input depth map file path string
        max_dim: max dimension to scale down the map; keep original size if -1

    Returns:
        Array of depth map values
    """
    if path.endswith('.bin'):
        in_map = read_bin(path)
    elif path.endswith('.pfm'):
        in_map, _ = read_pfm(path)
    else:
        raise Exception('Invalid input format; only pfm and bin are supported')

    return scale_to_max_dim(in_map, max_dim)[0]


def save_map(path: str, data: np.ndarray) -> None:
    """Save binary depth or confidence maps in PFM or Colmap (bin) format determined by the file extension

    Args:
        path: output map file path string
        data: map data array
    """
    if path.endswith('.bin'):
        save_bin(path, data)
    elif path.endswith('.pfm'):
        save_pfm(path, data)
    else:
        raise Exception('Invalid input format; only pfm and bin are supported')


def read_bin(path: str) -> np.ndarray:
    """Read a depth map from a Colmap .bin file

    Args:
        path: .bin file path string

    Returns:
        data: array of shape (H, W, C) representing loaded depth map
    """
    with open(path, 'rb') as fid:
        # header is "width&height&channels&" followed by raw little-endian float32 data
        width, height, channels = np.genfromtxt(fid, delimiter='&', max_rows=1, usecols=(0, 1, 2), dtype=int)
        fid.seek(0)
        num_delimiter = 0
        byte = fid.read(1)
        # skip past the third '&' delimiter to reach the binary payload
        while True:
            if byte == b'&':
                num_delimiter += 1
                if num_delimiter >= 3:
                    break
            byte = fid.read(1)
        data = np.fromfile(fid, np.float32)
    # stored column-major (Fortran order); transpose back to (H, W, C)
    data = data.reshape((width, height, channels), order='F')
    data = np.transpose(data, (1, 0, 2))
    return data


def save_bin(filename: str, data: np.ndarray) -> None:
    """Save a depth map to a Colmap .bin file

    Args:
        filename: output .bin file path string
        data: depth map to save, of shape (H,W) or (H,W,C)
    """
    if data.dtype != np.float32:
        raise Exception('Image data type must be float32.')

    if len(data.shape) == 2:
        height, width = data.shape
        channels = 1
    elif len(data.shape) == 3 and (data.shape[2] == 3 or data.shape[2] == 1):
        height, width, channels = data.shape
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')

    # text header first, then append the binary payload
    with open(filename, 'w') as fid:
        fid.write(str(width) + '&' + str(height) + '&' + str(channels) + '&')

    with open(filename, 'ab') as fid:
        if len(data.shape) == 2:
            image_trans = np.transpose(data, (1, 0))
        else:
            image_trans = np.transpose(data, (1, 0, 2))
        data_1d = image_trans.reshape(-1, order='F')
        data_list = data_1d.tolist()
        endian_character = '<'
        format_char_sequence = ''.join(['f'] * len(data_list))
        byte_data = struct.pack(endian_character + format_char_sequence, *data_list)
        fid.write(byte_data)


def read_pfm(filename: str) -> Tuple[np.ndarray, float]:
    """Read a depth map from a .pfm file

    Args:
        filename: .pfm file path string

    Returns:
        data: array of shape (H, W, C) representing loaded depth map
        scale: float to recover actual depth map pixel values
    """
    file = open(filename, "rb")  # treat as binary and read-only

    header = file.readline().decode("utf-8").rstrip()
    if header == "PF":
        color = True
    elif header == "Pf":  # depth is Pf
        color = False
    else:
        raise Exception("Not a PFM file.")

    dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("utf-8"))
    if dim_match:
        width, height = map(int, dim_match.groups())
    else:
        raise Exception("Malformed PFM header.")

    scale = float(file.readline().rstrip())
    if scale < 0:  # little-endian
        endian = "<"
        scale = -scale
    else:
        endian = ">"  # big-endian

    data = np.fromfile(file, endian + "f")
    shape = (height, width, 3) if color else (height, width, 1)

    # PFM stores rows bottom-to-top; flip to conventional top-to-bottom
    data = np.reshape(data, shape)
    data = np.flipud(data)
    file.close()
    return data, scale


def save_pfm(filename: str, image: np.ndarray, scale: float = 1) -> None:
    """Save a depth map to a .pfm file

    Args:
        filename: output .pfm file path string
        image: depth map to save, of shape (H,W) or (H,W,C)
        scale: scale parameter to save
    """
    file = open(filename, "wb")
    color = None

    # PFM expects bottom-to-top row order
    image = np.flipud(image)

    if image.dtype.name != "float32":
        raise Exception("Image dtype must be float32.")

    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")

    file.write("PF\n".encode("utf-8") if color else "Pf\n".encode("utf-8"))
    file.write("{} {}\n".format(image.shape[1], image.shape[0]).encode("utf-8"))

    endian = image.dtype.byteorder
    # negative scale signals little-endian data
    if endian == "<" or endian == "=" and sys.byteorder == "little":
        scale = -scale

    file.write(("%f\n" % scale).encode("utf-8"))

    image.tofile(file)
    file.close()


# --- evaluation script: predict depth maps, filter, and fuse ---
import argparse
import os
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
import time

# from datasets import find_dataset_def
# from models import *
# from utils import *
import sys
# from datasets.data_io import read_cam_file, read_pair_file, read_image, read_map, save_image, save_map
import cv2
from plyfile import PlyData, PlyElement

cudnn.benchmark = True

parser = argparse.ArgumentParser(description='Predict depth, filter, and fuse')
parser.add_argument('--model', default='PatchmatchNet', help='select model')

parser.add_argument('--dataset', default='eth3d', help='select dataset')
parser.add_argument('--testpath', help='testing data path')
parser.add_argument('--testlist', help='testing scan list')
parser.add_argument('--split', default='test', help='select data')

parser.add_argument('--batch_size', type=int, default=1, help='testing batch size')
parser.add_argument('--n_views', type=int, default=5, help='num of view')

parser.add_argument('--loadckpt', default=None, help='load a specific checkpoint')
parser.add_argument('--outdir', default='./outputs', help='output dir')
parser.add_argument('--display', action='store_true', help='display depth images and masks')

parser.add_argument('--patchmatch_iteration', nargs='+', type=int, default=[1, 2, 2],
                    help='num of iteration of patchmatch on stages 1,2,3')
parser.add_argument('--patchmatch_num_sample', nargs='+', type=int, default=[8, 8, 16],
                    help='num of generated samples in local perturbation on stages 1,2,3')
parser.add_argument('--patchmatch_interval_scale', nargs='+', type=float, default=[0.005, 0.0125, 0.025],
                    help='normalized interval in inverse depth range to generate samples in local perturbation')
parser.add_argument('--patchmatch_range', nargs='+', type=int, default=[6, 4, 2],
                    help='fixed offset of sampling points for propogation of patchmatch on stages 1,2,3')
parser.add_argument('--propagate_neighbors', nargs='+', type=int, default=[0, 8, 16],
                    help='num of neighbors for adaptive propagation on stages 1,2,3')
parser.add_argument('--evaluate_neighbors', nargs='+', type=int, default=[9, 9, 9],
                    help='num of neighbors for adaptive matching cost aggregation of adaptive evaluation on stages 1,2,3')

parser.add_argument('--geo_pixel_thres', type=float, default=1,
                    help='pixel threshold for geometric consistency filtering')
parser.add_argument('--geo_depth_thres', type=float, default=0.01,
                    help='depth threshold for geometric consistency filtering')
parser.add_argument('--photo_thres', type=float, default=0.8,
                    help='threshold for photometric consistency filtering')

# parse arguments and check
args = parser.parse_args()
print("argv:", sys.argv[1:])
print_args(args)


# run MVS model to save depth maps
def save_depth():
    """Run PatchmatchNet over the test set on CPU and save per-view depth and confidence maps (.pfm)."""
    # dataset, dataloader
    # NOTE(review): `find_dataset_def` import is commented out above — this raises NameError
    # at runtime unless the symbol is defined earlier in this file; confirm.
    mvs_dataset = find_dataset_def(args.dataset)
    test_dataset = mvs_dataset(args.testpath, args.n_views)
    image_loader = DataLoader(test_dataset, args.batch_size, shuffle=False, num_workers=4, drop_last=False)
    # image_loader = DataLoader(test_dataset, args.batch_size, shuffle=False, drop_last=False)

    # model
    model = PatchmatchNet(
        patchmatch_interval_scale=args.patchmatch_interval_scale,
        propagation_range=args.patchmatch_range,
        patchmatch_iteration=args.patchmatch_iteration,
        patchmatch_num_sample=args.patchmatch_num_sample,
        propagate_neighbors=args.propagate_neighbors,
        evaluate_neighbors=args.evaluate_neighbors
    )
    model = nn.DataParallel(model)
    # CPU-only inference (matches tocuda's .cpu() mapping and map_location below)
    model.cpu()

    # load checkpoint file specified by args.loadckpt
    print("loading model {}".format(args.loadckpt))
    state_dict = torch.load(args.loadckpt, map_location=torch.device('cpu'))
    model.load_state_dict(state_dict['model'], strict=False)
    model.eval()

    with torch.no_grad():
        for batch_idx, sample in enumerate(image_loader):
            # print(batch_idx)
            start_time = time.time()
            sample_cuda = tocuda(sample)
            refined_depth, confidence, _ = model(
                sample_cuda["imgs"], sample_cuda["proj_matrices"],
                sample_cuda["depth_min"], sample_cuda["depth_max"])

            refined_depth = tensor2numpy(refined_depth)
            confidence = tensor2numpy(confidence)
            del sample_cuda
            print('Iter {}/{}, time = {:.3f}'.format(batch_idx, len(image_loader), time.time() - start_time))
            filenames = sample["filename"]

            # save depth maps and confidence maps
            for filename, depth_est, photometric_confidence in zip(filenames, refined_depth, confidence):
                depth_filename = os.path.join(args.outdir, filename.format('depth_est', '.pfm'))
                confidence_filename = os.path.join(args.outdir, filename.format('confidence', '.pfm'))
                os.makedirs(depth_filename.rsplit('/', 1)[0], exist_ok=True)
                os.makedirs(confidence_filename.rsplit('/', 1)[0], exist_ok=True)
                # save depth maps
                depth_est = np.squeeze(depth_est, 0)
                save_map(depth_filename, depth_est)
                # save confidence maps
                save_map(confidence_filename, photometric_confidence)


# project the reference point cloud into the source view, then project back
def reproject_with_depth(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src):
    """Project reference-view pixels into the source view and back using both depth maps.

    Returns the reprojected reference depth plus the reprojected and source-view pixel coordinates,
    all as (H, W) float32 arrays.
    """
    width, height = depth_ref.shape[1], depth_ref.shape[0]
    # step1. project reference pixels to the source view
    # reference view x, y
    x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))
    x_ref, y_ref = x_ref.reshape([-1]), y_ref.reshape([-1])
    # reference 3D space
    xyz_ref = np.matmul(np.linalg.inv(intrinsics_ref),
                        np.vstack((x_ref, y_ref, np.ones_like(x_ref))) * depth_ref.reshape([-1]))
    # source 3D space
    xyz_src = np.matmul(np.matmul(extrinsics_src, np.linalg.inv(extrinsics_ref)),
                        np.vstack((xyz_ref, np.ones_like(x_ref))))[:3]
    # source view x, y
    k_xyz_src = np.matmul(intrinsics_src, xyz_src)
    xy_src = k_xyz_src[:2] / k_xyz_src[2:3]

    # step2. reproject the source view points with source view depth estimation
    # find the depth estimation of the source view
    x_src = xy_src[0].reshape([height, width]).astype(np.float32)
    y_src = xy_src[1].reshape([height, width]).astype(np.float32)
    sampled_depth_src = cv2.remap(depth_src, x_src, y_src, interpolation=cv2.INTER_LINEAR)
    # mask = sampled_depth_src > 0

    # source 3D space
    # NOTE that we should use sampled source-view depth_here to project back
    xyz_src = np.matmul(np.linalg.inv(intrinsics_src),
                        np.vstack((xy_src, np.ones_like(x_ref))) * sampled_depth_src.reshape([-1]))
    # reference 3D space
    xyz_reprojected = np.matmul(np.matmul(extrinsics_ref, np.linalg.inv(extrinsics_src)),
                                np.vstack((xyz_src, np.ones_like(x_ref))))[:3]
    # source view x, y, depth
    depth_reprojected = xyz_reprojected[2].reshape([height, width]).astype(np.float32)
    k_xyz_reprojected = np.matmul(intrinsics_ref, xyz_reprojected)
    xy_reprojected = k_xyz_reprojected[:2] / k_xyz_reprojected[2:3]
    x_reprojected = xy_reprojected[0].reshape([height, width]).astype(np.float32)
    y_reprojected = xy_reprojected[1].reshape([height, width]).astype(np.float32)

    return depth_reprojected, x_reprojected, y_reprojected, x_src, y_src


def check_geometric_consistency(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src,
                                geo_pixel_thres, geo_depth_thres):
    """Mask reference pixels whose forward/backward reprojection error is within the given
    pixel and relative-depth thresholds; returns (mask, masked reprojected depth, src x, src y)."""
    width, height = depth_ref.shape[1], depth_ref.shape[0]
    x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))
    depth_reprojected, x2d_reprojected, y2d_reprojected, x2d_src, y2d_src = reproject_with_depth(
        depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src)
    # print(depth_ref.shape)
    # print(depth_reprojected.shape)
    # check |p_reproj-p_1| < 1
    dist = np.sqrt((x2d_reprojected - x_ref) ** 2 + (y2d_reprojected - y_ref) ** 2)

    # check |d_reproj-d_1| / d_1 < 0.01
    # depth_ref = np.squeeze(depth_ref, 2)
    depth_diff = np.abs(depth_reprojected - depth_ref)
    relative_depth_diff = depth_diff / depth_ref

    mask = np.logical_and(dist < geo_pixel_thres, relative_depth_diff < geo_depth_thres)
    depth_reprojected[~mask] = 0

    return mask, depth_reprojected, x2d_src, y2d_src


def filter_depth(
        scan_folder, out_folder, plyfilename, geo_pixel_thres, geo_depth_thres, photo_thres, img_wh, geo_mask_thres):
    """Filter per-view depth maps by photometric and geometric consistency and fuse them
    into a point cloud (continues beyond this excerpt)."""
    # the pair file
    pair_file = os.path.join(scan_folder, "pair.txt")
    # for the final point cloud
    vertexs = []
    vertex_colors = []

    pair_data = read_pair_file(pair_file)

    # for each reference view and the corresponding source views
    for ref_view, src_views in pair_data:
        # load the reference image
        ref_img, original_h, original_w = read_image(
            os.path.join(scan_folder, 'images/{:0>8}.jpg'.format(ref_view)), max(img_wh))
        # NOTE(review): read_cam_file returns a 3-tuple, but the [0:2] slice yields only 2 values
        # while 3 names are unpacked — this raises ValueError at runtime; drop the slice or
        # unpack into two names. Same issue for the source view below.
        ref_intrinsics, ref_extrinsics, _ = read_cam_file(
            os.path.join(scan_folder, 'cams/{:0>8}_cam.txt'.format(ref_view)))[0:2]
        # print([ref_intrinsics,ref_extrinsics])
        # rescale intrinsics to the working resolution
        ref_intrinsics[0] *= img_wh[0]/original_w
        ref_intrinsics[1] *= img_wh[1]/original_h

        # load the estimated depth of the reference view
        ref_depth_est = read_map(os.path.join(out_folder, 'depth_est/{:0>8}.pfm'.format(ref_view)))
        ref_depth_est = np.squeeze(ref_depth_est, 2)
        # load the photometric mask of the reference view
        confidence = read_map(os.path.join(out_folder, 'confidence/{:0>8}.pfm'.format(ref_view)))
        photo_mask = confidence > photo_thres
        photo_mask = np.squeeze(photo_mask, 2)

        all_srcview_depth_ests = []

        # compute the geometric mask
        geo_mask_sum = 0
        for src_view in src_views:
            # camera parameters of the source view
            _, original_h, original_w = read_image(
                os.path.join(scan_folder, 'images/{:0>8}.jpg'.format(src_view)), max(img_wh))
            src_intrinsics, src_extrinsics, _ = read_cam_file(
                os.path.join(scan_folder, 'cams/{:0>8}_cam.txt'.format(src_view)))[0:2]
            src_intrinsics[0] *= img_wh[0]/original_w
            src_intrinsics[1] *= img_wh[1]/original_h

            # the estimated depth of the source view
            src_depth_est = read_map(os.path.join(out_folder, 'depth_est/{:0>8}.pfm'.format(src_view)))

            geo_mask, depth_reprojected, _, _ = check_geometric_consistency(
                ref_depth_est, ref_intrinsics, ref_extrinsics, src_depth_est, src_intrinsics, src_extrinsics,
                geo_pixel_thres, geo_depth_thres)
            geo_mask_sum += geo_mask.astype(np.int32)
            all_srcview_depth_ests.append(depth_reprojected)

        # average the reference depth with all consistent reprojected source depths
        depth_est_averaged = (sum(all_srcview_depth_ests) + ref_depth_est) / (geo_mask_sum + 1)
        # a pixel is geometrically valid if enough source views agree
        geo_mask = geo_mask_sum >= geo_mask_thres
        final_mask = np.logical_and(photo_mask, geo_mask)

        os.makedirs(os.path.join(out_folder, "mask"), exist_ok=True)
        save_image(os.path.join(out_folder, "mask/{:0>8}_photo.png".format(ref_view)), photo_mask)
        save_image(os.path.join(out_folder, "mask/{:0>8}_geo.png".format(ref_view)), geo_mask)
        save_image(os.path.join(out_folder, "mask/{:0>8}_final.png".format(ref_view)), final_mask)

        print("processing {}, ref-view{:0>2}, geo_mask:{:3f} photo_mask:{:3f} final_mask: {:3f}".format(
            scan_folder, ref_view, geo_mask.mean(), photo_mask.mean(), final_mask.mean()))

        if args.display:
            cv2.imshow('ref_img', ref_img[:, :, ::-1])
            cv2.imshow('ref_depth', ref_depth_est)
            cv2.imshow('ref_depth * photo_mask', ref_depth_est * photo_mask.astype(np.float32))
            cv2.imshow('ref_depth * geo_mask', ref_depth_est * geo_mask.astype(np.float32))
            cv2.imshow('ref_depth * mask', ref_depth_est * final_mask.astype(np.float32))
            cv2.waitKey(1)

        height, width = depth_est_averaged.shape[:2]
        x, y = np.meshgrid(np.arange(0, width), np.arange(0, height))
        valid_points = final_mask
x, y, depth = x[valid_points], y[valid_points], depth_est_averaged[valid_points] color = ref_img[valid_points] xyz_ref = np.matmul(np.linalg.inv(ref_intrinsics), np.vstack((x, y, np.ones_like(x))) * depth) xyz_world = np.matmul(np.linalg.inv(ref_extrinsics), np.vstack((xyz_ref, np.ones_like(x))))[:3] vertexs.append(xyz_world.transpose((1, 0))) vertex_colors.append((color * 255).astype(np.uint8)) vertexs = np.concatenate(vertexs, axis=0) vertex_colors = np.concatenate(vertex_colors, axis=0) vertexs = np.array([tuple(v) for v in vertexs], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')]) vertex_colors = np.array([tuple(v) for v in vertex_colors], dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]) vertex_all = np.empty(len(vertexs), vertexs.dtype.descr + vertex_colors.dtype.descr) for prop in vertexs.dtype.names: vertex_all[prop] = vertexs[prop] for prop in vertex_colors.dtype.names: vertex_all[prop] = vertex_colors[prop] el = PlyElement.describe(vertex_all, 'vertex') PlyData([el]).write(plyfilename) print("saving the final model to", plyfilename) if __name__ == '__main__': # step1. save all the depth maps and the masks in outputs directory save_depth() # the size of image input for PatchmatchNet, maybe downsampled img_wh = (640, 480) # number of source images need to be consistent with in geometric consistency filtering geo_mask_thres = 2 # step2. filter saved depth maps and reconstruct point cloud filter_depth(args.testpath, args.outdir, os.path.join(args.outdir, 'custom.ply'), args.geo_pixel_thres, args.geo_depth_thres, args.photo_thres, img_wh, geo_mask_thres) ```
github_jupyter
``` import sys sys.path.append('..') import torch import numpy as np import matplotlib.pyplot as plt from lens import logic torch.manual_seed(0) np.random.seed(0) # XOR problem x_train = torch.tensor([ [0, 0], [0, 1], [1, 0], [1, 1], ], dtype=torch.float) y_train = torch.tensor([0, 1, 1, 0], dtype=torch.float).unsqueeze(1) x_test = torch.tensor([ [0, 0.95], [0, 0.9], [0.05, 1], [0.1, 0.8], [0.45, 1], [0, 0.35], [0.95, 0.9], [0.75, 0.2], [0.75, 0.15], ], dtype=torch.float) y_test = torch.tensor([1, 1, 1, 1, 1, 0, 0, 1, 1], dtype=torch.float).unsqueeze(1) layers = [ torch.nn.Linear(x_train.size(1), 10), torch.nn.LeakyReLU(), torch.nn.Linear(10, 4), torch.nn.LeakyReLU(), torch.nn.Linear(4, 1), torch.nn.Sigmoid(), ] model = torch.nn.Sequential(*layers) optimizer = torch.optim.Adam(model.parameters(), lr=0.01) model.train() need_pruning = True for epoch in range(1000): # forward pass optimizer.zero_grad() y_pred = model(x_train) # Compute Loss loss = torch.nn.functional.mse_loss(y_pred, y_train) for module in model.children(): if isinstance(module, torch.nn.Linear): loss += 0.001 * torch.norm(module.weight, 1) # backward pass loss.backward() optimizer.step() # compute accuracy if epoch % 100 == 0: y_pred_d = (y_pred > 0.5) accuracy = (y_pred_d.eq(y_train).sum(dim=1) == y_train.size(1)).sum().item() / y_train.size(0) print(f'Epoch {epoch}: train accuracy: {accuracy:.4f}') ``` # Decision boundaries ``` def plot_decision_bundaries(model, x, h=0.1, cmap='BrBG'): x1_min, x1_max = x[:, 0].min() - 1, x[:, 0].max() + 1 x2_min, x2_max = x[:, 1].min() - 1, x[:, 1].max() + 1 xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, h), np.arange(x2_min, x2_max, h)) xx = torch.FloatTensor(np.c_[xx1.ravel(), xx2.ravel()]) Z = model(xx).detach().numpy() Z = Z.reshape(xx1.shape) plt.contourf(xx1, xx2, Z, alpha=0.2, cmap=cmap) return cmap = 'BrBG' plt.figure(figsize=[8, 8]) for sample_id, (xin, yin) in enumerate(zip(x_train, y_train)): output = model(xin) explanation = 
logic.relu_nn.explain_local(model, x_train, y_train, xin, yin, method='lime', concept_names=['f1', 'f2']) plt.subplot(2, 2, sample_id+1) plt.title(f'INPUT={xin.detach().numpy()} - OUTPUT={output.detach().numpy()} \n Explanation: {explanation}') plot_decision_bundaries(model, x_train, h=0.01) plt.scatter(x_train[:, 0].detach().numpy(), x_train[:, 1].detach().numpy(), c=y_train.detach().numpy(), cmap=cmap) plt.scatter(xin[0], xin[1], c='k', marker='x', s=100, cmap=cmap) c = plt.Circle((xin[0], xin[1]), radius=0.2, edgecolor='k', fill=False, linestyle='--') plt.gca().add_artist(c) plt.xlim([-0.5, 1.5]) plt.ylim([-0.5, 1.5]) plt.tight_layout() plt.show() ``` # Combine local explanations ``` for i, target_class in enumerate(range(2)): global_explanation, _, _ = logic.relu_nn.combine_local_explanations(model, x_train, y_train.squeeze(), target_class=target_class, method='lime') accuracy, preds = logic.base.test_explanation(global_explanation, target_class, x_test, y_test) final_formula = logic.base.replace_names(global_explanation, ['f1', 'f2']) print(f'Class {target_class} - Global explanation: "{final_formula}" - Accuracy: {accuracy:.4f}') ```
github_jupyter
![Aeropython Logo](/files/imagenes/aeropython_logo.png) #Ejercicio: Algoritmo genético para optimizar un rotor o hélice, paso a paso ##El problema A menudo, en ingeniería, cuando nos enfrentamos a un problema, no podemos resolverlo directamente o despejar la solución como en los problemas sencillos típicos de matemáticas o física clásica. Una manera muy típica en la que nos encontraremos los problemas es en la forma de simulación: tenemos una serie de parámetros y un modelo, y podemos simularlo para obtener sus características, pero sin tener ninguna fórmula explícita que relacione parámetros y resultados y que nos permita obtener una función inversa. En este ejercicio, nos plantearemos un problema de ese tipo: tenemos una función que calcula las propiedades de una hélice en función de una serie de parámetros, pero no conocemos los cálculos que hace internamente. Para nosotros, es una caja negra. Para optimizar, iremos recuperando las funciones del algoritmo genético que se vieron en la parte de teoría. ``` %matplotlib inline import numpy as np # Trabajaremos con arrays import matplotlib.pyplot as plt # Y vamos a pintar gráficos from optrot.rotor import calcular_rotor # Esta función es la que vamos a usar para calcular el rotor import random as random # Necesitaremos números aleatorios ``` Empecemos echando un ojo a la función del rotor, para ver qué vamos a necesitar y con qué parámetros vamos a trabajar. ``` help(calcular_rotor) ``` Podemos trazar unas cuantas curvas para observar qué pinta va a tener lo que saquemos. Por ejemplo, cómo cambian las características de la hélice dependiendo de la velocidad de vuelo, para una hélice de ejemplo que gira a una velocidad dada. 
``` vel = np.linspace(0, 30, 100) efic = np.zeros_like(vel) T = np.zeros_like(vel) P = np.zeros_like(vel) mach = np.zeros_like(vel) for i in range(len(vel)): T[i], P[i], efic[i], mach[i] = calcular_rotor(130, vel[i], 0.5, 3) plt.plot(vel, T) plt.title('Tracción de la hélice') plt.plot(vel, P) plt.title('Potencia consumida') plt.plot(vel, efic) plt.title('Eficiencia de la hélice') plt.plot(vel, mach) plt.title('Mach en la punta de las palas') ``` ##Definiendo el genoma Definamos un individuo genérico: Cada individuo será un posible diseño del rotor, con unas características determinadas. ``` class Individual (object): def __init__(self, genome): self.genome = genome self.traits = {} self.performances = {} self.fitness = 0 ``` Nuestro rotor depende de varios parámetros, pero en general, buscaremos optimizar el valor de unos, mateniendo un valor controlado de otros. Por ejemplo, la velocidad de avance y la altitud normalmente las impondremos, ya que querremos optimizar para una velocidad y altura de vuelos dadas. En nuestro algoritmo, usaremos como genoma los parámetros de optimización, y las variables circunstanciales las controlaremos a mano. ***Sugerencia*** (esta es una manera de organizar las variables, aunque puedes escoger otras) Parámetros de optimización: - omega (velocidad de rotación) (Entre 0 y 200 radianes/segundo) - R (radio de la hélice) (Entre 0.1 y 2 metros) - b (número de palas) (Entre 2 y 5 palas) - theta0 (ángulo de paso colectivo) (Entre -0.26 y 0.26 radianes)(*se corresponde a -15 y 15 grados*) - p (parámetro de torsión) (Entre -5 y 20 grados) - cuerda (anchura de la pala) (Entre 0.01 y 0.2 metros) Parámetros circunstanciales: - vz (velocidad de vuelo) - h (altura de vuelo) Variables que se van a mantener - ley de torsión (hiperbólica) - formato de chord params: un solo número, para que la anchura sea constante a lo largo de la pala ``` 15 * np.pi / 180 ``` A continuación crearemos un diccionario de genes. 
En él iremos almacenando los nombres de los parámetros y la cantidad de bits que usaremos para definirlos. Cuantos más bits, más resolución Ej: 1 bit : 2 valores, 2 bit : 4 valores, 10 bit : 1024 valores ``` #Completa este diccionario con las variables que hayas elegido y los bits que usarás dict_genes = { 'omega' : 10, 'R': 10, 'b': 2 } ``` Ahora, crearemos una función que rellene estos genomas con datos aleatorios: ``` def generate_genome (dict_genes): #Calculamos el número total de bits con un bucle que recorra el diccionario n_bits = ? #Generamos un array aletorio de 1 y 0 de esa longitud con numpy genome = np.random.randint(0, 2, nbits) #Transformamos el array en una lista antes de devolverlo return list(genome) # Podemos probar a usar nuestra función, para ver qué pinta tiene el ADN de un rotor: generate_genome(dict_genes) ``` ##Trabajando con el individuo Ahora necesitamos una función que transforme esos genes a valores con sentido. Cada gen es un número binario cuyo valor estará entre 0 y 2 ^ n, siendo n el número de bits que hayamos escogido. Estas variables traducidas las guardaremos en otro diccionario, ya con su valor. Estos genes no están volando por ahí sueltos, sino que estarán guardados en el interior del individuo al que pertenezcan, por lo que la función deberá estar preparada para extraerlos del individuo, y guardar los resultados a su vez en el interior del individuo. ``` def calculate_traits (individual, dict_genes): genome = individual.genome integer_temporal_list = [] for gen in dict_genes: #Recorremos el diccionario de genes para ir traduciendo del binario ??? #Buscamos los bits que se corresponden al bit en cuestión ??? #Pasamos de lista binaria a número entero integer_temporal_list.append(??) 
#Añadimos el entero a la lista # Transformamos cada entero en una variable con sentido físico: # Por ejemplo, si el entero de la variable Omega está entre 0 y 1023 (10bits), # pero la variable Omega real estará entre 0 y 200 radianes por segundo: omega = integer_temporal_list[0] * 200 / 1023 #del mismo modo, para R: R = 0.1 + integer_temporal_list[1] * 1.9 / 1023 #Obtendremos un radio entre 0.1 y 2 metros #El número de palas debe ser un entero, hay que tener cuidado: b = integer_temporal_list[2] + 2 #(entre 2 y 5 palas) #Continúa con el resto de variables que hayas elegido! dict_traits = { #Aquí iremos guardando los traits, o parámetros 'omega' : omega, 'R': R } individual.traits = dict_traits #Por último, guardamos los traits en el individuo ``` El siguiente paso es usar estos traits(parámetros) para calcular las performances (características o desempeños) del motor. Aquí es donde entra el modelo del motor propiamente dicho. ``` def calculate_performances (individual): dict_traits = individual.traits #Nuestras circunstancias las podemos imponer aquí, o irlas pasando como argumento a la función h = 2000 #Altitud de vuelo en metros vz = 70 #velocidad de avance en m/s, unos 250 km/h #Extraemos los traits del diccionario: omega = dict_traits['omega'] R = dict_traits['R'] #... etc T, P, efic, mach_punta = calcular_rotor(omega, vz, R, b, h...) #Introduce las variables que uses de parámetro. # Consulta la ayuda para asegurarte de que usas el # formato correcto! dict_perfo = { 'T' : T, #Tracción de la hélice 'P' : P, #Potencia consumida por la hélice 'efic': efic, #Eficiencia propulsiva de la hélice 'mach_punta': mach_punta #Mach en la punta de las palas } individual.performances = dict_perfo ``` Comprobemos si todo funciona! 
``` individuo = Individual(generate_genome(dict_genes)) calculate_traits(individuo, dict_genes) calculate_performances(individuo) print(individuo.traits) print(individuo.performances) ``` El último paso que tenemos que realizar sobre el individuo es uno de los más críticos: Transformar las performances en un valor único (fitness) que exprese cómo de bueno es con respecto al objetivo de optimización. La función de fitness puede ser función de parámetros(traits) y performances, dependiendo de qué queramos optimizar. Por ejemplo, si buscáramos que tuviera la tracción máxima sin preocuparnos de nada más, el valor de fitness sería simplemente igual al de T: fitness = T Si queremos imponer restricciones, por ejemplo, que la potencia sea menor a 1000 watios, se pueden añadir sentencias del tipo: if P > 1000: fitness -= 1000 Se puede hacer depender la fitness de varios parámetros de manera ponderada: fitness = parámetro_importante * 10 + parámetro_poco_importante * 0.5 También se pueden combinar diferentes funciones no lineales: fitness = parámetro_1 * parámetro_2 - parámetro_3 **2 * log(parámetro_4) Ahora te toca ser creativo! Elige con qué objetivo quieres optimizar la hélice! Sugerencias de posibles objetivos de optimización: - Mínimo radio posible, manteniendo una tracción mínima de 30 Newtons - Mínima potencia posible, máxima eficiencia, y mínimo radio posible en menor medida, manteniendo una tracción mínima de 40 Newtons y un mach en la punta de las palas de como mucho 0.7 - Mínima potencia posible y máxima eficiencia cuando vuela a 70 m/s, tracción mayor a 50 Newtons en el despegue (vz = 0), mínimo peso posible (calculado a partir del radio, número y anchura de las palas) (Puede que tengas que reescribir la función y el diccionario de performances!) ``` def calculate_fitness (individual): dict_traits = individuo.traits dict_performances = individuo.performances fitness = ????? #Be Creative! 
individual.fitness = fitness ``` Ya tenemos terminado todo lo que necesitamos a nivel de individuo! ## Que comiencen los Juegos! Es hora de trabajar a nivel de algoritmo, y para ello, lo primero es crear una sociedad compuesta de individuos aleatorios. Definamos una función para ello. ``` def immigration (society, target_population, dict_genes): while len(society) < target_population: new_individual = Individual (generate_genome (dict_genes)) # Generamos un individuo aleatorio calculate_traits (new_individual, dict_genes) # Calculamos sus traits calculate_performances (new_individual) # Calculamos sus performances calculate_fitness (new_individual) # Calculamos su fitness society.append (new_individual) # Nuestro nuevo ciudadano está listo para unirse al grupo! ``` Ahora podemos crear nuestra sociedad: ``` society = [] immigration (society, 12, dict_genes) #12 por ejemplo, pueden ser los que sean #Veamos qué pinta tienen los genes de la población plt.matshow([individual.genome for individual in society], cmap=plt.cm.gray) ``` Ya tenemos nuestra pequeña sociedad, aumentémosla un poco más mezclando entre sí a los ciudadanos con mejores fitness! Vamos a extender nuestra población mezclando los genomas de otros individuos. Los individuos con mejor fitness es más probable que se reproduzcan. Además, en los nuevos individuos produciremos ligeras mutaciones aleatorias. ``` #This function was taken from Eli Bendersky's website #It returns an index of a list called "weights", #where the content of each element in "weights" is the probability of this index to be returned. #For this function to be as fast as possible we need to pass it a list of weights in descending order. 
def weighted_choice_sub(weights):
    """Return a random index into *weights*, where weights[i] is the relative
    probability of index i being chosen (roulette-wheel selection)."""
    rnd = random.random() * sum(weights)
    for i, w in enumerate(weights):
        rnd -= w
        if rnd < 0:
            return i


def crossover (society, reproduction_rate, mutation_rate):
    """Grow *society* by breeding its fitter individuals.

    Creates int(len(society) * reproduction_rate) children. Parents are drawn
    with probability proportional to their fitness rank; each child genome is
    a two-point crossover of its parents' genomes, mutated per bit with
    probability *mutation_rate*.

    NOTE(review): relies on the module-level ``dict_genes`` and the exercise
    functions ``calculate_traits`` / ``calculate_performances`` /
    ``calculate_fitness`` defined earlier in the notebook.
    """
    # First we create a list with the fitness values of every individual in the society
    fitness_list = [individual.fitness for individual in society]
    # We sort the individuals in the society in descending order of fitness.
    society_sorted = [x for (y, x) in sorted(zip(fitness_list, society), key=lambda x: x[0], reverse=True)]
    # We then create a list of relative probabilities in descending order,
    # so that the fittest individual in the society has N times more chances to reproduce than the least fit,
    # where N is the number of individuals in the society.
    probability = [i for i in reversed(range(1,len(society_sorted)+1))]
    # We create a list of weights with the probabilities of non-mutation and mutation
    mutation = [1 - mutation_rate, mutation_rate]
    # For every new individual to be created through reproduction:
    for i in range (int(len(society) * reproduction_rate)):
        # We select two parents randomly, using the list of probabilities in "probability".
        father, mother = society_sorted[weighted_choice_sub(probability)], society_sorted[weighted_choice_sub(probability)]
        # We randomly select two cutting points for the genome.
        a, b = random.randrange(0, len(father.genome)), random.randrange(0, len(father.genome))
        # And we create the genome of the child putting together the genome slices of the parents in the cutting points.
        child_genome = father.genome[0:min(a,b)]+mother.genome[min(a,b):max(a,b)]+father.genome[max(a,b):]
        # For every bit in the not-yet-born child, we generate a list containing
        # 1's in the positions where the genome must mutate (i.e. the bit must switch its value)
        # and 0's in the positions where the genome must stay the same.
        n = [weighted_choice_sub(mutation) for ii in range(len(child_genome))]
        # This line switches the bits of the genome of the child that must mutate.
        mutant_child_genome = [abs(n[i] - child_genome[i]) for i in range(len(child_genome))]
        # We finally append the newborn individual to the society
        newborn = Individual(mutant_child_genome)
        calculate_traits (newborn, dict_genes)
        calculate_performances (newborn)
        calculate_fitness (newborn)
        society.append(newborn)
```
Ahora que tenemos una sociedad extensa, es el momento de que actúe la selección "natural": Eliminaremos de la sociedad a los individuos con peor fitness hasta llegar a una población objetivo.
```
def tournament(society, target_population):
    """Shrink *society* to target_population individuals by repeatedly
    removing the one with the lowest fitness."""
    while len(society) > target_population:
        fitness_list = [individual.fitness for individual in society]
        society.pop(fitness_list.index(min(fitness_list)))
```
Ya tenemos nuestro algoritmo prácticamente terminado!
```
society = []
fitness_max = []
for generation in range(30):
    immigration (society, 100, dict_genes)  # Add random individuals to the society until it has 100
    # record the best fitness seen in this generation
    fitness_max += [max([individual.fitness for individual in society])]
    tournament (society, 15)  # Make them compete until only 15 remain
    crossover(society, 5, 0.05)  # The winners reproduce until there are 75
plt.plot(fitness_max)
plt.title('Evolución del valor de fitness')

tournament (society, 1)  # Keep only the best one of all
winner = society[0]
print(winner.traits)  # Check its characteristics
print(winner.performances)
```
Siro Moreno y Carlos Dorado, Aeropython, 20 de Noviembre de 2015
github_jupyter
The following additional libraries are needed to run this notebook. Note that running on Colab is experimental, please report a Github issue if you have any problem. ``` !pip install d2l==0.14.3 ``` # Deep Convolutional Neural Networks (AlexNet) :label:`sec_alexnet` Although CNNs were well known in the computer vision and machine learning communities following the introduction of LeNet, they did not immediately dominate the field. Although LeNet achieved good results on early small datasets, the performance and feasibility of training CNNs on larger, more realistic datasets had yet to be established. In fact, for much of the intervening time between the early 1990s and the watershed results of 2012, neural networks were often surpassed by other machine learning methods, such as support vector machines. For computer vision, this comparison is perhaps not fair. That is although the inputs to convolutional networks consist of raw or lightly-processed (e.g., by centering) pixel values, practitioners would never feed raw pixels into traditional models. Instead, typical computer vision pipelines consisted of manually engineering feature extraction pipelines. Rather than *learn the features*, the features were *crafted*. Most of the progress came from having more clever ideas for features, and the learning algorithm was often relegated to an afterthought. Although some neural network accelerators were available in the 1990s, they were not yet sufficiently powerful to make deep multichannel, multilayer CNNs with a large number of parameters. Moreover, datasets were still relatively small. Added to these obstacles, key tricks for training neural networks including parameter initialization heuristics, clever variants of stochastic gradient descent, non-squashing activation functions, and effective regularization techniques were still missing. Thus, rather than training *end-to-end* (pixel to classification) systems, classical pipelines looked more like this: 1. 
Obtain an interesting dataset. In early days, these datasets required expensive sensors (at the time, 1 megapixel images were state-of-the-art). 2. Preprocess the dataset with hand-crafted features based on some knowledge of optics, geometry, other analytic tools, and occasionally on the serendipitous discoveries of lucky graduate students. 3. Feed the data through a standard set of feature extractors such as the SIFT (scale-invariant feature transform) :cite:`Lowe.2004`, the SURF (speeded up robust features) :cite:`Bay.Tuytelaars.Van-Gool.2006`, or any number of other hand-tuned pipelines. 4. Dump the resulting representations into your favorite classifier, likely a linear model or kernel method, to train a classifier. If you spoke to machine learning researchers, they believed that machine learning was both important and beautiful. Elegant theories proved the properties of various classifiers. The field of machine learning was thriving, rigorous, and eminently useful. However, if you spoke to a computer vision researcher, you would hear a very different story. The dirty truth of image recognition, they would tell you, is that features, not learning algorithms, drove progress. Computer vision researchers justifiably believed that a slightly bigger or cleaner dataset or a slightly improved feature-extraction pipeline mattered far more to the final accuracy than any learning algorithm. ## Learning Representations Another way to cast the state of affairs is that the most important part of the pipeline was the representation. And up until 2012 the representation was calculated mechanically. In fact, engineering a new set of feature functions, improving results, and writing up the method was a prominent genre of paper. 
SIFT :cite:`Lowe.2004`, SURF :cite:`Bay.Tuytelaars.Van-Gool.2006`, HOG (histograms of oriented gradient) :cite:`Dalal.Triggs.2005`, [bags of visual words](https://en.wikipedia.org/wiki/Bag-of-words_model_in_computer_vision) and similar feature extractors ruled the roost. Another group of researchers, including Yann LeCun, Geoff Hinton, Yoshua Bengio, Andrew Ng, Shun-ichi Amari, and Juergen Schmidhuber, had different plans. They believed that features themselves ought to be learned. Moreover, they believed that to be reasonably complex, the features ought to be hierarchically composed with multiple jointly learned layers, each with learnable parameters. In the case of an image, the lowest layers might come to detect edges, colors, and textures. Indeed, Alex Krizhevsky, Ilya Sutskever, and Geoff Hinton proposed a new variant of a CNN, *AlexNet*, that achieved excellent performance in the 2012 ImageNet challenge. AlexNet was named after Alex Krizhevsky, the first author of the breakthrough ImageNet classification paper :cite:`Krizhevsky.Sutskever.Hinton.2012`. Interestingly in the lowest layers of the network, the model learned feature extractors that resembled some traditional filters. :numref:`fig_filters` is reproduced from the AlexNet paper :cite:`Krizhevsky.Sutskever.Hinton.2012` and describes lower-level image descriptors. ![Image filters learned by the first layer of AlexNet.](https://github.com/d2l-ai/d2l-pytorch-colab/blob/master/img/filters.png?raw=1) :width:`400px` :label:`fig_filters` Higher layers in the network might build upon these representations to represent larger structures, like eyes, noses, blades of grass, and so on. Even higher layers might represent whole objects like people, airplanes, dogs, or frisbees. Ultimately, the final hidden state learns a compact representation of the image that summarizes its contents such that data belonging to different categories be separated easily. 
While the ultimate breakthrough for many-layered CNNs came in 2012, a core group of researchers had dedicated themselves to this idea, attempting to learn hierarchical representations of visual data for many years. The ultimate breakthrough in 2012 can be attributed to two key factors. ### Missing Ingredient: Data Deep models with many layers require large amounts of data in order to enter the regime where they significantly outperform traditional methods based on convex optimizations (e.g., linear and kernel methods). However, given the limited storage capacity of computers, the relative expense of sensors, and the comparatively tighter research budgets in the 1990s, most research relied on tiny datasets. Numerous papers addressed the UCI collection of datasets, many of which contained only hundreds or (a few) thousands of images captured in unnatural settings with low resolution. In 2009, the ImageNet dataset was released, challenging researchers to learn models from 1 million examples, 1000 each from 1000 distinct categories of objects. The researchers, led by Fei-Fei Li, who introduced this dataset leveraged Google Image Search to prefilter large candidate sets for each category and employed the Amazon Mechanical Turk crowdsourcing pipeline to confirm for each image whether it belonged to the associated category. This scale was unprecedented. The associated competition, dubbed the ImageNet Challenge pushed computer vision and machine learning research forward, challenging researchers to identify which models performed best at a greater scale than academics had previously considered. ### Missing Ingredient: Hardware Deep learning models are voracious consumers of compute cycles. Training can take hundreds of epochs, and each iteration requires passing data through many layers of computationally-expensive linear algebra operations. 
This is one of the main reasons why in the 1990s and early 2000s, simple algorithms based on the more-efficiently optimized convex objectives were preferred. *Graphical processing units* (GPUs) proved to be a game changer in making deep learning feasible. These chips had long been developed for accelerating graphics processing to benefit computer games. In particular, they were optimized for high throughput $4 \times 4$ matrix-vector products, which are needed for many computer graphics tasks. Fortunately, this math is strikingly similar to that required to calculate convolutional layers. Around that time, NVIDIA and ATI had begun optimizing GPUs for general computing operations, going as far as to market them as *general-purpose GPUs* (GPGPU). To provide some intuition, consider the cores of a modern microprocessor (CPU). Each of the cores is fairly powerful running at a high clock frequency and sporting large caches (up to several megabytes of L3). Each core is well-suited to executing a wide range of instructions, with branch predictors, a deep pipeline, and other bells and whistles that enable it to run a large variety of programs. This apparent strength, however, is also its Achilles heel: general-purpose cores are very expensive to build. They require lots of chip area, a sophisticated support structure (memory interfaces, caching logic between cores, high-speed interconnects, and so on), and they are comparatively bad at any single task. Modern laptops have up to 4 cores, and even high-end servers rarely exceed 64 cores, simply because it is not cost effective. By comparison, GPUs consist of $100 \sim 1000$ small processing elements (the details differ somewhat between NVIDIA, ATI, ARM and other chip vendors), often grouped into larger groups (NVIDIA calls them warps). While each core is relatively weak, sometimes even running at sub-1GHz clock frequency, it is the total number of such cores that makes GPUs orders of magnitude faster than CPUs. 
For instance, NVIDIA's recent Volta generation offers up to 120 TFlops per chip for specialized instructions (and up to 24 TFlops for more general-purpose ones), while floating point performance of CPUs has not exceeded 1 TFlop to date. The reason why this is possible is actually quite simple: first, power consumption tends to grow *quadratically* with clock frequency. Hence, for the power budget of a CPU core that runs 4 times faster (a typical number), you can use 16 GPU cores at $1/4$ the speed, which yields $16 \times 1/4 = 4$ times the performance. Furthermore, GPU cores are much simpler (in fact, for a long time they were not even *able* to execute general-purpose code), which makes them more energy efficient. Last, many operations in deep learning require high memory bandwidth. Again, GPUs shine here with buses that are at least 10 times as wide as many CPUs.

Back to 2012. A major breakthrough came when Alex Krizhevsky and Ilya Sutskever implemented a deep CNN that could run on GPU hardware. They realized that the computational bottlenecks in CNNs, convolutions and matrix multiplications, are all operations that could be parallelized in hardware. Using two NVIDIA GTX 580s with 3GB of memory, they implemented fast convolutions. The code [cuda-convnet](https://code.google.com/archive/p/cuda-convnet/) was good enough that for several years it was the industry standard and powered the first couple years of the deep learning boom.

## AlexNet

AlexNet, which employed an 8-layer CNN, won the ImageNet Large Scale Visual Recognition Challenge 2012 by a phenomenally large margin. This network showed, for the first time, that the features obtained by learning can transcend manually-designed features, breaking the previous paradigm in computer vision. The architectures of AlexNet and LeNet are very similar, as :numref:`fig_alexnet` illustrates.
Note that we provide a slightly streamlined version of AlexNet removing some of the design quirks that were needed in 2012 to make the model fit on two small GPUs. ![From LeNet (left) to AlexNet (right).](http://d2l.ai/_images/alexnet.svg) :label:`fig_alexnet` The design philosophies of AlexNet and LeNet are very similar, but there are also significant differences. First, AlexNet is much deeper than the comparatively small LeNet5. AlexNet consists of eight layers: five convolutional layers, two fully-connected hidden layers, and one fully-connected output layer. Second, AlexNet used the ReLU instead of the sigmoid as its activation function. Let us delve into the details below. ### Architecture In AlexNet's first layer, the convolution window shape is $11\times11$. Since most images in ImageNet are more than ten times higher and wider than the MNIST images, objects in ImageNet data tend to occupy more pixels. Consequently, a larger convolution window is needed to capture the object. The convolution window shape in the second layer is reduced to $5\times5$, followed by $3\times3$. In addition, after the first, second, and fifth convolutional layers, the network adds maximum pooling layers with a window shape of $3\times3$ and a stride of 2. Moreover, AlexNet has ten times more convolution channels than LeNet. After the last convolutional layer there are two fully-connected layers with 4096 outputs. These two huge fully-connected layers produce model parameters of nearly 1 GB. Due to the limited memory in early GPUs, the original AlexNet used a dual data stream design, so that each of their two GPUs could be responsible for storing and computing only its half of the model. Fortunately, GPU memory is comparatively abundant now, so we rarely need to break up models across GPUs these days (our version of the AlexNet model deviates from the original paper in this aspect). 
### Activation Functions Besides, AlexNet changed the sigmoid activation function to a simpler ReLU activation function. On one hand, the computation of the ReLU activation function is simpler. For example, it does not have the exponentiation operation found in the sigmoid activation function. On the other hand, the ReLU activation function makes model training easier when using different parameter initialization methods. This is because, when the output of the sigmoid activation function is very close to 0 or 1, the gradient of these regions is almost 0, so that backpropagation cannot continue to update some of the model parameters. In contrast, the gradient of the ReLU activation function in the positive interval is always 1. Therefore, if the model parameters are not properly initialized, the sigmoid function may obtain a gradient of almost 0 in the positive interval, so that the model cannot be effectively trained. ### Capacity Control and Preprocessing AlexNet controls the model complexity of the fully-connected layer by dropout (:numref:`sec_dropout`), while LeNet only uses weight decay. To augment the data even further, the training loop of AlexNet added a great deal of image augmentation, such as flipping, clipping, and color changes. This makes the model more robust and the larger sample size effectively reduces overfitting. We will discuss data augmentation in greater detail in :numref:`sec_image_augmentation`. ``` from d2l import torch as d2l import torch from torch import nn net = nn.Sequential( # Here, we use a larger 11 x 11 window to capture objects. At the same # time, we use a stride of 4 to greatly reduce the height and width of the # output. 
Here, the number of output channels is much larger than that in # LeNet nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2), # Make the convolution window smaller, set padding to 2 for consistent # height and width across the input and output, and increase the number of # output channels nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2), # Use three successive convolutional layers and a smaller convolution # window. Except for the final convolutional layer, the number of output # channels is further increased. Pooling layers are not used to reduce the # height and width of input after the first two convolutional layers nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2), nn.Flatten(), # Here, the number of outputs of the fully-connected layer is several # times larger than that in LeNet. Use the dropout layer to mitigate # overfitting nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5), # Output layer. Since we are using Fashion-MNIST, the number of classes is # 10, instead of 1000 as in the paper nn.Linear(4096, 10)) ``` We construct a single-channel data example with both height and width of 224 to observe the output shape of each layer. It matches the AlexNet architecture in :numref:`fig_alexnet`. ``` X = torch.randn(1, 1, 224, 224) for layer in net: X=layer(X) print(layer.__class__.__name__,'Output shape:\t',X.shape) ``` ## Reading the Dataset Although AlexNet is trained on ImageNet in the paper, we use Fashion-MNIST here since training an ImageNet model to convergence could take hours or days even on a modern GPU. 
One of the problems with applying AlexNet directly on Fashion-MNIST is that its images have lower resolution ($28 \times 28$ pixels) than ImageNet images. To make things work, we upsample them to $224 \times 224$ (generally not a smart practice, but we do it here to be faithful to the AlexNet architecture). We perform this resizing with the `resize` argument in the `d2l.load_data_fashion_mnist` function. ``` batch_size = 128 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224) ``` ## Training Now, we can start training AlexNet. Compared with LeNet in :numref:`sec_lenet`, the main change here is the use of a smaller learning rate and much slower training due to the deeper and wider network, the higher image resolution, and the more costly convolutions. ``` lr, num_epochs = 0.01, 10 d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr) ``` ## Summary * AlexNet has a similar structure to that of LeNet, but uses more convolutional layers and a larger parameter space to fit the large-scale ImageNet dataset. * Today AlexNet has been surpassed by much more effective architectures but it is a key step from shallow to deep networks that are used nowadays. * Although it seems that there are only a few more lines in AlexNet's implementation than in LeNet, it took the academic community many years to embrace this conceptual change and take advantage of its excellent experimental results. This was also due to the lack of efficient computational tools. * Dropout, ReLU, and preprocessing were the other key steps in achieving excellent performance in computer vision tasks. ## Exercises 1. Try increasing the number of epochs. Compared with LeNet, how are the results different? Why? 1. AlexNet may be too complex for the Fashion-MNIST dataset. 1. Try simplifying the model to make the training faster, while ensuring that the accuracy does not drop significantly. 1. Design a better model that works directly on $28 \times 28$ images. 1. 
Modify the batch size, and observe the changes in accuracy and GPU memory. 1. Analyze computational performance of AlexNet. 1. What is the dominant part for the memory footprint of AlexNet? 1. What is the dominant part for computation in AlexNet? 1. How about memory bandwidth when computing the results? 1. Apply dropout and ReLU to LeNet-5. Does it improve? How about preprocessing? [Discussions](https://discuss.d2l.ai/t/76)
github_jupyter
<a id="topD"></a> # Downloading COS Data # Learning Goals <font size="5"> This Notebook is designed to walk the user (<em>you</em>) through: <b>Downloading existing Cosmic Origins Spectrograph (<em>COS</em>) data from the online archive</b></font> **1. [Using the web browser interface](#mastD)** \- 1.1. [The Classic HST Web Search](#mastD) \- 1.2. [Searching for a Series of Observations on the Classic Web Search](#WebSearchSeriesD) \- 1.3. [The MAST Portal](#mastportD) \- 1.4. [Searching for a Series of Observations on the MAST Portal](#mastportSeriesD) **2. [Using the `Python` module `Astroquery`](#astroqueryD)** \- 2.1. [Searching for a single source with Astroquery](#Astroquery1D) \- 2.2. [Narrowing Search with Observational Parameters](#NarrowSearchD) \- 2.3. [Choosing and Downloading Data Products](#dataprodsD) \- 2.4. [Using astroquery to find data on a series of sources](#Astroquery2D) ## Choosing how to access the data **This Notebook explains three methods of accessing COS data hosted by the STScI Mikulski Archive for Space Telescopes (MAST).** You may read through all three, or you may wish to focus on a particular method which best suits your needs. 
**Please use the table below to determine which section on which to focus.** ||The [Classic HST Search (Web Interface)](https://archive.stsci.edu/hst/search.php)|The [MAST Portal (Web Interface)](https://mast.stsci.edu/portal/Mashup/Clients/Mast/Portal.html)|The [`Astroquery` (`Python` Interface)](http://astroquery.readthedocs.io/)| |-|-|-|-| ||- User-friendly point-and-click searching|- Very user-friendly point-and-click searching|- Requires a bit of `Python` experience| ||- Advanced **mission-specific** search parameters, including: central wavelength, detector, etc.|- Lacks some mission-specific search parameters|- Allows for programmatic searching and downloads| ||- Can be difficult to download the data if not on the STScI network|- Easy to download selected data|- Best for large datasets| ||||| |***Use this method if...*** |*...You're unfamiliar with `Python` and need to search for data by cenwave*|*...You're exploring the data and you don't need to search by cenwave*|*...You know `Python` and have an idea of what data you're looking for, or you have a lot of data*| |***Described in...***|*[Section 1.1](#mastD)*|*[Section 1.3](#mastportD)*|*[Section 2.1](#astroqueryD)*| *Note* that these are only recommendations, and you may prefer another option. For most purposes, the writer of this tutorial recommends the `Astroquery` `Python` interface, unless you are not at all comfortable using python or doing purely exploratory work. <!-- *You may review Section 1 or 2 independently or together.* --> <!-- *The web search (Section 1) is generally better for introductory users and exploratory use, while the `astroquery` method (Section 2) is easier for those with some `python` experience.* --> # 0. 
Introduction

**The Cosmic Origins Spectrograph ([*COS*](https://www.nasa.gov/content/hubble-space-telescope-cosmic-origins-spectrograph)) is an ultraviolet spectrograph on-board the Hubble Space Telescope ([*HST*](https://www.stsci.edu/hst/about)) with capabilities in the near ultraviolet (*NUV*) and far ultraviolet (*FUV*).**

**This tutorial aims to prepare you to access the existing COS data of your choice by walking you through downloading a processed spectrum, as well as various calibration files obtained with COS.**

- For an in-depth manual to working with COS data and a discussion of caveats and user tips, see the [COS Data Handbook](https://hst-docs.stsci.edu/display/COSDHB/).
- For a detailed overview of the COS instrument, see the [COS Instrument Handbook](https://hst-docs.stsci.edu/display/COSIHB/).

<font size="5"> We will define a few directories in which to place our data.</font>

And to create new directories, we'll import `pathlib.Path`:

```
#Import for: working with system paths
from pathlib import Path
# This will be an important directory for the Notebook, where we save data
data_dir = Path('./data/')
data_dir.mkdir(exist_ok=True)
```

<a id="downloadD"></a>
# 1. Downloading the data through the browser interface

One can search for COS data from both a browser-based Graphical User Interface (*gui*) and a `Python` interface. This Section (1) will examine two web interfaces. [Section 2](#astroqueryD) will explain the `Python` interface.

*Note, there are other, more specialized ways to query the MAST API not discussed in this Notebook. An in-depth MAST API tutorial can be found [here](https://mast.stsci.edu/api/v0/MastApiTutorial.html).*

<a id="mastD"></a>
## 1.1 The Classic HST Web Search

**A browser gui for searching *specifically* through [HST archival data can be found here](http://archive.stsci.edu/hst/search.php).
We will be discussing *this* HST search in the section below.** As of September, 2021, two other portals also allow access to the same data: * A newer HST-specific search page ([here](https://mast.stsci.edu/search/hst/ui/#/)). Most downloading difficulties have been solved with this new site, and upcoming versions of this tutorial will focus on its use. * A more general MAST gui, which also allows access to data from other telescopes such as TESS, but does not offer all HST-specific search parameters. We will discuss this interface in [Section 1.3](#mastportD). The search page of the HST interface is laid out as in fig. 1.1: ### Fig 1.1 <center><img src=./figures/Mast_hst_searchformQSO.png width ="900" title="MAST Archive search form for a COS data query"> </center> where here we have indicated we would like to find all archival science data from the **COS far-ultraviolet (FUV) configuration**, taken with any grating while looking at Quasi-Stellar Objects (QSO) within a 3 arcminute radius of (1hr:37':40", +33d 09m 32s). The output columns we have selected to see are visible in the bottom left of Fig 1.1. Note that if you have a list of coordinates, Observation ID(s), etc. for a series of targets you can click on the "File Upload Form" and attach your list of OBSIDs or identifying features. Then specify which type of data your list contains using the "File Contents" drop-down menu. Figure 1.2 shows the results of our search shown in Fig 1.1. ### Fig 1.2 <center><img src=figures/QSO_MastSearchRes.png width ="900" title="MAST Archive search results for a COS data query"> </center> **We now choose our dataset.** We rather arbitrarily select **`LCXV13050`** because of its long exposure time, taken under an observing program described as: > "Project AMIGA: Mapping the Circumgalactic Medium of Andromeda" This is a Quasar known as [3C48](http://simbad.u-strasbg.fr/simbad/sim-basic?Ident=3c48&submit=SIMBAD+search), one of the first quasars discovered. 
Clicking on the dataset, we are taken to a page displaying a preview spectrum (Fig 1.3). ### Fig 1.3 <center><img src=./figures/QSOPreviewSpec.png width ="900" title="MAST Archive preview spectrum of LCXV13050"> </center> We now return to the [search page](http://archive.stsci.edu/hst/search.php) and enter in LCXV13050 under "Dataset" with no other parameters set. Clicking "search", now we see a single-rowed table with *just* our dataset, and the option to download datasets. We mark the row we wish to download and click "Submit marked data for retrieval from STDADS". See Fig 1.4. ### Fig 1.4 <center><img src =figures/LCXV13050_res.png width ="900" title="MAST Archive dataset overview of LCXV13050"> </center> Now we see a page like in Fig 1.5, where we can either sign in with STScI credentials, or simply provide our email to proceed without credentials. Generally, you may proceed anonymously, unless you are retrieving proprietary data to which you have access. Next, make sure to select "Deliver the data to the Archive staging area". Click "Send Retrieval Request to ST-DADS" and you will receive an email with instructions on downloading the data. ### Fig 1.5 <center><img src =figures/DownloadOptions.png width ="900" title="Download Options for LCXV13050"> </center> Now the data is "staged" on a MAST server, and you need to download it to your local computer. ### Downloading the staged data We demonstrate three methods of downloading your staged data: 1. If your terminal supports it, you may [use the `wget` tool](#wgetDLD). 2. However if that does not work, we recommend [using a secure ftp client application](#download_ftps_cyduckD). 3. Finally, if you would instead like to download *staged data* programmatically, you may [use the Python `ftplib` package](#download_ftps_funcD), as described [here](https://archive.stsci.edu/ftp.html) in STScI's documentation of the MAST FTP Service. 
For your convenience, we have built the `download_anonymous_staged_data` function below, which will download anonymously staged data via ftps. <a id=wgetDLD></a> #### Downloading the staged data with `wget` **If you are connected to the STScI network, either in-person or via a virtual private network (VPN), you should use the `wget` command as in the example below:** `wget -r --ftp-user=anonymous --ask-password ftps://archive.stsci.edu/stage/anonymous/anonymous<directory_number> --directory-prefix=<data_dir>` where `directory_number` is the number at the end of the anonymous path specified in the email you received from MAST and `data_dir` is the local directory where you want the downloaded data. You will be prompted for a password. Type in the email address you used, then press enter/return. Now all the data will be downloaded into a subdirectory of data_dir: `"./archive.stsci.edu/stage/anonymous/anonymous<directory_number>/"` <a id=download_ftps_cyduckD></a> #### Downloading the staged data with a secure ftp client application (`CyberDuck`) CyberDuck is an application which allows you to securely access data stored on another machine using ftps. To download your staged data using Cyberduck, first download the [Cyberduck](https://cyberduck.io) application (*free, with a recommended donation*). Next, open a new browser window (Safari, Firefox, and Google Chrome have all been shown to work,) and type in the following web address: `ftps://archive.stsci.edu/stage/anonymous<directory_number>`, where `directory_number` is the number at the end of the anonymous path specified in the email you received from MAST. For example, if the email specifies: > "The data can be found in the directory... /stage/anonymous/anonymous42822" then this number is **42822** Your browser will attempt to redirect to the CyberDuck application. Allow it to "Open CyberDuck.app", and CyberDuck should open a finder window displaying your files. 
Select whichever files you want to download by highlighting them (command-click or control-click) then right click one of the highlighted files, and select "Download To". This will bring up a file browser allowing you to save the selected files to wherever you wish on your local computer. <a id=download_ftps_funcD></a> #### Downloading the staged data with `ftps` To download anonymously staged data programmatically with ftps, you may run the `download_anonymous_staged_data` function as shown here: ```python download_anonymous_staged_data(email_used="my_email@stsci.edu", directory_number=80552, outdir="./here_is_where_I_want_the_data") ``` Which results in: ``` Downloading lcxv13050_x1dsum1.fits Done ... ... Downloading lcxv13gxq_flt_b.fits Done ``` ``` import ftplib def download_anonymous_staged_data(email_used, directory_number, outdir = "./data/ftps_download/", verbose=True): """ A direct implementation of the MAST FTP Service webpage's ftplib example code. Downloads anonymously staged data from the MAST servers via ftps. Inputs: email_used (str): the email address used to stage the data directory_number (str or int): The number at the end of the anonymous filepath. i.e. if the email you received includes: "The data can be found in the directory... /stage/anonymous/anonymous42822", then this number is 42822 outdir (str): Path to where the file will download locally. verbose (bool): If True, prints name of each file downloaded. 
""" ftps = ftplib.FTP_TLS('archive.stsci.edu') # Set up connection ftps.login(user="anonymous", passwd=email_used) # Login with anonymous credentials ftps.prot_p() # Add protection to the connection ftps.cwd(f"stage/anonymous/anonymous{directory_number}") filenames = ftps.nlst() outdir = Path(outdir) # Set up the output directory as a path outdir.mkdir(exist_ok=True) for filename in filenames: # Loop through all the staged files if verbose: print("Downloading " + filename) with open(outdir / filename, 'wb') as fp: # Download each file locally ftps.retrbinary('RETR {}'.format(filename), fp.write) if verbose: print(" Done") ``` <font size="5"> <b>Well Done making it this far!</b></font> Attempt the exercise below for some extra practice. ### Exercise 1: *Searching the archive for TRAPPIST-1 data* [TRAPPIST-1](https://en.wikipedia.org/wiki/TRAPPIST-1) is a cool red dwarf with a multiple-exoplanet system. - Find its coordinates using the [SIMBAD Basic Search](http://simbad.u-strasbg.fr/simbad/sim-fbasic). - Use those coordinates in the [HST web search](https://archive.stsci.edu/hst/search.php) or the [MAST portal](https://mast.stsci.edu/portal/Mashup/Clients/Mast/Portal.html) to find all COS exposures of the system. - Limit the search terms to find the COS dataset taken in the COS far-UV configuration with the grating G130M. **What is the dataset ID, and how long was the exposure?** Place your answer in the cell below. ``` # Your answer here ``` <a id=WebSearchSeriesD></a> ## 1.2. Searching for a Series of Observations on the Classic HST Web Search Now let's try using the web interface's [file upload form](http://archive.stsci.edu/hst/search.php?form=fuf) to search for a series of observations by their dataset IDs. We're going to look for three observations of the same object, the white dwarf WD1057+719, taken with three different COS gratings. Two are in the FUV and one in the NUV. 
The dataset IDs are - LDYR52010 - LBNM01040 - LBBD04040 So that we have an example list of datasets to input to the web search, we make a comma-separated-value txt file with these three obs_ids, and save it as `obsId_list.txt`. ``` obsIdList = ['LDYR52010','LBNM01040','LBBD04040'] # The three observation IDs we want to gather obsIdList_length = len(obsIdList) with open('./obsId_list.txt', 'w') as f: # Open up this new file in "write" mode for i, item in enumerate(obsIdList): # We want a newline after each obs_id except the last one if i < obsIdList_length - 1: f.writelines(item + "," + '\n') if i == obsIdList_length - 1: # Make sure we don't end the file with a blank line (below) f.writelines(item) ``` Then we link to this file under the **Local File Name** browse menu on the file upload form. We must set the **File Contents** term to Data ID, as that is the identifier we have provided in our file, and we change the **delimiter** to a comma. Because we are searching by Dataset ID, we don't need to specify any additional parameters to narrow down the data. ### Fig 1.6 <center><img src =figures/FUF_search.png width ="900" title="File Upload Search Form"> </center> **We now can access all the datasets, as shown in Fig. 1.7:** ### Fig 1.7 <center><img src =figures/FUF_res.png width ="900" title="File Upload Search Results"> </center> Now, to download all of the relevant files, we can check the **mark** box for all of them, and again hit "Submit marked data for retrieval from STDADS". This time, we want to retrieve **all the calibration files** associated with each dataset, so we check the following boxes: - Uncalibrated - Calibrated - Used Reference Files (*See Fig. 1.8*) ### Fig 1.8 <center><img src =./figures/DownloadOptions_FUF.png width ="900" title="Download Options for multiple datasets"> </center> The procedure from here is the same described above in Section 1.1. 
Now, when we download the staged data, we obtain multiple subdirectories with each dataset separated. <a id = mastportD></a> ## 1.3. The MAST Portal STScI hosts another web-based gui for accessing data, the [MAST Portal](https://mast.stsci.edu/portal/Mashup/Clients/Mast/Portal.html). This is a newer interface which hosts data from across many missions and allows the user to visualize the target in survey images, take quick looks at spectra or lightcurves, and manage multiple search tabs at once. Additionally, it handles downloads in a slightly more beginner-friendly manner than the current implementation of the Classic HST Search. This guide will only cover the basics of accessing COS data through the MAST Portal; you can find more in-depth documentation in the form of helpful video guides on the [MAST YouTube Channel](https://www.youtube.com/user/STScIMAST). **Let's find the same data we found in Section 1.1, on the QSO 3C48:** Navigate to the MAST Portal at <https://mast.stsci.edu/portal/Mashup/Clients/Mast/Portal.html>, and you will be greeted by a screen where the top looks like Fig. 1.9. ### Fig 1.9 <center><img src =figures/mastp_top.png width ="900" title="Top of MAST Portal Home"> </center> Click on "Advanced Search" (boxed in red in Fig. 1.9). This will open up a new search tab, as shown in Fig. 1.10: ### Fig 1.10 <center><img src =figures/mastp_adv.png width ="900" title="The advanced search tab"> </center> Fig 1.10 (above) shows the default search fields which appear. Depending on what you are looking for, these may or may not be the most helpful search fields. By unchecking some of the fields which we are not interested in searching by right now (boxed in green), and then entering the parameter values by which to narrow the search into each parameter's box, we generate Fig. 1.11 (below). One of the six fields (Mission) by which we are narrowing is boxed in a dashed blue line. The list of applied filters is boxed in red. 
A dashed pink box at the top left indicates that 2 records were found matching all of these parameters. To its left is an orange box around the "Search" button to press to bring up the list of results.

Here we are searching by:

|**Search Parameter**|**Value**|
|-|-|
|Mission|HST|
|Instrument|COS/FUV|
|Filters|G160M|
|Target Name|3C48|
|Observation ID|LCXV\* (*the star is a "wild card" value, so the search will find any file whose `obs_id` begins with LCXV*)|
|Product Type|spectrum|

### Fig 1.11
<center><img src =figures/mastp_adv_2.png width ="900" title="The advanced search tab with some selections"> </center>

Click the "Search" button (boxed in orange), and you will be brought to a page resembling Fig. 1.12.

### Fig 1.12
<center><img src =figures/mastp_res1.png width ="900" title="Results of MAST Portal search"> </center>

<font size="4"> <b>Above, in Fig 1.12</b>:</font>

- The yellow box to the right shows the AstroView panel, where you can interactively explore the area around your target:
  - click and drag to pan around
  - scroll to zoom in/out
- The dashed-blue box highlights additional filters you can use to narrow your search results.
- The red box highlights a button you can click with *some* spectral datasets to pull up an interactive spectrum.
- The green box highlights the "Mark" checkboxes for each dataset.
- The black circle highlights the single dataset download button:
  - **If you only need to download one or two datasets, you may simply click this button for each dataset**
  - Clicking the single dataset download button will attempt to open a "pop-up" window, which you must allow in order to download the file. Some browsers will require you to manually allow pop-ups.

<a id="mastportSeriesD"></a>
## 1.4.
Searching for a Series of Observations on the MAST Portal <font size="4"> <b>To download multiple datasets</b>:</font> The MAST portal acts a bit like an online shopping website, where you add your *data products* to the checkout *cart*/*basket*, then open up your cart to *checkout* and download the files. Using the checkboxes, mark all the datasets you wish to download (in this case, we'll download both LCXV13040 and LCXV13050). Then, click the "Add data products to Download Basket" button (circled in a dashed-purple line), which will take you to a "Download Basket" screen resembling Fig 1.13: ### Fig 1.13 <center><img src =figures/mastp_cart2.png width ="900" title="MAST Portal Download Basket"> </center> Each dataset contains *many* files, most of which are calibration files or intermediate processing files. You may or may not want some of these intermediate files in addition to the final product file. In the leftmost "Filters" section of the Download Basket page, you can narrow which files will be downloaded (boxed in red). By default, only the **minimum recommended products** (*mrp*) will be selected. In the case of most COS data, this will be the final spectrum `x1dsum` file and association `asn` file for each dataset. The mrp files for the first dataset (`LCXV13040`) are highlighted in yellow. These two mrp filetypes are fine for our purposes here; however if you want to download files associated with specific exposures, or any calibration files or intermediate files, you can select those you wish to download with the checkboxes in the file tree system (boxed in dashed-green). **For this tutorial, we simply select "Minimum Recommended Products" at the top left. With this box checked, all of the folders representing individual exposures are no longer visible.** Check the box labelled "HST" to select all files included by the filters, and click the "Download Selected Items" button at the top right (dashed-black circle). 
This will bring up a small window asking you what format to download your files as. For datasets smaller than several Gigabytes, the `Zip` format will do fine. Click Download, and a pop-up window will try to open to download the files. If no download begins, make sure to enable this particular pop-up, or allow pop-ups on the MAST page. **Your files should now be downloaded as a compressed `Zip` folder.** If you need help uncompressing the `Zip`ped files, check out these links for: [Windows](https://support.microsoft.com/en-us/windows/zip-and-unzip-files-8d28fa72-f2f9-712f-67df-f80cf89fd4e5) and [Mac](https://support.apple.com/guide/mac-help/zip-and-unzip-files-and-folders-on-mac-mchlp2528/mac). There are numerous ways to do this on Linux, however we have not vetted them. <a id = astroqueryD></a> # 2. The Python Package `astroquery.mast` Another way to search for and download archived datasets is from within `Python` using the module [`astroquery.mast`](https://astroquery.readthedocs.io/en/latest/mast/mast.html). We will import one of this module's key submodules: `Observations`. *Please note* that the canonical source of information on this package is the [`astroquery` docs](https://astroquery.readthedocs.io/en/latest/) - please look there for the most up-to-date instructions. ## We will import the following packages: - `astroquery.mast`'s submodule `Observations` for finding and downloading data from the [MAST](https://mast.stsci.edu/portal/Mashup/Clients/Mast/Portal.html) archive - `csv`'s submodule `reader` for reading in/out from a csv file of source names. ``` # Downloading data from archive from astroquery.mast import Observations # Reading in multiple source names from a csv file from csv import reader ``` <a id=Astroquery1D></a> ## 2.1. 
Searching for a single source with Astroquery There are *many* options for searching the archive with astroquery, but we will begin with a very general search using the coordinates we found for WD1057+719 in the last section to find the dataset with the longest exposure time using the COS/FUV mode through the G160M filter. We could also search by object name to have it resolved to a set of coordinates, with the function `Observations.query_object(objectname = '3C48')`. - Our coordinates were: (11:00:34.126 +71:38:02.80). - We can search these coordinates as sexagesimal coordinates, or convert them to decimal degrees. ``` query_1 = Observations.query_object("11:00:34.126 +71:38:02.80", radius="5 sec") ``` This command has generated a table of objects called **"query_1"**. We can see what information we have on the objects in the table by printing its *`keys`*, and see how many objects are in the table with `len(query_1)`. ``` print(f"We have table information on {len(query_1)} observations in the following categories/columns:\n") q1_keys = (query_1.keys()) q1_keys ``` <a id=NarrowSearchD></a> ## 2.2. Narrowing Search with Observational Parameters Now we narrow down a bit with some additional parameters and sort by exposure time. 
The parameter limits we add to the search are: - *Only look for sources in the coordinate range between right ascension 165 to 166 degrees and declination +71 to +72 degrees* - *Only find observations in the UV* - *Only find observations taken with the COS instrument (either in its FUV or NUV configuration).* - *Only find spectrographic observations* - *Only find observations made using the COS grating "G160M"* ``` query_2 = Observations.query_criteria(s_ra=[165., 166.], s_dec=[+71.,+72.], wavelength_region="UV", instrument_name=["COS/NUV","COS/FUV"], dataproduct_type = "spectrum", filters = 'G160M') # Next lines simplifies the columns of data we see to some useful data we will look at right now limq2 = query_2['obsid','obs_id', 'target_name', 'dataproduct_type', 'instrument_name', 'project', 'filters', 'wavelength_region', 't_exptime'] sort_order = query_2.argsort('t_exptime') # This is the index list in order of exposure time, increasing print(limq2[sort_order]) chosenObs = limq2[sort_order][-1] # Grab the last value of the sorted list print(f"\n\nThe longest COS/FUV exposure with the G160M filter is: \n\n{chosenObs}") ``` <font size="5">Caution! </font> <img src=./figures/warning.png width ="60" title="CAUTION"> Please note that these queries are `Astropy` tables and do not always respond as expected for other data structures like `Pandas DataFrames`. For instance, the first way of filtering a table shown below is correct, but the second will consistently produce the *wrong result*. You *must* search and filter these tables by masking them, as in the first example below. 
``` # Searching a table generated with a query ## First, correct way using masking mask = (query_1['obs_id'] == 'lbbd01020') # NOTE, obs_id must be lower-case print("Correct way yields: \n" , query_1[mask]['obs_id'],"\n\n") # Second INCORRECT way print("Incorrect way yields: \n" , query_1['obs_id' == 'LBBD01020']['obs_id'], "\nwhich is NOT what we're looking for!") ``` <a id=dataprodsD></a> ## 2.3. Choosing and Downloading Data Products **Now we can choose and download our data products from the archive dataset.** We will first generate a list of data products in the dataset: `product_list`. This will generate a large list, but we will only show the first 10 values. ``` product_list = Observations.get_product_list(chosenObs) product_list[:10] #Not the whole dataset, just first 10 lines/observations ``` Now, we will download *just the* **minimum recommended products** (*mrp*) which are the fully calibrated spectrum (denoted by the suffix `_x1d` or here `x1dsum`) and the association file (denoted by the suffix `_asn`). We do this by setting the parameter `mrp_only` to True. The association file contains no data, but rather the metadata explaining which exposures produced the `x1dsum` dataset. The `x1dsum` file is the final product summed across all of the [fixed pattern noise positions](https://hst-docs.stsci.edu/cosdhb/chapter-1-cos-overview/1-1-instrument-capabilities-and-design#id-1.1InstrumentCapabilitiesandDesign-GratingOffset(FP-POS)GratingOffsetPositions(FP-POS)) (`FP-POS`). The `x1d` and `x1dsum<n>` files are intermediate spectra. Much more information can be found in the [COS Instrument Handbook](https://hst-docs.stsci.edu/display/COSIHB/). We would set `mrp_only` to False, if we wanted to download ***all*** the data from the observation, including: - support files such as the spacecraft's pointing data over time (`jit` files). 
- intermediate data products such as calibrated TIME-TAG data (`corrtag` or `corrtag_a`/`corrtag_b` files) and extracted 1-dimensional spectra averaged over exposures with a specific `FP-POS` value (`x1dsum<n>` files). <img src=./figures/warning.png width ="60" title="CAUTION"> However, use caution with downloading all files, as in this case, setting `mrp_only` to False results in the transfer of **7 Gigabytes** of data, which can take a long time to transfer and eat away at your computer's storage! In general, only download the files you need. On the other hand, often researchers will download only the raw data, so that they can process it for themselves. Since here we only need the final `x1dsum` and `asn` files, we only need to download 2 Megabytes. ``` downloads = Observations.download_products(product_list, download_dir=str(data_dir) , extension='fits', mrp_only=True, cache=False) ``` ### Exercise 2: *Download the raw counts data on TRAPPIST-1* In the previous exercise, we found an observation COS took on TRAPPIST-1 system. In case you skipped Exercise 1, the observation's Dataset ID is `LDLM40010`. Use `Astroquery.mast` to download the raw `TIME-TAG` data, rather than the x1d spectra files. See the [COS Data Handbook Ch. 2](https://hst-docs.stsci.edu/cosdhb/chapter-2-cos-data-files/2-4-cos-data-products) for details on TIME-TAG data files. Make sure to get the data from both segments of the FUV detector (i.e. both `RAWTAG_A` and `RAWTAG_B` files). If you do this correctly, there should be five data files for each detector segment. *Note that some of the obs_id may appear in the table as slightly different, i.e.: ldlm40alq and ldlm40axq, rather than ldlm40010. The main obs_id they fall under is still ldlm40010, and this will still work as a search term. They are linked together by the association file described here in section 2.3.* ``` # Your answer here ``` <a id=Astroquery2D></a> ## 2.4. 
Using astroquery to find data on a series of sources In this case, we'll look for COS data around several bright globular clusters: - Omega Centauri - M5 - M13 - M15 - M53 We will first write a comma-separated-value (csv) file `objectname_list.csv` listing these sources by their common name. This is a bit redundant here, as we will immediately read back in what we have written; however it is done here to deliberately teach both sides of the writing/reading process, and as many users will find themselves with a csv sourcelist they must search. ``` sourcelist = ['omega Centauri', 'M5', 'M13', 'M15', 'M53'] # The 5 sources we want to look for sourcelist_length = len(sourcelist) # measures the length of the list for if statements below with open('./objectname_list.csv', 'w') as f: # Open this new file in "write" mode for i, item in enumerate(sourcelist): # We want a comma after each source name except the last one if i < sourcelist_length - 1: f.writelines(item + ",") if i == sourcelist_length - 1: # No comma after the last entry f.writelines(item) with open('./objectname_list.csv', 'r', newline = '') as csvFile: # Open the file we just wrote in "read" mode objList = list(reader(csvFile, delimiter = ','))[0] # This is the exact same list as `sourcelist`! print("The input csv file contained the following sources:\n", objList) globular_cluster_queries = {} # Make a dictionary, where each source name (i.e. "M15") corresponds to a list of its observations with COS for obj in objList: # each "obj" is a source name query_x = Observations.query_criteria(objectname = obj, radius = "5 min", instrument_name=['COS/FUV', 'COS/NUV']) # query the area in +/- 5 arcminutes globular_cluster_queries[obj] = (query_x) # add this entry to the dictionary globular_cluster_queries # show the dictionary ``` **Excellent! You've now done the hardest part - finding and downloading the right data.** From here, it's generally straightforward to read in and plot the spectrum. 
We recommend you look into our tutorial on [Viewing a COS Spectrum](https://github.com/spacetelescope/notebooks/blob/master/notebooks/COS/ViewData/ViewData.ipynb). ## Congratulations! You finished this Notebook! ### There are more COS data walkthrough Notebooks on different topics. You can find them [here](https://spacetelescope.github.io/COS-Notebooks/). --- ## About this Notebook **Author:** Nat Kerman <nkerman@stsci.edu> **Updated On:** 2021-10-29 > *This tutorial was generated to be in compliance with the [STScI style guides](https://github.com/spacetelescope/style-guides) and would like to cite the [Jupyter guide](https://github.com/spacetelescope/style-guides/blob/master/templates/example_notebook.ipynb) in particular.* ## Citations If you use `astropy`, `matplotlib`, `astroquery`, or `numpy` for published research, please cite the authors. Follow these links for more information about citations: * [Citing `astropy`/`numpy`/`matplotlib`](https://www.scipy.org/citing.html) * [Citing `astroquery`](https://astroquery.readthedocs.io/en/latest/) --- [Top of Page](#topD) <img style="float: right;" src="https://raw.githubusercontent.com/spacetelescope/notebooks/master/assets/stsci_pri_combo_mark_horizonal_white_bkgd.png" alt="Space Telescope Logo" width="200px"/> <br></br> <br></br> <br></br> ## Exercise Solutions: Note, that for many of these, there are multiple ways to get an answer. **We will import:** - numpy to handle array functions - astropy.table Table for creating tidy tables of the data ``` # Manipulating arrays import numpy as np # Reading in data from astropy.table import Table ## Ex. 1 solution: dataset_id_ = 'LDLM40010' exptime_ = 12403.904 print(f"The TRAPPIST-1 COS data is in dataset {dataset_id_}, taken with an exosure time of {exptime_}") ## Ex. 
2 solution: query_3 = Observations.query_criteria(obs_id = 'LDLM40010', wavelength_region="UV", instrument_name="COS/FUV", filters = 'G130M') product_list2 = Observations.get_product_list(query_3) rawRowsA = np.where(product_list2['productSubGroupDescription'] == "RAWTAG_A") rawRowsB = np.where(product_list2['productSubGroupDescription'] == "RAWTAG_B") rawRows = np.append(rawRowsA,rawRowsB) !mkdir ./data/Ex2/ downloads2 = Observations.download_products(product_list2[rawRows], download_dir=str(data_dir/'Ex2/') , extension='fits', mrp_only=False, cache=True) downloads3 = Observations.download_products(product_list2, download_dir=str(data_dir/'Ex2/') , extension='fits', mrp_only=True, cache=True) asn_data = Table.read('./data/Ex2/mastDownload/HST/ldlm40010/ldlm40010_asn.fits', hdu = 1) print(asn_data) ```
github_jupyter
``` !git clone https://github.com/broadinstitute/raman_classifier_challenge.git import pandas as pd import numpy as np from scipy import stats import matplotlib.pyplot as plt import seaborn as sns df= pd.read_csv('raman_classifier_challenge/data/raman_data.csv') df.describe() df.dtypes len(df) df.head() # Dataset contains almost equal samples of all class #df.hist(figsize = (200,200)) condition_tmpdf= df.iloc[:,0] features_tmpdf= df.iloc[:,1:] features_tmpdf.boxplot() #filtering corresponding condition values condition_tmpdf= condition_tmpdf[(np.abs(stats.zscore(features_tmpdf)) < 4).all(axis=1)] #removing outliers from features features_tmpdf= features_tmpdf[(np.abs(stats.zscore(features_tmpdf)) < 4).all(axis=1)] mlist=[] for i in features_tmpdf.columns: if float(i) <=1800.0 and float(i) >=800.0: mlist.append(i) print(mlist) print(len(mlist)) features_tmpdf= features_tmpdf[mlist] features_tmpdf ``` #### I was trying averaging the features, didnt work ``` # mlist=[[] for i in range(40)] # for i in features_tmpdf.columns: # ind= (int(float(i)/100)) # mlist[ind].append(i) # #mlist # f_tmpdf= pd.DataFrame() # for i in mlist: # if i: # colname= str(int(float(i[0])/100)) # ar= features_tmpdf[i].mean(axis=1) # features_tmpdf= features_tmpdf.drop(features_tmpdf[i], axis=1) # f_tmpdf[colname] = ar # features_tmpdf= f_tmpdf # features_tmpdf.head() #features with pretty much outliers removed features_tmpdf.boxplot() tmerg= pd.concat([condition_tmpdf, features_tmpdf], axis=1) tmerg #I was hoping each concencetration would have different area for spectrometry reading but it was not so. 
cdf= condition_tmpdf.copy()
# Map each concentration label to a plot colour so every spectrum can be drawn
# colour-coded by its condition.
cdf= cdf.replace('0mM', 'red')
cdf= cdf.replace('0.1mM', 'green')
cdf= cdf.replace('0.5mM', 'blue')
cdf= cdf.replace('1mM', 'yellow')
# Plot every spectrum (one line per sample), coloured by condition
features_tmpdf.T.iloc[:,:].plot(color=cdf)
```

## scale features

```
from sklearn import preprocessing
# Min-max scale every feature into [0, 1]
x = features_tmpdf.values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
scaled_features_tmpdf= pd.DataFrame(x_scaled)
scaled_features_tmpdf
```

## Encode Classes

```
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
# Integer-encode the condition labels, then one-hot encode them for the NN
x= condition_tmpdf.values
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(x)
print(integer_encoded[:5])
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
print(onehot_encoded[:5])
onehot_condition_tmpdf= onehot_encoded

# verifying if both are same length
print(len(onehot_condition_tmpdf))
print(len(scaled_features_tmpdf))

from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.preprocessing import MinMaxScaler
# Chi-squared univariate selection of the 30 best features.
# NOTE(review): integer_encoded was reshaped to (n, 1) above, so a column
# vector is passed as y here — sklearn accepts it but may warn; confirm intent.
X_norm = MinMaxScaler().fit_transform(features_tmpdf)
chi_selector = SelectKBest(chi2, k=30)
chi_selector.fit(X_norm, integer_encoded)
chi_support = chi_selector.get_support()
chi_feature = features_tmpdf.loc[:,chi_support].columns.tolist()
print(str(len(chi_feature)), 'selected features')
chi2_features_tmpdf= features_tmpdf.loc[:, chi_support]
```

## PCA

```
from sklearn.decomposition import PCA
# Project the scaled spectra onto the first 30 principal components
pca = PCA(n_components=30)
principalComponents = pca.fit_transform(scaled_features_tmpdf)
principalDf = pd.DataFrame(data = principalComponents)
print(principalDf.head())
print(len(condition_tmpdf))
# finalDf = pd.concat([principalDf, condition_tmpdf], axis = 1)
finalDf= principalDf.copy()
finalDf['condition']= condition_tmpdf
print(finalDf.head())
```

#### Let's see first what amount of variance does each PC explain.

```
print(pca.explained_variance_ratio_)
```

PC1 explains 97.3% and PC2 1.4%. Together, if we keep PC1 and PC2 only, they explain 98.7%. Now lets look at the important features.

```
print(abs( pca.components_ ))

#Selecting PC1
yaxis= pca.components_[0]
from collections import OrderedDict
od = OrderedDict()
for i in range(len(yaxis)):
    od[i]= yaxis[i]
# NOTE(review): this sorts by the SIGNED loading, not its absolute value,
# even though abs(pca.components_) is printed above — confirm that is intended,
# since large negative loadings are as "important" as large positive ones.
od2= OrderedDict(sorted(od.items(), key=lambda t: t[1], reverse=True))
```

#### The ordered dict below contains all the features sorted in order of their importance, like feature 299th (feature indexing starts from 0) has the most importance.

```
print(od2)
```

Here, pca.components_ has shape [n_components, n_features]. Thus, by looking at the PC1 (First Principal Component) which is the first row: [0.04295346 0.04492502 0.04503862 ... 0.02696454 0.02666117 0.02636576] we can conclude that feature 299, 91 and 305 are the most important.

```
#Selecting PC2
yaxis= pca.components_[1]
od = OrderedDict()
for i in range(len(yaxis)):
    od[i]= yaxis[i]
od2= OrderedDict(sorted(od.items(), key=lambda t: t[1], reverse=True))
print(od2)
```

##### Similarly for PC2, feature at 481, 482, 480th column have the most importance

```
len(principalDf)
```

### Creating a features dictionary so it will be easy to traverse features when training a model.

```
# Two candidate feature sets: the 30 PCA components and the 30 chi2-selected
# wavenumbers; every classifier below is trained on both.
featuredict={
    'pca':{
        'X':principalDf
    },
    'chi2':{
        'X':chi2_features_tmpdf
    }
}
```

## Train Test Split

```
from sklearn.model_selection import train_test_split
```

#### As the number of rows are less, I only took 10% of them for the test set and 90% for train set.
``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(scaled_features_tmpdf, onehot_condition_tmpdf, test_size=0.1, random_state=42) X_train2, X_test2, y_train2, y_test2 = train_test_split(scaled_features_tmpdf, integer_encoded, test_size=0.1, random_state=42) X_train3, X_test3, y_train3, y_test3 = train_test_split(principalDf, integer_encoded, test_size=0.1, random_state=42) X_train4, X_test4, y_train4, y_test4 = train_test_split(principalDf, onehot_condition_tmpdf, test_size=0.1, random_state=42) ``` ## SVM achieved 66% accuracy ``` featuredict['chi2']['X'] from sklearn import svm #linear kernel works the best. I also tried changing C and gamma but no improvrmrnt for i in featuredict.keys(): print(i) X= featuredict[i]['X'] X_train, X_test, y_train, y_test = train_test_split(X, integer_encoded, test_size=0.1, random_state=42) # print(X_train) clf = svm.SVC(kernel='linear') clf.fit(X_train, y_train) print(clf.score(X_test, y_test)) clf = svm.SVC(kernel='rbf') clf.fit(X_train3, y_train) print(clf.score(X_test, y_test)) clf = svm.SVC(kernel='poly') clf.fit(X_train, y_train) print(clf.score(X_test, y_test)) print("===============================") ``` 61% accuracy with ridge classifier ``` from pandas.core.common import flatten from sklearn.metrics import accuracy_score, classification_report from sklearn.linear_model import RidgeClassifier for i in featuredict.keys(): print(i) X= featuredict[i]['X'] X_train, X_test, y_train, y_test = train_test_split(X, integer_encoded, test_size=0.1, random_state=42) clf = RidgeClassifier().fit(X_train, y_train) print(clf.score(X_test, y_test)) y_pred= clf.predict(X_test) print(classification_report(y_test, y_pred)) print("==================================================") ``` ## Random Forest 77.8% accuracy ``` from sklearn.ensemble import RandomForestClassifier for i in featuredict.keys(): print(i) X= featuredict[i]['X'] X_train, X_test, y_train, y_test = train_test_split(X, 
integer_encoded, test_size=0.1, random_state=42) rfc = RandomForestClassifier(max_depth=10, criterion = 'entropy', random_state = 0) #Fit model on the training Data rfc.fit(X_train, y_train) print(rfc.score(X_test, y_test)) print("==============================") # Build Neural Network from keras.layers import Dense from keras import Sequential from keras.constraints import maxnorm from keras.layers import Dropout from keras.optimizers import SGD from keras import optimizers from keras import metrics from keras.losses import CategoricalCrossentropy, KLDivergence, SparseCategoricalCrossentropy hidden_units=512 learning_rate=0.0002 #Learning rate was quite optimal hidden_layer_act='relu' output_layer_act='softmax' no_epochs=1000 #Increasing The epochs would overfit bsize = 16 #Batch Size Of 128 def create_network(optimizer='rmsprop', init_mode='uniform', lossfns='categorical_crossentropy'): model = Sequential() model.add(Dense(hidden_units, kernel_initializer=init_mode, input_shape=(30, ), activation=hidden_layer_act)) model.add(Dropout(0.1)) model.add(Dense(256, kernel_initializer=init_mode, activation=hidden_layer_act)) model.add(Dropout(0.1)) model.add(Dense(128, kernel_initializer=init_mode, activation=hidden_layer_act)) model.add(Dense(64, kernel_initializer=init_mode, activation=hidden_layer_act)) model.add(Dense(4, kernel_initializer=init_mode, activation=output_layer_act)) model.compile(loss=lossfns, optimizer=optimizer, metrics = ["accuracy"])#metrics.categorical_accuracy return model # model.fit(X_train, y_train, epochs=no_epochs, batch_size= bsize, verbose=2) # model.evaluate(x=X_test, y=y_test, batch_size=bsize) from keras.wrappers.scikit_learn import KerasClassifier from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support # Wrap Keras model so it can be used by scikit-learn neural_network = 
KerasClassifier(build_fn=create_network, epochs=no_epochs, batch_size=bsize, verbose=0) from keras.losses import CategoricalCrossentropy, KLDivergence, SparseCategoricalCrossentropy ``` ### After googling a bit I finalized 3 popular loss functions for Multi Class Classification ##### Multi-Class Cross-Entropy Loss ##### Sparse Multiclass Cross-Entropy Loss ##### Kullback Leibler Divergence Loss ``` from sklearn.model_selection import GridSearchCV # define the grid search parameters init_mode = ['uniform', 'lecun_uniform', 'normal', 'zero', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform'] lossfns = ['categorical_crossentropy', 'kl_divergence', 'sparse_categorical_crossentropy'] param_grid = dict(init_mode=init_mode, lossfns=lossfns) grid = GridSearchCV(estimator=neural_network, param_grid=param_grid, n_jobs=-1, cv=3) for i in featuredict.keys(): print(i) X= featuredict[i]['X'] X_train, X_test, y_train, y_test = train_test_split(X, onehot_condition_tmpdf, test_size=0.1, random_state=42) grid_result = grid.fit(X_train, y_train) # print results print(f'Best Accuracy for {grid_result.best_score_} using {grid_result.best_params_}') means = grid_result.cv_results_['mean_test_score'] stds = grid_result.cv_results_['std_test_score'] params = grid_result.cv_results_['params'] for mean, stdev, param in zip(means, stds, params): print(f' mean={mean:.4}, std={stdev:.4} using {param}') ``` We see that the best results are obtained from the model using he_uniform initialization and kl_divergence loss function, which is close to 76%. ``` from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support # we choose the initializers that came at the top in our previous cross-validation!! 
init_mode = ['he_uniform']
# NOTE(review): the preceding grid search reported kl_divergence as the best
# loss, but categorical_crossentropy is fixed here — confirm this is intended.
lossfns = ['categorical_crossentropy']
batches = [16, 32 ]
epochs = [100, 300, 500]

model_init_batch_epoch_CV = KerasClassifier(build_fn=create_network, verbose=0)

# grid search for initializer, batch size and number of epochs
param_grid = dict(epochs=epochs, batch_size=batches, init_mode=init_mode)
grid = GridSearchCV(estimator=model_init_batch_epoch_CV, param_grid=param_grid, cv=3)

# Run the grid search once per candidate feature set (PCA and chi2)
for i in featuredict.keys():
    print(i)
    X= featuredict[i]['X']
    X_train, X_test, y_train, y_test = train_test_split(X, onehot_condition_tmpdf, test_size=0.1, random_state=42)
    grid_result = grid.fit(X_train, y_train)
    # print results
    print(f'Best Accuracy for {grid_result.best_score_:.4} using {grid_result.best_params_}')
    means = grid_result.cv_results_['mean_test_score']
    stds = grid_result.cv_results_['std_test_score']
    params = grid_result.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print(f'mean={mean:.4}, std={stdev:.4} using {param}')
```

#### using the required parameters which gave good accuracy, now doing kfold using those parameters.

```
# Wrap Keras model so it can be used by scikit-learn
neural_network = KerasClassifier(build_fn=create_network, epochs=300, batch_size=32, verbose=0)
X_train, X_test, y_train, y_test = train_test_split(featuredict['pca']['X'], onehot_condition_tmpdf, test_size=0.1, random_state=42)
# 10-fold cross-validated accuracy on the training portion
out= cross_val_score(neural_network, X_train, y_train, cv=10)
out.mean()

# Final fit, then evaluate on the held-out 10%
neural_network.fit(X_train, y_train)
y_pred=neural_network.predict(X_test)
# Undo the one-hot encoding so predictions and truth are comparable labels
y_test_inv=onehot_encoder.inverse_transform(y_test)
print(classification_report(y_test_inv, y_pred))

cm= confusion_matrix(y_test_inv, y_pred)
labels= label_encoder.inverse_transform([0,1,2,3])
# Transform to df for easier plotting
cm_df = pd.DataFrame(cm, index = labels, columns = labels)
plt.figure(figsize=(5.5,4))
sns.heatmap(cm_df, annot=True)
plt.title('NN \nAccuracy:{0:.3f}'.format(accuracy_score(y_test_inv, y_pred)))
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
```

### The confusion matrix won't tell much because there are very less entries and out of those some labels are selected more. However we can see using KFold that our accuracy average is around 75-77%. Random Forest also gave similar results. Machine learning models scale with data, so I think with more data the performance of my model will give more promising results. Now 100-200 rows are very few generally to train deeplearning models.
github_jupyter
# EJERCICIO 7 A partir de análisis clínicos y de la edad y el sexo de pacientes de una clínica ubicada en el noreste de Andhra Pradesh, India, se intentará obtener un clasificador automático que sirva para diagnosticar a pacientes con problemas de hígado. Para esto, se recabaron muestras de ocho análisis distintos realizados a 579 pacientes que, junto con su edad y sexo, se dividieron en dos grupos: 414 de ellos diagnosticados con problemas de hígado por expertos en el área mientras que los 165 restantes fueron señalados como exentos de ese problema. Los 11 atributos que constituyen una muestra son los indicados en la tabla de la derecha. Todos los atributos son valores numéricos continuos a excepción del atributo “Sexo”, en donde el valor 1 representa “HOMBRE” y el valor 2 representa “MUJER”, y del atributo “Diagnóstico”, donde el valor 1 representa “CON PROBLEMA DE HÍGADO” mientras que el valor 2 representa “SIN PROBLEMA DE HÍGADO”. Utilice perceptrones o una red neuronal artificial (según resulte más conveniente). Informe el motivo por el que se eligió el tipo de clasificador. Detalle la arquitectura y los parámetros usados en su entrenamiento (según corresponda). Documente todos los intentos realizados. Para el entrenamiento emplee sólo el 90% de las muestras disponibles de cada tipo. Informe la matriz de confusión que produce el mejor clasificador obtenido al evaluarlo con las muestras de entrenamiento e indique la matriz que ese clasificador produce al usarlo sobre el resto de las muestras reservadas para prueba.
$$ \begin{array}{|c|c|} \hline 1 & Edad \\ \hline 2 & Sexo \\ \hline 3 & Bilirrubina Total \\ \hline 4 & Bilirrubina Directa \\ \hline 5 & Fosfatasa Alcalina \\ \hline 6 & Alanina Aminotransferasa \\ \hline 7 & Aspartato Aminotransferasa \\ \hline 8 & Proteínas Total \\ \hline 9 & Albúmina \\ \hline 10 & Relación Albúmina/Globulina \\ \hline 11 & Diagnóstico (valor a predecir) \\ \hline \end{array} $$

```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import mpld3
%matplotlib inline
mpld3.enable_notebook()

from cperceptron import Perceptron
from cbackpropagation import ANN #, Identidad, Sigmoide
import patrones as magia

def progreso(ann, X, T, y=None, n=-1, E=None):
    """Training callback for the ANN: print step count and error every 20 steps."""
    if n % 20 == 0:
        print("Pasos: {0} - Error: {1:.32f}".format(n, E))

def progresoPerceptron(perceptron, X, T, n):
    """Training callback for the perceptron: print step count and number of misclassified samples."""
    y = perceptron.evaluar(X)
    incorrectas = (T != y).sum()
    print("Pasos: {0}\tIncorrectas: {1}\n".format(n, incorrectas))

# Load the data
higado = np.load('higado.npy')
#muestras = higado[:, :-1]
# Keep age (column 0) plus the lab measurements; column 1 ('sex') is dropped
muestras = np.hstack((higado[:,0].reshape(-1,1), higado[:,2:-1]))
# 'sex' attribute in the RAW data (applies to the commented-out full-attribute
# version above — 'muestras' as built here no longer contains this column):
# muestras[:, 1] == 1 --> MALE
# muestras[:, 1] == 2 --> FEMALE
diagnostico = (higado[:, -1] != 2).astype(np.int8)
# diagnostico == 1 --> WITH liver disease
# diagnostico == 0 --> WITHOUT liver disease

# Build the patterns: 90% of each class for training, the rest for testing
clases, patronesEnt, patronesTest = magia.generar_patrones(magia.escalar(muestras),diagnostico,90)
X, T = magia.armar_patrones_y_salida_esperada(clases,patronesEnt)
Xtest, Ttest = magia.armar_patrones_y_salida_esperada(clases,patronesTest)
```

## Intento con Perceptrones, pero no funciona

```
# C-contiguous int8 copy so the Cython implementation runs fast
TT = T[:,0].copy(order='C')
TT = TT.astype(np.int8)

# Training
p1 = Perceptron(X.shape[1])
p1.reiniciar()
I1 = p1.entrenar(X, TT, max_pasos=100000, callback=progresoPerceptron, frecuencia_callback=50000)
print("Pasos:{0}".format(I1))

# Evaluate on the held-out test patterns
print("Errores:{0} de {1}\n".format((p1.evaluar(Xtest) != Ttest[:,0]).sum(),
                                    Ttest.shape[0]))
```

## Ahora intento con BackPropagation

```
# Create the neural network
ocultas = 20 #2,3,5,10,20
entradas = X.shape[1]
salidas = T.shape[1]
ann = ANN(entradas, ocultas, salidas)
ann.reiniciar()

# Train with resilient backpropagation (Rprop)
E, n = ann.entrenar_rprop(X, T, min_error=0, max_pasos=100000,
                          callback=progreso, frecuencia_callback=50000)
print("\nRed entrenada en {0} pasos con un error de {1:.32f}".format(n, E))

# Threshold the network output at 0.5 to get hard class labels
Y = (ann.evaluar(Xtest) >= 0.5).astype(np.float32)
magia.matriz_de_confusion(Ttest,Y)
```
github_jupyter
# Train a CNN Model for MNIST This script here is to train a CNN model with 2 convolutional layers each with a pooling layer and a 2 fully-connected layers. The variables that would be needed for inference later have been added to tensorflow collections in this script. - The MNIST dataset should be placed under a folder named 'MNIST_data' in the same directory as this script. - The outputs of this script are tensorflow checkpoint models in a folder called 'models' in the same directory. ``` import tensorflow as tf import numpy as np import os #import MNIST dataset from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', one_hot=True) sess = tf.InteractiveSession() #initialize session #input placeholders with tf.name_scope('x'): x = tf.placeholder(tf.float32, shape=[None, 784], name='x') y_ = tf.placeholder(tf.float32, shape=[None, 10]) #function definitions def weight_variable(shape, name): initial = tf.truncated_normal(shape, stddev=0.1, name=name) return tf.Variable(initial) def bias_variable(shape, name): initial = tf.constant(0.1, shape=shape, name=name) return tf.Variable(initial) def conv2d(x, W, name): return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME', name=name) def max_pool_2x2(x,name): return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name) W_conv1 = weight_variable([5, 5, 1, 32], name='W_C1') b_conv1 = bias_variable([32], name='B_C1') x_image = tf.reshape(x, [-1,28,28,1]) #vectorize the image h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1 , name='conv_1') + b_conv1) h_pool1 = max_pool_2x2(h_conv1, name='pool_1') W_conv2 = weight_variable([5, 5, 32, 64], name='W_C2') b_conv2 = bias_variable([64], name='B_C2') h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, name='conv_2') + b_conv2) h_pool2 = max_pool_2x2(h_conv2, name='pool_2') W_fc1 = weight_variable([7 * 7 * 64, 1024], name='W_FC1') b_fc1 = bias_variable([1024], name='B_FC1') feature_vector = 
tf.reshape(h_pool2, [-1, 7*7*64]) h_fc1 = tf.nn.relu(tf.matmul(feature_vector, W_fc1) + b_fc1, name='FC_1') W_fc2 = weight_variable([1024, 10], name='W_FC2') b_fc2 = bias_variable([10], name='B_FC2') with tf.name_scope('logits'): logits = tf.add(tf.matmul(h_fc1, W_fc2), b_fc2, name='logits') y = tf.nn.softmax(logits, name='softmax_prediction') cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy') # we only need these two to make inference using the trained model tf.add_to_collection("logits", logits) tf.add_to_collection("x", x) sess.run(tf.global_variables_initializer()) saver = tf.train.Saver(tf.global_variables()) for i in range(500): batch = mnist.train.next_batch(100) if i%100 == 0: train_acc = accuracy.eval(feed_dict={ x:batch[0], y_:batch[1] }) print("Step %d, training accuracy %g"%(i, train_acc)) train_step.run(feed_dict={x:batch[0], y_:batch[1]}) current_dir = os.getcwd() #get the current working directory saver.save(sess, current_dir + '/model/mnist.ckpt') #save the model in the specified directory print("Training is finished.") ```
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt ``` ### Indepedent Component Analysis ``` class icaDemo: def __init__(self,N): self.N = N def remmean(self,sig): newVec = np.zeros(sig.shape) meanVal = np.mean(sig,axis=1) newVec = sig-np.transpose(np.tile(meanVal,(self.N,1))) return newVec, meanVal def demoSig(self): v = np.linspace(0,self.N-1,self.N) sinArr = np.sin(v/2); s1 = np.std(sinArr) funArr = ((np.remainder(v,23)-11)/9)**5; s2 = np.std(funArr) sawtooth = (np.remainder(v,27)-13)/9; s3 = np.std(sawtooth) uni = np.random.uniform(0,1,self.N) impul = ((uni<.5)*2-1)*np.log(uni); s4 = np.std(impul) sig = np.vstack((sinArr/s1,funArr/s2,sawtooth/s3,impul/s4)) sig, mean = self.remmean(sig) Aorig = np.random.uniform(0,1,(sig.shape[0],sig.shape[0])) mixedsig = np.matmul(Aorig,sig) return sig, mixedsig def pcamat(self,mixedsig,Eig1,Eig_1): oldDimension = mixedsig.shape[0] D,E = np.linalg.eigh(np.cov(mixedsig)) return E,D def whitenv(self,mixed_sig,E,D): whiteMat = np.matmul(np.linalg.inv(np.diag(D)**.5),np.transpose(E)) dewhiteMat = np.matmul(E,np.diag(D)**.5) whitesig = np.matmul(whiteMat,mixed_sig) return whitesig, whiteMat, dewhiteMat def fpica(self,whitesig,whiteMat,dewhiteMat): vecSize, numSamples = whitesig.shape B = np.zeros((vecSize,vecSize)) iteration = 1; numFailures = 0; epsilon = 1e-4 while iteration <= vecSize: w = np.random.normal(0,1,(vecSize,1)) w -= np.matmul(np.matmul(B,np.transpose(B)),w) w /= np.linalg.norm(w) wOld = np.zeros(w.shape); wOld2 = np.zeros(w.shape) i = 1; gabba = 1; maxIter = 1000; while i<=maxIter+gabba: w -= np.matmul(np.matmul(B,np.transpose(B)),w) w /= np.linalg.norm(w) if (np.linalg.norm(w-wOld)<epsilon) or (np.linalg.norm(w+wOld)<epsilon): numFailures = 0; B[:,iteration-1]=np.transpose(w) if iteration==1: A = np.matmul(dewhiteMat,w) W = np.matmul(np.transpose(w),whiteMat) else: A = np.concatenate((A,np.matmul(dewhiteMat,w)),axis=1) W = np.concatenate((W,np.matmul(np.transpose(w),whiteMat))) break wOld2 = wOld; wOld = w; 
w=np.matmul(whitesig,np.matmul(np.transpose(whitesig),w)**3)/numSamples-3*w w /= np.linalg.norm(w) i += 1 iteration += 1 return A, W def fastica(self): sigTrue, mixedsig = self.demoSig() mixedsig,mixedmean = self.remmean(mixedsig) Dim, NumOfSample = mixedsig.shape E,D = self.pcamat(mixedsig,1,4) whitesig,whiteMat,dewhiteMat = self.whitenv(mixedsig,E,D) A,W = self.fpica(whitesig,whiteMat,dewhiteMat) icasig1 = np.matmul(W,mixedsig) icasig2 = np.tile(np.matmul(W,np.transpose(mixedmean)),(self.N,1)).transpose() icasig = icasig1+icasig2 return icasig, A, W demo = icaDemo(500) sigTrue, mixedsig = demo.demoSig() ``` #### Latent Indepedent Signals ``` for i in range(4): plt.subplot(4,1,i+1); plt.plot(sigTrue[i,:500]) ``` #### Mixed Observation Signals ``` for i in range(4): plt.subplot(4,1,i+1); plt.plot(mixedsig[i,:500]) ``` #### ICA estimate Signals ``` icasig, A, W = demo.fastica() for i in range(4): plt.subplot(4,1,i+1); plt.plot(icasig[i,:500]) ```
github_jupyter
# Introduction to Numpy NumPy is the fundamental package for scientific computing in Python. It is a Python library that provides a multidimensional array object. In this course, we will be using NumPy for linear algebra. If you are interested in learning more about NumPy, you can find the user guide and reference at https://docs.scipy.org/doc/numpy/index.html Let's first import the NumPy package ``` import numpy as np # we commonly use the np abbreviation when referring to numpy ``` ## Creating Numpy Arrays New arrays can be made in several ways. We can take an existing list and convert it to a numpy array: ``` a = np.array([1,2,3]) ``` There are also functions for creating arrays with ones and zeros ``` np.zeros((2,2)) np.ones((3,2)) ``` ## Accessing Numpy Arrays You can use the common square bracket syntax for accessing elements of a numpy array ``` A = np.arange(9).reshape(3,3) print(A) print(A[0].shape) # Access the first row of A print(A[0, 1]) # Access the second item of the first row print(A[:, 1]) # Access the second column ``` ## Operations on Numpy Arrays You can use the operations '*', '**', '\', '+' and '-' on numpy arrays and they operate elementwise. ``` a = np.array([[1,2], [2,3]]) b = np.array([[4,5], [6,7]]) print(a + b) print(a - b) print(a * b) print(a / b) print(a**2) ``` There are also some commonly used function For example, you can sum up all elements of an array ``` print(a) print(np.sum(a)) ``` Or sum along the first dimension ``` np.sum(a, axis=0) ``` There are many other functions in numpy, and some of them **will be useful** for your programming assignments. As an exercise, check out the documentation for these routines at https://docs.scipy.org/doc/numpy/reference/routines.html and see if you can find the documentation for `np.sum` and `np.reshape`. ## Linear Algebra In this course, we use the numpy arrays for linear algebra. 
We usually use 1D arrays to represent vectors and 2D arrays to represent matrices ``` A = np.array([[2,4], [6,8]]) ``` You can take transposes of matrices with `A.T` ``` print('A\n', A) print('A.T\n', A.T) ``` Note that taking the transpose of a 1D array has **NO** effect. ``` a = np.ones(3) print(a) print(a.shape) print(a.T) print(a.T.shape) ``` But it does work if you have a 2D array of shape (3,1) ``` a = np.ones((3,1)) print(a) print(a.shape) print(a.T) print(a.T.shape) ``` ### Dot product We can compute the dot product between two vectors with np.dot ``` x = np.array([1,2,3]) y = np.array([4,5,6]) np.dot(x, y) ``` We can compute the matrix-matrix product, matrix-vector product too. In Python 3, this is conveniently expressed with the @ syntax ``` A = np.eye(3) # You can create an identity matrix with np.eye B = np.random.randn(3,3) x = np.array([1,2,3]) # Matrix-Matrix product A @ B # Matrix-vector product A @ x ``` Sometimes, we might want to compute certain properties of the matrices. For example, we might be interested in a matrix's determinant, eigenvalues/eigenvectors. Numpy ships with the `numpy.linalg` package to do these things on 2D arrays (matrices). ``` from numpy import linalg # This computes the determinant linalg.det(A) # This computes the eigenvalues and eigenvectors eigenvalues, eigenvectors = linalg.eig(A) print("The eigenvalues are\n", eigenvalues) print("The eigenvectors are\n", eigenvectors) ``` ## Miscellaneous ### Time your code One tip that is really useful is to use the magic commannd `%time` to time the execution time of your function. ``` %time np.abs(A) ```
github_jupyter
```
%load_ext autoreload
from __future__ import print_function, division
%autoreload

import copy, math, os, pickle, time, pandas as pd, numpy as np, scipy.stats as ss

from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score, f1_score

import torch, torch.utils.data as utils, torch.nn as nn, torch.nn.functional as F, torch.optim as optim
from torch.autograd import Variable
from torch.nn.parameter import Parameter

# Paths to the pre-extracted MIMIC hourly data (grouped and ungrouped variants).
DATA_FILEPATH = '/scratch/mmd/mimic_data/final/grouping_5/all_hourly_data.h5'
RAW_DATA_FILEPATH = '/scratch/mmd/mimic_data/final/nogrouping_5/all_hourly_data.h5'

GAP_TIME = 6 # In hours
WINDOW_SIZE = 24 # In hours
SEED = 1
ID_COLS = ['subject_id', 'hadm_id', 'icustay_id']
GPU = '2'

os.environ['CUDA_VISIBLE_DEVICES'] = GPU

np.random.seed(SEED)
torch.manual_seed(SEED)

class DictDist():
    # Joint distribution over a dict of named random variables.
    # rvs(n) draws n samples, each returned as a dict {name: value} —
    # used below for random hyperparameter search.
    def __init__(self, dict_of_rvs): self.dict_of_rvs = dict_of_rvs
    def rvs(self, n):
        a = {k: v.rvs(n) for k, v in self.dict_of_rvs.items()}
        out = []
        for i in range(n): out.append({k: vs[i] for k, vs in a.items()})
        return out

class Choice():
    # Uniform categorical sampler over a fixed list of options, with the
    # same rvs(n) interface as the scipy.stats distributions above.
    def __init__(self, options): self.options = options
    def rvs(self, n): return [self.options[i] for i in ss.randint(0, len(self.options)).rvs(n)]

%%time
data_full_lvl2 = pd.read_hdf(DATA_FILEPATH, 'vitals_labs')
data_full_raw = pd.read_hdf(RAW_DATA_FILEPATH, 'vitals_labs')
statics = pd.read_hdf(DATA_FILEPATH, 'patients')

data_full_lvl2.head()

data_full_raw.head()

statics.head()

def simple_imputer(df):
    # Impute hourly vitals/labs with a MultiIndex column layout
    # (feature, 'Aggregation Function'): forward-fill within each ICU
    # stay, then fall back to the per-stay mean, then to 0. Also turns
    # the 'count' column into a binary 'mask' and adds a
    # 'time_since_measured' channel (hours since last observation).
    idx = pd.IndexSlice
    df = df.copy()
    if len(df.columns.names) > 2: df.columns = df.columns.droplevel(('label', 'LEVEL1', 'LEVEL2'))

    df_out = df.loc[:, idx[:, ['mean', 'count']]]
    icustay_means = df_out.loc[:, idx[:, 'mean']].groupby(ID_COLS).mean()

    # NOTE(review): fillna(method='ffill') is deprecated in recent pandas
    # (use .ffill()) — confirm the pinned pandas version before upgrading.
    df_out.loc[:,idx[:,'mean']] = df_out.loc[:,idx[:,'mean']].groupby(ID_COLS).fillna(
        method='ffill'
    ).groupby(ID_COLS).fillna(icustay_means).fillna(0)

    # Binary observation indicator: 1 where at least one raw measurement
    # fell in that hour, 0 otherwise.
    df_out.loc[:, idx[:, 'count']] = (df.loc[:, idx[:, 'count']] > 0).astype(float)
    df_out.rename(columns={'count': 'mask'}, level='Aggregation Function', inplace=True)

    # Hours since the variable was last measured; 100 where never observed.
    is_absent = (1 - df_out.loc[:, idx[:, 'mask']])
    hours_of_absence = is_absent.cumsum()
    time_since_measured = hours_of_absence - hours_of_absence[is_absent==0].fillna(method='ffill')
    time_since_measured.rename(columns={'mask': 'time_since_measured'}, level='Aggregation Function', inplace=True)

    df_out = pd.concat((df_out, time_since_measured), axis=1)
    df_out.loc[:, idx[:, 'time_since_measured']] = df_out.loc[:, idx[:, 'time_since_measured']].fillna(100)

    df_out.sort_index(axis=1, inplace=True)
    return df_out

# Cohort labels: require stays long enough to cover the prediction window
# plus the gap; derive binary length-of-stay targets from los_icu.
Ys = statics[statics.max_hours > WINDOW_SIZE + GAP_TIME][['mort_hosp', 'mort_icu', 'los_icu']]
Ys['los_3'] = Ys['los_icu'] > 3
Ys['los_7'] = Ys['los_icu'] > 7
Ys.drop(columns=['los_icu'], inplace=True)
# NOTE(review): the result of this astype(float) is discarded (not
# assigned back) — in a notebook it only displays; confirm whether
# Ys = Ys.astype(float) was intended.
Ys.astype(float)

# Restrict both feature tables to the labelled stays and the first
# WINDOW_SIZE hours of each stay.
lvl2, raw = [df[
    (df.index.get_level_values('icustay_id').isin(set(Ys.index.get_level_values('icustay_id')))) &
    (df.index.get_level_values('hours_in') < WINDOW_SIZE)
] for df in (data_full_lvl2, data_full_raw)]

raw.columns = raw.columns.droplevel(level=['label', 'LEVEL1', 'LEVEL2'])

train_frac, dev_frac, test_frac = 0.7, 0.1, 0.2
lvl2_subj_idx, raw_subj_idx, Ys_subj_idx = [df.index.get_level_values('subject_id') for df in (lvl2, raw, Ys)]
lvl2_subjects = set(lvl2_subj_idx)
# Sanity check: all three tables must cover exactly the same patients.
assert lvl2_subjects == set(Ys_subj_idx), "Subject ID pools differ!"
assert lvl2_subjects == set(raw_subj_idx), "Subject ID pools differ!"
np.random.seed(SEED) subjects, N = np.random.permutation(list(lvl2_subjects)), len(lvl2_subjects) N_train, N_dev, N_test = int(train_frac * N), int(dev_frac * N), int(test_frac * N) train_subj = subjects[:N_train] dev_subj = subjects[N_train:N_train + N_dev] test_subj = subjects[N_train+N_dev:] [(lvl2_train, lvl2_dev, lvl2_test), (raw_train, raw_dev, raw_test), (Ys_train, Ys_dev, Ys_test)] = [ [df[df.index.get_level_values('subject_id').isin(s)] for s in (train_subj, dev_subj, test_subj)] \ for df in (lvl2, raw, Ys) ] idx = pd.IndexSlice lvl2_means, lvl2_stds = lvl2_train.loc[:, idx[:,'mean']].mean(axis=0), lvl2_train.loc[:, idx[:,'mean']].std(axis=0) raw_means, raw_stds = raw_train.loc[:, idx[:,'mean']].mean(axis=0), raw_train.loc[:, idx[:,'mean']].std(axis=0) lvl2_train.loc[:, idx[:,'mean']] = (lvl2_train.loc[:, idx[:,'mean']] - lvl2_means)/lvl2_stds lvl2_dev.loc[:, idx[:,'mean']] = (lvl2_dev.loc[:, idx[:,'mean']] - lvl2_means)/lvl2_stds lvl2_test.loc[:, idx[:,'mean']] = (lvl2_test.loc[:, idx[:,'mean']] - lvl2_means)/lvl2_stds raw_train.loc[:, idx[:,'mean']] = (raw_train.loc[:, idx[:,'mean']] - raw_means)/raw_stds raw_dev.loc[:, idx[:,'mean']] = (raw_dev.loc[:, idx[:,'mean']] - raw_means)/raw_stds raw_test.loc[:, idx[:,'mean']] = (raw_test.loc[:, idx[:,'mean']] - raw_means)/raw_stds raw_train, raw_dev, raw_test, lvl2_train, lvl2_dev, lvl2_test = [ simple_imputer(df) for df in (raw_train, raw_dev, raw_test, lvl2_train, lvl2_dev, lvl2_test) ] raw_flat_train, raw_flat_dev, raw_flat_test, lvl2_flat_train, lvl2_flat_dev, lvl2_flat_test = [ df.pivot_table(index=['subject_id', 'hadm_id', 'icustay_id'], columns=['hours_in']) for df in ( raw_train, raw_dev, raw_test, lvl2_train, lvl2_dev, lvl2_test ) ] for df in lvl2_train, lvl2_dev, lvl2_test, raw_train, raw_dev, raw_test: assert not df.isnull().any().any() ``` ### Task Prediction #### Hyperparams ``` N = 15 LR_dist = DictDist({ 'C': Choice(np.geomspace(1e-3, 1e3, 10000)), 'penalty': Choice(['l1', 'l2']), 'solver': 
Choice(['liblinear', 'lbfgs']), 'max_iter': Choice([100, 500]) }) np.random.seed(SEED) LR_hyperparams_list = LR_dist.rvs(N) for i in range(N): if LR_hyperparams_list[i]['solver'] == 'lbfgs': LR_hyperparams_list[i]['penalty'] = 'l2' RF_dist = DictDist({ 'n_estimators': ss.randint(50, 500), 'max_depth': ss.randint(2, 10), 'min_samples_split': ss.randint(2, 75), 'min_samples_leaf': ss.randint(1, 50), }) np.random.seed(SEED) RF_hyperparams_list = RF_dist.rvs(N) GRU_D_dist = DictDist({ 'cell_size': ss.randint(50, 75), 'hidden_size': ss.randint(65, 95), 'learning_rate': ss.uniform(2e-3, 1e-1), 'num_epochs': ss.randint(15, 150), 'patience': ss.randint(3, 7), 'batch_size': ss.randint(35, 65), 'early_stop_frac': ss.uniform(0.05, 0.1), 'seed': ss.randint(1, 10000), }) np.random.seed(SEED) GRU_D_hyperparams_list = GRU_D_dist.rvs(N) def run_basic(model, hyperparams_list, X_flat_train, X_flat_dev, X_flat_test, target): best_s, best_hyperparams = -np.Inf, None for i, hyperparams in enumerate(hyperparams_list): print("On sample %d / %d (hyperparams = %s)" % (i+1, len(hyperparams_list), repr((hyperparams)))) M = model(**hyperparams) M.fit(X_flat_train, Ys_train[target]) s = roc_auc_score(Ys_dev[target], M.predict_proba(X_flat_dev)[:, 1]) if s > best_s: best_s, best_hyperparams = s, hyperparams print("New Best Score: %.2f @ hyperparams = %s" % (100*best_s, repr((best_hyperparams)))) return run_only_final(model, best_hyperparams, X_flat_train, X_flat_dev, X_flat_test, target) def run_only_final(model, best_hyperparams, X_flat_train, X_flat_dev, X_flat_test, target): best_M = model(**best_hyperparams) best_M.fit(pd.concat((X_flat_train, X_flat_dev)), pd.concat((Ys_train, Ys_dev))[target]) y_true = Ys_test[target] y_score = best_M.predict_proba(X_flat_test)[:, 1] y_pred = best_M.predict(X_flat_test) auc = roc_auc_score(y_true, y_score) auprc = average_precision_score(y_true, y_score) acc = accuracy_score(y_true, y_pred) F1 = f1_score(y_true, y_pred) return best_M, best_hyperparams, 
auc, auprc, acc, F1 ``` ### Sklearn ``` RESULTS_PATH = '/scratch/mmd/extraction_baselines-sklearn.pkl' with open(RESULTS_PATH, mode='rb') as f: results = pickle.load(f) RERUN = True for model_name, model, hyperparams_list in [ ('RF', RandomForestClassifier, RF_hyperparams_list), ('LR', LogisticRegression, LR_hyperparams_list) ]: if model_name not in results: results[model_name] = {} for t in ['mort_icu', 'los_3']: if t not in results[model_name]: results[model_name][t] = {} for n, X_flat_train, X_flat_dev, X_flat_test in ( ('lvl2', lvl2_flat_train, lvl2_flat_dev, lvl2_flat_test), ('raw', raw_flat_train, raw_flat_dev, raw_flat_test) ): if n in results[model_name][t]: print("Finished model %s on target %s with representation %s" % (model_name, t, n)) if RERUN: h = results[model_name][t][n][1] results[model_name][t][n] = run_only_final(model, h, X_flat_train, X_flat_dev, X_flat_test, t) print("Final results for model %s on target %s with representation %s" % (model_name, t, n)) print(results[model_name][t][n][2:]) with open(RESULTS_PATH, mode='wb') as f: pickle.dump(results, f) continue print("Running model %s on target %s with representation %s" % (model_name, t, n)) results[model_name][t][n] = run_basic( model, hyperparams_list, X_flat_train, X_flat_dev, X_flat_test, t ) print("Final results for model %s on target %s with representation %s" % (model_name, t, n)) print(results[model_name][t][n][2:]) with open(RESULTS_PATH, mode='wb') as f: pickle.dump(results, f) np.random.seed(SEED+1) LR_hyperparams_list_2 = LR_dist.rvs(45) for i in range(45): if LR_hyperparams_list_2[i]['solver'] == 'lbfgs': LR_hyperparams_list_2[i]['penalty'] = 'l2' results_2 = {} results_2_PATH = '/scratch/mmd/extraction_baselines-sklearn_LR_2_runs.pkl' for model_name, model, hyperparams_list in [ # ('RF', RandomForestClassifier, RF_hyperparams_list), ('LR', LogisticRegression, LR_hyperparams_list_2) ]: if model_name not in results_2: results_2[model_name] = {} for t in ['mort_icu', 'los_3']: if 
t not in results_2[model_name]: results_2[model_name][t] = {} for n, X_flat_train, X_flat_dev, X_flat_test in ( ('lvl2', lvl2_flat_train, lvl2_flat_dev, lvl2_flat_test), # ('raw', raw_flat_train, raw_flat_dev, raw_flat_test) ): if n in results_2[model_name][t]: print("Finished model %s on target %s with representation %s" % (model_name, t, n)) if RERUN: h = results_2[model_name][t][n][1] results_2[model_name][t][n] = run_only_final(model, h, X_flat_train, X_flat_dev, X_flat_test, t) print("Final results_2 for model %s on target %s with representation %s" % (model_name, t, n)) print(results_2[model_name][t][n][2:]) with open(results_2_PATH, mode='wb') as f: pickle.dump(results_2, f) continue print("Running model %s on target %s with representation %s" % (model_name, t, n)) results_2[model_name][t][n] = run_basic( model, hyperparams_list, X_flat_train, X_flat_dev, X_flat_test, t ) print("Final results_2 for model %s on target %s with representation %s" % (model_name, t, n)) print(results_2[model_name][t][n][2:]) with open(results_2_PATH, mode='wb') as f: pickle.dump(results_2, f) for model_name, model, hyperparams_list in [ # ('RF', RandomForestClassifier, RF_hyperparams_list), ('LR', LogisticRegression, LR_hyperparams_list_2) ]: if model_name not in results_2: results_2[model_name] = {} for t in ['mort_icu', 'los_3']: if t not in results_2[model_name]: results_2[model_name][t] = {} for n, X_flat_train, X_flat_dev, X_flat_test in ( # ('lvl2', lvl2_flat_train, lvl2_flat_dev, lvl2_flat_test), ('raw', raw_flat_train, raw_flat_dev, raw_flat_test), ): if n in results_2[model_name][t]: print("Finished model %s on target %s with representation %s" % (model_name, t, n)) if RERUN: h = results_2[model_name][t][n][1] results_2[model_name][t][n] = run_only_final(model, h, X_flat_train, X_flat_dev, X_flat_test, t) print("Final results_2 for model %s on target %s with representation %s" % (model_name, t, n)) print(results_2[model_name][t][n][2:]) with open(results_2_PATH, 
mode='wb') as f: pickle.dump(results_2, f) continue print("Running model %s on target %s with representation %s" % (model_name, t, n)) results_2[model_name][t][n] = run_basic( model, hyperparams_list, X_flat_train, X_flat_dev, X_flat_test, t ) print("Final results_2 for model %s on target %s with representation %s" % (model_name, t, n)) print(results_2[model_name][t][n][2:]) with open(results_2_PATH, mode='wb') as f: pickle.dump(results_2, f) for model_name, model, hyperparams_list in [ ('RF', RandomForestClassifier, RF_hyperparams_list), ('LR', LogisticRegression, LR_hyperparams_list) ]: if model_name not in results: results[model_name] = {} for t in ['mort_hosp', 'los_7']: if t not in results[model_name]: results[model_name][t] = {} for n, X_flat_train, X_flat_dev, X_flat_test in ( ('lvl2', lvl2_flat_train, lvl2_flat_dev, lvl2_flat_test), ('raw', raw_flat_train, raw_flat_dev, raw_flat_test) ): if n in results[model_name][t]: print("Finished model %s on target %s with representation %s" % (model_name, t, n)) if RERUN: h = results[model_name][t][n][1] results[model_name][t][n] = run_only_final(model, h, X_flat_train, X_flat_dev, X_flat_test, t) print("Final results for model %s on target %s with representation %s" % (model_name, t, n)) print(results[model_name][t][n][2:]) with open(RESULTS_PATH, mode='wb') as f: pickle.dump(results, f) continue print("Running model %s on target %s with representation %s" % (model_name, t, n)) results[model_name][t][n] = run_basic( model, hyperparams_list, X_flat_train, X_flat_dev, X_flat_test, t ) print("Final results for model %s on target %s with representation %s" % (model_name, t, n)) print(results[model_name][t][n][2:]) with open(RESULTS_PATH, mode='wb') as f: pickle.dump(results, f) ```
github_jupyter
# **3D-RCAN** --- <font size = 4>3D-RCAN is a neural network capable of image restoration from corrupted bio-images, first released in 2020 by [Chen *et al.* in biorXiv](https://www.biorxiv.org/content/10.1101/2020.08.27.270439v1). <font size = 4> **This particular notebook enables restoration of 3D dataset. If you are interested in restoring 2D dataset, you should use the CARE 2D notebook instead.** --- <font size = 4>*Disclaimer*: <font size = 4>This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories. <font size = 4>This notebook is largely based on the following paper: <font size = 4>**Three-dimensional residual channel attention networks denoise and sharpen fluorescence microscopy image volumes**, by Chen *et al.* published in bioRxiv in 2020 (https://www.biorxiv.org/content/10.1101/2020.08.27.270439v1) <font size = 4>And source code found in: https://github.com/AiviaCommunity/3D-RCAN <font size = 4>We provide a dataset for the training of this notebook as a way to test its functionalities but the training and test data of the restoration experiments is also available from the authors of the original paper [here](https://www.dropbox.com/sh/hieldept1x476dw/AAC0pY3FrwdZBctvFF0Fx0L3a?dl=0). 
<font size = 4>**Please also cite this original paper when using or developing this notebook.** # **How to use this notebook?** --- <font size = 4>Video describing how to use our notebooks are available on youtube: - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook --- ###**Structure of a notebook** <font size = 4>The notebook contains two types of cell: <font size = 4>**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`. <font size = 4>**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`. --- ###**Table of contents, Code snippets** and **Files** <font size = 4>On the top left side of the notebook you find three tabs which contain from top to bottom: <font size = 4>*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections. <font size = 4>*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook. <font size = 4>*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. <font size = 4>**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2. 
<font size = 4>**Note:** The "sample data" in "Files" contains default files. Do not upload anything in here! --- ###**Making changes to the notebook** <font size = 4>**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive. <font size = 4>To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells). You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment. #**0. Before getting started** --- <font size = 4> For CARE to train, **it needs to have access to a paired training dataset**. This means that the same image needs to be acquired in the two conditions (for instance, low signal-to-noise ratio and high signal-to-noise ratio) and provided with indication of correspondence. <font size = 4> Therefore, the data structure is important. It is necessary that all the input data are in the same folder and that all the output data is in a separate folder. The provided training dataset is already split in two folders called "Training - Low SNR images" (Training_source) and "Training - high SNR images" (Training_target). Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki <font size = 4>**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook. <font size = 4> **Additionally, the corresponding input and output files need to have the same name**. <font size = 4> Please note that you currently can **only use .tif files!** <font size = 4> You can also provide a folder that contains the data that you wish to analyse with the trained network once all training has been performed. 
<font size = 4>Here's a common data structure that can work: * Experiment A - **Training dataset** - Low SNR images (Training_source) - img_1.tif, img_2.tif, ... - High SNR images (Training_target) - img_1.tif, img_2.tif, ... - **Quality control dataset** - Low SNR images - img_1.tif, img_2.tif - High SNR images - img_1.tif, img_2.tif - **Data to be predicted** - **Results** --- <font size = 4>**Important note** <font size = 4>- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained. <font size = 4>- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model. <font size = 4>- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model. --- # **1. Initialise the Colab session** --- ## **1.1. 
Check for GPU access** --- By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following: <font size = 4>Go to **Runtime -> Change the Runtime type** <font size = 4>**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)* <font size = 4>**Accelator: GPU** *(Graphics processing unit)* ``` #@markdown ##Run this cell to check if you have GPU access %tensorflow_version 1.x import tensorflow as tf if tf.test.gpu_device_name()=='': print('You do not have GPU access.') print('Did you change your runtime ?') print('If the runtime setting is correct then Google did not allocate a GPU for your session') print('Expect slow performance. To access GPU try reconnecting later') else: print('You have GPU access') !nvidia-smi ``` ## **1.2. Mount your Google Drive** --- <font size = 4> To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook. <font size = 4> Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. <font size = 4> Once this is done, your data are available in the **Files** tab on the top left of notebook. ``` #@markdown ##Play the cell to connect your Google Drive to Colab #@markdown * Click on the URL. #@markdown * Sign in your Google Account. #@markdown * Copy the authorization code. #@markdown * Enter the authorization code. #@markdown * Click on "Files" site on the right. Refresh the site. Your Google Drive folder should now be available here as "drive". # mount user's Google Drive to Google Colab. from google.colab import drive drive.mount('/content/gdrive') ``` # **2. Install 3D-RCAN and dependencies** --- ## **2.1. 
Install key dependencies** --- <font size = 4> ``` Notebook_version = ['1.12'] #@markdown ##Install 3D-RCAN and dependencies !git clone https://github.com/AiviaCommunity/3D-RCAN import os !pip install q keras==2.2.5 !pip install colorama; sys_platform=='win32' !pip install jsonschema !pip install numexpr !pip install tqdm>=4.41.0 %tensorflow_version 1.x #Here, we install libraries which are not already included in Colab. !pip install tifffile # contains tools to operate tiff-files !pip install wget !pip install fpdf !pip install memory_profiler %load_ext memory_profiler ``` ## **2.2. Restart your runtime** --- <font size = 4> **<font size = 4> Here you need to restart your runtime to load the newly installed dependencies** <font size = 4> Click on "Runtime" ---> "Restart Runtime" ## **2.3. Load key dependencies** --- <font size = 4> ``` Notebook_version = ['1.11.1'] #@markdown ##Load key dependencies !pip install q keras==2.2.5 #Here, we import and enable Tensorflow 1 instead of Tensorflow 2. 
%tensorflow_version 1.x import tensorflow import tensorflow as tf print(tensorflow.__version__) print("Tensorflow enabled.") # ------- Variable specific to 3D-RCAN ------- # ------- Common variable to all ZeroCostDL4Mic notebooks ------- import numpy as np from matplotlib import pyplot as plt import urllib import os, random import shutil import zipfile from tifffile import imread, imsave import time import sys import wget from pathlib import Path import pandas as pd import csv from glob import glob from scipy import signal from scipy import ndimage from skimage import io from sklearn.linear_model import LinearRegression from skimage.util import img_as_uint import matplotlib as mpl from skimage.metrics import structural_similarity from skimage.metrics import peak_signal_noise_ratio as psnr from astropy.visualization import simple_norm from skimage import img_as_float32 from skimage.util import img_as_ubyte from tqdm import tqdm from fpdf import FPDF, HTMLMixin from datetime import datetime from pip._internal.operations.freeze import freeze import subprocess # For sliders and dropdown menu and progress bar from ipywidgets import interact import ipywidgets as widgets # Colors for the warning messages class bcolors: WARNING = '\033[31m' W = '\033[0m' # white (normal) R = '\033[31m' # red #Disable some of the tensorflow warnings import warnings warnings.filterwarnings("ignore") print("Libraries installed") # Check if this is the latest version of the notebook Latest_notebook_version = pd.read_csv("https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_ZeroCostDL4Mic_Release.csv") print('Notebook version: '+Notebook_version[0]) strlist = Notebook_version[0].split('.') Notebook_version_main = strlist[0]+'.'+strlist[1] if Notebook_version_main == Latest_notebook_version.columns: print("This notebook is up-to-date.") else: print(bcolors.WARNING +"A new version of this notebook has been released. 
We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki") def pdf_export(trained = False, augmentation = False, pretrained_model = False): class MyFPDF(FPDF, HTMLMixin): pass pdf = MyFPDF() pdf.add_page() pdf.set_right_margin(-1) pdf.set_font("Arial", size = 11, style='B') Network = '3D-RCAN' day = datetime.now() datetime_str = str(day)[0:16] Header = 'Training report for '+Network+' model ('+model_name+')\nDate and Time: '+datetime_str pdf.multi_cell(180, 5, txt = Header, align = 'L') # add another cell if trained: training_time = "Training time: "+str(hour)+ "hour(s) "+str(mins)+"min(s) "+str(round(sec))+"sec(s)" pdf.cell(190, 5, txt = training_time, ln = 1, align='L') pdf.ln(1) Header_2 = 'Information for your materials and method:' pdf.cell(190, 5, txt=Header_2, ln=1, align='L') all_packages = '' for requirement in freeze(local_only=True): all_packages = all_packages+requirement+', ' #print(all_packages) #Main Packages main_packages = '' version_numbers = [] for name in ['tensorflow','numpy','Keras']: find_name=all_packages.find(name) main_packages = main_packages+all_packages[find_name:all_packages.find(',',find_name)]+', ' #Version numbers only here: version_numbers.append(all_packages[find_name+len(name)+2:all_packages.find(',',find_name)]) cuda_version = subprocess.run('nvcc --version',stdout=subprocess.PIPE, shell=True) cuda_version = cuda_version.stdout.decode('utf-8') cuda_version = cuda_version[cuda_version.find(', V')+3:-1] gpu_name = subprocess.run('nvidia-smi',stdout=subprocess.PIPE, shell=True) gpu_name = gpu_name.stdout.decode('utf-8') gpu_name = gpu_name[gpu_name.find('Tesla'):gpu_name.find('Tesla')+10] #print(cuda_version[cuda_version.find(', V')+3:-1]) #print(gpu_name) shape = io.imread(Training_source+'/'+os.listdir(Training_source)[1]).shape dataset_size = len(os.listdir(Training_source)) text = 'The '+Network+' model was trained from scratch for '+str(number_of_epochs)+' epochs (image dimensions: 
'+str(shape)+', using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), Keras (v '+version_numbers[2]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.' pdf.set_font('') pdf.set_font_size(10.) pdf.multi_cell(190, 5, txt = text, align='L') pdf.set_font('') pdf.set_font('Arial', size = 10, style = 'B') pdf.ln(1) pdf.cell(28, 5, txt='Augmentation: ', ln=0) pdf.set_font('') if augmentation: aug_text = 'The dataset was augmented by' if Rotation: aug_text = aug_text+'\n- rotation' if Flip: aug_text = aug_text+'\n- flipping' else: aug_text = 'No augmentation was used for training.' pdf.multi_cell(190, 5, txt=aug_text, align='L') pdf.set_font('Arial', size = 11, style = 'B') pdf.ln(1) pdf.cell(180, 5, txt = 'Parameters', align='L', ln=1) pdf.set_font('') pdf.set_font_size(10.) if Use_Default_Advanced_Parameters: pdf.cell(200, 5, txt='Default Advanced Parameters were enabled') pdf.cell(200, 5, txt='The following parameters were used for training:') pdf.ln(1) html = """ <table width=40% style="margin-left:0px;"> <tr> <th width = 50% align="left">Parameter</th> <th width = 50% align="left">Value</th> </tr> <tr> <td width = 50%>number_of_epochs</td> <td width = 50%>{0}</td> </tr> <tr> <td width = 50%>number_of_steps</td> <td width = 50%>{1}</td> </tr> <tr> <td width = 50%>percentage_validation</td> <td width = 50%>{2}</td> </tr> <tr> <td width = 50%>num_residual_groups</td> <td width = 50%>{3}</td> </tr> <tr> <td width = 50%>num_residual_blocks</td> <td width = 50%>{4}</td> </tr> <tr> <td width = 50%>num_channels</td> <td width = 50%>{5}</td> </tr> <tr> <td width = 50%>channel_reduction</td> <td width = 50%>{6}</td> </tr> </table> """.format(number_of_epochs,number_of_steps, percentage_validation, num_residual_groups, num_residual_blocks, num_channels, channel_reduction) 
pdf.write_html(html) #pdf.multi_cell(190, 5, txt = text_2, align='L') pdf.set_font("Arial", size = 11, style='B') pdf.ln(1) pdf.cell(190, 5, txt = 'Training Dataset', align='L', ln=1) pdf.set_font('') pdf.set_font('Arial', size = 10, style = 'B') pdf.cell(32, 5, txt= 'Training_source:', align = 'L', ln=0) pdf.set_font('') pdf.multi_cell(170, 5, txt = Training_source, align = 'L') pdf.set_font('') pdf.set_font('Arial', size = 10, style = 'B') pdf.cell(30, 5, txt= 'Training_target:', align = 'L', ln=0) pdf.set_font('') pdf.multi_cell(170, 5, txt = Training_target, align = 'L') #pdf.cell(190, 5, txt=aug_text, align='L', ln=1) pdf.ln(1) pdf.set_font('') pdf.set_font('Arial', size = 10, style = 'B') pdf.cell(22, 5, txt= 'Model Path:', align = 'L', ln=0) pdf.set_font('') pdf.multi_cell(170, 5, txt = model_path+'/'+model_name, align = 'L') pdf.ln(1) pdf.cell(60, 5, txt = 'Example Training pair', ln=1) pdf.ln(1) exp_size = io.imread('/content/TrainingDataExample_3D_RCAN.png').shape pdf.image('/content/TrainingDataExample_3D_RCAN.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8)) pdf.ln(1) ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy." BioRxiv (2020).' pdf.multi_cell(190, 5, txt = ref_1, align='L') ref_2 = '- 3D-RCAN: Chen et al. "Three-dimensional residual channel attention networks denoise and sharpen fluorescence microscopy image volumes." 
bioRxiv 2020 https://www.biorxiv.org/content/10.1101/2020.08.27.270439v1' pdf.multi_cell(190, 5, txt = ref_2, align='L') pdf.ln(3) reminder = 'Important:\nRemember to perform the quality control step on all newly trained models\nPlease consider depositing your training dataset on Zenodo' pdf.set_font('Arial', size = 11, style='B') pdf.multi_cell(190, 5, txt=reminder, align='C') if trained: pdf.output(model_path+'/'+model_name+'/'+model_name+"_training_report.pdf") else: pdf.output('/content/'+model_name+"_training_report.pdf") def qc_pdf_export(): class MyFPDF(FPDF, HTMLMixin): pass pdf = MyFPDF() pdf.add_page() pdf.set_right_margin(-1) pdf.set_font("Arial", size = 11, style='B') Network = '3D RCAN' day = datetime.now() datetime_str = str(day)[0:10] Header = 'Quality Control report for '+Network+' model ('+QC_model_name+')\nDate: '+datetime_str pdf.multi_cell(180, 5, txt = Header, align = 'L') all_packages = '' for requirement in freeze(local_only=True): all_packages = all_packages+requirement+', ' pdf.set_font('') pdf.set_font('Arial', size = 11, style = 'B') pdf.ln(2) pdf.cell(190, 5, txt = 'Development of Training Losses', ln=1, align='L') exp_size = io.imread(full_QC_model_path+'/Quality Control/QC_example_data.png').shape if os.path.exists(full_QC_model_path+'/Quality Control/lossCurvePlots.png'): pdf.image(full_QC_model_path+'/Quality Control/lossCurvePlots.png', x = 11, y = None, w = round(exp_size[1]/10), h = round(exp_size[0]/13)) else: pdf.set_font('') pdf.set_font('Arial', size=10) # pdf.ln(3) pdf.multi_cell(190, 5, txt='You can see these curves in the notebook.') pdf.ln(3) pdf.set_font('') pdf.set_font('Arial', size = 10, style = 'B') pdf.ln(3) pdf.cell(80, 5, txt = 'Example Quality Control Visualisation', ln=1) pdf.ln(1) exp_size = io.imread(full_QC_model_path+'/Quality Control/QC_example_data.png').shape pdf.image(full_QC_model_path+'/Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/10), h = round(exp_size[0]/10)) 
pdf.ln(1) pdf.set_font('') pdf.set_font('Arial', size = 11, style = 'B') pdf.ln(1) pdf.cell(180, 5, txt = 'Quality Control Metrics', align='L', ln=1) pdf.set_font('') pdf.set_font_size(10.) pdf.ln(1) html = """ <body> <font size="7" face="Courier New" > <table width=97% style="margin-left:0px;">""" with open(full_QC_model_path+'/Quality Control/QC_metrics_'+QC_model_name+'.csv', 'r') as csvfile: metrics = csv.reader(csvfile) header = next(metrics) image = header[0] slice_n = header[1] mSSIM_PvsGT = header[2] mSSIM_SvsGT = header[3] NRMSE_PvsGT = header[4] NRMSE_SvsGT = header[5] PSNR_PvsGT = header[6] PSNR_SvsGT = header[7] header = """ <tr> <th width = 9% align="left">{0}</th> <th width = 4% align="left">{1}</th> <th width = 15% align="center">{2}</th> <th width = 14% align="left">{3}</th> <th width = 15% align="center">{4}</th> <th width = 14% align="left">{5}</th> <th width = 15% align="center">{6}</th> <th width = 14% align="left">{7}</th> </tr>""".format(image,slice_n,mSSIM_PvsGT,mSSIM_SvsGT,NRMSE_PvsGT,NRMSE_SvsGT,PSNR_PvsGT,PSNR_SvsGT) html = html+header for row in metrics: image = row[0] slice_n = row[1] mSSIM_PvsGT = row[2] mSSIM_SvsGT = row[3] NRMSE_PvsGT = row[4] NRMSE_SvsGT = row[5] PSNR_PvsGT = row[6] PSNR_SvsGT = row[7] cells = """ <tr> <td width = 9% align="left">{0}</td> <td width = 4% align="center">{1}</td> <td width = 15% align="center">{2}</td> <td width = 14% align="center">{3}</td> <td width = 15% align="center">{4}</td> <td width = 14% align="center">{5}</td> <td width = 15% align="center">{6}</td> <td width = 14% align="center">{7}</td> </tr>""".format(image,slice_n,str(round(float(mSSIM_PvsGT),3)),str(round(float(mSSIM_SvsGT),3)),str(round(float(NRMSE_PvsGT),3)),str(round(float(NRMSE_SvsGT),3)),str(round(float(PSNR_PvsGT),3)),str(round(float(PSNR_SvsGT),3))) html = html+cells html = html+"""</body></table>""" pdf.write_html(html) pdf.ln(1) pdf.set_font('') pdf.set_font_size(10.) 
ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy." bioRxiv (2020).' pdf.multi_cell(190, 5, txt = ref_1, align='L') ref_2 = '- Three-dimensional residual channel attention networks denoise and sharpen fluorescence microscopy image volumes, by Chen et al. bioRxiv (2020)' pdf.multi_cell(190, 5, txt = ref_2, align='L') pdf.ln(3) reminder = 'To find the parameters and other information about how this model was trained, go to the training_report.pdf of this model which should be in the folder of the same name.' pdf.set_font('Arial', size = 11, style='B') pdf.multi_cell(190, 5, txt=reminder, align='C') pdf.output(full_QC_model_path+'/Quality Control/'+QC_model_name+'_QC_report.pdf') !pip freeze > requirements.txt ``` # **3. Select your parameters and paths** --- ## **3.1. Setting main training parameters** --- <font size = 4> <font size = 5> **Paths for training, predictions and results** <font size = 4>**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source (Low SNR images) and Training_target (High SNR images or ground truth) training data respecively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below. <font size = 4>**`model_name`:** Use only my_model -style, not my-model (Use "_" not "-"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten. <font size = 4>**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder). <font size = 5>**Training Parameters** <font size = 4>**`number of epochs`:**Input how many epochs (rounds) the network will be trained. 
Preliminary results can already be observed after a few (10-30) epochs, but a full training should run for 100-300 epochs. Evaluate the performance after training (see 5.). **Default value: 30** <font size = 4>**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each patch is seen at least once per epoch. **Default value: 256** <font size = 5>**Advanced Parameters - experienced users only** <font size = 4>**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10** <font size = 4>**`num_residual_groups`:** Number of residual groups in RCAN. **Default value: 5** <font size = 4>**If you get an Out of memory (OOM) error during the training, manually decrease the num_residual_groups value until the OOM error disappear.** <font size = 4>**`num_residual_blocks`:** Number of residual channel attention blocks in each residual group in RCAN. **Default value: 3** <font size = 4>**`num_channels`:** Number of feature channels in RCAN. **Default value: 32** <font size = 4>**`channel_reduction`:** Channel reduction ratio for channel attention. **Default value: 8** ``` #@markdown ###Path to training images: # base folder of GT and low images base = "/content" # low SNR images Training_source = "" #@param {type:"string"} lowfile = Training_source+"/*.tif" # Ground truth images Training_target = "" #@param {type:"string"} GTfile = Training_target+"/*.tif" # model name and path #@markdown ###Name of the model and path to model folder: model_name = "" #@param {type:"string"} model_path = "" #@param {type:"string"} # create the training data file into model_path folder. training_data = model_path+"/my_training_data.npz" # other parameters for training. 
#@markdown ###Training Parameters #@markdown Number of epochs: number_of_epochs = 30#@param {type:"number"} number_of_steps = 256#@param {type:"number"} #@markdown ###Advanced Parameters Use_Default_Advanced_Parameters = True #@param {type:"boolean"} #@markdown ###If not, please input: percentage_validation = 10 #@param {type:"number"} num_residual_groups = 5 #@param {type:"number"} num_residual_blocks = 3 #@param {type:"number"} num_channels = 32 #@param {type:"number"} channel_reduction = 8 #@param {type:"number"} if (Use_Default_Advanced_Parameters): print("Default advanced parameters enabled") percentage_validation = 10 num_residual_groups = 5 num_channels = 32 num_residual_blocks = 3 channel_reduction = 8 percentage = percentage_validation/100 full_model_path = model_path+'/'+model_name #here we check that no model with the same name already exist, if so print a warning if os.path.exists(model_path+'/'+model_name): print(bcolors.WARNING +"!! WARNING: "+model_name+" already exists and will be deleted in the following cell !!") print(bcolors.WARNING +"To continue training "+model_name+", choose a new model_name here, and load "+model_name+" in section 3.3"+W) # Here we disable pre-trained model by default (in case the next cell is not ran) Use_pretrained_model = False # Here we disable data augmentation by default (in case the cell is not ran) Use_Data_augmentation = False #Load one randomly chosen training source file random_choice=random.choice(os.listdir(Training_source)) x = imread(Training_source+"/"+random_choice) # Here we check that the input images are stacks if len(x.shape) == 3: print("Image dimensions (z,y,x)",x.shape) if not len(x.shape) == 3: print(bcolors.WARNING +"Your images appear to have the wrong dimensions. 
Image dimension",x.shape) #Find image Z dimension and select the mid-plane Image_Z = x.shape[0] mid_plane = int(Image_Z / 2)+1 #Find image XY dimension Image_Y = x.shape[1] Image_X = x.shape[2] # Here we split the data between training and validation # Here we count the number of files in the training target folder Filelist = os.listdir(Training_target) number_files = len(Filelist) File_for_validation = int((number_files)/percentage_validation)+1 #Here we split the training dataset between training and validation # Everything is copied in the /Content Folder Training_source_temp = "/content/training_source" if os.path.exists(Training_source_temp): shutil.rmtree(Training_source_temp) os.makedirs(Training_source_temp) Training_target_temp = "/content/training_target" if os.path.exists(Training_target_temp): shutil.rmtree(Training_target_temp) os.makedirs(Training_target_temp) Validation_source_temp = "/content/validation_source" if os.path.exists(Validation_source_temp): shutil.rmtree(Validation_source_temp) os.makedirs(Validation_source_temp) Validation_target_temp = "/content/validation_target" if os.path.exists(Validation_target_temp): shutil.rmtree(Validation_target_temp) os.makedirs(Validation_target_temp) list_source = os.listdir(os.path.join(Training_source)) list_target = os.listdir(os.path.join(Training_target)) #Move files into the temporary source and target directories: for f in os.listdir(os.path.join(Training_source)): shutil.copy(Training_source+"/"+f, Training_source_temp+"/"+f) for p in os.listdir(os.path.join(Training_target)): shutil.copy(Training_target+"/"+p, Training_target_temp+"/"+p) list_source_temp = os.listdir(os.path.join(Training_source_temp)) list_target_temp = os.listdir(os.path.join(Training_target_temp)) #Here we move images to be used for validation for i in range(File_for_validation): name = list_source_temp[i] shutil.move(Training_source_temp+"/"+name, Validation_source_temp+"/"+name) shutil.move(Training_target_temp+"/"+name, 
Validation_target_temp+"/"+name) #Load one randomly chosen training target file os.chdir(Training_target) y = imread(Training_target+"/"+random_choice) f=plt.figure(figsize=(16,8)) plt.subplot(1,2,1) plt.imshow(x[mid_plane], norm=simple_norm(x[mid_plane], percent = 99), interpolation='nearest') plt.axis('off') plt.title('Low SNR image (single Z plane)'); plt.subplot(1,2,2) plt.imshow(y[mid_plane], norm=simple_norm(y[mid_plane], percent = 99), interpolation='nearest') plt.axis('off') plt.title('High SNR image (single Z plane)'); plt.savefig('/content/TrainingDataExample_3D_RCAN.png',bbox_inches='tight',pad_inches=0) ``` ## **3.2. Data augmentation** --- <font size = 4>Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it. <font size = 4> **However, data augmentation is not a magic solution and may also introduce issues. Therefore, we recommend that you train your network with and without augmentation, and use the QC section to validate that it improves overall performances.** <font size = 4>Data augmentation is performed here by rotating the training images in the XY-Plane and flipping them along X-Axis. 
<font size = 4>**The flip option alone will double the size of your dataset, rotation will quadruple and both together will increase the dataset by a factor of 8.** ``` Use_Data_augmentation = False #@param{type:"boolean"} #@markdown Select this option if you want to use augmentation to increase the size of your dataset #@markdown **Rotate each image 3 times by 90 degrees.** Rotation = False #@param{type:"boolean"} #@markdown **Flip each image once around the x axis of the stack.** Flip = False #@param{type:"boolean"} #@markdown **Would you like to save your augmented images?** Save_augmented_images = False #@param {type:"boolean"} Saving_path = "" #@param {type:"string"} if not Save_augmented_images: Saving_path= "/content" def rotation_aug(Source_path, Target_path, flip=False): Source_images = os.listdir(Source_path) Target_images = os.listdir(Target_path) for image in Source_images: source_img = io.imread(os.path.join(Source_path,image)) target_img = io.imread(os.path.join(Target_path,image)) # Source Rotation source_img_90 = np.rot90(source_img,axes=(1,2)) source_img_180 = np.rot90(source_img_90,axes=(1,2)) source_img_270 = np.rot90(source_img_180,axes=(1,2)) # Target Rotation target_img_90 = np.rot90(target_img,axes=(1,2)) target_img_180 = np.rot90(target_img_90,axes=(1,2)) target_img_270 = np.rot90(target_img_180,axes=(1,2)) # Add a flip to the rotation if flip == True: source_img_lr = np.fliplr(source_img) source_img_90_lr = np.fliplr(source_img_90) source_img_180_lr = np.fliplr(source_img_180) source_img_270_lr = np.fliplr(source_img_270) target_img_lr = np.fliplr(target_img) target_img_90_lr = np.fliplr(target_img_90) target_img_180_lr = np.fliplr(target_img_180) target_img_270_lr = np.fliplr(target_img_270) #source_img_90_ud = np.flipud(source_img_90) # Save the augmented files # Source images io.imsave(Saving_path+'/augmented_source/'+image,source_img) io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90.tif',source_img_90) 
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180.tif',source_img_180) io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270.tif',source_img_270) # Target images io.imsave(Saving_path+'/augmented_target/'+image,target_img) io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90.tif',target_img_90) io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180.tif',target_img_180) io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270.tif',target_img_270) if flip == True: io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr) io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90_lr.tif',source_img_90_lr) io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180_lr.tif',source_img_180_lr) io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270_lr.tif',source_img_270_lr) io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr) io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90_lr.tif',target_img_90_lr) io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180_lr.tif',target_img_180_lr) io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270_lr.tif',target_img_270_lr) def flip(Source_path, Target_path): Source_images = os.listdir(Source_path) Target_images = os.listdir(Target_path) for image in Source_images: source_img = io.imread(os.path.join(Source_path,image)) target_img = io.imread(os.path.join(Target_path,image)) source_img_lr = np.fliplr(source_img) target_img_lr = np.fliplr(target_img) io.imsave(Saving_path+'/augmented_source/'+image,source_img) io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr) io.imsave(Saving_path+'/augmented_target/'+image,target_img) 
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr) if Use_Data_augmentation: if os.path.exists(Saving_path+'/augmented_source'): shutil.rmtree(Saving_path+'/augmented_source') os.mkdir(Saving_path+'/augmented_source') if os.path.exists(Saving_path+'/augmented_target'): shutil.rmtree(Saving_path+'/augmented_target') os.mkdir(Saving_path+'/augmented_target') print("Data augmentation enabled") print("Data augmentation in progress....") if Rotation == True: rotation_aug(Training_source_temp,Training_target_temp,flip=Flip) elif Rotation == False and Flip == True: flip(Training_source_temp,Training_target_temp) print("Done") if not Use_Data_augmentation: print(bcolors.WARNING+"Data augmentation disabled") ``` # **4. Train the network** --- ## **4.1. Prepare the training data and model for training** --- <font size = 4>Here, we use the information from 3. to build the model and convert the training data into a suitable format for training. ``` #@markdown ##Create the model and dataset objects # --------------------- Here we delete the model folder if it already exist ------------------------ if os.path.exists(model_path+'/'+model_name): print(bcolors.WARNING +"!! WARNING: Model folder already exists and has been removed !!" 
+ W) shutil.rmtree(model_path+'/'+model_name) print("Preparing the config file...") if Use_Data_augmentation == True: Training_source_temp = Saving_path+'/augmented_source' Training_target_temp = Saving_path+'/augmented_target' # Here we prepare the JSON file import json # Config file for 3D-RCAN dictionary ={ "epochs": number_of_epochs, "steps_per_epoch": number_of_steps, "num_residual_groups": num_residual_groups, "training_data_dir": {"raw": Training_source_temp, "gt": Training_target_temp}, "validation_data_dir": {"raw": Validation_source_temp, "gt": Validation_target_temp}, "num_channels": num_channels, "num_residual_blocks": num_residual_blocks, "channel_reduction": channel_reduction } json_object = json.dumps(dictionary, indent = 4) with open("/content/config.json", "w") as outfile: outfile.write(json_object) # Export pdf summary of training parameters pdf_export(augmentation = Use_Data_augmentation) print("Done") ``` ## **4.2. Start Training** --- <font size = 4>When playing the cell below you should see updates after each epoch (round). Network training can take some time. <font size = 4>* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. Another way circumvent this is to save the parameters of the model after training and start training again from this point. <font size = 4>Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder from Google Drive as all data can be erased at the next training if using the same folder. 
```
#@markdown ##Start Training

# Record wall-clock time so the elapsed training time can be reported.
start = time.time()

# Start Training
# Runs the 3D-RCAN training script with the config.json written in 4.1;
# output goes to the model folder chosen in section 3.1.
!python /content/3D-RCAN/train.py -c /content/config.json -o "$full_model_path"

print("Training, done.")

# Recreate an empty "Quality Control" folder for the section 5 outputs.
if os.path.exists(model_path+"/"+model_name+"/Quality Control"):
  shutil.rmtree(model_path+"/"+model_name+"/Quality Control")
os.makedirs(model_path+"/"+model_name+"/Quality Control")

# Displaying the time elapsed for training
dt = time.time() - start
mins, sec = divmod(dt, 60)
hour, mins = divmod(mins, 60)
print("Time elapsed:",hour, "hour(s)",mins,"min(s)",round(sec),"sec(s)")

#Create a pdf document with training summary
# (pdf_export reads hour/mins/sec from the globals set just above)
pdf_export(trained = True, augmentation = Use_Data_augmentation)
```

# **5. Evaluate your model**
---
<font size = 4>This section allows the user to perform important quality checks on the validity and generalisability of the trained model.

<font size = 4>**We highly recommend to perform quality control on all newly trained models.**

```
# model name and path
#@markdown ###Do you want to assess the model you just trained ?
Use_the_current_trained_model = True #@param {type:"boolean"}

#@markdown ###If not, please provide the path to the model folder:
QC_model_folder = "" #@param {type:"string"}

#Here we define the loaded model name and path
QC_model_name = os.path.basename(QC_model_folder)
QC_model_path = os.path.dirname(QC_model_folder)

# When assessing the model trained above, override with the section 3 values.
if (Use_the_current_trained_model):
  QC_model_name = model_name
  QC_model_path = model_path

full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'
if os.path.exists(full_QC_model_path):
  print("The "+QC_model_name+" network will be evaluated")
else:
  # ANSI colour codes used to highlight the warning below.
  W = '\033[0m' # white (normal)
  R = '\033[31m' # red
  print(R+'!! WARNING: The chosen model does not exist !!'+W)
  print('Please make sure you provide a valid model path and model name before proceeding further.')
```

## **5.1. Inspection of the loss function**
---

<font size = 4>First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss.
The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*

<font size = 4>**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.

<font size = 4>**Validation loss** describes the same error value between the model's prediction on a validation image compared to its target.

<font size = 4>During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance. Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again and the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased.

```
#@markdown ##Play the cell to show a plot of training errors vs. epoch number

%load_ext tensorboard
%tensorboard --logdir "$full_QC_model_path"
```

## **5.2. Error mapping and quality metrics estimation**
---

<font size = 4>This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the "Source_QC_folder" and "Target_QC_folder" !
<font size = 4>**1. The SSIM (structural similarity) map** <font size = 4>The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). <font size=4>**mSSIM** is the SSIM value calculated across the entire window of both images. <font size=4>**The output below shows the SSIM maps with the mSSIM** <font size = 4>**2. The RSE (Root Squared Error) map** <font size = 4>This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark). <font size =4>**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores. <font size = 4>**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. The higher the score the better the agreement. 
<font size=4>**The output below shows the RSE maps with the NRMSE and PSNR values.** ``` #@markdown ##Choose the folders that contain your Quality Control dataset Source_QC_folder = "" #@param{type:"string"} Target_QC_folder = "" #@param{type:"string"} path_metrics_save = QC_model_path+'/'+QC_model_name+'/Quality Control/' path_QC_prediction = path_metrics_save+'Prediction' # Create a quality control/Prediction Folder if os.path.exists(path_QC_prediction): shutil.rmtree(path_QC_prediction) os.makedirs(path_QC_prediction) # Perform the predictions print("Restoring images...") !python /content/3D-RCAN/apply.py -m "$full_QC_model_path" -i "$Source_QC_folder" -o "$path_QC_prediction" print("Done...") def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32): """This function is adapted from Martin Weigert""" """Percentile-based image normalization.""" mi = np.percentile(x,pmin,axis=axis,keepdims=True) ma = np.percentile(x,pmax,axis=axis,keepdims=True) return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype) def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32 """This function is adapted from Martin Weigert""" if dtype is not None: x = x.astype(dtype,copy=False) mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False) ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False) eps = dtype(eps) try: import numexpr x = numexpr.evaluate("(x - mi) / ( ma - mi + eps )") except ImportError: x = (x - mi) / ( ma - mi + eps ) if clip: x = np.clip(x,0,1) return x def norm_minmse(gt, x, normalize_gt=True): """This function is adapted from Martin Weigert""" """ normalizes and affinely scales an image pair such that the MSE is minimized Parameters ---------- gt: ndarray the ground truth image x: ndarray the image that will be affinely scaled normalize_gt: bool set to True of gt image should be normalized (default) Returns ------- gt_scaled, x_scaled """ if normalize_gt: gt = normalize(gt, 
0.1, 99.9, clip=False).astype(np.float32, copy = False) x = x.astype(np.float32, copy=False) - np.mean(x) #x = x - np.mean(x) gt = gt.astype(np.float32, copy=False) - np.mean(gt) #gt = gt - np.mean(gt) scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten()) return gt, scale * x # Open and create the csv file that will contain all the QC metrics with open(path_metrics_save+'QC_metrics_'+QC_model_name+".csv", "w", newline='') as file: writer = csv.writer(file) # Write the header in the csv file writer.writerow(["File name","Slice #","Prediction v. GT mSSIM","Input v. GT mSSIM", "Prediction v. GT NRMSE", "Input v. GT NRMSE", "Prediction v. GT PSNR", "Input v. GT PSNR"]) # These lists will be used to collect all the metrics values per slice file_name_list = [] slice_number_list = [] mSSIM_GvP_list = [] mSSIM_GvS_list = [] NRMSE_GvP_list = [] NRMSE_GvS_list = [] PSNR_GvP_list = [] PSNR_GvS_list = [] # These lists will be used to display the mean metrics for the stacks mSSIM_GvP_list_mean = [] mSSIM_GvS_list_mean = [] NRMSE_GvP_list_mean = [] NRMSE_GvS_list_mean = [] PSNR_GvP_list_mean = [] PSNR_GvS_list_mean = [] # Let's loop through the provided dataset in the QC folders for thisFile in os.listdir(Source_QC_folder): if not os.path.isdir(os.path.join(Source_QC_folder, thisFile)): print('Running QC on: '+thisFile) test_GT_stack = io.imread(os.path.join(Target_QC_folder, thisFile)) test_source_stack = io.imread(os.path.join(Source_QC_folder,thisFile)) test_prediction_stack_raw = io.imread(os.path.join(path_metrics_save+"Prediction/",thisFile)) test_prediction_stack = test_prediction_stack_raw[:, 1, :, :] n_slices = test_GT_stack.shape[0] # Calculating the position of the mid-plane slice z_mid_plane = int(n_slices / 2)+1 img_SSIM_GTvsPrediction_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2])) img_SSIM_GTvsSource_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2])) img_RSE_GTvsPrediction_stack = 
np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2])) img_RSE_GTvsSource_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2])) for z in range(n_slices): # -------------------------------- Normalising the dataset -------------------------------- test_GT_norm, test_source_norm = norm_minmse(test_GT_stack[z], test_source_stack[z], normalize_gt=True) test_GT_norm, test_prediction_norm = norm_minmse(test_GT_stack[z], test_prediction_stack[z], normalize_gt=True) # -------------------------------- Calculate the SSIM metric and maps -------------------------------- # Calculate the SSIM maps and index index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = structural_similarity(test_GT_norm, test_prediction_norm, data_range=1.0, full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5) index_SSIM_GTvsSource, img_SSIM_GTvsSource = structural_similarity(test_GT_norm, test_source_norm, data_range=1.0, full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5) #Calculate ssim_maps img_SSIM_GTvsPrediction_stack[z] = img_as_float32(img_SSIM_GTvsPrediction, force_copy=False) img_SSIM_GTvsSource_stack[z] = img_as_float32(img_SSIM_GTvsSource, force_copy=False) # -------------------------------- Calculate the NRMSE metrics -------------------------------- # Calculate the Root Squared Error (RSE) maps img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm)) img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm)) # Calculate SE maps img_RSE_GTvsPrediction_stack[z] = img_as_float32(img_RSE_GTvsPrediction, force_copy=False) img_RSE_GTvsSource_stack[z] = img_as_float32(img_RSE_GTvsSource, force_copy=False) # Normalised Root Mean Squared Error (here it's valid to take the mean of the image) NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction)) NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource)) # Calculate the PSNR between the images PSNR_GTvsPrediction = 
psnr(test_GT_norm,test_prediction_norm,data_range=1.0) PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0) writer.writerow([thisFile, str(z),str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource), str(PSNR_GTvsPrediction), str(PSNR_GTvsSource)]) # Collect values to display in dataframe output slice_number_list.append(z) mSSIM_GvP_list.append(index_SSIM_GTvsPrediction) mSSIM_GvS_list.append(index_SSIM_GTvsSource) NRMSE_GvP_list.append(NRMSE_GTvsPrediction) NRMSE_GvS_list.append(NRMSE_GTvsSource) PSNR_GvP_list.append(PSNR_GTvsPrediction) PSNR_GvS_list.append(PSNR_GTvsSource) if (z == z_mid_plane): # catch these for display SSIM_GTvsP_forDisplay = index_SSIM_GTvsPrediction SSIM_GTvsS_forDisplay = index_SSIM_GTvsSource NRMSE_GTvsP_forDisplay = NRMSE_GTvsPrediction NRMSE_GTvsS_forDisplay = NRMSE_GTvsSource # If calculating average metrics for dataframe output file_name_list.append(thisFile) mSSIM_GvP_list_mean.append(sum(mSSIM_GvP_list)/len(mSSIM_GvP_list)) mSSIM_GvS_list_mean.append(sum(mSSIM_GvS_list)/len(mSSIM_GvS_list)) NRMSE_GvP_list_mean.append(sum(NRMSE_GvP_list)/len(NRMSE_GvP_list)) NRMSE_GvS_list_mean.append(sum(NRMSE_GvS_list)/len(NRMSE_GvS_list)) PSNR_GvP_list_mean.append(sum(PSNR_GvP_list)/len(PSNR_GvP_list)) PSNR_GvS_list_mean.append(sum(PSNR_GvS_list)/len(PSNR_GvS_list)) # ----------- Change the stacks to 32 bit images ----------- img_SSIM_GTvsSource_stack_32 = img_as_float32(img_SSIM_GTvsSource_stack, force_copy=False) img_SSIM_GTvsPrediction_stack_32 = img_as_float32(img_SSIM_GTvsPrediction_stack, force_copy=False) img_RSE_GTvsSource_stack_32 = img_as_float32(img_RSE_GTvsSource_stack, force_copy=False) img_RSE_GTvsPrediction_stack_32 = img_as_float32(img_RSE_GTvsPrediction_stack, force_copy=False) # ----------- Saving the error map stacks ----------- io.imsave(path_metrics_save+'SSIM_GTvsSource_'+thisFile,img_SSIM_GTvsSource_stack_32) 
io.imsave(path_metrics_save+'SSIM_GTvsPrediction_'+thisFile,img_SSIM_GTvsPrediction_stack_32) io.imsave(path_metrics_save+'RSE_GTvsSource_'+thisFile,img_RSE_GTvsSource_stack_32) io.imsave(path_metrics_save+'RSE_GTvsPrediction_'+thisFile,img_RSE_GTvsPrediction_stack_32) #Averages of the metrics per stack as dataframe output pdResults = pd.DataFrame(file_name_list, columns = ["File name"]) pdResults["Prediction v. GT mSSIM"] = mSSIM_GvP_list_mean pdResults["Input v. GT mSSIM"] = mSSIM_GvS_list_mean pdResults["Prediction v. GT NRMSE"] = NRMSE_GvP_list_mean pdResults["Input v. GT NRMSE"] = NRMSE_GvS_list_mean pdResults["Prediction v. GT PSNR"] = PSNR_GvP_list_mean pdResults["Input v. GT PSNR"] = PSNR_GvS_list_mean # All data is now processed saved Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same way plt.figure(figsize=(20,20)) # Currently only displays the last computed set, from memory # Target (Ground-truth) plt.subplot(3,3,1) plt.axis('off') img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1])) # Calculating the position of the mid-plane slice z_mid_plane = int(img_GT.shape[0] / 2)+1 plt.imshow(img_GT[z_mid_plane], norm=simple_norm(img_GT[z_mid_plane], percent = 99)) plt.title('Target (slice #'+str(z_mid_plane)+')') # Source plt.subplot(3,3,2) plt.axis('off') img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1])) plt.imshow(img_Source[z_mid_plane], norm=simple_norm(img_Source[z_mid_plane], percent = 99)) plt.title('Source (slice #'+str(z_mid_plane)+')') #Prediction plt.subplot(3,3,3) plt.axis('off') img_Prediction_raw = io.imread(os.path.join(path_metrics_save+'Prediction/', Test_FileList[-1])) img_Prediction = img_Prediction_raw[:, 1, :, :] plt.imshow(img_Prediction[z_mid_plane], norm=simple_norm(img_Prediction[z_mid_plane], percent = 99)) plt.title('Prediction (slice #'+str(z_mid_plane)+')') #Setting up colours cmap = plt.cm.CMRmap #SSIM between GT and 
Source plt.subplot(3,3,5) #plt.axis('off') plt.tick_params( axis='both', # changes apply to the x-axis and y-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off left=False, # ticks along the left edge are off right=False, # ticks along the right edge are off labelbottom=False, labelleft=False) img_SSIM_GTvsSource = io.imread(os.path.join(path_metrics_save, 'SSIM_GTvsSource_'+Test_FileList[-1])) imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource[z_mid_plane], cmap = cmap, vmin=0, vmax=1) plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04) plt.title('Target vs. Source',fontsize=15) plt.xlabel('mSSIM: '+str(round(SSIM_GTvsS_forDisplay,3)),fontsize=14) plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75) #SSIM between GT and Prediction plt.subplot(3,3,6) #plt.axis('off') plt.tick_params( axis='both', # changes apply to the x-axis and y-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off left=False, # ticks along the left edge are off right=False, # ticks along the right edge are off labelbottom=False, labelleft=False) img_SSIM_GTvsPrediction = io.imread(os.path.join(path_metrics_save, 'SSIM_GTvsPrediction_'+Test_FileList[-1])) imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction[z_mid_plane], cmap = cmap, vmin=0,vmax=1) plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04) plt.title('Target vs. 
Prediction',fontsize=15) plt.xlabel('mSSIM: '+str(round(SSIM_GTvsP_forDisplay,3)),fontsize=14) #Root Squared Error between GT and Source plt.subplot(3,3,8) #plt.axis('off') plt.tick_params( axis='both', # changes apply to the x-axis and y-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off left=False, # ticks along the left edge are off right=False, # ticks along the right edge are off labelbottom=False, labelleft=False) img_RSE_GTvsSource = io.imread(os.path.join(path_metrics_save, 'RSE_GTvsSource_'+Test_FileList[-1])) imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource[z_mid_plane], cmap = cmap, vmin=0, vmax = 1) plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04) plt.title('Target vs. Source',fontsize=15) plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsS_forDisplay,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14) #plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3))) plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75) #Root Squared Error between GT and Prediction plt.subplot(3,3,9) #plt.axis('off') plt.tick_params( axis='both', # changes apply to the x-axis and y-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off left=False, # ticks along the left edge are off right=False, # ticks along the right edge are off labelbottom=False, labelleft=False) img_RSE_GTvsPrediction = io.imread(os.path.join(path_metrics_save, 'RSE_GTvsPrediction_'+Test_FileList[-1])) imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction[z_mid_plane], cmap = cmap, vmin=0, vmax=1) plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04) plt.title('Target vs. 
Prediction',fontsize=15) plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsP_forDisplay,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14) plt.savefig(full_QC_model_path+'/Quality Control/QC_example_data.png',bbox_inches='tight',pad_inches=0) print('-----------------------------------') print('Here are the average scores for the stacks you tested in Quality control. To see values for all slices, open the .csv file saved in the Quality Control folder.') pdResults.head() #Make a pdf summary of the QC results qc_pdf_export() ``` # **6. Using the trained model** --- <font size = 4>In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive. ## **6.1. Generate prediction(s) from unseen dataset** --- <font size = 4>The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images). <font size = 4>**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing. <font size = 4>**`Result_folder`:** This folder will contain the predicted output images. ``` #@markdown ##Provide the path to your dataset and to the folder where the prediction will be saved, then play the cell to predict output on your unseen images. Data_folder = "" #@param {type:"string"} Result_folder = "" #@param {type:"string"} # model name and path #@markdown ###Do you want to use the current trained model? 
Use_the_current_trained_model = True #@param {type:"boolean"} #@markdown ###If not, please provide the path to the model folder: Prediction_model_folder = "" #@param {type:"string"} #Here we find the loaded model name and parent path Prediction_model_name = os.path.basename(Prediction_model_folder) Prediction_model_path = os.path.dirname(Prediction_model_folder) if (Use_the_current_trained_model): print("Using current trained network") Prediction_model_name = model_name Prediction_model_path = model_path full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/' if os.path.exists(full_Prediction_model_path): print("The "+Prediction_model_name+" network will be used.") else: W = '\033[0m' # white (normal) R = '\033[31m' # red print(R+'!! WARNING: The chosen model does not exist !!'+W) print('Please make sure you provide a valid model path and model name before proceeding further.') print("Restoring images...") !python /content/3D-RCAN/apply.py -m "$full_Prediction_model_path" -i "$Data_folder" -o "$Result_folder" print("Images saved into the result folder:", Result_folder) #Display an example random_choice=random.choice(os.listdir(Data_folder)) x = imread(Data_folder+"/"+random_choice) z_mid_plane = int(x.shape[0] / 2)+1 @interact def show_results(file=os.listdir(Data_folder), z_plane=widgets.IntSlider(min=0, max=(x.shape[0]-1), step=1, value=z_mid_plane)): x = imread(Data_folder+"/"+file) y_raw = imread(Result_folder+"/"+file) y = y_raw[:, 1, :, :] f=plt.figure(figsize=(16,8)) plt.subplot(1,2,1) plt.imshow(x[z_plane], norm=simple_norm(x[z_plane], percent = 99), interpolation='nearest') plt.axis('off') plt.title('Noisy Input (single Z plane)'); plt.subplot(1,2,2) plt.imshow(y[z_plane], norm=simple_norm(y[z_plane], percent = 99), interpolation='nearest') plt.axis('off') plt.title('Prediction (single Z plane)'); ``` ## **6.2. 
Download your predictions** --- <font size = 4>**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name. #**Thank you for using 3D-RCAN!**
github_jupyter
# Noisy Bagging Duelling Prioritised Replay Double Deep Q Learning - A simple ambulance dispatch point allocation model ## Reinforcement learning introduction ### RL involves: * Trial and error search * Receiving and maximising reward (often delayed) * Linking state -> action -> reward * Must be able to sense something of their environment * Involves uncertainty in sensing and linking action to reward * Learning -> improved choice of actions over time * All models find a way to balance best predicted action vs. exploration ### Elements of RL * *Environment*: all observable and unobservable information relevant to us * *Observation*: sensing the environment * *State*: the perceived (or perceivable) environment * *Agent*: senses environment, decides on action, receives and monitors rewards * *Action*: may be discrete (e.g. turn left) or continuous (accelerator pedal) * *Policy* (how to link state to action; often based on probabilities) * *Reward signal*: aim is to accumulate maximum reward over time * *Value function* of a state: prediction of likely/possible long-term reward * *Q*: prediction of likely/possible long-term reward of an *action* * *Advantage*: The difference in Q between actions in a given state (sums to zero for all actions) * *Model* (optional): a simulation of the environment ### Types of model * *Model-based*: have model of environment (e.g. a board game) * *Model-free*: used when environment not fully known * *Policy-based*: identify best policy directly * *Value-based*: estimate value of a decision * *Off-policy*: can learn from historic data from other agent * *On-policy*: requires active learning from current decisions ## Duelling Deep Q Networks for Reinforcement Learning Q = The expected future rewards discounted over time. This is what we are trying to maximise. The aim is to teach a network to take the current state observations and recommend the action with greatest Q. 
Duelling is very similar to Double DQN, except that the policy net splits into two. One component reduces to a single value, which will model the state *value*. The other component models the *advantage*, the difference in Q between different actions (the mean value is subtracted from all values, so that the advantage always sums to zero). These are aggregated to produce Q for each action. <img src="./images/duelling_dqn.png" width="500"/> Q is learned through the Bellman equation, where the Q of any state and action is the immediate reward achieved + the discounted maximum Q value (the best action taken) of next best action, where gamma is the discount rate. $$Q(s,a)=r + \gamma \cdot \max_{a'} Q(s',a')$$ ## Key DQN components <img src="./images/dqn_components.png" width="700"/> ## General method for Q learning: Overall aim is to create a neural network that predicts Q. Improvement comes from improved accuracy in predicting 'current' understood Q, and in revealing more about Q as knowledge is gained (some rewards only discovered after time). <img src="./images/dqn_process.png" width="600|"/> Target networks are used to stabilise models, and are only updated at intervals. Changes to Q values may lead to changes in closely related states (i.e. states close to the one we are in at the time) and as the network tries to correct for errors it can become unstable and suddenly lose significant performance. Target networks (e.g. to assess Q) are updated only infrequently (or gradually), so do not have this instability problem. ## Training networks Double DQN contains two networks. This amendment, from simple DQN, is to decouple training of Q for current state and target Q derived from next state which are closely correlated when comparing input features. The *policy network* is used to select action (action with best predicted Q) when playing the game. 
When training, the predicted best *action* (best predicted Q) is taken from the *policy network*, but the *policy network* is updated using the predicted Q value of the next state from the *target network* (which is updated from the policy network less frequently). So, when training, the action is selected using Q values from the *policy network*, but the *policy network* is updated to better predict the Q value of that action from the *target network*. The *policy network* is copied across to the *target network* every *n* steps (e.g. 1000). <img src="./images/dqn_training.png" width="700|"/> ## Bagging (Bootstrap Aggregation) Each network is trained from the same memory, but has different starting weights and is trained on different bootstrap samples from that memory. In this example actions are chosen randomly from each of the networks (an alternative could be to take the most common action recommended by the networks, or an average output). This bagging method may also be used to have some measure of uncertainty of action by looking at the distribution of actions recommended from the different nets. Bagging may also be used to aid exploration during stages where networks are providing different suggested actions. <img src="./images/bagging.png" width="800|"/> ## Noisy layers Noisy layers are an alternative to epsilon-greedy exploration (here, we leave the epsilon-greedy code in the model, but set it to reduce to zero immediately after the period of fully random action choice). For every weight in the layer we have a random value that we draw from the normal distribution. This random value is used to add noise to the output. The parameters for the extent of noise for each weight, sigma, are stored within the layer and get trained as part of the standard back-propagation. A modification to normal noisy layers is to use layers with ‘factorized gaussian noise’. This reduces the number of random numbers to be sampled (so is less computationally expensive). 
There are two random vectors, one with the size of the input, and the other with the size of the output. A random matrix is created by calculating the outer product of the two vectors. ## Prioritised replay In standard DQN samples are taken randomly from the memory (replay buffer). In *prioritised replay* samples are taken in proportion to their loss when training the network; where the network has the greatest error in predicting the target value of a state/action, then those samples will be sampled more frequently (which will reduce the error in the network until the sample is not prioritised). In other words, the training focuses more heavily on samples it gets most wrong, and spends less time training on samples that it can accurately predict already. This priority may also be used as a weight for training the network, but this is not implemented here; we use loss just for sampling. When we use the loss for priority we add a small value (1e-5) to the loss. This avoids any sample having zero priority (and never having a chance of being sampled). For frequency of sampling we also raise the loss to the power of 'alpha' (default value of 0.6). Smaller values of alpha will compress the differences between samples, making the priority weighting less significant in the frequency of sampling. The memory stores the priority/loss of state/action/Next_state/reward, and this is particular to each network, so we create a separate memory for each network. ## References Double DQN: van Hasselt H, Guez A, Silver D. (2015) Deep Reinforcement Learning with Double Q-learning. arXiv:150906461 http://arxiv.org/abs/1509.06461 Bagging: Osband I, Blundell C, Pritzel A, et al. (2016) Deep Exploration via Bootstrapped DQN. arXiv:160204621 http://arxiv.org/abs/1602.04621 Noisy networks: Fortunato M, Azar MG, Piot B, et al. (2019) Noisy Networks for Exploration. arXiv:170610295 http://arxiv.org/abs/1706.10295 Prioritised replay: Schaul T, Quan J, Antonoglou I, et al (2016). 
Prioritized Experience Replay. arXiv:151105952 http://arxiv.org/abs/1511.05952 Code for the nosiy layers comes from: Lapan, M. (2020). Deep Reinforcement Learning Hands-On: Apply modern RL methods to practical problems of chatbots, robotics, discrete optimization, web automation, and more, 2nd Edition. Packt Publishing. ## Code structure <img src="./images/dqn_program_structure.png" width="700|"/> ``` ################################################################################ # 1 Import packages # ################################################################################ from amboworld.environment import Env import math import matplotlib.pyplot as plt import numpy as np import pandas as pd import random import torch import torch.nn as nn import torch.optim as optim from torch.nn import functional as F # Use a double ended queue (deque) for memory # When memory is full, this will replace the oldest value with the new one from collections import deque # Supress all warnings (e.g. 
deprecation warnings) for regular use import warnings warnings.filterwarnings("ignore") ################################################################################ # 2 Define model parameters # ################################################################################ # Set whether to display on screen (slows model) DISPLAY_ON_SCREEN = False # Discount rate of future rewards GAMMA = 0.99 # Learing rate for neural network LEARNING_RATE = 0.003 # Maximum number of game steps (state, action, reward, next state) to keep MEMORY_SIZE = 10000000 # Sample batch size for policy network update BATCH_SIZE = 5 # Number of game steps to play before starting training (all random actions) REPLAY_START_SIZE = 50000 # Number of steps between policy -> target network update SYNC_TARGET_STEPS = 1000 # Exploration rate (epsilon) is probability of choosing a random action EXPLORATION_MAX = 1.0 EXPLORATION_MIN = 0.0 # Reduction in epsilon with each game step EXPLORATION_DECAY = 0.0 # Training episodes TRAINING_EPISODES = 50 # Set number of parallel networks NUMBER_OF_NETS = 5 # Results filename RESULTS_NAME = 'bagging_pr_noisy_d3qn' # SIM PARAMETERS RANDOM_SEED = 42 SIM_DURATION = 5000 NUMBER_AMBULANCES = 3 NUMBER_INCIDENT_POINTS = 1 INCIDENT_RADIUS = 2 NUMBER_DISPTACH_POINTS = 25 AMBOWORLD_SIZE = 50 INCIDENT_INTERVAL = 60 EPOCHS = 2 AMBO_SPEED = 60 AMBO_FREE_FROM_HOSPITAL = False ################################################################################ # 3 Define DQN (Duelling Deep Q Network) class # # (Used for both policy and target nets) # ################################################################################ """ Code for nosiy layers comes from: Lapan, M. (2020). Deep Reinforcement Learning Hands-On: Apply modern RL methods to practical problems of chatbots, robotics, discrete optimization, web automation, and more, 2nd Edition. Packt Publishing. """ class NoisyLinear(nn.Linear): """ Noisy layer for network. 
For every weight in the layer we have a random value that we draw from the normal distribution.Paraemters for the noise, sigma, are stored within the layer and get trained as part of the standard back-propogation. 'register_buffer' is used to create tensors in the network that are not updated during back-propogation. They are used to create normal distributions to add noise (multiplied by sigma which is a paramater in the network). """ def __init__(self, in_features, out_features, sigma_init=0.017, bias=True): super(NoisyLinear, self).__init__( in_features, out_features, bias=bias) w = torch.full((out_features, in_features), sigma_init) self.sigma_weight = nn.Parameter(w) z = torch.zeros(out_features, in_features) self.register_buffer("epsilon_weight", z) if bias: w = torch.full((out_features,), sigma_init) self.sigma_bias = nn.Parameter(w) z = torch.zeros(out_features) self.register_buffer("epsilon_bias", z) self.reset_parameters() def reset_parameters(self): std = math.sqrt(3 / self.in_features) self.weight.data.uniform_(-std, std) self.bias.data.uniform_(-std, std) def forward(self, input): self.epsilon_weight.normal_() bias = self.bias if bias is not None: self.epsilon_bias.normal_() bias = bias + self.sigma_bias * \ self.epsilon_bias.data v = self.sigma_weight * self.epsilon_weight.data + self.weight return F.linear(input, v, bias) class NoisyFactorizedLinear(nn.Linear): """ NoisyNet layer with factorized gaussian noise. This reduces the number of random numbers to be sampled (so less computationally expensive). There are two random vectors. One with the size of the input, and the other with the size of the output. A random matrix is create by calculating the outer product of the two vectors. 'register_buffer' is used to create tensors in the network that are not updated during back-propogation. They are used to create normal distributions to add noise (multiplied by sigma which is a paramater in the network). 
""" def __init__(self, in_features, out_features, sigma_zero=0.4, bias=True): super(NoisyFactorizedLinear, self).__init__( in_features, out_features, bias=bias) sigma_init = sigma_zero / math.sqrt(in_features) w = torch.full((out_features, in_features), sigma_init) self.sigma_weight = nn.Parameter(w) z1 = torch.zeros(1, in_features) self.register_buffer("epsilon_input", z1) z2 = torch.zeros(out_features, 1) self.register_buffer("epsilon_output", z2) if bias: w = torch.full((out_features,), sigma_init) self.sigma_bias = nn.Parameter(w) def forward(self, input): self.epsilon_input.normal_() self.epsilon_output.normal_() func = lambda x: torch.sign(x) * torch.sqrt(torch.abs(x)) eps_in = func(self.epsilon_input.data) eps_out = func(self.epsilon_output.data) bias = self.bias if bias is not None: bias = bias + self.sigma_bias * eps_out.t() noise_v = torch.mul(eps_in, eps_out) v = self.weight + self.sigma_weight * noise_v return F.linear(input, v, bias) class DQN(nn.Module): """Deep Q Network. Udes for both policy (action) and target (Q) networks.""" def __init__(self, observation_space, action_space): """Constructor method. 
Set up neural nets.""" # nerurones per hidden layer = 2 * max of observations or actions neurons_per_layer = 2 * max(observation_space, action_space) # Set starting exploration rate self.exploration_rate = EXPLORATION_MAX # Set up action space (choice of possible actions) self.action_space = action_space # First layerswill be common to both Advantage and value super(DQN, self).__init__() self.feature = nn.Sequential( nn.Linear(observation_space, neurons_per_layer), nn.ReLU() ) # Advantage has same number of outputs as the action space self.advantage = nn.Sequential( NoisyFactorizedLinear(neurons_per_layer, neurons_per_layer), nn.ReLU(), NoisyFactorizedLinear(neurons_per_layer, action_space) ) # State value has only one output (one value per state) self.value = nn.Sequential( nn.Linear(neurons_per_layer, neurons_per_layer), nn.ReLU(), nn.Linear(neurons_per_layer, 1) ) def act(self, state): """Act either randomly or by redicting action that gives max Q""" # Act randomly if random number < exploration rate if np.random.rand() < self.exploration_rate: action = random.randrange(self.action_space) else: # Otherwise get predicted Q values of actions q_values = self.forward(torch.FloatTensor(state)) # Get index of action with best Q action = np.argmax(q_values.detach().numpy()[0]) return action def forward(self, x): x = self.feature(x) advantage = self.advantage(x) value = self.value(x) action_q = value + advantage - advantage.mean() return action_q ################################################################################ # 4 Define policy net training function # ################################################################################ def optimize(policy_net, target_net, memory): """ Update model by sampling from memory. Uses policy network to predict best action (best Q). Uses target network to provide target of Q for the selected next action. 
""" # Do not try to train model if memory is less than reqired batch size if len(memory) < BATCH_SIZE: return # Reduce exploration rate (exploration rate is stored in policy net) policy_net.exploration_rate *= EXPLORATION_DECAY policy_net.exploration_rate = max(EXPLORATION_MIN, policy_net.exploration_rate) # Sample a random batch from memory batch = memory.sample(BATCH_SIZE) for state, action, reward, state_next, terminal, index in batch: state_action_values = policy_net(torch.FloatTensor(state)) # Get target Q for policy net update if not terminal: # For non-terminal actions get Q from policy net expected_state_action_values = policy_net(torch.FloatTensor(state)) # Detach next state values from gradients to prevent updates expected_state_action_values = expected_state_action_values.detach() # Get next state action with best Q from the policy net (double DQN) policy_next_state_values = policy_net(torch.FloatTensor(state_next)) policy_next_state_values = policy_next_state_values.detach() best_action = np.argmax(policy_next_state_values[0].numpy()) # Get target net next state next_state_action_values = target_net(torch.FloatTensor(state_next)) # Use detach again to prevent target net gradients being updated next_state_action_values = next_state_action_values.detach() best_next_q = next_state_action_values[0][best_action].numpy() updated_q = reward + (GAMMA * best_next_q) expected_state_action_values[0][action] = updated_q else: # For termal actions Q = reward (-1) expected_state_action_values = policy_net(torch.FloatTensor(state)) # Detach values from gradients to prevent gradient update expected_state_action_values = expected_state_action_values.detach() # Set Q for all actions to reward (-1) expected_state_action_values[0] = reward # Set net to training mode policy_net.train() # Reset net gradients policy_net.optimizer.zero_grad() # calculate loss loss_v = nn.MSELoss()(state_action_values, expected_state_action_values) # Backpropogate loss loss_v.backward() # 
Update replay buffer (add 1e-5 to loss to avoid zero priority with no # chance of being sampled). loss_numpy = loss_v.data.numpy() memory.update_priorities(index, loss_numpy + 1e-5) # Update network gradients policy_net.optimizer.step() return ################################################################################ # 5 Define prioritised replay memory class # ################################################################################ class NaivePrioritizedBuffer(): """ Based on code from https://github.com/higgsfield/RL-Adventure Each sample (state, action, reward, next_state, done) has an associated priority, which is the loss from training the policy network. The priority is used to adjust the frequency of sampling. """ def __init__(self, capacity=MEMORY_SIZE, prob_alpha=0.6): self.prob_alpha = prob_alpha self.capacity = capacity self.buffer = [] self.pos = 0 self.priorities = np.zeros((capacity,), dtype=np.float32) def remember(self, state, action, reward, next_state, done): """ Add sample (state, action, reward, next_state, done) to memory, or replace oldest sample if memory full""" max_prio = self.priorities.max() if self.buffer else 1.0 if len(self.buffer) < self.capacity: # Add new sample when room in memory self.buffer.append((state, action, reward, next_state, done)) else: # Replace sample when memory full self.buffer[self.pos] = (state, action, reward, next_state, done) # Set maximum priority present self.priorities[self.pos] = max_prio # Increment replacement position self.pos = (self.pos + 1) % self.capacity def sample(self, batch_size, beta=0.4): # Get priorities if len(self.buffer) == self.capacity: prios = self.priorities else: prios = self.priorities[:self.pos] # Raise priorities by the square of 'alpha' # (lower alpha compresses differences) probs = prios ** self.prob_alpha # Normlaise priorities probs /= probs.sum() # Sample using priorities for relative sampling frequency indices = np.random.choice(len(self.buffer), batch_size, 
p=probs) samples = [self.buffer[idx] for idx in indices] # Add index to sample (used to update priority after getting new loss) batch = [] for index, sample in enumerate(samples): sample = list(sample) sample.append(indices[index]) batch.append(sample) return batch def update_priorities(self, index, priority): """Update sample priority with new loss""" self.priorities[index] = priority def __len__(self): return len(self.buffer) ################################################################################ # 6 Define results plotting function # ################################################################################ def plot_results(run, exploration, score, mean_call_to_arrival, mean_assignment_to_arrival): """Plot and report results at end of run""" # Set up chart (ax1 and ax2 share x-axis to combine two plots on one graph) fig = plt.figure(figsize=(6,6)) ax1 = fig.add_subplot(111) ax2 = ax1.twinx() # Plot results lns1 = ax1.plot( run, exploration, label='exploration', color='g', linestyle=':') lns2 = ax2.plot(run, mean_call_to_arrival, label='call to arrival', color='r') lns3 = ax2.plot(run, mean_assignment_to_arrival, label='assignment to arrival', color='b', linestyle='--') # Get combined legend lns = lns1 + lns2 + lns3 labs = [l.get_label() for l in lns] ax1.legend(lns, labs, loc='upper center', bbox_to_anchor=(0.5, -0.1), ncol=3) # Set axes ax1.set_xlabel('run') ax1.set_ylabel('exploration') ax2.set_ylabel('Response time') filename = 'output/' + RESULTS_NAME +'.png' plt.savefig(filename, dpi=300) plt.show() ################################################################################ # 7 Main program # ################################################################################ def qambo(): """Main program loop""" ############################################################################ # 8 Set up environment # ############################################################################ # Set up game environemnt sim = Env( random_seed = 
RANDOM_SEED, duration_incidents = SIM_DURATION, number_ambulances = NUMBER_AMBULANCES, number_incident_points = NUMBER_INCIDENT_POINTS, incident_interval = INCIDENT_INTERVAL, number_epochs = EPOCHS, number_dispatch_points = NUMBER_DISPTACH_POINTS, incident_range = INCIDENT_RADIUS, max_size = AMBOWORLD_SIZE, ambo_kph = AMBO_SPEED, ambo_free_from_hospital = AMBO_FREE_FROM_HOSPITAL ) # Get number of observations returned for state observation_space = sim.observation_size # Get number of actions possible action_space = sim.action_number ############################################################################ # 9 Set up policy and target nets # ############################################################################ # Set up policy and target neural nets policy_nets = [DQN(observation_space, action_space) for i in range(NUMBER_OF_NETS)] target_nets = [DQN(observation_space, action_space) for i in range(NUMBER_OF_NETS)] best_nets = [DQN(observation_space, action_space) for i in range(NUMBER_OF_NETS)] # Set optimizer, copy weights from policy_net to target, and for i in range(NUMBER_OF_NETS): # Set optimizer policy_nets[i].optimizer = optim.Adam( params=policy_nets[i].parameters(), lr=LEARNING_RATE) # Copy weights from policy -> target target_nets[i].load_state_dict(policy_nets[i].state_dict()) # Set target net to eval rather than training mode target_nets[i].eval() ############################################################################ # 10 Set up memory # ############################################################################ # Set up memomry memory = [NaivePrioritizedBuffer() for i in range(NUMBER_OF_NETS)] ############################################################################ # 11 Set up + start training loop # ############################################################################ # Set up run counter and learning loop run = 0 all_steps = 0 continue_learning = True best_reward = -np.inf # Set up list for results results_run = [] 
results_exploration = [] results_score = [] results_mean_call_to_arrival = [] results_mean_assignment_to_arrival = [] # Continue repeating games (episodes) until target complete while continue_learning: ######################################################################## # 12 Play episode # ######################################################################## # Increment run (episode) counter run += 1 ######################################################################## # 13 Reset game # ######################################################################## # Reset game environment and get first state observations state = sim.reset() # Reset total reward and rewards list total_reward = 0 rewards = [] # Reshape state into 2D array with state obsverations as first 'row' state = np.reshape(state, [1, observation_space]) # Continue loop until episode complete while True: #################################################################### # 14 Game episode loop # #################################################################### #################################################################### # 15 Get action # #################################################################### # Get actions to take (use evalulation mode) actions = [] for i in range(NUMBER_OF_NETS): policy_nets[i].eval() actions.append(policy_nets[i].act(state)) # Randomly choose an action from net actions random_index = random.randint(0, NUMBER_OF_NETS - 1) action = actions[random_index] #################################################################### # 16 Play action (get S', R, T) # #################################################################### # Act state_next, reward, terminal, info = sim.step(action) total_reward += reward # Update trackers rewards.append(reward) # Reshape state into 2D array with state observations as first 'row' state_next = np.reshape(state_next, [1, observation_space]) # Update display if needed if DISPLAY_ON_SCREEN: sim.render() 
#################################################################### # 17 Add S/A/R/S/T to memory # #################################################################### # Record state, action, reward, new state & terminal for i in range(NUMBER_OF_NETS): memory[i].remember(state, action, reward, state_next, terminal) # Update state state = state_next #################################################################### # 18 Check for end of episode # #################################################################### # Actions to take if end of game episode if terminal: # Get exploration rate exploration = policy_nets[0].exploration_rate # Clear print row content clear_row = '\r' + ' ' * 79 + '\r' print(clear_row, end='') print(f'Run: {run}, ', end='') print(f'Exploration: {exploration: .3f}, ', end='') average_reward = np.mean(rewards) print(f'Average reward: {average_reward:4.1f}, ', end='') mean_assignment_to_arrival = np.mean(info['assignment_to_arrival']) print(f'Mean assignment to arrival: {mean_assignment_to_arrival:4.1f}, ', end='') mean_call_to_arrival = np.mean(info['call_to_arrival']) print(f'Mean call to arrival: {mean_call_to_arrival:4.1f}, ', end='') demand_met = info['fraction_demand_met'] print(f'Demand met {demand_met:0.3f}') # Add to results lists results_run.append(run) results_exploration.append(exploration) results_score.append(total_reward) results_mean_call_to_arrival.append(mean_call_to_arrival) results_mean_assignment_to_arrival.append(mean_assignment_to_arrival) # Save model if best reward total_reward = np.sum(rewards) if total_reward > best_reward: best_reward = total_reward # Copy weights to best net for i in range(NUMBER_OF_NETS): best_nets[i].load_state_dict(policy_nets[i].state_dict()) ################################################################ # 18b Check for end of learning # ################################################################ if run == TRAINING_EPISODES: continue_learning = False # End episode loop break 
#################################################################### # 19 Update policy net # #################################################################### # Avoid training model if memory is not of sufficient length if len(memory[0]) > REPLAY_START_SIZE: # Update policy net for i in range(NUMBER_OF_NETS): optimize(policy_nets[i], target_nets[i], memory[i]) ################################################################ # 20 Update target net periodically # ################################################################ # Use load_state_dict method to copy weights from policy net if all_steps % SYNC_TARGET_STEPS == 0: for i in range(NUMBER_OF_NETS): target_nets[i].load_state_dict( policy_nets[i].state_dict()) ############################################################################ # 21 Learning complete - plot and save results # ############################################################################ # Target reached. Plot results plot_results(results_run, results_exploration, results_score, results_mean_call_to_arrival, results_mean_assignment_to_arrival) # SAVE RESULTS run_details = pd.DataFrame() run_details['run'] = results_run run_details['exploration '] = results_exploration run_details['mean_call_to_arrival'] = results_mean_call_to_arrival run_details['mean_assignment_to_arrival'] = results_mean_assignment_to_arrival filename = 'output/' + RESULTS_NAME + '.csv' run_details.to_csv(filename, index=False) ############################################################################ # Test best model # ############################################################################ print() print('Test Model') print('----------') for i in range(NUMBER_OF_NETS): best_nets[i].eval() best_nets[i].exploration_rate = 0 # Set up results dictionary results = dict() results['call_to_arrival'] = [] results['assign_to_arrival'] = [] results['demand_met'] = [] # Replicate model runs for run in range(30): # Reset game environment and get first state observations 
state = sim.reset() state = np.reshape(state, [1, observation_space]) # Continue loop until episode complete while True: # Get actions to take (use evalulation mode) actions = [] for i in range(NUMBER_OF_NETS): actions.append(best_nets[i].act(state)) # Randomly choose an action from net actions random_index = random.randint(0, NUMBER_OF_NETS - 1) action = actions[random_index] # Act state_next, reward, terminal, info = sim.step(action) # Reshape state into 2D array with state observations as first 'row' state_next = np.reshape(state_next, [1, observation_space]) # Update state state = state_next if terminal: print(f'Run: {run}, ', end='') mean_assignment_to_arrival = np.mean(info['assignment_to_arrival']) print(f'Mean assignment to arrival: {mean_assignment_to_arrival:4.1f}, ', end='') mean_call_to_arrival = np.mean(info['call_to_arrival']) print(f'Mean call to arrival: {mean_call_to_arrival:4.1f}, ', end='') demand_met = info['fraction_demand_met'] print(f'Demand met: {demand_met:0.3f}') # Add to results results['call_to_arrival'].append(mean_call_to_arrival) results['assign_to_arrival'].append(mean_assignment_to_arrival) results['demand_met'].append(demand_met) # End episode loop break results = pd.DataFrame(results) filename = './output/results_' + RESULTS_NAME +'.csv' results.to_csv(filename, index=False) print() print(results.describe()) return run_details ######################## MODEL ENTRY POINT ##################################### # Run model and return last run results last_run = qambo() ```
github_jupyter
![license_header_logo](../../../images/license_header_logo.png) > **Copyright (c) 2021 CertifAI Sdn. Bhd.**<br> <br> This program is part of OSRFramework. You can redistribute it and/or modify <br>it under the terms of the GNU Affero General Public License as published by <br>the Free Software Foundation, either version 3 of the License, or <br>(at your option) any later version. <br> <br>This program is distributed in the hope that it will be useful <br>but WITHOUT ANY WARRANTY; without even the implied warranty of <br>MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the <br>GNU Affero General Public License for more details. <br> <br>You should have received a copy of the GNU Affero General Public License <br>along with this program. If not, see <http://www.gnu.org/licenses/>. <br> # Introduction In this notebook, we are going to build neural machine translation (NMT) using Transformer with pytorch. This NMT could translate English to French. Let's get started. ![Language Modelling](../../../images/NMT.gif) # What will we accomplish? 
Steps to implement neural machine translation using Transformer with Pytorch: > Step 1: Load and preprocess dataset > Step 2: Building transformer architecture > Step 3: Train the transformer model > Step 4: Test the trained model # Notebook Content * [Load Dataset](#Load-Dataset) * [Tokenization](#Tokenization) * [Preprocessing](#Preprocessing) * [Train-Test Split](#Train-Test-Split) * [TabularDataset](#TabularDataset) * [BucketIterator](#BucketIterator) * [Custom Iterator](#Custom-Iterator) * [Dive Deep into Transformer](#Dive-Deep-into-Transformer) * [Embedding](#Embedding) * [Positional Encoding](#Positional-Encoding) * [Masking](#Masking) * [Input Masks](#Input-Masks) * [Target Sequence Masks](#Target-Sequence-Masks) * [Multi-Headed Attention](#Multi-Headed-Attention) * [Attention](#Attention) * [Feed-Forward Network](#Feed-Forward-Network) * [Normalisation](#Normalisation) * [Building Transformer](#Building-Transformer) * [EncoderLayer](#EncoderLayer) * [DecoderLayer](#DecoderLayer) * [Encoder](#Encoder) * [Decoder](#Decoder) * [Transformer](#Transformer) * [Training the Model](#Training-the-Model) * [Testing the Model](#Testing-the-Model) # Load Dataset The dataset we used is [parallel corpus French-English](http://www.statmt.org/europarl/v7/fr-en.tgz) dataset from [European Parliament Proceedings Parallel Corpus (1996–2011)](http://www.statmt.org/europarl/). This dataset contains 15 years of write-ups from E.U. proceedings, weighing in at 2,007,724 sentences, and 50,265,039 words. You should found the dataset in the `datasets` folder, else you may download it [here](http://www.statmt.org/europarl/v7/fr-en.tgz). You will have the following files after unzipping the downloaded file: 1. europarl-v7.fr-en.en 2. europarl-v7.fr-en.fr ![](../../../images/fr-en.png) Now we are going to load the dataset for preprocessing. 
``` europarl_en = open('../../../resources/day_11/fr-en/europarl-v7.fr-en.en', encoding='utf-8').read().split('\n') europarl_fr = open('../../../resources/day_11/fr-en/europarl-v7.fr-en.fr', encoding='utf-8').read().split('\n') ``` # Tokenization The first job we need done is to **create a tokenizer for each language**. This is a function that will split the text into separate words and assign them unique numbers (indexes). This number will come into play later when we discuss embeddings. ![Tokenization](../../../images/tokenize.png) He we will tokenize the text using **Torchtext** and **Spacy** together. Spacy is a library that has been specifically built to take sentences in various languages and split them into different tokens (see [here](https://spacy.io/) for more information). Without Spacy, Torchtext defaults to a simple .split(' ') method for tokenization. This is much less nuanced than Spacy’s approach, which will also split words like “don’t” into “do” and “n’t”, and much more. ``` import spacy import torchtext import torch import numpy as np from torchtext.legacy.data import Field, BucketIterator, TabularDataset device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # !python -m spacy download fr_core_news_lg # !python -m spacy download en_core_web_lg en = spacy.load('en_core_web_lg') fr = spacy.load('fr_core_news_lg') def tokenize_en(sentence): return [tok.text for tok in en.tokenizer(sentence)] def tokenize_fr(sentence): return [tok.text for tok in fr.tokenizer(sentence)] EN_TEXT = Field(tokenize=tokenize_en) FR_TEXT = Field(tokenize=tokenize_fr, init_token = "<sos>", eos_token = "<eos>") ``` # Preprocessing The best way to work with Torchtext is to turn your data into **spreadsheet format**, no matter the original format of your data file. This is due to the incredible versatility of the **Torchtext TabularDataset** function, which creates datasets from spreadsheet formats. So first to turn our data into an appropriate CSV file. 
``` import pandas as pd raw_data = {'English' : [line for line in europarl_en], 'French': [line for line in europarl_fr]} df = pd.DataFrame(raw_data, columns=["English", "French"]) # Remove very long sentences and sentences where translations are not of roughly equal length df['eng_len'] = df['English'].str.count(' ') df['fr_len'] = df['French'].str.count(' ') df = df.query('fr_len < 80 & eng_len < 80') df = df.query('fr_len < eng_len * 1.5 & fr_len * 1.5 > eng_len') ``` ## Train-Test Split Now we are going to split the data into train set and test set. Fortunately Sklearn and Torchtext together make this process incredibly easy. ``` from sklearn.model_selection import train_test_split # Create train and validation set train, test = train_test_split(df, test_size=0.1) train.to_csv("../../../resources/day_11/train.csv", index=False) test.to_csv("../../../resources/day_11/test.csv", index=False) ``` This creates a train and test csv each with two columns (English, French), where each row contains an English sentence in the 'English' column, and its French translation in the 'French' column. ## TabularDataset Calling the magic `TabularDataset.splits` then returns a train and test dataset with the respective data loaded into them, processed(/tokenized) according to the fields we defined earlier. ``` # Associate the text in the 'English' column with the EN_TEXT field, # and 'French' with FR_TEXT data_fields = [('English', EN_TEXT), ('French', FR_TEXT)] train, test = TabularDataset.splits(path='../../../resources/day_11', train='train.csv', validation='test.csv', format='csv', fields=data_fields) ``` Processing a few million words can take a while so grab a cup of tea here… ``` FR_TEXT.build_vocab(train, test) EN_TEXT.build_vocab(train, test) ``` To see what numbers the tokens have been assigned and vice versa in each field, we can use `self.vocab.stoi` and `self.vocab.itos`. 
``` print(EN_TEXT.vocab.stoi['the']) print(EN_TEXT.vocab.itos[11]) ``` ## BucketIterator **BucketIterator** Defines an iterator that batches examples of similar lengths together. It minimizes amount of padding needed while producing freshly shuffled batches for each new epoch. See pool for the bucketing procedure used. ``` train_iter = BucketIterator(train, batch_size=20, sort_key=lambda x: len(x.French), shuffle=True) ``` The `sort_key` dictates how to form each batch. The lambda function tells the iterator to try and find sentences of the **same length** (meaning more of the matrix is filled with useful data and less with padding). ``` batch = next(iter(train_iter)) print(batch.English) print("Number of columns:", len(batch)) ``` In each batch, sentences have been transposed so they are descending vertically (important: we will need to transpose these again to work with transformer). **Each index represents a token (word)**, and **each column represents a sentence**. We have 20 columns, as 20 was the batch_size we specified. You might notice all the ‘1’s and think which incredibly common word is this the index for? Well the ‘1’ is not of course a word, but purely **padding**. While Torchtext is brilliant, it’s `sort_key` based batching leaves a little to be desired. Often the sentences aren’t of the same length at all, and you end up feeding a lot of padding into your network (as you can see with all the 1s in the last figure). We will solve this by implementing our own iterator. ## Custom Iterator The custom iterator is built in reference to the code from http://nlp.seas.harvard.edu/2018/04/03/attention.html. Feel free to explore yourself to have more understanding about `MyIterator` class. ``` from torchtext.legacy import data global max_src_in_batch, max_tgt_in_batch def batch_size_fn(new, count, sofar): "Keep augmenting batch and calculate total number of tokens + padding." 
global max_src_in_batch, max_tgt_in_batch if count == 1: max_src_in_batch = 0 max_tgt_in_batch = 0 max_src_in_batch = max(max_src_in_batch, len(new.English)) max_tgt_in_batch = max(max_tgt_in_batch, len(new.French) + 2) src_elements = count * max_src_in_batch tgt_elements = count * max_tgt_in_batch return max(src_elements, tgt_elements) class MyIterator(data.Iterator): def create_batches(self): if self.train: def pool(d, random_shuffler): for p in data.batch(d, self.batch_size * 100): p_batch = data.batch( sorted(p, key=self.sort_key), self.batch_size, self.batch_size_fn) for b in random_shuffler(list(p_batch)): yield b self.batches = pool(self.data(), self.random_shuffler) else: self.batches = [] for b in data.batch(self.data(), self.batch_size, self.batch_size_fn): self.batches.append(sorted(b, key=self.sort_key)) train_iter = MyIterator(train, batch_size= 64, device=device, repeat=False, sort_key= lambda x: (len(x.English), len(x.French)), batch_size_fn=batch_size_fn, train=True, shuffle=True) ``` # Dive Deep into Transformer ![Transformer](../../../images/transformer.png) The diagram above shows the overview of the Transformer model. The inputs to the encoder will be the **English** sentence, and the 'Outputs' from the decoder will be the **French** sentence. ## Embedding Embedding words has become standard practice in NMT, feeding the network with far more information about words than a one hot encoding would. ![Embedding Layer](../../../images/embeddings.gif) ``` from torch import nn class Embedder(nn.Module): def __init__(self, vocab_size, embedding_dimension): super().__init__() self.embed = nn.Embedding(vocab_size, embedding_dimension) def forward(self, x): return self.embed(x) ``` When each word is fed into the network, this code will perform a look-up and retrieve its embedding vector. These vectors will then be learnt as a parameters by the model, adjusted with each iteration of gradient descent. 
## Positional Encoding In order for the model to make sense of a sentence, it needs to know two things about each word: what does the **word mean**? And what is its **position** in the sentence? The embedding vector for each word will **learn the meaning**, so now we need to input something that tells the network about the word’s position. *Vasmari et al* answered this problem by using these functions to create a constant of position-specific values: ![Position Encoding](../../../images/pos_encoding_1.png) ![Position Encoding](../../../images/pos_encoding_2.png) This constant is a 2D matrix. Pos refers to the order in the sentence, and $i$ refers to the position along the embedding vector dimension. Each value in the pos/i matrix is then worked out using the equations above. ![Position Encoding](../../../images/pos_encoding_3.png) ``` import math class PositionalEncoder(nn.Module): def __init__(self, d_model, max_seq_len = 200, dropout = 0.1): super().__init__() self.d_model = d_model self.dropout = nn.Dropout(dropout) # Create constant 'pe' matrix with values dependant on pos and i pe = torch.zeros(max_seq_len, d_model) for pos in range(max_seq_len): for i in range(0, d_model, 2): pe[pos, i] = \ math.sin(pos / (10000 ** ((2 * i)/d_model))) pe[pos, i + 1] = \ math.cos(pos / (10000 ** ((2 * (i + 1))/d_model))) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): # Make embeddings relatively larger x = x * math.sqrt(self.d_model) # Add constant to embedding seq_len = x.size(1) pe = Variable(self.pe[:,:seq_len], requires_grad=False) if x.is_cuda: pe.cuda() x = x + pe return self.dropout(x) ``` `PositionalEncoder` lets us add the **positional encoding to the embedding vector**, providing information about structure to the model. The reason we increase the embedding values before addition is to make the positional encoding relatively smaller. This means the original meaning in the embedding vector won’t be lost when we add them together. 
## Masking Masking plays an important role in the transformer. It serves two purposes: * In the encoder and decoder: To zero attention outputs wherever there is just padding in the input sentences. * In the decoder: To prevent the decoder ‘peaking’ ahead at the rest of the translated sentence when predicting the next word. ![Masking](../../../images/masking.gif) ### Input Masks ``` batch = next(iter(train_iter)) input_seq = batch.English.transpose(0,1) input_pad = EN_TEXT.vocab.stoi['<pad>'] # creates mask with 0s wherever there is padding in the input input_msk = (input_seq != input_pad).unsqueeze(1) ``` ### Target Sequence Masks ``` from torch.autograd import Variable target_seq = batch.French.transpose(0,1) target_pad = FR_TEXT.vocab.stoi['<pad>'] target_msk = (target_seq != target_pad).unsqueeze(1) ``` The initial input into the decoder will be the **target sequence** (the French translation). The way the decoder predicts each output word is by making use of all the encoder outputs and the French sentence only up until the point of each word its predicting. Therefore we need to prevent the first output predictions from being able to see later into the sentence. For this we use the `nopeak_mask`. 
``` # Get seq_len for matrix size = target_seq.size(1) nopeak_mask = np.triu(np.ones((1, size, size)), k=1).astype('uint8') nopeak_mask = Variable(torch.from_numpy(nopeak_mask) == 0).cuda() print(nopeak_mask) target_msk = target_msk & nopeak_mask def create_masks(src, trg): src_pad = EN_TEXT.vocab.stoi['<pad>'] trg_pad = FR_TEXT.vocab.stoi['<pad>'] src_mask = (src != src_pad).unsqueeze(-2) if trg is not None: trg_mask = (trg != trg_pad).unsqueeze(-2) # Get seq_len for matrix size = trg.size(1) np_mask = nopeak_mask(size) if device.type == 'cuda': np_mask = np_mask.cuda() trg_mask = trg_mask & np_mask else: trg_mask = None return src_mask, trg_mask def nopeak_mask(size): np_mask = np.triu(np.ones((1, size, size)), k=1).astype('uint8') np_mask = Variable(torch.from_numpy(np_mask) == 0) return np_mask ``` If we later apply this mask to the attention scores, the values wherever the input is ahead will not be able to contribute when calculating the outputs. ## Multi-Headed Attention Once we have our embedded values (with positional encodings) and our masks, we can start building the layers of our model. Here is an overview of the multi-headed attention layer: ![Multi-Headed Attention](../../../images/multi-head-attention.png) In multi-headed attention layer, each **input is split into multiple heads** which allows the network to simultaneously attend to different subsections of each embedding. $V$, $K$ and $Q$ stand for ***key***, ***value*** and ***query***. These are terms used in attention functions. In the case of the Encoder, $V$, $K$ and $Q$ will simply be identical copies of the embedding vector (plus positional encoding). They will have the dimensions `Batch_size` * `seq_len` * `embedding_dimension`. In multi-head attention we split the embedding vector into $N$ heads, so they will then have the dimensions `batch_size` * `N` * `seq_len` * (`embedding_dimension` / `N`). This final dimension (`embedding_dimension` / `N`) we will refer to as $d_k$. 
``` class MultiHeadAttention(nn.Module): def __init__(self, heads, d_model, dropout = 0.1): super().__init__() self.d_model = d_model self.d_k = d_model // heads self.h = heads self.q_linear = nn.Linear(d_model, d_model) self.v_linear = nn.Linear(d_model, d_model) self.k_linear = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(dropout) self.out = nn.Linear(d_model, d_model) def forward(self, q, k, v, mask=None): bs = q.size(0) # Perform linear operation and split into h heads k = self.k_linear(k).view(bs, -1, self.h, self.d_k) q = self.q_linear(q).view(bs, -1, self.h, self.d_k) v = self.v_linear(v).view(bs, -1, self.h, self.d_k) # Transpose to get dimensions bs * h * sl * d_model k = k.transpose(1,2) q = q.transpose(1,2) v = v.transpose(1,2) # Calculate attention using function we will define next scores = attention(q, k, v, self.d_k, mask, self.dropout) # Concatenate heads and put through final linear layer concat = scores.transpose(1,2).contiguous()\ .view(bs, -1, self.d_model) output = self.out(concat) return output ``` ## Attention The equation below is the attention formula with retrieved from [Attention is All You Need](https://arxiv.org/abs/1706.03762) paper and it does a good job at explaining each step. ![Attention Equation](../../../images/attention.png) ![Attention Equation](../../../images/attention-img.png) Each arrow in the diagram reflects a part of the equation. Initially we must **multiply** $Q$ by the transpose of $K$. This is then scaled by **dividing the output by the square root** of $d_k$. A step that’s not shown in the equation is the masking operation. Before we perform **Softmax**, we apply our mask and hence reduce values where the input is padding (or in the decoder, also where the input is ahead of the current word). Another step not shown is **dropout**, which we will apply after Softmax. Finally, the last step is doing a **dot product** between the result so far and $V$. 
``` import torch.nn.functional as F def attention(q, k, v, d_k, mask=None, dropout=None): scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask == 0, -1e9) scores = F.softmax(scores, dim=-1) if dropout is not None: scores = dropout(scores) output = torch.matmul(scores, v) return output ``` ## Feed-Forward Network The feed-forward layer just consists of two linear operations, with a **relu** and **dropout** operation in between them. It simply deepens our network, employing linear layers to **analyse patterns** in the attention layers output. ![FeedForward Neural Network](../../../images/feed-forward-nn.gif) ``` class FeedForward(nn.Module): def __init__(self, d_model, d_ff=2048, dropout = 0.1): super().__init__() # We set d_ff as a default to 2048 self.linear_1 = nn.Linear(d_model, d_ff) self.dropout = nn.Dropout(dropout) self.linear_2 = nn.Linear(d_ff, d_model) def forward(self, x): x = self.dropout(F.relu(self.linear_1(x))) x = self.linear_2(x) return x ``` ## Normalisation Normalisation is highly important in deep neural networks. It prevents the range of values in the layers changing too much, meaning the model **trains faster** and has **better ability to generalise**. ![Normalization](../../../images/norm.png) We will be normalising our results between each layer in the encoder/decoder. 
``` class Norm(nn.Module): def __init__(self, d_model, eps = 1e-6): super().__init__() self.size = d_model # create two learnable parameters to calibrate normalisation self.alpha = nn.Parameter(torch.ones(self.size)) self.bias = nn.Parameter(torch.zeros(self.size)) self.eps = eps def forward(self, x): norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=True) + self.eps) + self.bias return norm ``` # Building Transformer Let’s have another look at the over-all architecture and start building: ![Transformer](../../../images/transformer.png) **One last Variable**: If you look at the diagram closely you can see a $N_x$ next to the encoder and decoder architectures. In reality, the encoder and decoder in the diagram above represent one layer of an encoder and one of the decoder. $N$ is the variable for the **number of layers** there will be. Eg. if `N=6`, the data goes through six encoder layers (with the architecture seen above), then these outputs are passed to the decoder which also consists of six repeating decoder layers. We will now build `EncoderLayer` and `DecoderLayer` modules with the architecture shown in the model above. Then when we build the encoder and decoder we can define how many of these layers to have. 
## EncoderLayer

```
# build an encoder layer with one multi-head attention layer and one feed-forward layer
class EncoderLayer(nn.Module):
    def __init__(self, d_model, heads, dropout = 0.1):
        super().__init__()
        self.norm_1 = Norm(d_model)
        self.norm_2 = Norm(d_model)
        self.attn = MultiHeadAttention(heads, d_model)
        self.ff = FeedForward(d_model)
        self.dropout_1 = nn.Dropout(dropout)
        self.dropout_2 = nn.Dropout(dropout)

    def forward(self, x, mask):
        x2 = self.norm_1(x)
        x = x + self.dropout_1(self.attn(x2, x2, x2, mask))
        x2 = self.norm_2(x)
        x = x + self.dropout_2(self.ff(x2))
        return x
```

## DecoderLayer

```
# build a decoder layer with two multi-head attention layers and one feed-forward layer
class DecoderLayer(nn.Module):
    def __init__(self, d_model, heads, dropout=0.1):
        super().__init__()
        self.norm_1 = Norm(d_model)
        self.norm_2 = Norm(d_model)
        self.norm_3 = Norm(d_model)

        self.dropout_1 = nn.Dropout(dropout)
        self.dropout_2 = nn.Dropout(dropout)
        self.dropout_3 = nn.Dropout(dropout)

        self.attn_1 = MultiHeadAttention(heads, d_model)
        self.attn_2 = MultiHeadAttention(heads, d_model)
        # Fixed: was FeedForward(d_model).cuda(), which broke CPU-only runs;
        # the whole model is moved to GPU later (guarded by device.type)
        self.ff = FeedForward(d_model)

    def forward(self, x, e_outputs, src_mask, trg_mask):
        x2 = self.norm_1(x)
        x = x + self.dropout_1(self.attn_1(x2, x2, x2, trg_mask))
        x2 = self.norm_2(x)
        x = x + self.dropout_2(self.attn_2(x2, e_outputs, e_outputs, src_mask))
        x2 = self.norm_3(x)
        x = x + self.dropout_3(self.ff(x2))
        return x
```

We can then build a convenient cloning function that can generate multiple layers:

```
import copy

def get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
```

## Encoder

```
class Encoder(nn.Module):
    def __init__(self, vocab_size, d_model, N, heads):
        super().__init__()
        self.N = N
        self.embed = Embedder(vocab_size, d_model)
        self.pe = PositionalEncoder(d_model)
        self.layers = get_clones(EncoderLayer(d_model, heads), N)
        self.norm = Norm(d_model)

    def forward(self, src, mask):
        x = self.embed(src)
        x = self.pe(x)
        # Fixed: was `range(N)`, which silently used the global N rather than
        # this encoder's own layer count (matches Decoder, which uses self.N)
        for i in range(self.N):
            x = self.layers[i](x, mask)
        return self.norm(x)
```

## Decoder

```
class Decoder(nn.Module):
    def __init__(self, vocab_size, d_model, N, heads):
        super().__init__()
        self.N = N
        self.embed = Embedder(vocab_size, d_model)
        self.pe = PositionalEncoder(d_model)
        self.layers = get_clones(DecoderLayer(d_model, heads), N)
        self.norm = Norm(d_model)

    def forward(self, trg, e_outputs, src_mask, trg_mask):
        x = self.embed(trg)
        x = self.pe(x)
        for i in range(self.N):
            x = self.layers[i](x, e_outputs, src_mask, trg_mask)
        return self.norm(x)
```

## Transformer

```
class Transformer(nn.Module):
    def __init__(self, src_vocab, trg_vocab, d_model, N, heads):
        super().__init__()
        self.encoder = Encoder(src_vocab, d_model, N, heads)
        self.decoder = Decoder(trg_vocab, d_model, N, heads)
        self.out = nn.Linear(d_model, trg_vocab)

    def forward(self, src, trg, src_mask, trg_mask):
        e_outputs = self.encoder(src, src_mask)
        d_output = self.decoder(trg, e_outputs, src_mask, trg_mask)
        output = self.out(d_output)
        return output
```

**Note**: We don't perform softmax on the output as this will be handled automatically by our loss function.

# Training the Model

With the transformer built, all that remains is to train on the dataset. The coding part is done, but be prepared to wait for about 2 days for this model to start converging! However, in this session, we only run a minimal number of epochs to train the model, and you may try using more epochs during your self-study.

Let's define some parameters first:

```
embedding_dimension = 512
heads = 4
N = 6
src_vocab = len(EN_TEXT.vocab)
trg_vocab = len(FR_TEXT.vocab)

model = Transformer(src_vocab, trg_vocab, embedding_dimension, N, heads)

if device.type == 'cuda':
    model.cuda()

for p in model.parameters():
    if p.dim() > 1:
        nn.init.xavier_uniform_(p)

# This code is very important! It initialises the parameters with a
# range of values that stops the signal fading or getting too big.
optim = torch.optim.Adam(model.parameters(), lr=0.0001, betas=(0.9, 0.98), eps=1e-9) ``` Now we’re good to train the transformer model ``` import time import torch def train_model(epochs, print_every=100, timelimit=None): model.train() start = time.time() temp = start total_loss = 0 min_loss = float('inf') for epoch in range(epochs): for i, batch in enumerate(train_iter): src = batch.English.transpose(0,1) trg = batch.French.transpose(0,1) # the French sentence we input has all words except # the last, as it is using each word to predict the next trg_input = trg[:, :-1] # the words we are trying to predict targets = trg[:, 1:].contiguous().view(-1) # create function to make masks using mask code above src_mask, trg_mask = create_masks(src, trg_input) preds = model(src, trg_input, src_mask, trg_mask) ys = trg[:, 1:].contiguous().view(-1) optim.zero_grad() loss = F.cross_entropy(preds.view(-1, preds.size(-1)), ys, ignore_index=target_pad) loss.backward() optim.step() total_loss += loss.data.item() if (i + 1) % print_every == 0: loss_avg = total_loss / print_every duration = (time.time() - start) // 60 print("time = %dm, epoch %d, iter = %d, loss = %.3f, %ds per %d iters" % (duration, epoch + 1, i + 1, loss_avg, time.time() - temp, print_every)) if loss_avg < min_loss: min_loss = loss_avg torch.save(model, "model/training.model") print("Current best model saved", "loss =", loss_avg) if (timelimit and duration >= timelimit): break total_loss = 0 temp = time.time() # train_model(1, timelimit=300) torch.load("model/pretrained.model") ``` # Testing the Model We can use the below function to translate sentences. We can feed it sentences directly from our batches, or input custom strings. The translator works by running a loop. We start off by encoding the English sentence. We then feed the decoder the `<sos>` token index and the encoder outputs. The decoder makes a prediction for the first word, and we add this to our decoder input with the sos token. 
We rerun the loop, getting the next prediction and adding this to the decoder input, until we reach the `<eos>` token letting us know it has finished translating. ``` def translate(model, src, max_len = 80, custom_string=False): model.eval() if custom_string == True: src = tokenize_en(src) src = Variable(torch.LongTensor([[EN_TEXT.vocab.stoi[tok] for tok in src]])).cuda() src_mask = (src != input_pad).unsqueeze(-2) e_outputs = model.encoder(src, src_mask) outputs = torch.zeros(max_len).type_as(src.data) outputs[0] = torch.LongTensor([FR_TEXT.vocab.stoi['<sos>']]) for i in range(1, max_len): trg_mask = np.triu(np.ones((1, i, i)), k=1).astype('uint8') trg_mask = Variable(torch.from_numpy(trg_mask) == 0).cuda() out = model.out(model.decoder(outputs[:i].unsqueeze(0), e_outputs, src_mask, trg_mask)) out = F.softmax(out, dim=-1) val, ix = out[:, -1].data.topk(1) outputs[i] = ix[0][0] if ix[0][0] == FR_TEXT.vocab.stoi['<eos>']: break return ' '.join([FR_TEXT.vocab.itos[ix] for ix in outputs[:i]]) translate(model, "How're you my friend?", custom_string=True) ``` # Contributors **Author** <br>Chee Lam # References 1. [How to Code The Transformer in Pytorch](https://towardsdatascience.com/how-to-code-the-transformer-in-pytorch-24db27c8f9ec#b0ed) 2. [How to Use TorchText for Neural Machine Translation](https://towardsdatascience.com/how-to-use-torchtext-for-neural-machine-translation-plus-hack-to-make-it-5x-faster-77f3884d95)
github_jupyter
# Circuit Basics Here, we provide an overview of working with Qiskit. Qiskit provides the basic building blocks necessary to program quantum computers. The fundamental unit of Qiskit is the [quantum circuit](https://en.wikipedia.org/wiki/Quantum_circuit). A basic workflow using Qiskit consists of two stages: **Build** and **Run**. **Build** allows you to make different quantum circuits that represent the problem you are solving, and **Run** that allows you to run them on different backends. After the jobs have been run, the data is collected and postprocessed depending on the desired output. ``` import numpy as np from qiskit import QuantumCircuit %matplotlib inline ``` ## Building the circuit <a name='basics'></a> The basic element needed for your first program is the QuantumCircuit. We begin by creating a `QuantumCircuit` comprised of three qubits. ``` # Create a Quantum Circuit acting on a quantum register of three qubits circ = QuantumCircuit(3) ``` After you create the circuit with its registers, you can add gates ("operations") to manipulate the registers. As you proceed through the tutorials you will find more gates and circuits; below is an example of a quantum circuit that makes a three-qubit GHZ state $$|\psi\rangle = \left(|000\rangle+|111\rangle\right)/\sqrt{2}.$$ To create such a state, we start with a three-qubit quantum register. By default, each qubit in the register is initialized to $|0\rangle$. To make the GHZ state, we apply the following gates: - A Hadamard gate $H$ on qubit 0, which puts it into the superposition state $\left(|0\rangle+|1\rangle\right)/\sqrt{2}$. - A controlled-Not operation ($C_{X}$) between qubit 0 and qubit 1. - A controlled-Not operation between qubit 0 and qubit 2. On an ideal quantum computer, the state produced by running this circuit would be the GHZ state above. In Qiskit, operations can be added to the circuit one by one, as shown below. ``` # Add a H gate on qubit 0, putting this qubit in superposition. 
circ.h(0) # Add a CX (CNOT) gate on control qubit 0 and target qubit 1, putting # the qubits in a Bell state. circ.cx(0, 1) # Add a CX (CNOT) gate on control qubit 0 and target qubit 2, putting # the qubits in a GHZ state. circ.cx(0, 2) ``` ## Visualize Circuit <a name='visualize'></a> You can visualize your circuit using Qiskit `QuantumCircuit.draw()`, which plots the circuit in the form found in many textbooks. ``` circ.draw('mpl') ``` In this circuit, the qubits are put in order, with qubit zero at the top and qubit two at the bottom. The circuit is read left to right (meaning that gates that are applied earlier in the circuit show up further to the left). <div class="alert alert-block alert-info"> When representing the state of a multi-qubit system, the tensor order used in Qiskit is different than that used in most physics textbooks. Suppose there are $n$ qubits, and qubit $j$ is labeled as $Q_{j}$. Qiskit uses an ordering in which the $n^{\mathrm{th}}$ qubit is on the <em><strong>left</strong></em> side of the tensor product, so that the basis vectors are labeled as $Q_{n-1}\otimes \cdots \otimes Q_1\otimes Q_0$. For example, if qubit zero is in state 0, qubit 1 is in state 0, and qubit 2 is in state 1, Qiskit would represent this state as $|100\rangle$, whereas many physics textbooks would represent it as $|001\rangle$. This difference in labeling affects the way multi-qubit operations are represented as matrices. For example, Qiskit represents a controlled-X ($C_{X}$) operation with qubit 0 being the control and qubit 1 being the target as $$C_X = \begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0 \\ 0 & 1 & 0 & 0 \\\end{pmatrix}.$$ </div> ## Simulating circuits <a name='simulation'></a> To simulate a circuit we use the quant-info module in Qiskit. 
This simulator returns the quantum state, which is a complex vector of dimensions $2^n$, where $n$ is the number of qubits (so be careful using this as it will quickly get too large to run on your machine).

There are two stages to the simulator. The first is to set the input state and the second to evolve the state by the quantum circuit.

```
from qiskit.quantum_info import Statevector

# Set the initial state of the simulator to the ground state using from_int
state = Statevector.from_int(0, 2**3)

# Evolve the state by the quantum circuit
state = state.evolve(circ)

# draw using latex
state.draw('latex')
```

Qiskit also provides a visualization toolbox to allow you to view the state.

Below, we use the visualization function to plot the qsphere and a hinton representing the real and imaginary components of the state density matrix $\rho$.

```
state.draw('qsphere')
state.draw('hinton')
```

## Unitary representation of a circuit

Qiskit's quant_info module also has an operator method which can be used to make a unitary operator for the circuit. This calculates the $2^n \times 2^n$ matrix representing the quantum circuit.

```
from qiskit.quantum_info import Operator

U = Operator(circ)

# Show the results
U.data
```

## OpenQASM backend

The simulators above are useful because they provide information about the state output by the ideal circuit and the matrix representation of the circuit. However, a real experiment terminates by _measuring_ each qubit (usually in the computational $|0\rangle, |1\rangle$ basis). Without measurement, we cannot gain information about the state. Measurements cause the quantum system to collapse into classical bits.

For example, suppose we make independent measurements on each qubit of the three-qubit GHZ state $$|\psi\rangle = (|000\rangle +|111\rangle)/\sqrt{2},$$ and let $xyz$ denote the bitstring that results.
Recall that, under the qubit labeling used by Qiskit, $x$ would correspond to the outcome on qubit 2, $y$ to the outcome on qubit 1, and $z$ to the outcome on qubit 0. <div class="alert alert-block alert-info"> <b>Note:</b> This representation of the bitstring puts the most significant bit (MSB) on the left, and the least significant bit (LSB) on the right. This is the standard ordering of binary bitstrings. We order the qubits in the same way (qubit representing the MSB has index 0), which is why Qiskit uses a non-standard tensor product order. </div> Recall the probability of obtaining outcome $xyz$ is given by $$\mathrm{Pr}(xyz) = |\langle xyz | \psi \rangle |^{2}$$ and as such for the GHZ state probability of obtaining 000 or 111 are both 1/2. To simulate a circuit that includes measurement, we need to add measurements to the original circuit above, and use a different Aer backend. ``` # Create a Quantum Circuit meas = QuantumCircuit(3, 3) meas.barrier(range(3)) # map the quantum measurement to the classical bits meas.measure(range(3), range(3)) # The Qiskit circuit object supports composition. # Here the meas has to be first and front=True (putting it before) # as compose must put a smaller circuit into a larger one. qc = meas.compose(circ, range(3), front=True) #drawing the circuit qc.draw('mpl') ``` This circuit adds a classical register, and three measurements that are used to map the outcome of qubits to the classical bits. To simulate this circuit, we use the ``qasm_simulator`` in Qiskit Aer. Each run of this circuit will yield either the bitstring 000 or 111. To build up statistics about the distribution of the bitstrings (to, e.g., estimate $\mathrm{Pr}(000)$), we need to repeat the circuit many times. The number of times the circuit is repeated can be specified in the ``execute`` function, via the ``shots`` keyword. 
``` # Adding the transpiler to reduce the circuit to QASM instructions # supported by the backend from qiskit import transpile # Use Aer's qasm_simulator from qiskit.providers.aer import QasmSimulator backend = QasmSimulator() # First we have to transpile the quantum circuit # to the low-level QASM instructions used by the # backend qc_compiled = transpile(qc, backend) # Execute the circuit on the qasm simulator. # We've set the number of repeats of the circuit # to be 1024, which is the default. job_sim = backend.run(qc_compiled, shots=1024) # Grab the results from the job. result_sim = job_sim.result() ``` Once you have a result object, you can access the counts via the function `get_counts(circuit)`. This gives you the _aggregated_ binary outcomes of the circuit you submitted. ``` counts = result_sim.get_counts(qc_compiled) print(counts) ``` Approximately 50 percent of the time, the output bitstring is 000. Qiskit also provides a function `plot_histogram`, which allows you to view the outcomes. ``` from qiskit.visualization import plot_histogram plot_histogram(counts) ``` The estimated outcome probabilities $\mathrm{Pr}(000)$ and $\mathrm{Pr}(111)$ are computed by taking the aggregate counts and dividing by the number of shots (times the circuit was repeated). Try changing the ``shots`` keyword in the ``execute`` function and see how the estimated probabilities change. ``` import qiskit.tools.jupyter %qiskit_version_table %qiskit_copyright ```
github_jupyter
# Segmenting and Clustering Neighborhoods in Toronto Import requests and panda ``` import requests import pandas as pd ``` Get the HTML of the Wiki page, convert into a table with help of read_html (read HTML tables into a list of DataFrame objects), remove cells with a borough that is Not assigned. ``` wiki = 'https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M' wikipedia_page = requests.get(wiki) df_raw = pd.read_html(wikipedia_page.content, header=0)[0] df_new = df_raw[df_raw.Borough != 'Not assigned'] df_new.head() ``` Find whether there is a "Not assigned" in Neighbourhood ``` df_new.loc[df_new.Neighbourhood == 'Not assigned'] ``` If we have Neighbourhood Not assigned, we change it with the value of Borough ``` df_new.Neighbourhood.replace('Not assigned',df_new.Borough,inplace=True) df_new.head(8) ``` Group Neighbourhoods with the same Postcode ``` df_toronto = df_new.groupby(['Postcode', 'Borough'])['Neighbourhood'].apply(lambda x: ', '.join(x)) df_toronto = df_toronto.reset_index() df_toronto.rename(columns = {'Postcode':'PostalCode'}, inplace = True) df_toronto.rename(columns = {'Neighbourhood':'Neighborhood'}, inplace = True) df_toronto.head() df_toronto.shape ``` ### Get the latitude and the longitude coordinates of each neighborhood Use the csv file, because of some problems with geocoder ``` url = 'http://cocl.us/Geospatial_data' df_geo=pd.read_csv(url) df_geo.head() #check the sshape o the csv file df_geo.shape ``` Both tables have the same chape. Now we can join new colums to our data. 
``` df_toronto = df_toronto.join(df_geo.set_index('Postal Code'), on='PostalCode') df_toronto.head ``` ### Use the foursquere API to segment and cluster the neighborhoods of Toronto ``` !conda install -c conda-forge geocoder --yes import geocoder from geopy.geocoders import Nominatim address = 'Toronto, Ontario' geolocator = Nominatim(user_agent="toronto_explorer") location = geolocator.geocode(address) latitude = location.latitude longitude = location.longitude print('The geograpical coordinate of Toronto are {}, {}.'.format(latitude, longitude)) ``` Print the map with all the boroughs ``` import folium # create map of Toronto using latitude and longitude values map_Toronto = folium.Map(location=[latitude, longitude], zoom_start=10) # add markers to map for lat, lng, borough, neighborhood in zip(df_toronto['Latitude'], df_toronto['Longitude'], df_toronto['Borough'], df_toronto['Neighborhood']): label = '{}, {}'.format(neighborhood, borough) label = folium.Popup(label, parse_html=True) folium.CircleMarker( [lat, lng], radius=5, popup=label, color='blue', fill=True, fill_color='#3186cc', fill_opacity=0.7, ).add_to(map_Toronto) map_Toronto ``` Define Foursquare Credentials and Version ``` CLIENT_ID = 'XLBDRGURZVOOULUGDUC4DSJZSRE5ZI0XPB1WA5RV3YL5D1TP' # your Foursquare ID CLIENT_SECRET = '5TW0I4L1WKC5A0H1NZHEDZD535BFN1CDQ2MRHBW1VBNFUTEK' # your Foursquare Secret VERSION = '20180604' # Foursquare API version print('Your credentails:') print('CLIENT_ID: ' + CLIENT_ID) print('CLIENT_SECRET:' + CLIENT_SECRET) ``` Explore the data, and get the venues in 500 meters range from our first entry ``` #df_toronto.loc[0, 'Neighbourhood'] neighborhood_latitude = df_toronto.loc[0, 'Latitude'] # neighborhood latitude value neighborhood_longitude = df_toronto.loc[0, 'Longitude'] # neighborhood longitude value neighborhood_name = df_toronto.loc[0, 'Neighborhood'] # neighborhood name print('Latitude and longitude values of {} are {}, {}.'.format(neighborhood_name, neighborhood_latitude, 
neighborhood_longitude)) ``` Create the GET request URL ``` LIMIT = 100 radius = 500 url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format( CLIENT_ID, CLIENT_SECRET, VERSION, neighborhood_latitude, neighborhood_longitude, radius, LIMIT) url results = requests.get(url).json() results # function that extracts the category of the venue def get_category_type(row): try: categories_list = row['categories'] except: categories_list = row['venue.categories'] if len(categories_list) == 0: return None else: return categories_list[0]['name'] ``` We can see that we had only 1 response for our first entry ``` import json from pandas.io.json import json_normalize venues = results['response']['groups'][0]['items'] nearby_venues = json_normalize(venues) # flatten JSON # filter columns filtered_columns = ['venue.name', 'venue.categories', 'venue.location.lat', 'venue.location.lng'] nearby_venues =nearby_venues.loc[:, filtered_columns] # filter the category for each row nearby_venues['venue.categories'] = nearby_venues.apply(get_category_type, axis=1) # clean columns nearby_venues.columns = [col.split(".")[-1] for col in nearby_venues.columns] nearby_venues.head() ``` Generalize to obtain the venues from all neighbourhoods in Toronto ``` def getNearbyVenues(names, latitudes, longitudes, radius=500): venues_list=[] for name, lat, lng in zip(names, latitudes, longitudes): print(name) # create the API request URL url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format( CLIENT_ID, CLIENT_SECRET, VERSION, lat, lng, radius, LIMIT) # make the GET request results = requests.get(url).json()["response"]['groups'][0]['items'] # return only relevant information for each nearby venue venues_list.append([( name, lat, lng, v['venue']['name'], v['venue']['location']['lat'], v['venue']['location']['lng'], v['venue']['categories'][0]['name']) for v in results]) 
nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list]) nearby_venues.columns = ['Neighbourhood', 'Neighborhood Latitude', 'Neighborhood Longitude', 'Venue', 'Venue Latitude', 'Venue Longitude', 'Venue Category'] return(nearby_venues) toronto_venues = getNearbyVenues(names=df_toronto['Neighborhood'], latitudes=df_toronto['Latitude'], longitudes=df_toronto['Longitude'] ) ``` check the size of the dataframe ``` print(toronto_venues.shape) toronto_venues.head() ``` And how many venues for each Neighbourhood ``` toronto_venues.groupby('Neighbourhood').count() ``` How many categorys can we find? ``` # one hot encoding toronto_onehot = pd.get_dummies(toronto_venues[['Venue Category']], prefix="", prefix_sep="") # add neighborhood column back to dataframe toronto_onehot['Neighbourhood'] = toronto_venues['Neighbourhood'] # move neighborhood column to the first column fixed_columns = [toronto_onehot.columns[-1]] + list(toronto_onehot.columns[:-1]) toronto_onehot = toronto_onehot[fixed_columns] toronto_onehot.head() toronto_onehot.shape toronto_grouped = toronto_onehot.groupby('Neighbourhood').mean().reset_index() toronto_grouped.head() toronto_grouped.shape ``` Get the top 10 for each neighbourhood ``` def return_most_common_venues(row, num_top_venues): row_categories = row.iloc[1:] row_categories_sorted = row_categories.sort_values(ascending=False) return row_categories_sorted.index.values[0:num_top_venues] import numpy as np num_top_venues = 10 indicators = ['st', 'nd', 'rd'] # create columns according to number of top venues columns = ['Neighbourhood'] for ind in np.arange(num_top_venues): try: columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind])) except: columns.append('{}th Most Common Venue'.format(ind+1)) # create a new dataframe neighborhoods_venues_sorted = pd.DataFrame(columns=columns) neighborhoods_venues_sorted['Neighbourhood'] = toronto_grouped['Neighbourhood'] for ind in np.arange(toronto_grouped.shape[0]): 
neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(toronto_grouped.iloc[ind, :], num_top_venues) neighborhoods_venues_sorted.head() ``` Custer neighbourhoods ``` # import k-means from clustering stage from sklearn.cluster import KMeans # set number of clusters kclusters = 5 toronto_grouped_clustering = toronto_grouped.drop('Neighbourhood', 1) # run k-means clustering kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(toronto_grouped_clustering) # check cluster labels generated for each row in the dataframe kmeans.labels_[0:10] ``` Merge the dataframe with the top 10 and the cluster for each neighbourhood ``` # add clustering labels #neighborhoods_venues_sorted.insert(0, 'Cluster Labels', kmeans.labels_) toronto_merged = df_toronto # merge toronto_grouped with toronto_data to add latitude/longitude for each neighborhood toronto_merged = toronto_merged.join(neighborhoods_venues_sorted.set_index('Neighbourhood'), on='Neighborhood') toronto_merged.head() # check the last columns! 
toronto_merged[toronto_merged['Cluster Labels'].isnull()] ``` Plot the clusters in the map ``` import matplotlib.cm as cm import matplotlib.colors as colors # create map map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11) # set color scheme for the clusters x = np.arange(kclusters) ys = [i + x + (i*x)**2 for i in range(kclusters)] colors_array = cm.rainbow(np.linspace(0, 1, len(ys))) rainbow = [colors.rgb2hex(i) for i in colors_array] toronto_merged_nonan = toronto_merged.dropna(subset=['Cluster Labels']) # add markers to the map markers_colors = [] for lat, lon, poi, cluster in zip(toronto_merged_nonan['Latitude'], toronto_merged_nonan['Longitude'], toronto_merged_nonan['Neighborhood'], toronto_merged_nonan['Cluster Labels']): label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True) folium.CircleMarker( [lat, lon], radius=5, popup=label, color=rainbow[int(cluster-1)], fill=True, fill_color=rainbow[int(cluster-1)], fill_opacity=0.7).add_to(map_clusters) map_clusters ``` Examine clusters Cluster 1 ``` toronto_merged_nonan.loc[toronto_merged_nonan['Cluster Labels'] == 0, toronto_merged_nonan.columns[[1] + list(range(5, toronto_merged_nonan.shape[1]))]] ``` Cluster 2 ``` toronto_merged_nonan.loc[toronto_merged_nonan['Cluster Labels'] == 1, toronto_merged_nonan.columns[[1] + list(range(5, toronto_merged_nonan.shape[1]))]] ``` Cluster 3 ``` toronto_merged_nonan.loc[toronto_merged_nonan['Cluster Labels'] == 2, toronto_merged_nonan.columns[[1] + list(range(5, toronto_merged_nonan.shape[1]))]] ``` Cluster 4 ``` toronto_merged_nonan.loc[toronto_merged_nonan['Cluster Labels'] == 3, toronto_merged_nonan.columns[[1] + list(range(5, toronto_merged_nonan.shape[1]))]] ``` Cluster 5 ``` toronto_merged_nonan.loc[toronto_merged_nonan['Cluster Labels'] == 4, toronto_merged_nonan.columns[[1] + list(range(5, toronto_merged_nonan.shape[1]))]] ```
github_jupyter
<p style="text-align: right;"> &#9989; Put your name here</p> # Pre-Class Assignment: Practice In todays pre-class assignment you are going to practice what we have learned. # Goals for today's pre-class assignment </p> 1. topic 1. Assignment Wrap-up ``` # Read data for this assignment %matplotlib inline import matplotlib.pyplot as plt from scipy.misc import imread, imsave from urllib.request import urlopen url1 = 'http://res.cloudinary.com/miles-extranet-dev/image/upload/ar_16:9,c_fill,w_1000,g_face,q_50/Michigan/migration_photos/G21696/G21696-msubeaumonttower01.jpg' with urlopen(url1) as file: im1 = imread(file, mode='RGB') url2 = 'http://msutoday.msu.edu/_/img/assets/2013/beaumont-spring-1.jpg' with urlopen(url2) as file: im2 = imread(file, mode='RGB') f, (ax1, ax2) = plt.subplots(1, 2,figsize=(20,10)) ax1.imshow(im1) ax2.imshow(im2) ``` ---- # 1. Point Seleciton ``` # The following command will install mpld3 - Uncomment and run once. !pip install -t ./packages mpld3 #Add Packages folder to path (Do only once per run of notebook: from sys import path package_folder = './packages' if not package_folder in path: print('adding packages') path.append(package_folder) else: print('found packages folder') # Code snipit to make point seleciton function (REQUIRES mpld3 installed) %matplotlib inline import matplotlib.pyplot as plt import matplotlib.image as img import sys import mpld3 from mpld3 import plugins mpld3.enable_notebook() # PLOTS THE IMAGE IN THE NOTEBOOK def plot(imgname): fig, ax = plt.subplots() im = img.imread(imgname) plt.imshow(im, origin='lower') return fig # FUNCTION CALLED IN THE NOTEBOOK def pickpoints(fig='', radius=4, color="white", x = 'x', y = 'y'): if not fig: fig = plt.gcf() plugins.connect(fig, Annotate(radius, color, x, y)) # color='htmlcolorname', radius=int plugins.connect(fig, plugins.MousePosition()) # FORMATS x AND y LISTS INTO SHORTER DECIMALS, SO THEY'RE NOT TOO LENGTHY def cleanformat(var): varlist = [] if type(var) == float: varlist 
= '{:05.2f}'.format(var) else: for i in range(len(var)): varlist.append('{:05.2f}'.format(var[i])) return varlist # MAIN CLASS THAT CONTAINS JAVASCRIPT CODE TO CREATE CIRCLES AND DRAG CIRCLES class Annotate(plugins.PluginBase): """A plugin that creates points in a figure by clicking the mouse""" JAVASCRIPT = r""" mpld3.register_plugin("annotate", Annotate); Annotate.prototype = Object.create(mpld3.Plugin.prototype); Annotate.prototype.constructor = Annotate; Annotate.prototype.requiredProps = []; Annotate.prototype.defaultProps = {radius: 4, color: "white", x: 'x', y: 'y'}; function Annotate(fig, props){ mpld3.Plugin.call(this, fig, props); }; Annotate.prototype.draw = function(){ /// NECESSARY STARTUP VARIABLES /// var fig = this.fig; var ax = fig.axes; var dataset = []; var svg = d3.select(".mpld3-figure"); // existing svg element var radius = this.props.radius; var color = this.props.color; var x = this.props.x; var y = this.props.y; var ax = fig.axes[0]; /// INDEXES HTML DOC TO PULL VALUES FOR x,y CALIBRATION /// var xcal = this.parent.axes[0].position[0]; var ycal = this.parent.axes[0].position[1]; console.log('x calibration: ' + xcal); console.log('y calibration: ' + ycal); var xcommand = x+" = []"; IPython.notebook.kernel.execute(xcommand); var ycommand = y+" = []"; IPython.notebook.kernel.execute(ycommand); ////////// CREATE POINT COMPONENT ////////// var update_coords = function() { return function() { var pos = d3.mouse(this), xpos = ax.x.invert(pos[0]), ypos = ax.y.invert(pos[1]); var newpoint = { cx: pos[0] + xcal, cy: pos[1] + ycal, r: radius, fill: color }; dataset.push(newpoint); var circles = svg.selectAll("circle") .data(dataset) .enter() .append("circle") .attr(newpoint) .call(drag); var xcommand = x+".append("+xpos+")"; IPython.notebook.kernel.execute(xcommand); console.log(xcommand); var ycommand = y+".append("+ypos+")"; IPython.notebook.kernel.execute(ycommand); console.log(ycommand); }; }(); ax.baseaxes .on("mousedown", update_coords); 
////////// DRAG POINT COMPONENT ////////// var drag = d3.behavior.drag() .on("dragstart", dragstarted) .on("drag", dragged) .on("dragend", dragended); function dragstarted(d) { d3.event.sourceEvent.stopPropagation(); d3.select(this).classed("dragging", true); } function dragged(d) { d3.select(this).attr("cx", d3.event.x) .attr("cy", d3.event.y); } function dragended(d, i) { d3.event.sourceEvent.stopPropagation(); d3.select(this).classed("dragging", false); var calib_cx = d3.select(this)[0][0].cx.animVal.value - xcal; var calib_cy = d3.select(this)[0][0].cy.animVal.value - ycal; var xcommand = x+"["+i+"] = "+ax.x.invert(calib_cx); var ycommand = y+"["+i+"] = "+ax.y.invert(calib_cy); IPython.notebook.kernel.execute(xcommand); IPython.notebook.kernel.execute(ycommand); console.log(xcommand); console.log(ycommand); } };""" def __init__(self, radius=4, color="white", x ='x', y ='y'): self.dict_ = {"type": "annotate", "radius": radius, "color": color, "x": x, "y": y}; fig = plt.figure(figsize=(9,6)) plt.imshow(im1) pickpoints(color='cyan', radius=2, x='x1', y='y1') fig = plt.figure(figsize=(9,6)) plt.imshow(im2) pickpoints(color='cyan', radius=2, x='x2', y='y2') ``` ---- # 1. 
Topic one ==TOPIC DESCRIPTION== ``` %matplotlib inline from __future__ import division import matplotlib.pyplot as plt import numpy as np import scipy.misc as misc from skimage import transform # Here are some libraries you may need to use %matplotlib inline import matplotlib.pylab as plt import sympy as sp sp.init_printing() import numpy as np from ipywidgets import interact import math plt.imshow(im2) im = im1 def affine_image(a1=0,s=1,tx=0,ty=0, alpha=1): theta = -a1/180 * math.pi dx = tx*im.shape[1] dy = ty*im.shape[0] S = np.matrix([[1/s,0,0], [0,1/s,0], [0,0,1]]) T2 = np.matrix([[1,0,im.shape[1]/2], [0,1,im.shape[0]/2], [0,0,1]]) T1 = np.matrix([[1,0,-im.shape[1]/2-dx], [0,1,-im.shape[0]/2-dy], [0,0,1]]) R = np.matrix([[math.cos(theta),-math.sin(theta),0],[math.sin(theta), math.cos(theta),0],[0,0,1]]) img = transform.warp(im, T2*S*R*T1); plt.imshow(im2); plt.imshow(img, alpha=alpha); plt.show(); interact(affine_image, a1=(-180,180), s=(0.001,5), tx=(-1.0,1.0), ty=(-1,1,0.1),alpha=(0.0,1.0)); ##TODO: Modify this line of code # Here are some libraries you may need to use %matplotlib inline import matplotlib.pylab as plt import sympy as sp sp.init_printing() import numpy as np from ipywidgets import interact import math from urllib.request import urlopen from scipy.misc import imread, imsave url = 'http://res.cloudinary.com/miles-extranet-dev/image/upload/ar_16:9,c_fill,w_1000,g_face,q_50/Michigan/migration_photos/G21696/G21696-msubeaumonttower01.jpg' with urlopen(url) as file: im = imread(file, mode='RGB') #Show the image plt.imshow(im); ##Put your function here from skimage import transform def affine_image(a1=0,s=0.5,tx=0,ty=0,shx=0, shy=0): T = np.matrix([[1,0,0],[0,1,0],[0,0,1]]) #Your transformations go here. 
img = transform.warp(im, T); plt.imshow(img); plt.show(); ##ANSWER## def affine_image(a1=0,s=0.5,tx=0,ty=0, shx=0, shy=0): theta = -a1/180 * math.pi dx = tx*im.shape[1] dy = ty*im.shape[0] S = np.matrix([[1/s,0,0], [0,1/s,0], [0,0,1]]) SH = np.matrix([[1,shx,0], [shy,1,0], [0,0,1]]) T2 = np.matrix([[1,0,im.shape[1]/2], [0,1,im.shape[0]/2], [0,0,1]]) T1 = np.matrix([[1,0,-im.shape[1]/2-dx], [0,1,-im.shape[0]/2-dy], [0,0,1]]) R = np.matrix([[math.cos(theta),-math.sin(theta),0],[math.sin(theta), math.cos(theta),0],[0,0,1]]) img = transform.warp(im, T2*S*R*SH*T1); plt.imshow(img); plt.show(); ##ANSWER## interact(affine_image, a1=(-180,180), s=(0.001,5), tx=(-1,1,0.1), ty=(-1,1,0.1), shx = (-1,1,0.1), shy = (-1,1,0.1)); ##TODO: Modify this line of code from IPython.display import YouTubeVideo YouTubeVideo("NzysdpexqsM",width=640,height=360) ``` **Quesiton:** Ask a question that requires a written answer? <font size=8 color="#009600">&#9998;</font> Do This - Erase the contents of this cell and replace it with your answer to the above question! (double-click on this text to edit this cell, and hit shift+enter to save the text) &#9989; **To Do:** Task to be completed by student. ``` %matplotlib inline import matplotlib.pylab as plt import numpy as np import sympy as sp sp.init_printing(use_unicode=True) #Example matrix in sympy sp.Matrix([[1,0], [0,1]]) #Example equation in sympy #First define all of the symples to use in our equation x,y,z,om = sp.symbols('x, y, z, \omega', negative=False) pa = om*x**2+om*y**2+om*z**2 pa #Put your code here ``` Example matrix notation: $$ \left[ \begin{matrix} 1 & 0 & 4 \\ 0 & 2 & -2 \\ 0 & 1 & 2 \end{matrix} \, \middle\vert \, \begin{matrix} -10 \\ 3 \\ 1 \end{matrix} \right] $$ ---- # 2. Assignment wrap-up Please fill out the form that appears when you run the code below. 
**You must completely fill this out in order to receive credit for the assignment!** Direct Link: https://goo.gl/YcjHHB ``` from IPython.display import HTML HTML( """ <iframe src="https://goo.gl/YcjHHB?embedded=true" width="80%" height="1200px" frameborder="0" marginheight="0" marginwidth="0"> Loading... </iframe> """ ) ``` --------- ### Congratulations, you're done with your pre-class assignment! &#169; Copyright 2018, Michigan State University Board of Trustees
github_jupyter
## Deep face recognition with Keras, Dlib and OpenCV Face recognition identifies persons on face images or video frames. In a nutshell, a face recognition system extracts features from an input face image and compares them to the features of labeled faces in a database. Comparison is based on a feature similarity metric and the label of the most similar database entry is used to label the input image. If the similarity value is below a certain threshold the input image is labeled as *unknown*. Comparing two face images to determine if they show the same person is known as face verification. This notebook uses a deep convolutional neural network (CNN) to extract features from input images. It follows the approach described in [[1]](https://arxiv.org/abs/1503.03832) with modifications inspired by the [OpenFace](http://cmusatyalab.github.io/openface/) project. [Keras](https://keras.io/) is used for implementing the CNN, [Dlib](http://dlib.net/) and [OpenCV](https://opencv.org/) for aligning faces on input images. Face recognition performance is evaluated on a small subset of the [LFW](http://vis-www.cs.umass.edu/lfw/) dataset which you can replace with your own custom dataset e.g. with images of your family and friends if you want to further experiment with this notebook. After an overview of the CNN architecure and how the model can be trained, it is demonstrated how to: - Detect, transform, and crop faces on input images. This ensures that faces are aligned before feeding them into the CNN. This preprocessing step is very important for the performance of the neural network. - Use the CNN to extract 128-dimensional representations, or *embeddings*, of faces from the aligned input images. In embedding space, Euclidean distance directly corresponds to a measure of face similarity. - Compare input embedding vectors to labeled embedding vectors in a database. 
Here, a support vector machine (SVM) and a KNN classifier, trained on labeled embedding vectors, play the role of a database. Face recognition in this context means using these classifiers to predict the labels i.e. identities of new inputs. ### Environment setup For running this notebook, create and activate a new [virtual environment](https://docs.python.org/3/tutorial/venv.html) and install the packages listed in [requirements.txt](requirements.txt) with `pip install -r requirements.txt`. Furthermore, you'll need a local copy of Dlib's face landmarks data file for running face alignment: ``` import bz2 import os from urllib.request import urlopen def download_landmarks(dst_file): url = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2' decompressor = bz2.BZ2Decompressor() with urlopen(url) as src, open(dst_file, 'wb') as dst: data = src.read(1024) while len(data) > 0: dst.write(decompressor.decompress(data)) data = src.read(1024) dst_dir = 'models' dst_file = os.path.join(dst_dir, 'landmarks.dat') if not os.path.exists(dst_file): os.makedirs(dst_dir) download_landmarks(dst_file) ``` ### CNN architecture and training The CNN architecture used here is a variant of the inception architecture [[2]](https://arxiv.org/abs/1409.4842). More precisely, it is a variant of the NN4 architecture described in [[1]](https://arxiv.org/abs/1503.03832) and identified as [nn4.small2](https://cmusatyalab.github.io/openface/models-and-accuracies/#model-definitions) model in the OpenFace project. This notebook uses a Keras implementation of that model whose definition was taken from the [Keras-OpenFace](https://github.com/iwantooxxoox/Keras-OpenFace) project. The architecture details aren't too important here, it's only useful to know that there is a fully connected layer with 128 hidden units followed by an L2 normalization layer on top of the convolutional base. 
These two top layers are referred to as the *embedding layer* from which the 128-dimensional embedding vectors can be obtained. The complete model is defined in [model.py](model.py) and a graphical overview is given in [model.png](model.png). A Keras version of the nn4.small2 model can be created with `create_model()`. ``` from model import create_model nn4_small2 = create_model() ``` Model training aims to learn an embedding $f(x)$ of image $x$ such that the squared L2 distance between all faces of the same identity is small and the distance between a pair of faces from different identities is large. This can be achieved with a *triplet loss* $L$ that is minimized when the distance between an anchor image $x^a_i$ and a positive image $x^p_i$ (same identity) in embedding space is smaller than the distance between that anchor image and a negative image $x^n_i$ (different identity) by at least a margin $\alpha$. $$L = \sum^{m}_{i=1} \large[ \small {\mid \mid f(x_{i}^{a}) - f(x_{i}^{p})) \mid \mid_2^2} - {\mid \mid f(x_{i}^{a}) - f(x_{i}^{n})) \mid \mid_2^2} + \alpha \large ] \small_+$$ $[z]_+$ means $max(z,0)$ and $m$ is the number of triplets in the training set. The triplet loss in Keras is best implemented with a custom layer as the loss function doesn't follow the usual `loss(input, target)` pattern. 
This layer calls `self.add_loss` to install the triplet loss: ``` from keras import backend as K from keras.models import Model from keras.layers import Input, Layer # Input for anchor, positive and negative images in_a = Input(shape=(96, 96, 3)) in_p = Input(shape=(96, 96, 3)) in_n = Input(shape=(96, 96, 3)) # Output for anchor, positive and negative embedding vectors # The nn4_small model instance is shared (Siamese network) emb_a = nn4_small2(in_a) emb_p = nn4_small2(in_p) emb_n = nn4_small2(in_n) class TripletLossLayer(Layer): def __init__(self, alpha, **kwargs): self.alpha = alpha super(TripletLossLayer, self).__init__(**kwargs) def triplet_loss(self, inputs): a, p, n = inputs p_dist = K.sum(K.square(a-p), axis=-1) n_dist = K.sum(K.square(a-n), axis=-1) return K.sum(K.maximum(p_dist - n_dist + self.alpha, 0), axis=0) def call(self, inputs): loss = self.triplet_loss(inputs) self.add_loss(loss) return loss # Layer that computes the triplet loss from anchor, positive and negative embedding vectors triplet_loss_layer = TripletLossLayer(alpha=0.2, name='triplet_loss_layer')([emb_a, emb_p, emb_n]) # Model that can be trained with anchor, positive negative images nn4_small2_train = Model([in_a, in_p, in_n], triplet_loss_layer) ``` During training, it is important to select triplets whose positive pairs $(x^a_i, x^p_i)$ and negative pairs $(x^a_i, x^n_i)$ are hard to discriminate i.e. their distance difference in embedding space should be less than margin $\alpha$, otherwise, the network is unable to learn a useful embedding. Therefore, each training iteration should select a new batch of triplets based on the embeddings learned in the previous iteration. 
Assuming that a generator returned from a `triplet_generator()` call can generate triplets under these constraints, the network can be trained with: ``` from data import triplet_generator # triplet_generator() creates a generator that continuously returns # ([a_batch, p_batch, n_batch], None) tuples where a_batch, p_batch # and n_batch are batches of anchor, positive and negative RGB images # each having a shape of (batch_size, 96, 96, 3). generator = triplet_generator() nn4_small2_train.compile(loss=None, optimizer='adam') nn4_small2_train.fit_generator(generator, epochs=10, steps_per_epoch=100) # Please note that the current implementation of the generator only generates # random image data. The main goal of this code snippet is to demonstrate # the general setup for model training. In the following, we will anyway # use a pre-trained model so we don't need a generator here that operates # on real training data. I'll maybe provide a fully functional generator # later. ``` The above code snippet should merely demonstrate how to setup model training. But instead of actually training a model from scratch we will now use a pre-trained model as training from scratch is very expensive and requires huge datasets to achieve good generalization performance. For example, [[1]](https://arxiv.org/abs/1503.03832) uses a dataset of 200M images consisting of about 8M identities. The OpenFace project provides [pre-trained models](https://cmusatyalab.github.io/openface/models-and-accuracies/#pre-trained-models) that were trained with the public face recognition datasets [FaceScrub](http://vintage.winklerbros.net/facescrub.html) and [CASIA-WebFace](http://arxiv.org/abs/1411.7923). 
The Keras-OpenFace project converted the weights of the pre-trained nn4.small2.v1 model to [CSV files](https://github.com/iwantooxxoox/Keras-OpenFace/tree/master/weights) which were then [converted here](face-recognition-convert.ipynb) to a binary format that can be loaded by Keras with `load_weights`: ``` nn4_small2_pretrained = create_model() nn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5') ``` ### Custom dataset To demonstrate face recognition on a custom dataset, a small subset of the [LFW](http://vis-www.cs.umass.edu/lfw/) dataset is used. It consists of 100 face images of [10 identities](images). The metadata for each image (file and identity name) are loaded into memory for later processing. ``` import numpy as np import os.path class IdentityMetadata(): def __init__(self, base, name, file): # dataset base directory self.base = base # identity name self.name = name # image file name self.file = file def __repr__(self): return self.image_path() def image_path(self): return os.path.join(self.base, self.name, self.file) def load_metadata(path): metadata = [] for i in os.listdir(path): for f in os.listdir(os.path.join(path, i)): # Check file extension. Allow only jpg/jpeg' files. ext = os.path.splitext(f)[1] if ext == '.jpg' or ext == '.jpeg': metadata.append(IdentityMetadata(path, i, f)) return np.array(metadata) metadata = load_metadata('images') ``` ### Face alignment The nn4.small2.v1 model was trained with aligned face images, therefore, the face images from the custom dataset must be aligned too. Here, we use [Dlib](http://dlib.net/) for face detection and [OpenCV](https://opencv.org/) for image transformation and cropping to produce aligned 96x96 RGB face images. 
By using the [AlignDlib](align.py) utility from the OpenFace project this is straightforward: ``` import cv2 import matplotlib.pyplot as plt import matplotlib.patches as patches from align import AlignDlib %matplotlib inline def load_image(path): img = cv2.imread(path, 1) # OpenCV loads images with color channels # in BGR order. So we need to reverse them return img[...,::-1] # Initialize the OpenFace face alignment utility alignment = AlignDlib('models/landmarks.dat') # Load an image of Jacques Chirac jc_orig = load_image(metadata[2].image_path()) # Detect face and return bounding box bb = alignment.getLargestFaceBoundingBox(jc_orig) # Transform image using specified face landmark indices and crop image to 96x96 jc_aligned = alignment.align(96, jc_orig, bb, landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE) # Show original image plt.subplot(131) plt.imshow(jc_orig) # Show original image with bounding box plt.subplot(132) plt.imshow(jc_orig) plt.gca().add_patch(patches.Rectangle((bb.left(), bb.top()), bb.width(), bb.height(), fill=False, color='red')) # Show aligned image plt.subplot(133) plt.imshow(jc_aligned); ``` As described in the OpenFace [pre-trained models](https://cmusatyalab.github.io/openface/models-and-accuracies/#pre-trained-models) section, landmark indices `OUTER_EYES_AND_NOSE` are required for model nn4.small2.v1. Let's implement face detection, transformation and cropping as `align_image` function for later reuse. ``` def align_image(img): return alignment.align(96, img, alignment.getLargestFaceBoundingBox(img), landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE) ``` ### Embedding vectors Embedding vectors can now be calculated by feeding the aligned and scaled images into the pre-trained network. 
``` embedded = np.zeros((metadata.shape[0], 128)) for i, m in enumerate(metadata): img = load_image(m.image_path()) img = align_image(img) # scale RGB values to interval [0,1] img = (img / 255.).astype(np.float32) # obtain embedding vector for image embedded[i] = nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0] ``` Let's verify on a single triplet example that the squared L2 distance between its anchor-positive pair is smaller than the distance between its anchor-negative pair. ``` def distance(emb1, emb2): return np.sum(np.square(emb1 - emb2)) def show_pair(idx1, idx2): plt.figure(figsize=(8,3)) plt.suptitle(f'Distance = {distance(embedded[idx1], embedded[idx2]):.2f}') plt.subplot(121) plt.imshow(load_image(metadata[idx1].image_path())) plt.subplot(122) plt.imshow(load_image(metadata[idx2].image_path())); show_pair(2, 3) show_pair(2, 12) ``` As expected, the distance between the two images of Jacques Chirac is smaller than the distance between an image of Jacques Chirac and an image of Gerhard Schröder (0.30 < 1.12). But we still do not know what distance threshold $\tau$ is the best boundary for making a decision between *same identity* and *different identity*. ### Distance threshold To find the optimal value for $\tau$, the face verification performance must be evaluated on a range of distance threshold values. At a given threshold, all possible embedding vector pairs are classified as either *same identity* or *different identity* and compared to the ground truth. Since we're dealing with skewed classes (much more negative pairs than positive pairs), we use the [F1 score](https://en.wikipedia.org/wiki/F1_score) as evaluation metric instead of [accuracy](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html). 
``` from sklearn.metrics import f1_score, accuracy_score distances = [] # squared L2 distance between pairs identical = [] # 1 if same identity, 0 otherwise num = len(metadata) for i in range(num - 1): for j in range(1, num): distances.append(distance(embedded[i], embedded[j])) identical.append(1 if metadata[i].name == metadata[j].name else 0) distances = np.array(distances) identical = np.array(identical) thresholds = np.arange(0.3, 1.0, 0.01) f1_scores = [f1_score(identical, distances < t) for t in thresholds] acc_scores = [accuracy_score(identical, distances < t) for t in thresholds] opt_idx = np.argmax(f1_scores) # Threshold at maximal F1 score opt_tau = thresholds[opt_idx] # Accuracy at maximal F1 score opt_acc = accuracy_score(identical, distances < opt_tau) # Plot F1 score and accuracy as function of distance threshold plt.plot(thresholds, f1_scores, label='F1 score'); plt.plot(thresholds, acc_scores, label='Accuracy'); plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold') plt.title(f'Accuracy at threshold {opt_tau:.2f} = {opt_acc:.3f}'); plt.xlabel('Distance threshold') plt.legend(); ``` The face verification accuracy at $\tau$ = 0.56 is 95.7%. This is not bad given a baseline of 89% for a classifier that always predicts *different identity* (there are 980 pos. pairs and 8821 neg. pairs) but since nn4.small2.v1 is a relatively small model it is still less than what can be achieved by state-of-the-art models (> 99%). The following two histograms show the distance distributions of positive and negative pairs and the location of the decision boundary. There is a clear separation of these distributions which explains the discriminative performance of the network. One can also spot some strong outliers in the positive pairs class but these are not further analyzed here. 
``` dist_pos = distances[identical == 1] dist_neg = distances[identical == 0] plt.figure(figsize=(12,4)) plt.subplot(121) plt.hist(dist_pos) plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold') plt.title('Distances (pos. pairs)') plt.legend(); plt.subplot(122) plt.hist(dist_neg) plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold') plt.title('Distances (neg. pairs)') plt.legend(); ``` ### Face recognition Given an estimate of the distance threshold $\tau$, face recognition is now as simple as calculating the distances between an input embedding vector and all embedding vectors in a database. The input is assigned the label (i.e. identity) of the database entry with the smallest distance if it is less than $\tau$ or label *unknown* otherwise. This procedure can also scale to large databases as it can be easily parallelized. It also supports one-shot learning, as adding only a single entry of a new identity might be sufficient to recognize new examples of that identity. A more robust approach is to label the input using the top $k$ scoring entries in the database which is essentially [KNN classification](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm) with a Euclidean distance metric. Alternatively, a linear [support vector machine](https://en.wikipedia.org/wiki/Support_vector_machine) (SVM) can be trained with the database entries and used to classify i.e. identify new inputs. For training these classifiers we use 50% of the dataset, for evaluation the other 50%. 
``` from sklearn.preprocessing import LabelEncoder from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC targets = np.array([m.name for m in metadata]) encoder = LabelEncoder() encoder.fit(targets) # Numerical encoding of identities y = encoder.transform(targets) train_idx = np.arange(metadata.shape[0]) % 2 != 0 test_idx = np.arange(metadata.shape[0]) % 2 == 0 # 50 train examples of 10 identities (5 examples each) X_train = embedded[train_idx] # 50 test examples of 10 identities (5 examples each) X_test = embedded[test_idx] y_train = y[train_idx] y_test = y[test_idx] knn = KNeighborsClassifier(n_neighbors=1, metric='euclidean') svc = LinearSVC() knn.fit(X_train, y_train) svc.fit(X_train, y_train) acc_knn = accuracy_score(y_test, knn.predict(X_test)) acc_svc = accuracy_score(y_test, svc.predict(X_test)) print(f'KNN accuracy = {acc_knn}, SVM accuracy = {acc_svc}') ``` The KNN classifier achieves an accuracy of 96% on the test set, the SVM classifier 98%. Let's use the SVM classifier to illustrate face recognition on a single example. ``` import warnings # Suppress LabelEncoder warning warnings.filterwarnings('ignore') example_idx = 29 example_image = load_image(metadata[test_idx][example_idx].image_path()) example_prediction = svc.predict([embedded[test_idx][example_idx]]) example_identity = encoder.inverse_transform(example_prediction)[0] plt.imshow(example_image) plt.title(f'Recognized as {example_identity}'); ``` Seems reasonable :-) Classification results should actually be checked whether (a subset of) the database entries of the predicted identity have a distance less than $\tau$, otherwise one should assign an *unknown* label. This step is skipped here but can be easily added. 
### Dataset visualization To embed the dataset into 2D space for displaying identity clusters, [t-distributed Stochastic Neighbor Embedding](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding) (t-SNE) is applied to the 128-dimensional embedding vectors. Except from a few outliers, identity clusters are well separated. ``` from sklearn.manifold import TSNE X_embedded = TSNE(n_components=2).fit_transform(embedded) for i, t in enumerate(set(targets)): idx = targets == t plt.scatter(X_embedded[idx, 0], X_embedded[idx, 1], label=t) plt.legend(bbox_to_anchor=(1, 1)); ``` ### References - [1] [FaceNet: A Unified Embedding for Face Recognition and Clustering](https://arxiv.org/abs/1503.03832) - [2] [Going Deeper with Convolutions](https://arxiv.org/abs/1409.4842)
github_jupyter
![data-x](http://oi64.tinypic.com/o858n4.jpg) --- # Pandas Introduction ### with Stock Data and Correlation Examples **Author list:** Alexander Fred-Ojala & Ikhlaq Sidhu **References / Sources:** Includes examples from Wes McKinney and the 10min intro to Pandas **License Agreement:** Feel free to do whatever you want with this code ___ ## What Does Pandas Do? <img src="https://github.com/ikhlaqsidhu/data-x/raw/master/imgsource/pandas-p1.jpg"> ## What is a Pandas Table Object? <img src="https://github.com/ikhlaqsidhu/data-x/raw/master/imgsource/pandas-p2.jpg"> # Import packages ``` # import packages import pandas as pd # Extra packages import numpy as np import matplotlib.pyplot as plt # for plotting import seaborn as sns # for plotting and styling # jupyter notebook magic to display plots in output %matplotlib inline plt.rcParams['figure.figsize'] = (10,6) # make the plots bigger ``` # Part 1 ### Simple creation and manipulation of Pandas objects **Key Points:** Pandas has two / three main data types: * Series (similar to numpy arrays, but with index) * DataFrames (table or spreadsheet with Series in the columns) * Panels (3D version of DataFrame, not as common) ### It is easy to create a DataFrame ### We use `pd.DataFrame(**inputs**)` and can insert almost any data type as an argument **Function:** `pd.DataFrame(data=None, index=None, columns=None, dtype=None, copy=False)` Input data can be a numpy ndarray (structured or homogeneous), dict, or DataFrame. Dict can contain Series, arrays, constants, or list-like objects as the values. 
``` # Try it with an array np.random.seed(0) # set seed for reproducibility a1 = np.array(np.random.randn(3)) a2 = np.array(np.random.randn(3)) a3 = np.array(np.random.randn(3)) print (a1) print (a2) print (a3) # Create our first DataFrame w/ an np.array - it becomes a column df0 = pd.DataFrame(a1) print(type(df0)) df0 # DataFrame from list of np.arrays df0 = pd.DataFrame([a1, a2, a3]) df0 # notice that there is no column label, only integer values, # and the index is set automatically # DataFrame from 2D np.array ax = np.random.randn(9).reshape(3,3) ax df0 = pd.DataFrame(ax,columns=['rand_normal_1','Random Again','Third'], index=[100,200,99]) # we can also assign columns and indices, sizes have to match df0 # DataFrame from a Dictionary dict1 = {'A':a1, 'B':a2} df1 = pd.DataFrame(dict1) df1 # note that we now have columns without assignment # We can easily add another column (just as you add values to a dictionary) df1['C']=a3 df1 # We can add a list with strings and ints as a column df1['L'] = ["Something", 3, "words"] df1 ``` # Pandas Series object ### Like an np.array, but we can combine data types and it has its own index Note: Every column in a DataFrame is a Series ``` print(df1[['L','A']]) print(type(df1['L'])) df1 # We can also rename columns df1 = df1.rename(columns = {'L':'Renamed'}) df1 # We can delete columns del df1['C'] df1 # or drop columns df1.drop('A',axis=1,inplace=True) # does not change df1 if we don't set inplace=True df1 df1 # or drop rows df1.drop(0) # Example: view only one column df1['B'] # Or view several column df1[['B','Renamed']] ``` # Other ways of slicing In the 10 min Pandas Guide, you will see many ways to view, slice a dataframe * view/slice by rows, eg `df[1:3]`, etc. 
* view by index location, see `df.iloc` (iloc) * view by ranges of labels, ie index label 2 to 5, or dates feb 3 to feb 25, see `df.loc` (loc) * view a single row by the index `df.xs` (xs) or `df.ix` (ix) * filtering rows that have certain conditions * add column * add row * How to change the index and more... ``` print (df1[0:2]) # ok df1 df1.iloc[1,1] df1 ``` # Part 2 ## Finance example: Large Data Frames ### Now, lets get some data in CSV format. See https://www.quantshare.com/sa-43-10-ways-to-download-historical-stock-quotes-data-for-free ``` !ls data/ # We can download data from the web by using pd.read_csv # A CSV file is a comma seperated file # We can use this 'pd.read_csv' method with urls that host csv files base_url = 'https://google.com/finance?output=csv&q=' dfg = pd.read_csv('data/googl.csv').drop('Unnamed: 0',axis=1) # Google stock data dfa = pd.read_csv('data/apple.csv').drop('Unnamed: 0',axis=1) dfg dfg.head() # show first five values dfg.tail(3) # last three dfg.columns # returns columns, can be used to loop over dfg.index # return ``` # Convert the index to pandas datetime object ``` dfg['Date'][0] type(dfg['Date'][0]) dfg.index = pd.to_datetime(dfg['Date']) # set index dfg.drop(['Date'],axis=1,inplace=True) dfg.head() print(type(dfg.index[0])) dfg.index[0] dfg.index dfg['2017-08':'2017-06'] ``` # Attributes & general statitics of a Pandas DataFrame ``` dfg.shape # 251 business days last year dfg.columns dfg.size # Some general statistics dfg.describe() # Boolean indexing dfg['Open'][dfg['Open']>1130] # check what dates the opening # Check where Open, High, Low and Close where greater than 1130 dfg[dfg>1000].drop('Volume',axis=1) # If you want the values in an np array dfg.values ``` ## .loc() ``` # Getting a cross section with .loc - BY VALUES of the index and columns # df.loc[a:b, x:y], by rows and column location # Note: You have to know indices and columns dfg.loc['2017-08-31':'2017-08-21','Open':'Low'] ``` ## .iloc() ``` # .iloc slicing at 
specific location - BY POSITION in the table # Recall: # dfg[a:b] by rows # dfg[[col]] or df[[col1, col2]] by columns # df.loc[a:b, x:y], by index and column values + location # df.iloc[3:5,0:2], numeric position in table dfg.iloc[1:4,3:5] # 2nd to 4th row, 4th to 5th column ``` ### More Basic Statistics ``` # We can change the index sorting dfg.sort_index(axis=0, ascending=True).head() # starts a year ago # sort by value dfg.sort_values(by='Open')[0:10] ``` # Boolean ``` dfg[dfg>1115].head(10) # we can also drop all NaN values dfg[dfg>1115].head(10).dropna() dfg2 = dfg # make a copy and not a view dfg2 is dfg ``` ### Setting Values ``` # Recall dfg.head(4) # All the ways to view # can also be used to set values # good for data normalization dfg['Volume'] = dfg['Volume']/100000.0 dfg.head(4) ``` ### More Statistics and Operations ``` # mean by column, also try var() for variance dfg.mean() dfg[0:5].mean(axis = 1) # row means of first five rows ``` # PlotCorrelation ### Load several stocks ``` # Reload dfg = pd.read_csv('data/googl.csv').drop('Unnamed: 0',axis=1) # Google stock data dfa = pd.read_csv('data/apple.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfm = pd.read_csv('data/microsoft.csv').drop('Unnamed: 0',axis=1) # Google stock data dfn = pd.read_csv('data/nike.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfb = pd.read_csv('data/boeing.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfb.head() # Rename columns dfg = dfg.rename(columns = {'Close':'GOOG'}) #print (dfg.head()) dfa = dfa.rename(columns = {'Close':'AAPL'}) #print (dfa.head()) dfm = dfm.rename(columns = {'Close':'MSFT'}) #print (dfm.head()) dfn = dfn.rename(columns = {'Close':'NKE'}) #print (dfn.head()) dfb = dfb.rename(columns = {'Close':'BA'}) dfb.head(2) # Lets merge some tables # They will all merge on the common column Date df = dfg[['Date','GOOG']].merge(dfa[['Date','AAPL']]) df = df.merge(dfm[['Date','MSFT']]) df = df.merge(dfn[['Date','NKE']]) df = 
df.merge(dfb[['Date','BA']]) df.head() df['Date'] = pd.to_datetime(df['Date']) df = df.set_index('Date') df.head() df.plot() df['2017'][['NKE','BA']].plot() # show a correlation matrix (pearson) crl = df.corr() crl crl.sort_values(by='GOOG',ascending=False) s = crl.unstack() so = s.sort_values(ascending=False) so[so<1] df.mean() sim=df-df.mean() sim.tail() sim[['MSFT','BA']].plot() ```
github_jupyter
# Parameter estimation and hypothesis testing ``` #Import packages import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import pymc3 as pm from ipywidgets import interact import arviz as az %matplotlib inline sns.set() ``` ## Learning Objectives of Part 2 1. Understand what priors, likelihoods and posteriors are; 2. Use random sampling for parameter estimation to appreciate the relationship between sample size & the posterior distribution, along with the effect of the prior; 3. Use probabilistic programming for parameter estimation; 4. Use probabilistic programming for hypothesis testing. ## 1. From Bayes Theorem to Bayesian Inference Let's say that we flip a biased coin several times and we want to estimate the probability of heads from the number of heads we saw. Statistical intuition tells us that our best estimate of $p(heads)=$ number of heads divided by total number of flips. However, 1. It doesn't tell us how certain we can be of that estimate and 2. This type of intuition doesn't extend to even slightly more complex examples. Bayesian inference helps us here. We can calculate the probability of a particular $p=p(H)$ given data $D$ by setting $A$ in Bayes Theorem equal to $p$ and $B$ equal to $D$. $$P(p|D) = \frac{P(D|p)P(p)}{P(D)} $$ In this equation, we call $P(p)$ the prior (distribution), $P(D|p)$ the likelihood and $P(p|D)$ the posterior (distribution). The intuition behind the nomenclature is as follows: the prior is the distribution containing our knowledge about $p$ prior to the introduction of the data $D$ & the posterior is the distribution containing our knowledge about $p$ after considering the data $D$. **Note** that we're _overloading_ the term _probability_ here. 
In fact, we have 3 distinct usages of the word: - The probability $p$ of seeing a head when flipping a coin; - The resulting binomial probability distribution $P(D|p)$ of seeing the data $D$, given $p$; - The prior & posterior probability distributions of $p$, encoding our _uncertainty_ about the value of $p$. **Key concept:** We only need to know the posterior distribution $P(p|D)$ up to multiplication by a constant at the moment: this is because we really only care about the values of $P(p|D)$ relative to each other – for example, what is the most likely value of $p$? To answer such questions, we only need to know what $P(p|D)$ is proportional to, as a function of $p$. Thus we don’t currently need to worry about the term $P(D)$. In fact, $$P(p|D) \propto P(D|p)P(p) $$ **Note:** What is the prior? Really, what do we know about $p$ before we see any data? Well, as it is a probability, we know that $0\leq p \leq1$. If we haven’t flipped any coins yet, we don’t know much else: so it seems logical that all values of $p$ within this interval are equally likely, i.e., $P(p)=1$, for $0\leq p \leq1$. This is known as an uninformative prior because it contains little information (there are other uninformative priors we may use in this situation, such as the Jeffreys prior, to be discussed later). People who like to hate on Bayesian inference tend to claim that the need to choose a prior makes Bayesian methods somewhat arbitrary, but as we’ll now see, if you have enough data, the likelihood dominates over the prior and the latter doesn’t matter so much. **Essential remark:** we get the whole distribution of $P(p|D)$, not merely a point estimate plus errors bars, such as [95% confidence intervals](http://andrewgelman.com/2018/07/04/4th-july-lets-declare-independence-95/). ## 2. Bayesian parameter estimation I: flip those coins Now let's generate some coin flips and try to estimate $p(H)$. 
Two notes: - given data $D$ consisting of $n$ coin tosses & $k$ heads, the likelihood function is given by $L:=P(D|p) \propto p^k(1-p)^{n-k}$; - given a uniform prior, the posterior is proportional to the likelihood. ``` def plot_posterior(p=0.6, N=0): """Plot the posterior given a uniform prior; Bernoulli trials with probability p; sample size N""" np.random.seed(42) # Flip coins n_successes = np.random.binomial(N, p) # X-axis for PDF x = np.linspace(0, 1, 100) # Prior prior = 1 # Compute posterior, given the likelihood (analytic form) posterior = x**n_successes*(1-x)**(N-n_successes)*prior posterior /= np.max(posterior) # so that peak always at 1 plt.plot(x, posterior) plt.show() plot_posterior(N=10) ``` * Now use the great ipywidget interact to check out the posterior as you generate more and more data (you can also vary $p$): ``` interact(plot_posterior, p=(0, 1, 0.01), N=(0, 1500)); ``` **Notes for discussion:** * as you generate more and more data, your posterior gets narrower, i.e. you get more and more certain of your estimate. * you need more data to be certain of your estimate when $p=0.5$, as opposed to when $p=0$ or $p=1$. ### The choice of the prior You may have noticed that we needed to choose a prior and that, in the small to medium data limit, this choice can affect the posterior. We'll briefly introduce several types of priors and then you'll use one of them for the example above to see the effect of the prior: - **Informative priors** express specific, definite information about a variable, for example, if we got a coin from the mint, we may use an informative prior with a peak at $p=0.5$ and small variance. - **Weakly informative priors** express partial information about a variable, such as a peak at $p=0.5$ (if we have no reason to believe the coin is biased), with a larger variance. - **Uninformative priors** express no information about a variable, except what we know for sure, such as knowing that $0\leq p \leq1$. 
Now you may think that the _uniform distribution_ is uninformative, however, what if I am thinking about this question in terms of the probability $p$ and Eric Ma is thinking about it in terms of the _odds ratio_ $r=\frac{p}{1-p}$? Eric rightly feels that he has no prior knowledge as to what this $r$ is and thus chooses the uniform prior on $r$. With a bit of algebra (transformation of variables), we can show that choosing the uniform prior on $p$ amounts to choosing a decidedly non-uniform prior on $r$ and vice versa. So Eric and I have actually chosen different priors, using the same philosophy. How do we avoid this happening? Enter the **Jeffreys prior**, which is an uninformative prior that solves this problem. You can read more about the Jeffreys prior [here](https://en.wikipedia.org/wiki/Jeffreys_prior) & in your favourite Bayesian text book (Sivia gives a nice treatment). In the binomial (coin flip) case, the Jeffreys prior is given by $P(p) = \frac{1}{\sqrt{p(1-p)}}$. #### Hands-on * Create an interactive plot like the one above, except that it has two posteriors on it: one for the uniform prior, another for the Jeffries prior. ``` # Solution def plot_posteriors(p=0.6, N=0): np.random.seed(42) n_successes = np.random.binomial(N, p) x = np.linspace(0.01, 0.99, 100) posterior1 = x**n_successes*(1-x)**(N-n_successes) # w/ uniform prior posterior1 /= np.max(posterior1) # so that peak always at 1 plt.plot(x, posterior1, label='Uniform prior') jp = np.sqrt(x*(1-x))**(-1) # Jeffreys prior posterior2 = posterior1*jp # w/ Jeffreys prior posterior2 /= np.max(posterior2) # so that peak always at 1 (not quite correct to do; see below) plt.plot(x, posterior2, label='Jeffreys prior') plt.legend() plt.show() interact(plot_posteriors, p=(0, 1, 0.01), N=(0, 100)); ``` **Question:** What happens to the posteriors as you generate more and more data? ## 3. Bayesian parameter estimation using PyMC3 Well done! You've learnt the basics of Bayesian model building. The steps are 1. 
To completely specify the model in terms of _probability distributions_. This includes specifying - what the form of the sampling distribution of the data is _and_ - what form describes our _uncertainty_ in the unknown parameters (This formulation is adapted from [Fonnesbeck's workshop](https://github.com/fonnesbeck/intro_stat_modeling_2017/blob/master/notebooks/2.%20Basic%20Bayesian%20Inference.ipynb) as Chris said it so well there). 2. Calculate the _posterior distribution_. In the above, the form of the sampling distribution of the data was Binomial (described by the likelihood) and the uncertainty around the unknown parameter $p$ captured by the prior. Now it is time to do the same using the **probabilistic programming language** PyMC3. There's _loads_ about PyMC3 and this paradigm, two of which are - _probabililty distributions_ are first class citizens, in that we can assign them to variables and use them intuitively to mirror how we think about priors, likelihoods & posteriors. - PyMC3 calculates the posterior for us! Under the hood, PyMC3 will compute the posterior using a sampling based approach called Markov Chain Monte Carlo (MCMC) or Variational Inference. Check the [PyMC3 docs](https://docs.pymc.io/) for more on these. But now, it's time to bust out some MCMC and get sampling! ### Parameter estimation I: click-through rate A common experiment in tech data science is to test a product change and see how it affects a metric that you're interested in. Say that I don't think enough people are clicking a button on my website & I hypothesize that it's because the button is a similar color to the background of the page. Then I can set up two pages and send some people to each: the first the original page, the second a page that is identical, except that it has a button that is of higher contrast and see if more people click through. 
This is commonly referred to as an A/B test and the metric of interest is click-through rate (CTR), what proportion of people click through. Before even looking at two rates, let's use PyMC3 to estimate one. First generate click-through data, given a CTR $p_a=0.15$. ``` # click-through rates p_a = 0.15 N = 150 n_successes_a = np.sum(np.random.binomial(N, p_a)) ``` Now it's time to build your probability model. Noticing that our model of having a constant CTR resulting in click or not is a biased coin flip, - the sampling distribution is binomial and we need to encode this in the likelihood; - there is a single parameter $p$ that we need to describe the uncertainty around, using a prior and we'll use a uniform prior for this. These are the ingredients for the model so let's now build it: ``` # Build model of p_a with pm.Model() as Model: # Prior on p prob = pm.Uniform('p') # Binomial Likelihood y = pm.Binomial('y', n=N, p=prob, observed=n_successes_a) ``` **Discussion:** - What do you think of the API for PyMC3. Does it reflect how we think about model building? It's now time to sample from the posterior using PyMC3. You'll also plot the posterior: ``` with Model: samples = pm.sample(2000, njobs=1) az.plot_posterior(samples, kind='hist'); ``` **For discussion:** Interpret the posterior ditribution. What would your tell the non-technical manager of your growth team about the CTR? ### Hands-on: Parameter estimation II -- the mean of a population In this exercise, you'll calculate the posterior mean beak depth of Galapagos finches in a given species. First you'll load the data and subset wrt species: ``` # Import and view head of data df_12 = pd.read_csv('../data/finch_beaks_2012.csv') df_fortis = df_12.loc[df_12['species'] == 'fortis'] df_scandens = df_12.loc[df_12['species'] == 'scandens'] ``` To specify the full probabilty model, you need - a likelihood function for the data & - priors for all unknowns. What is the likelihood here? 
Let's plot the measurements below and see that they look approximately Gaussian/normal so you'll use a normal likelihood $y_i\sim \mathcal{N}(\mu, \sigma^2)$. The unknowns here are the mean $\mu$ and standard deviation $\sigma$ and we'll use weakly informative priors on both - a normal prior for $\mu$ with mean $10$ and standard deviation $5$; - a uniform prior for $\sigma$ bounded between $0$ and $10$. We can discuss biological reasons for these priors also but you can also test that the posteriors are relativelyt robust to the choice of prior here due to the amount of data. ``` sns.distplot(df_fortis['blength']); with pm.Model() as model: # Prior for mean & standard deviation μ_1 = pm.Normal('μ_1', mu=10, sd=5) σ_1 = pm.Lognormal('σ_1', 0, 10) # Gaussian Likelihood y_1 = pm.Normal('y_1', mu=μ_1, sd=σ_1, observed=df_fortis['blength']) # bust it out & sample with model: samples = pm.sample(2000, njobs=1) az.plot_posterior(samples); ``` ## 4. Bayesian Hypothesis testing ### Bayesian Hypothesis testing I: A/B tests on click through rates Assume we have a website and want to redesign the layout (*A*) and test whether the new layout (*B*) results in a higher click through rate. When people come to our website we randomly show them layout *A* or *B* and see how many people click through for each. First let's generate the data we need: ``` # click-through rates p_a = 0.15 p_b = 0.20 N = 1000 n_successes_a = np.sum(np.random.uniform(size=N) <= p_a) n_successes_b = np.sum(np.random.uniform(size=N) <= p_b) ``` Once again, we need to specify our models for $p_a$ and $p_b$. Each will be the same as the CTR example above - Binomial likelihoods - uniform priors on $p_a$ and $_p$. We also want to calculate the posterior of the difference $p_a-p_b$ and we do so using `pm.Deterministic()`, which specifies a deterministic random variable, i.e., one that is completely determined by the values it references, in the case $p_a$ & $p_b$. 
We'll now build the model: ``` with pm.Model() as Model: # Prior on p prob_a = pm.Uniform('p_a') prob_b = pm.Uniform('p_b') # Binomial Likelihood y_a = pm.Binomial('y_a', n=N, p=prob_a, observed=n_successes_a) y_b = pm.Binomial('y_b', n=N, p=prob_b, observed=n_successes_b) diff_clicks = pm.Deterministic('diff_clicks', prob_a-prob_b) ``` Sample from the posterior and plot them: ``` with Model: samples = pm.sample(2000, njobs=1) az.plot_posterior(samples, kind='hist'); ``` ### Hands-on: Bayesian Hypothesis testing II -- beak lengths difference between species **Task**: Determine whether the mean beak length of the Galapogas finches differs between species. For the mean of each species, use the same model as in previous hand-on section: - Gaussian likelihood; - Normal prior for the means; - Uniform prior for the variances. Also calculate the difference between the means and, for bonus points, the _effect size_, which is the difference between the means divided by the pooled standard deviations = $\sqrt{(\sigma_1^2+\sigma_2^2)/2}$. Hugo will talk through the importance of the _effect size_. Don't forget to sample from the posteriors and plot them! ``` with pm.Model() as model: # Priors for means and variances μ_1 = pm.Normal('μ_1', mu=10, sd=5) σ_1 = pm.Uniform('σ_1', 0, 10) μ_2 = pm.Normal('μ_2', mu=10, sd=5) σ_2 = pm.Uniform('σ_2', 0, 10) # Gaussian Likelihoods y_1 = pm.Normal('y_1', mu=μ_1, sd=σ_1, observed=df_fortis['blength']) y_2 = pm.Normal('y_2', mu=μ_2, sd=σ_2, observed=df_scandens['blength']) # Calculate the effect size and its uncertainty. diff_means = pm.Deterministic('diff_means', μ_1 - μ_2) pooled_sd = pm.Deterministic('pooled_sd', np.sqrt(np.power(σ_1, 2) + np.power(σ_2, 2)) / 2) effect_size = pm.Deterministic('effect_size', diff_means / pooled_sd) # bust it out & sample with model: samples = pm.sample(2000, njobs=1) az.plot_posterior(samples, var_names=['μ_1', 'μ_2', 'diff_means', 'effect_size'], kind='hist'); ```
github_jupyter
# Clonotype and sequence deduplication Starting with annotated sequence data (in AbStar's `minimal` output format), reduces sequences to clonotypes and collapses dupicate clonotypes. The [`abutils`](https://www.github.com/briney/abutils) Python package is required, and can be installed by running `pip install abutils` *NOTE: this notebook requires the use of the Unix command line tool `sort`. Thus, it requires a Unix-based operating system to run correctly (MacOS and most flavors of Linux should be fine). Running this notebook on Windows 10 may be possible using the [Windows Subsystem for Linux](https://docs.microsoft.com/en-us/windows/wsl/about) but we have not tested this.* ``` from __future__ import print_function, division import itertools import multiprocessing as mp import os import subprocess as sp import sys import tempfile from abutils.utils.jobs import monitor_mp_jobs from abutils.utils.pipeline import list_files, make_dir from abutils.utils.progbar import progress_bar ``` ### Subjects, directories and data fields The input data (annotated sequences in [abstar's](https://github.com/briney/abstar) `minimal` format) is too large to be stored in a Github repository. A compressed archive of the data can be downloaded [**here**](http://burtonlab.s3.amazonaws.com/GRP_github_data/techrep-merged_minimal_no-header.tar.gz). The data file is fairly large (about 400GB uncompressed), so make sure you have enough space before downloading. Decompressing the archive from within the `data` directory (located in the same parent directory as this notebook) will allow the code in this notebook to run without modification. If you would prefer to store the input data somewhere else, be sure to modify the `raw_input_dir` path below. The data fields defined below correspond to the prosition in abstar's `minimal` format. If for some reason you have a differently formatted annotation file, change the field positions to suit your annotation file. 
def dedup_bioreps(files, raw_clonotype_dir, unique_clonotype_dir,
                  raw_sequence_dir, unique_sequence_dir, log_file=None):
    """Extract clonotype (V/J/CDR3-aa) and sequence (V/J/VDJ-nt) records from
    abstar `minimal`-format files, write the raw records, and collapse
    duplicates with Unix `sort -u`.

    Parameters:
        files: minimal-format CSV files for one subject (one per replicate)
        raw_clonotype_dir / unique_clonotype_dir: output dirs for raw and
            deduplicated clonotype records
        raw_sequence_dir / unique_sequence_dir: output dirs for raw and
            deduplicated sequence records
        log_file: optional path; raw/unique counts are appended to it

    Relies on the module-level field indices (prod_field, v_field, j_field,
    cdr3aa_field, vdjnt_field) defined in the configuration cell above.
    """
    # set up output directories
    make_dir(raw_clonotype_dir)
    make_dir(unique_clonotype_dir)
    make_dir(raw_sequence_dir)
    make_dir(unique_sequence_dir)
    # process minimal output files
    for _f in files:
        print(os.path.basename(_f))
        clonotype_output_data = []
        sequence_output_data = []
        raw_clonotype_file = os.path.join(raw_clonotype_dir, os.path.basename(_f))
        unique_clonotype_file = os.path.join(unique_clonotype_dir, os.path.basename(_f))
        raw_sequence_file = os.path.join(raw_sequence_dir, os.path.basename(_f))
        unique_sequence_file = os.path.join(unique_sequence_dir, os.path.basename(_f))
        # collect clonotype/sequence information, skipping non-productive rows
        with open(_f) as f:
            for line in f:
                data = line.strip().split(',')
                if data[prod_field] == 'no':
                    continue
                v_gene = data[v_field]
                j_gene = data[j_field]
                cdr3_aa = data[cdr3aa_field]
                vdj_nt = data[vdjnt_field]
                clonotype_output_data.append(' '.join([v_gene, j_gene, cdr3_aa]))
                sequence_output_data.append(' '.join([v_gene, j_gene, vdj_nt]))
        # the clonotype and sequence halves were copy-paste duplicates;
        # factored into a single helper (same outputs, prints and log lines)
        _write_and_collapse(clonotype_output_data, raw_clonotype_file,
                            unique_clonotype_file, 'clonotypes', 'CLONOTYPES', log_file)
        _write_and_collapse(sequence_output_data, raw_sequence_file,
                            unique_sequence_file, 'sequences', 'SEQUENCES', log_file)
        print('')


def _write_and_collapse(output_data, raw_file, unique_file, label, log_tag, log_file):
    """Write raw records to `raw_file`, collapse duplicates into `unique_file`
    with `sort -u`, print raw/unique counts and optionally append them to
    `log_file` as '<LOG_TAG>: <raw> <unique>'."""
    raw_string = '\n'.join(output_data)
    with open(raw_file, 'w') as rf:
        rf.write(raw_string)
    raw_count = len(output_data)
    print('raw {}:'.format(label), raw_count)
    # collapse duplicates (without counts).
    # universal_newlines=True so communicate() accepts/returns str on
    # Python 3 -- the original passed a str to a bytes pipe, which raises
    # TypeError under Python 3.
    uniq_cmd = 'sort -u -o {} -'.format(unique_file)
    p = sp.Popen(uniq_cmd, stdout=sp.PIPE, stderr=sp.PIPE, stdin=sp.PIPE,
                 shell=True, universal_newlines=True)
    stdout, stderr = p.communicate(input=raw_string)
    # count the number of unique records
    wc_cmd = 'wc -l {}'.format(unique_file)
    q = sp.Popen(wc_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True,
                 universal_newlines=True)
    _count, _ = q.communicate()
    unique_count = int(_count.split()[0])
    print('unique {}:'.format(label), unique_count)
    if log_file is not None:
        with open(log_file, 'a') as f:
            f.write('{}: {} {}\n'.format(log_tag, raw_count, unique_count))
dedup_bioreps(files, raw_clonotype_dir, unique_clonptype_dir, raw_sequence_dir, unique_sequence_dir, log_file=logfile) print('') ``` ## Deduplication (subject pools) In the previous blocks of code, we created a unique clonotype file for each biological replicate for each subject. Here, we'd like to create a single file for each subject containing only unique clonotypes (regardless of which biological replicate they came from). ``` dedup_clonotype_subject_pool_dir = './data/dedup_subject_clonotype_pools/' dedup_sequence_subject_pool_dir = './data/dedup_subject_sequence_pools/' make_dir(dedup_clonotype_subject_pool_dir) make_dir(dedup_sequence_subject_pool_dir) ``` First we want to create a unique clonotype file for each subject that also contains the number of times we saw each clonotype (using the deduplicated biological replicates, so the clonotype count essentially tallies the number of biological replicates in which we observed each clonotype) ``` for subject in subjects: print(subject) # clonotypes input_clonotype_files = list_files(os.path.join(dedup_clonotype_dir, subject)) ofile = os.path.join(dedup_clonotype_subject_pool_dir, '{}_dedup_pool_vj-aa_with-counts.txt'.format(subject)) uniq_cmd = 'cat {} | sort | uniq -c > {}'.format(' '.join(input_clonotype_files), ofile) c = sp.Popen(uniq_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) stdout, stderr = c.communicate() # sequences input_sequence_files = list_files(os.path.join(dedup_sequence_dir, subject)) ofile = os.path.join(dedup_sequence_subject_pool_dir, '{}_dedup_pool_vdj-nt_with-counts.txt'.format(subject)) uniq_cmd = 'cat {} | sort | uniq -c > {}'.format(' '.join(input_sequence_files), ofile) s = sp.Popen(uniq_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) stdout, stderr = s.communicate() ``` Now the same process, but without counts: ``` for subject in subjects: print(subject) # clonotypes input_clonotype_files = list_files(os.path.join(dedup_clonotype_dir, subject)) ofile = 
os.path.join(dedup_clonotype_subject_pool_dir, '{}_dedup_pool_vj-aa.txt'.format(subject)) uniq_cmd = 'cat {} | sort | uniq > {}'.format(' '.join(input_clonotype_files), ofile) c = sp.Popen(uniq_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) stdout, stderr = c.communicate() # sequences input_sequence_files = list_files(os.path.join(dedup_sequence_dir, subject)) ofile = os.path.join(dedup_sequence_subject_pool_dir, '{}_dedup_pool_vdj-nt.txt'.format(subject)) uniq_cmd = 'cat {} | sort | uniq > {}'.format(' '.join(input_sequence_files), ofile) s = sp.Popen(uniq_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) stdout, stderr = s.communicate() ``` ## Deduplication (cross-subject pools) Finally, we'd like to create unique clonotype files (with counts) for every groupwise combination of our 10 subjects. Each group can contain two or more subjects, meaning the total number of possible groupwise combinations is quite large. We'll use the `multiprocessing` package to parallelize the process which should speed things up substantially, although even with parallelization, this will take some time. ***NOTE:*** *The output from the following code blocks will be quite large (deduplicated clonotype files are >2TB in total, deduplicated sequence files are >20TB in total). 
Make sure you have sufficient storage and that the output paths below (`dedup_cross_subject_clonotype_pool_dir` and `dedup_cross_subject_sequence_pool_dir` are correct before starting.* ``` # directories dedup_cross_subject_clonotype_pool_dir = './data/dedup_cross-subject_clonotype_pools/' dedup_cross_subject_sequence_pool_dir = './data/dedup_cross-subject_sequence_pools/' make_dir(dedup_cross_subject_clonotype_pool_dir) make_dir(dedup_cross_subject_sequence_pool_dir) # deduplicated subject pool files dedup_clonotype_subject_files = [f for f in list_files(dedup_clonotype_subject_pool_dir) if '_dedup_pool_vj-aa.txt' in f] dedup_sequence_subject_files = [f for f in list_files(dedup_sequence_subject_pool_dir) if '_dedup_pool_vdj-nt.txt' in f] # every possible groupwise combination of subjects (2 or more subjects per group) subject_combinations_by_size = {} for size in range(2, 11): subject_combinations_by_size[size] = [sorted(c) for c in itertools.combinations(subjects, size)] def dedup_cross_subject_pool(subjects, files, output_dir): files = sorted(list(set([f for f in dedup_subject_files if os.path.basename(f).split('_')[0] in subjects]))) output_file = os.path.join(output_dir, '{}_dedup_pool_vj-aa_with-counts.txt'.format('-'.join(subjects))) uniq_cmd = 'cat {} | sort -T {} | uniq -c > {}'.format(' '.join(files), temp_dir, output_file) p = sp.Popen(uniq_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) stdout, stderr = p.communicate() ``` ### Clonotypes ``` p = mp.Pool(maxtasksperchild=1) for size in sorted(subject_combinations_by_size.keys()): subject_combinations = subject_combinations_by_size[size] async_results = [] print('{}-subject pools:'.format(size)) progress_bar(0, len(subject_combinations)) for sub_comb in subject_combinations: files = sorted(list(set([f for f in dedup_clonotype_subject_files if os.path.basename(f).split('_')[0] in sub_comb]))) async_results.append(p.apply_async(dedup_cross_subject_pool, args=(sub_comb, files, 
dedup_cross_subject_clonotype_pool_dir))) monitor_mp_jobs(async_results) print('\n') p.close() p.join() ``` ### Sequences Just one more warning that the following code block will produce a very large amount of data (>20TB) and will take many hours to run even on a fairly robust server (an `m4.16xlarge` AWS EC2 instance, for example). ``` p = mp.Pool(maxtasksperchild=1) for size in sorted(subject_combinations_by_size.keys()): subject_combinations = subject_combinations_by_size[size] async_results = [] print('{}-subject pools:'.format(size)) progress_bar(0, len(subject_combinations)) for sub_comb in subject_combinations: files = sorted(list(set([f for f in dedup_sequence_subject_files if os.path.basename(f).split('_')[0] in sub_comb]))) async_results.append(p.apply_async(dedup_cross_subject_pool, args=(sub_comb, files, dedup_cross_subject_sequence_pool_dir))) monitor_mp_jobs(async_results) print('\n') p.close() p.join() ```
github_jupyter
# export
@docs
class ProgressCallback(Callback):
    "A `Callback` to handle the display of progress bars"
    # must run after Recorder so metric_names/smooth_loss exist when we log
    run_after=Recorder

    def before_fit(self):
        # Recorder is required: we read its metric_names and smooth_loss
        assert hasattr(self.learn, 'recorder')
        # master bar spans all epochs; create_mbar is False for nested fits
        if self.create_mbar: self.mbar = master_bar(list(range(self.n_epoch)))
        if self.learn.logger != noop:
            # divert the learner's logger through _write_stats (table rows);
            # keep the old logger so after_fit can restore it
            self.old_logger,self.learn.logger = self.logger,self._write_stats
            # header row of metric names
            self._write_stats(self.recorder.metric_names)
        else: self.old_logger = noop

    def before_epoch(self):
        # mbar may be absent when create_mbar was False
        if getattr(self, 'mbar', False): self.mbar.update(self.epoch)

    def before_train(self):    self._launch_pbar()
    def before_validate(self): self._launch_pbar()
    def after_train(self):    self.pbar.on_iter_end()
    def after_validate(self): self.pbar.on_iter_end()

    def after_batch(self):
        self.pbar.update(self.iter+1)
        # show the running smoothed loss next to the bar (train only)
        if hasattr(self, 'smooth_loss'): self.pbar.comment = f'{self.smooth_loss:.4f}'

    def _launch_pbar(self):
        # child bar over the current dataloader, nested under mbar if present
        self.pbar = progress_bar(self.dl, parent=getattr(self, 'mbar', None), leave=False)
        self.pbar.update(0)

    def after_fit(self):
        if getattr(self, 'mbar', False):
            self.mbar.on_iter_end()
            # drop the master bar so a later fit starts fresh
            delattr(self, 'mbar')
        # restore whatever logger was in place before this fit
        if hasattr(self, 'old_logger'): self.learn.logger = self.old_logger

    def _write_stats(self, log):
        # format floats to 6 decimals; everything else (epoch, time) as str
        if getattr(self, 'mbar', False): self.mbar.write([f'{l:.6f}' if isinstance(l, float) else str(l) for l in log], table=True)

    # per-method docstrings injected by the @docs decorator
    _docs = dict(before_fit="Setup the master bar over the epochs",
                 before_epoch="Update the master bar",
                 before_train="Launch a progress bar over the training dataloader",
                 before_validate="Launch a progress bar over the validation dataloader",
                 after_train="Close the progress bar over the training dataloader",
                 after_validate="Close the progress bar over the validation dataloader",
                 after_batch="Update the current progress bar",
                 after_fit="Close the master bar")
self.progress.mbar.update_graph([(iters, rec.losses), (self.nb_batches, val_losses)], x_bounds, y_bounds) #slow learn = synth_learner(cbs=ShowGraphCallback()) learn.fit(5) ``` ## CSVLogger - ``` # export class CSVLogger(Callback): run_after=Recorder "Log the results displayed in `learn.path/fname`" def __init__(self, fname='history.csv', append=False): self.fname,self.append = Path(fname),append def read_log(self): "Convenience method to quickly access the log." return pd.read_csv(self.path/self.fname) def before_fit(self): "Prepare file with metric names." self.path.parent.mkdir(parents=True, exist_ok=True) self.file = (self.path/self.fname).open('a' if self.append else 'w') self.file.write(','.join(self.recorder.metric_names) + '\n') self.old_logger,self.learn.logger = self.logger,self._write_line def _write_line(self, log): "Write a line with `log` and call the old logger." self.file.write(','.join([str(t) for t in log]) + '\n') self.old_logger(log) def after_fit(self): "Close the file and clean up." self.file.close() self.learn.logger = self.old_logger ``` The results are appended to an existing file if `append`, or they overwrite it otherwise. ``` learn = synth_learner(cbs=CSVLogger()) learn.fit(5) show_doc(CSVLogger.read_log) df = learn.csv_logger.read_log() test_eq(df.columns.values, learn.recorder.metric_names) for i,v in enumerate(learn.recorder.values): test_close(df.iloc[i][:3], [i] + v) os.remove(learn.path/learn.csv_logger.fname) show_doc(CSVLogger.before_fit) show_doc(CSVLogger.after_fit) ``` ## Export - ``` #hide from nbdev.export import notebook2script notebook2script() ```
github_jupyter
## In this notebook (ipynb), I'm going to build a model that can classify the Clothing Attribute Dataset, which can be found at https://purl.stanford.edu/tb980qz1002, by its Category label. This is an image recognition and classification task. The dataset has only 1800 samples, of which around 1100 have non-NaN labels.
## -> Therefore, the approach to be followed will be:
## -> Use transfer learning (in this case VGGNet16 trained on ImageNet data) to learn weights for our features
## -> Train our features against a classifier. Our choice of classifier here is SVM
```
import keras
import scipy.io as sio
import os
from keras.applications import imagenet_utils
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
import numpy as np
import h5py
from keras.utils.np_utils import to_categorical
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense, Conv2D, MaxPooling2D
from keras import applications
from keras.optimizers import Adam
# Plot images
from keras.datasets import mnist
from matplotlib import pyplot
import pickle
#import cv2

image_dir = "/images"
label_file = "/labels/category_GT.mat"
```
## The first step is to load our data and labels. The data is stored in the images folder; the labels are stored in a MATLAB file. Each image file's name corresponds to its label index (plus 1, as image names start from 1).
## To fix this , we first read all image file names in a list , sort the list and then parse files in ascending order, matching with the order of their labels¶ ``` #get labels from the category label file for the task mat_contents = sio.loadmat(os.getcwd() + label_file)['GT'] train_labels=np.array(mat_contents) print "training labels loaded" #print train_labels.shape file_list = [f for f in os.listdir(os.getcwd() + image_dir) if os.path.isfile(os.path.join(os.getcwd() + image_dir, f))] file_list.sort() #get train data inputShape = (150, 150) img_list =[] # for filename in os.listdir(os.getcwd() + image_dir): for filename in file_list: qualified_filename = os.getcwd() + image_dir + "/" + filename #print filename #print("[INFO] loading and pre-processing image...") image = load_img(qualified_filename, target_size=inputShape) #print (image.size) image = img_to_array(image) # our input image is now represented as a NumPy array of shape # (inputShape[0], inputShape[1], 3) pos = filename.split(".")[0] pos = int(pos) #print pos #inserting the image at correct index that matches its label img_list.insert(pos -1 , image) print pos -1 print "training data loaded" train_data = np.array(img_list) print "shape of training data is " + str(train_data.shape) #print img_list[0] ``` #### We'll do some EDA now. Because this data is labeled for multiple categories, we will explicitly look for Nan labels and filter them out . 
This reduces the number of available samples to 1104 ``` #removing nan values def get_filtered_data(train_data, train_labels): print "in Filter Data method" bool_label_array = np.isfinite(np.ravel(train_labels)) # print bool_label_array train_data_filter = train_data[bool_label_array] print train_data_filter.shape train_labels_filter = train_labels[np.isfinite(train_labels)] print train_labels_filter.shape return (train_data_filter, train_labels_filter) (train_data_filter, train_labels_filter) = get_filtered_data(train_data, train_labels) print train_data.shape ``` #### It is important to see how the labels are distributed. If the data is biased towards one class, we might have to resample ``` # now let's see the distribution of classes from collections import Counter print Counter(train_labels_filter) ``` ### The data seems to be distributed fairly , therefore we don't need to do class balancing . Now we'll write a function that shuffles our data , whilst maintaining the relative indexes of data and labels ``` def shuffle_data(x_train, y_train_zero): idx = np.random.randint(len(y_train_zero), size=int(len(y_train_zero))) y_train_zero = y_train_zero[idx] x_train = x_train[idx, :] return x_train, y_train_zero ``` ### Before we start training our model , it is important to split our data into training and testing (eval) data . This enforces that the model never sees the test data before we start evaluation and helps us measure the effectiveness of our models . ### Since the size of out dataset is 1104, we're splitting it roughly into 75 - 25 ratio of train and test data . After splitting the data , we also write these to numpy files which can be loaded into memory using auxillary methods provided at the end of the notebook ### we shall use VGG16 to learn weights from the 16th layer of VGGNet for our images. Finally we'll save these features to a file ``` #load images # dimensions of our images. 
top_model_weights_path = 'bottleneck_fc_model.h5'
epochs = 5
batch_size = 16
# scale pixel values from [0, 255] to [0, 1] before feeding VGG16
train_data_filter = train_data_filter/255

def save_bottleneck_features_without_augmentation():
    # Run the filtered images once through VGG16 (ImageNet weights, no
    # classifier head) and return the resulting "bottleneck" features
    # together with the matching labels.  No augmentation is actually
    # performed here -- the two local lists below are unused and the
    # prints are leftovers from an augmented variant of this cell.
    train_data_aug=[]
    train_labels_aug=[]
    model = applications.VGG16(include_top=False, weights='imagenet')
    print "loading gen on training data"
    print "generating augmentations of data"
    bottleneck_features_train =model.predict( train_data_filter, verbose =1)
    return bottleneck_features_train, train_labels_filter

print "saving bottleneck features"
train_data_aug, train_labels_aug = save_bottleneck_features_without_augmentation()

# Compute one-level accuracy: trace(confusion matrix) / total = fraction of
# correctly classified samples
def accuracy(matrix):
    return (np.trace(matrix)) * 1.0 / np.sum(matrix)

print train_data_aug.shape
print train_labels_aug.shape
```
#### Visualizing our data : Let's see the first 9 images from the consolidated , as well as the evaluation and training datasets
```
def plot_first_n_images(img_list=img_list,n=9):
    # Plot the first `n` images on a 3x3 grid.
    # NOTE(review): the default binds the module-level `img_list` at
    # definition time; callers normally pass an array explicitly
    # (as the call below does with train_data_filter).
    # load data
    # create a grid of 3x3 images
    for i in range(0, n):
        pyplot.subplot(330 + 1 + i)
        pyplot.imshow(img_list[i])
    # show the plot
    pyplot.show()

plot_first_n_images(train_data_filter)
```
### The features from VGGNet are very rich, but also very high in dimension ( 8192) . Since the size of our data is small, we shall be applying PCA to get the first 1000 more discriminative features.
We chose the value 1000, after running hit and trial on a number of feature sizes to see which one produced the best evaluation metrics ``` #train_data_flat = np.reshape(train_data_aug,(8848, 67500) ) #print train_data_flat.shape import numpy as np import pandas as pd import sklearn from sklearn.model_selection import KFold, cross_val_score, GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import confusion_matrix import pickle # PCA def pca(train_data_flat, num_features): import numpy as np from sklearn.decomposition import PCA pca = PCA(n_components=num_features) pca.fit(train_data_flat) # print(pca.explained_variance_ratio_) # print(pca.singular_values_) train_data_flat_pca = pca.transform(train_data_flat) print train_data_flat_pca.shape return train_data_flat_pca train_data_flat = np.reshape(train_data_aug, (1104, 8192)) train_data_flat_pca = pca(train_data_flat, 1000) print train_data_aug.shape print train_data_flat.shape print train_data_flat_pca.shape print train_labels_filter.shape ``` ### We will now use the model with pre-trained weights and train them with a linear classifier . Since we've used augmentation with zero mean and PCA, we can't use Naive Bayes (doesn't take negative values) . The algorithms we'll test against are : #### 1. Logistic Regression #### 2. SVM ( Use grid search to find the best params and predict with the given parameters) #### 3. 
Random Forest ``` #logistic regression from sklearn import linear_model from sklearn.metrics import f1_score def lr(train_data, label, split): logistic_clf = linear_model.LogisticRegression(penalty="l2", class_weight="balanced", max_iter=100, verbose=1) logistic_clf.fit(train_data[:split], label[:split]) pred = logistic_clf.predict(train_data[split:]) print confusion_matrix(label[split:], pred) print accuracy(confusion_matrix(label[split:], pred)) print f1_score(label[split:], pred, average= 'micro') print f1_score(label[split:], pred, average= 'macro') print f1_score(label[split:], pred, average= 'weighted') # lr(train_data_flat, train_labels_aug, 850) ``` #### Running logistic Regression ``` train_data_flat_pca = pca(train_data_flat, 1000) lr(train_data_flat_pca2, train_labels_aug2, 900) train_data_flat_pca = pca(train_data_flat, 1104) lr(train_data_flat_pca2, train_labels_aug2, 900) ``` #### running SVM, first selecting the best parameters using Grid Search then using those params to evaluate results ``` from sklearn.grid_search import GridSearchCV def svm(train_data, train_labels_augmented): from sklearn import svm svc = svm.SVC(C=0.5, kernel='linear') param_grid = [ {'C': [0.1, 0.5, 1, 5], 'kernel': ['linear']}, {'C': [0.1, 0.5, 1, 5], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']}, ] kernel = ['linear', 'rbf'] Cs = [0.1, 0.3, 1] clf = GridSearchCV(estimator=svc, param_grid=param_grid, cv=10, n_jobs=-1,) clf.fit(train_data, train_labels_augmented) print(clf.best_score_) print(clf.best_estimator_.C) print(clf.best_estimator_.kernel) print(clf.best_params_) return clf.cv_results_ # train_data_flat_pca = pca(train_data_flat, 1000) cv_results_ = svm(train_data_flat_pca, train_labels_aug) # train_data_flat_pca = pca(train_data_flat, 1104) # lr(train_data_flat_pca, train_labels_aug, 850) def svm_best(train_data, label, split): from sklearn import svm clf = svm.SVC(C=5, kernel='rbf', gamma = 0.001) clf.fit(train_data[:split], label[:split]) pred = 
clf.predict(train_data[split:]) print confusion_matrix(label[split:], pred) print accuracy(confusion_matrix(label[split:], pred)) print f1_score(label[split:], pred, average= 'micro') print f1_score(label[split:], pred, average= 'macro') print f1_score(label[split:], pred, average= 'weighted') train_data_flat_pca2, train_labels_aug2 = shuffle_data(train_data_flat_pca, train_labels_aug) svm_best(train_data_flat_pca2, train_labels_aug2, 900) ``` #### Running Random Forest using Grid Search to get classifier with best performance. Since the outputs in grid search don't do better than LR and SVM , we don't go forward with evaluation ``` def random_forest(X, y, split): k_fold = 10 kf_total = KFold(n_splits=k_fold) from sklearn.ensemble import RandomForestClassifier forest = RandomForestClassifier(n_estimators=250, random_state=0) #estimators_list = [50, 100, 150, 250, 500, 800, 1000] estimators_list = [50, 150, 500] clf_forest = GridSearchCV(estimator=forest, param_grid=dict(n_estimators=estimators_list, warm_start=[True, False]), cv=k_fold, n_jobs=-1) cms = [confusion_matrix(y[split:], clf_forest.fit(X[:split],y[:split]).predict(X[split:])) for train, test in kf_total.split(X)] accuracies = [] for cm in cms: accuracies.append(accuracy(cm)) print(accuracies) print(np.mean(accuracies)) random_forest(train_data_flat_pca2, train_labels_aug2, 900) ``` # End of code in notebook Auxillary methods to load data from pickle files ``` import pickle file = open('train_data_1044.pkl', 'wb') # Pickle dictionary using protocol 0. pickle.dump(train_data_aug, file) file.close() file = open('train_label_v16_1044.pkl', 'wb') # # Pickle dictionary using protocol 0. pickle.dump(train_labels_filter, file) file.close() def plot_contours(ax, clf, xx, yy, **params): """Plot the decision boundaries for a classifier. 
Parameters ---------- ax: matplotlib axes object clf: a classifier xx: meshgrid ndarray yy: meshgrid ndarray params: dictionary of params to pass to contourf, optional """ Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) out = ax.contourf(xx, yy, Z, **params) return out with open('train_data.pkl', 'rb') as f: train_data_augmented = pickle.load(f) train_data_augmented.shape, with open('train_label.pkl', 'rb') as f: train_labels_augmented = pickle.load(f) train_labels_augmented.shape ```
github_jupyter
# Write summaries

TensorBoard helps us to summarize important parameters (such as weights, biases, activations, accuracy, loss, ...) and to see how each parameter changes over the iterations of training. We can also inspect images using TensorBoard.

## Imports:

We will start with importing the needed libraries for our code.
```
# imports
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
```
## Input data:

For this tutorial we use the MNIST dataset. MNIST is a dataset of handwritten digits. If you are into machine learning, you might have heard of this dataset by now. MNIST is a common benchmark dataset for deep learning. One other reason that we use MNIST is that it is easily accessible through TensorFlow. If you want to know more about the MNIST dataset you can check Yann LeCun's website. We can easily import the dataset and see the sizes of the training, test and validation sets:
```
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print("Size of:")
print("- Training-set:\t\t{}".format(len(mnist.train.labels)))
print("- Test-set:\t\t{}".format(len(mnist.test.labels)))
print("- Validation-set:\t{}".format(len(mnist.validation.labels)))
```
## Hyper-parameters:

Hyper-parameters are important parameters which are not learned by the network, so we have to specify them externally. These parameters are constant and not learnable.
```
# hyper-parameters
logs_path = "./logs/write_summaries"  # path to the folder that we want to save the logs for TensorBoard
learning_rate = 0.001  # The optimization learning rate
epochs = 10  # Total number of training epochs
batch_size = 100  # Training batch size
display_freq = 100  # Frequency of displaying the training results

# Network Parameters
# We know that MNIST images are 28 pixels in each dimension.
img_h = img_w = 28
# Images are stored in one-dimensional arrays of this length.
img_size_flat = img_h * img_w # Number of classes, one class for each of 10 digits. n_classes = 10 # number of units in the first hidden layer h1 = 200 ``` ## Graph: Like before, we start by constructing the graph. But, we need to define some functions that we need rapidly in our code. To visualize the parameters, we will use ```tf.summary``` class to write the summaries of parameters. Notice ```tf.summary.histogram()```__ functions added to the code. ``` # weight and bais wrappers def weight_variable(name, shape): """ Create a weight variable with appropriate initialization :param name: weight name :param shape: weight shape :return: initialized weight variable """ initer = tf.truncated_normal_initializer(stddev=0.01) return tf.get_variable('W_' + name, dtype=tf.float32, shape=shape, initializer=initer) def bias_variable(name, shape): """ Create a bias variable with appropriate initialization :param name: bias variable name :param shape: bias variable shape :return: initialized bias variable """ initial = tf.constant(0., shape=shape, dtype=tf.float32) return tf.get_variable('b_' + name, dtype=tf.float32, initializer=initial) def fc_layer(x, num_units, name, use_relu=True): """ Create a fully-connected layer :param x: input from previous layer :param num_units: number of hidden units in the fully-connected layer :param name: layer name :param use_relu: boolean to add ReLU non-linearity (or not) :return: The output array """ with tf.variable_scope(name): in_dim = x.get_shape()[1] W = weight_variable(name, shape=[in_dim, num_units]) tf.summary.histogram('W', W) b = bias_variable(name, [num_units]) tf.summary.histogram('b', b) layer = tf.matmul(x, W) layer += b if use_relu: layer = tf.nn.relu(layer) return layer ``` Now that we have our helper functions we can create our graph. To visualize some scalar values (such as loss and accuracy) we will use __```tf.summary.scalar()```__. To visualize some images we will use __```tf.summary.image()```__. 
Finally, to merge all the summeries, we will use __```tf.summary.merge_all()```__ function. ``` # Create graph # Placeholders for inputs (x), outputs(y) with tf.variable_scope('Input'): x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='X') tf.summary.image('input_image', tf.reshape(x, (-1, img_w, img_h, 1)), max_outputs=5) y = tf.placeholder(tf.float32, shape=[None, n_classes], name='Y') fc1 = fc_layer(x, h1, 'Hidden_layer', use_relu=True) output_logits = fc_layer(fc1, n_classes, 'Output_layer', use_relu=False) # Define the loss function, optimizer, and accuracy with tf.variable_scope('Train'): with tf.variable_scope('Loss'): loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output_logits), name='loss') tf.summary.scalar('loss', loss) with tf.variable_scope('Optimizer'): optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, name='Adam-op').minimize(loss) with tf.variable_scope('Accuracy'): correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name='correct_pred') accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy') tf.summary.scalar('accuracy', accuracy) # Network predictions cls_prediction = tf.argmax(output_logits, axis=1, name='predictions') # Initializing the variables init = tf.global_variables_initializer() merged = tf.summary.merge_all() ``` ## Train: As soon as the graph is created, we can run it on a session. A ```tf.Session()``` is as good as it's runtime. As soon as the cell is run, the session will be ended and we will loose all the information. So. we will define an _InteractiveSession_ to keep the parameters for testing. __NOTE:__ Each time that we run our session, we have to pass the _```merged```_ variable (which we merged all the summerize in) and we have to add the summaries in our __```tf.summary.FileWriter```__ class using __```add_summary```__ method in our class. 
__NOTE:__ We can let the summary writer class know that these summaries are for which step by passing the _```step```_ which we are in. ``` # Launch the graph (session) sess = tf.InteractiveSession() # using InteractiveSession instead of Session to test network in separate cell sess.run(init) train_writer = tf.summary.FileWriter(logs_path, sess.graph) num_tr_iter = int(mnist.train.num_examples / batch_size) global_step = 0 for epoch in range(epochs): print('Training epoch: {}'.format(epoch + 1)) for iteration in range(num_tr_iter): batch_x, batch_y = mnist.train.next_batch(batch_size) global_step += 1 # Run optimization op (backprop) feed_dict_batch = {x: batch_x, y: batch_y} _, summary_tr = sess.run([optimizer, merged], feed_dict=feed_dict_batch) train_writer.add_summary(summary_tr, global_step) if iteration % display_freq == 0: # Calculate and display the batch loss and accuracy loss_batch, acc_batch = sess.run([loss, accuracy], feed_dict=feed_dict_batch) print("iter {0:3d}:\t Loss={1:.2f},\tTraining Accuracy={2:.01%}". format(iteration, loss_batch, acc_batch)) # Run validation after every epoch feed_dict_valid = {x: mnist.validation.images, y: mnist.validation.labels} loss_valid, acc_valid = sess.run([loss, accuracy], feed_dict=feed_dict_valid) print('---------------------------------------------------------') print("Epoch: {0}, validation loss: {1:.2f}, validation accuracy: {2:.01%}". format(epoch + 1, loss_valid, acc_valid)) print('---------------------------------------------------------') ``` ## Test: Now that the model is trained. It is time to test our model. We will define some helper functions to plot some of the images and their corresponding predicted and true classes. We will also visualize some of the misclassified samples to see why the Neural Net failed to classify them correctly. ``` def plot_images(images, cls_true, cls_pred=None, title=None): """ Create figure with 3x3 sub-plots. 
:param images: array of images to be plotted, (9, img_h*img_w) :param cls_true: corresponding true labels (9,) :param cls_pred: corresponding true labels (9,) """ fig, axes = plt.subplots(3, 3, figsize=(9, 9)) fig.subplots_adjust(hspace=0.3, wspace=0.3) img_h = img_w = np.sqrt(images.shape[-1]).astype(int) for i, ax in enumerate(axes.flat): # Plot image. ax.imshow(images[i].reshape((img_h, img_w)), cmap='binary') # Show true and predicted classes. if cls_pred is None: ax_title = "True: {0}".format(cls_true[i]) else: ax_title = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i]) ax.set_title(ax_title) # Remove ticks from the plot. ax.set_xticks([]) ax.set_yticks([]) if title: plt.suptitle(title, size=20) plt.show() def plot_example_errors(images, cls_true, cls_pred, title=None): """ Function for plotting examples of images that have been mis-classified :param images: array of all images, (#imgs, img_h*img_w) :param cls_true: corresponding true labels, (#imgs,) :param cls_pred: corresponding predicted labels, (#imgs,) """ # Negate the boolean array. incorrect = np.logical_not(np.equal(cls_pred, cls_true)) # Get the images from the test-set that have been # incorrectly classified. incorrect_images = images[incorrect] # Get the true and predicted classes for those images. cls_pred = cls_pred[incorrect] cls_true = cls_true[incorrect] # Plot the first 9 images. 
plot_images(images=incorrect_images[0:9], cls_true=cls_true[0:9], cls_pred=cls_pred[0:9], title=title) # Test the network after training # Accuracy feed_dict_test = {x: mnist.test.images, y: mnist.test.labels} loss_test, acc_test = sess.run([loss, accuracy], feed_dict=feed_dict_test) print('---------------------------------------------------------') print("Test loss: {0:.2f}, test accuracy: {1:.01%}".format(loss_test, acc_test)) print('---------------------------------------------------------') # Plot some of the correct and misclassified examples cls_pred = sess.run(cls_prediction, feed_dict=feed_dict_test) cls_true = np.argmax(mnist.test.labels, axis=1) plot_images(mnist.test.images, cls_true, cls_pred, title='Correct Examples') plot_example_errors(mnist.test.images, cls_true, cls_pred, title='Misclassified Examples') ``` After we are finished the testing, we will close the session to free the memory. ``` # close the session after you are done with testing sess.close() ``` At this step our coding is done. We have also plotted the accuarcy and some examples. But to inspect more in our network, we can run the __Tensorboard__. Open your terminal and type: ```bash tensorboard --logdir=logs/write_summaries --host localhost ``` and Open the generated link in your browser. __NOTE:__ Don't forget to activate your environment !!! You can see the images, scalars and histograms in added tabs: __Image summaries:__ <img src="https://github.com/easy-tensorflow/easy-tensorflow/raw/master/4_Tensorboard/Tutorials/files/image_summaries.png"> __Scalar summaries:__ <img src="https://github.com/easy-tensorflow/easy-tensorflow/raw/master/4_Tensorboard/Tutorials/files/scalar_summaries.png"> __Histogram summaries:__ <img src="https://github.com/easy-tensorflow/easy-tensorflow/raw/master/4_Tensorboard/Tutorials/files/hist_summaries.png"> Thanks for reading! If you have any question or doubt, feel free to leave a comment in our [website](http://easy-tensorflow.com/).
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import pandas as pd %matplotlib inline ``` ## Sigmoid function ``` def sigmoid(x): return 1 / (1+np.exp(-x)) x = np.linspace(-10,10,100) plt.plot(x, sigmoid(x), 'r', label='linspace(-10,10,10)') plt.grid() plt.title('Sigmoid Function') plt.text(4, 0.8, r'$\sigma(x)=\frac{1}{1+e^{-x}}$', fontsize=15) plt.xlabel('X') plt.ylabel(r'$\sigma(x)$') bx = [-10,10] by = [.5, .5] plt.plot(x, sigmoid(x), 'r', label='sigmoid function') plt.plot(bx, by, 'b', label='boundary') plt.grid() plt.title('Sigmoid function with threshold') plt.text(4, 0.8, r'$\sigma(x)=\frac{1}{1+e^{-x}}$', fontsize=15) plt.xlabel('X') plt.ylabel(r'$\sigma(x)$') ``` ## Logistic Regression ``` import sklearn from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.metrics import accuracy_score ``` ## We only consider 2 classes here, so we need to drop one class. We can use pandas to do that ``` iris = datasets.load_iris() iris_df = pd.DataFrame(data= np.c_[iris['data'], iris['target']], columns= iris['feature_names'] + ['target']) iris_df = iris_df.astype({'target': int}) iris_df = iris_df[iris_df['target'] != 2] iris_df.head() iris_df['target'].value_counts() X = iris_df.drop(iris_df.columns[[4]], axis=1) y = iris_df.drop(iris_df.columns[[0,1,2,3]], axis=1) X.head() y.head() def intialization(n_features): w = np.zeros((1,n_features)) b = 0 return w, b def sigmoid(x): return 1 / (1+np.exp(-x)) def trainModel(w, b, X, Y, learning_rate=0.0001, no_iterations=5001): costs = [] m = X.shape[0] for i in range(no_iterations): # map the result to probability by sigmoid function a = sigmoid(np.dot(w,X.T)+b) # compute the neg log-likelihood cost = (-1/m)*(np.sum((Y.T*np.log(a)) + ((1-Y.T)*(np.log(1-a))))) # calculate the gradient dw = (1/m)*(np.dot(X.T, (a-Y.T).T)) db = (1/m)*(np.sum(a-Y.T)) # update w, b w = w - learning_rate*dw.T b = b - learning_rate*db if i%100==0: 
costs.append(cost) if i%500==0: print("%i iteration cost: %f" %(i, cost)) # final result coef = {"w": w, "b": b} return coef, costs def runModel(X_tr, y_tr, X_te, y_te, thershold=0.5): n_features = X_tr.shape[1] w, b = intialization(n_features) coef, costs = trainModel(w, b, X_tr, y_tr) w = coef['w'] b = coef['b'] y_tr_hat = np.array(sigmoid(np.dot(w,X_tr.T)+b)>thershold).astype(int) y_te_hat = np.array(sigmoid(np.dot(w,X_te.T)+b)>thershold).astype(int) print('Optimized weights:', w) print('Optimized intercept (b):',b) print('Training Accuracy',accuracy_score(y_tr_hat.T, y_tr)) print('Test Accuracy',accuracy_score(y_te_hat.T, y_te)) return costs X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=2018) y_tr = y_tr.as_matrix() y_ts = y_te.as_matrix() costs = runModel(X_tr, y_tr, X_te, y_te) plt.plot(costs) plt.ylabel('cost') plt.xlabel('iterations (per hundreds)') plt.title('Cost reduction over time') ```
github_jupyter
``` # HIDDEN from datascience import * import numpy as np path_data = '../../../../data/' import matplotlib matplotlib.use('Agg', warn=False) %matplotlib inline import matplotlib.pyplot as plots plots.style.use('fivethirtyeight') import warnings warnings.simplefilter(action="ignore", category=FutureWarning) from urllib.request import urlopen import re def read_url(url): return re.sub('\\s+', ' ', urlopen(url).read().decode()) # HIDDEN # Read two books, fast (again)! huck_finn_url = 'https://www.inferentialthinking.com/chapters/01/3/huck_finn.txt' huck_finn_text = read_url(huck_finn_url) huck_finn_chapters = huck_finn_text.split('CHAPTER ')[44:] little_women_url = 'https://www.inferentialthinking.com/chapters/01/3/little_women.txt' little_women_text = read_url(little_women_url) little_women_chapters = little_women_text.split('CHAPTER ')[1:] ``` In some situations, the relationships between quantities allow us to make predictions. This text will explore how to make accurate predictions based on incomplete information and develop methods for combining multiple sources of uncertain information to make decisions. As an example of visualizing information derived from multiple sources, let us first use the computer to get some information that would be tedious to acquire by hand. In the context of novels, the word "character" has a second meaning: a printed symbol such as a letter or number or punctuation symbol. Here, we ask the computer to count the number of characters and the number of periods in each chapter of both *Huckleberry Finn* and *Little Women*. ``` # In each chapter, count the number of all characters; # call this the "length" of the chapter. # Also count the number of periods. 
chars_periods_huck_finn = Table().with_columns([ 'Huck Finn Chapter Length', [len(s) for s in huck_finn_chapters], 'Number of Periods', np.char.count(huck_finn_chapters, '.') ]) chars_periods_little_women = Table().with_columns([ 'Little Women Chapter Length', [len(s) for s in little_women_chapters], 'Number of Periods', np.char.count(little_women_chapters, '.') ]) ``` Here are the data for *Huckleberry Finn*. Each row of the table corresponds to one chapter of the novel and displays the number of characters as well as the number of periods in the chapter. Not surprisingly, chapters with fewer characters also tend to have fewer periods, in general – the shorter the chapter, the fewer sentences there tend to be, and vice versa. The relation is not entirely predictable, however, as sentences are of varying lengths and can involve other punctuation such as question marks. ``` chars_periods_huck_finn ``` Here are the corresponding data for *Little Women*. ``` chars_periods_little_women ``` You can see that the chapters of *Little Women* are in general longer than those of *Huckleberry Finn*. Let us see if these two simple variables – the length and number of periods in each chapter – can tell us anything more about the two books. One way for us to do this is to plot both sets of data on the same axes. In the plot below, there is a dot for each chapter in each book. Blue dots correspond to *Huckleberry Finn* and gold dots to *Little Women*. The horizontal axis represents the number of periods and the vertical axis represents the number of characters. 
``` plots.figure(figsize=(6, 6)) plots.scatter(chars_periods_huck_finn.column(1), chars_periods_huck_finn.column(0), color='darkblue') plots.scatter(chars_periods_little_women.column(1), chars_periods_little_women.column(0), color='gold') plots.xlabel('Number of periods in chapter') plots.ylabel('Number of characters in chapter'); ``` The plot shows us that many but not all of the chapters of *Little Women* are longer than those of *Huckleberry Finn*, as we had observed by just looking at the numbers. But it also shows us something more. Notice how the blue points are roughly clustered around a straight line, as are the yellow points. Moreover, it looks as though both colors of points might be clustered around the *same* straight line. Now look at all the chapters that contain about 100 periods. The plot shows that those chapters contain about 10,000 characters to about 15,000 characters, roughly. That's about 100 to 150 characters per period. Indeed, it appears from looking at the plot that on average both books tend to have somewhere between 100 and 150 characters between periods, as a very rough estimate. Perhaps these two great 19th century novels were signaling something so very familiar us now: the 140-character limit of Twitter.
github_jupyter
``` from __future__ import division from __future__ import print_function import os import time import argparse import numpy as np import math import torch import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable from utils import load_data, accuracy, normalize, load_polblogs_data from models import GCN from torch.autograd.gradcheck import zero_gradients import os.path as op os.environ["CUDA_VISIBLE_DEVICES"]="1" # Training settings class args: cuda = True fastmode = False # seed = 123 seed = 20 epochs = 200 lr = 0.01 weight_decay = 5e-4 hidden = 16 dropout = 0.5 # pert_num = 20 L1 = 0.5 L2 = 0.1 dataset = "cora" radius = 4 np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) if args.dataset == "polblogs": tmp_adj, features, labels, idx_train, idx_test = load_polblogs_data() else: _, features, labels, idx_train, idx_val, idx_test, tmp_adj = load_data(args.dataset) num_classes = labels.max().item() + 1 # tmp_adj = tmp_adj.toarray() adj = tmp_adj adj = np.eye(tmp_adj.shape[0]) + adj adj, _ = normalize(adj) adj = torch.from_numpy(adj.astype(np.float32)) # print (sum(features)) # print (labels.shape) # print (idx_train.shape) # print (idx_val.shape) # print (idx_test) # s = adj.shape[0] # Model and optimizer model = GCN(nfeat=features.shape[1], nhid=args.hidden, nclass=num_classes, dropout=args.dropout ) optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) if args.cuda: model.cuda() features = features.cuda() adj = adj.cuda() labels = labels.cuda() idx_train = idx_train.cuda() if args.dataset != "polblogs": idx_val = idx_val.cuda() idx_test = idx_test.cuda() def train(epoch): t = time.time() model.train() optimizer.zero_grad() x = Variable(adj, requires_grad=True) output = model(features, x) loss_train = F.nll_loss(output[idx_train], labels[idx_train]) acc_train = accuracy(output[idx_train], labels[idx_train]) loss_train.backward() optimizer.step() 
if args.dataset != "polblogs": loss_val = F.nll_loss(output[idx_val], labels[idx_val]) acc_val = accuracy(output[idx_val], labels[idx_val]) print('Epoch: {:04d}'.format(epoch+1), 'loss_train: {:.4f}'.format(loss_train.item()), 'acc_train: {:.4f}'.format(acc_train.item()), 'loss_val: {:.4f}'.format(loss_val.item()), 'acc_val: {:.4f}'.format(acc_val.item()), 'time: {:.4f}s'.format(time.time() - t)) else: print('Epoch: {:04d}'.format(epoch+1), 'loss_train: {:.4f}'.format(loss_train.item()), 'acc_train: {:.4f}'.format(acc_train.item()), 'time: {:.4f}s'.format(time.time() - t)) def test(adj_m): model.eval() output = model(features, adj_m) loss_test = F.nll_loss(output[idx_test], labels[idx_test]) acc_test = accuracy(output[idx_test], labels[idx_test]) print("Test set results:", "loss= {:.4f}".format(loss_test.item()), "accuracy= {:.4f}".format(acc_test.item())) return output t_total = time.time() for epoch in range(args.epochs): train(epoch) print("Optimization Finished!") print("Total time elapsed: {:.4f}s".format(time.time() - t_total)) # torch.save(model, './cora_gcn.pth') # torch.save(model.state_dict(), 'cora_gcn.pkl') # Testing ori_output = test(adj) def calculate_grad(pert_adj, idx, classes): x = Variable(pert_adj, requires_grad=True) output = model(features, x) grad = [] # for i in range(classes): for i in classes: cls = torch.LongTensor(np.array(i).reshape(1)).cuda() loss = F.nll_loss(output[idx:idx+1], cls) loss.backward(retain_graph=True) grad.append(x.grad[idx].cpu().numpy()) # print ('grad', grad) return np.array(grad) def add_perturb(input_adj, idx, perturb): # (1-x)A + x(1-A) # input_adj = input_adj.toarray() x = np.zeros((input_adj.shape[0], input_adj.shape[1])) x[idx] = perturb x[:,idx] = perturb # print ('x', x[idx]) # x += np.transpose(x) #change the idx'th row and column x1 = np.ones((input_adj.shape[0], input_adj.shape[1])) - x # print ('x1', x1[idx]) adj2 = np.ones((input_adj.shape[0], input_adj.shape[1])) - input_adj # print ('adj2', adj2[idx]) 
for i in range(input_adj.shape[0]): adj2[i][i] = 0 perturbed_adj = np.multiply(x1, input_adj) + np.multiply(x, adj2) return perturbed_adj def proj_lp(v, xi=args.radius, p=2): # def proj_lp(v, xi=8, p=2): # Project on the lp ball centered at 0 and of radius xi # SUPPORTS only p = 2 and p = Inf for now # print ('the distance of v', np.linalg.norm(v.flatten(1))) if p == 2: v = v * min(1, xi/np.linalg.norm(v.flatten(1))) # v = v / np.linalg.norm(v.flatten(1)) * xi elif p == np.inf: v = np.sign(v) * np.minimum(abs(v), xi) else: v = v ################# v = np.clip(v, 0, 1) ######################## # v = np.where(v<0.1, 0, v) #to reduce the number of nonzero elements which means #the times of perturbation, also prevents saddle point # v = np.where(v>0.5, 1, 0) return v def select_pert(pert_m): tmp_pert_m = np.absolute(pert_m) sort_idx = tmp_pert_m.argsort()[::-1] sel_idx = np.zeros(pert_m.shape[0]) sel_idx[sort_idx[:args.pert_num]] = 1 return sel_idx def convert_to_v(adj, pert_m, deg, idx): a = np.multiply(pert_m, deg) inv_m = np.ones(adj.shape[0]) - np.multiply(adj[idx], 2) inv_m = np.power(inv_m, -1) res = np.multiply(a, inv_m) return res def normalize_add_perturb(ori_adj, idx, pert, d): a = ori_adj[idx] + pert inv_d = 1 + sum(pert) p_d = d * inv_d inv_d = 1.0/inv_d ## filter the perturbed matrix so that >= 0 # a = np.where(a<0, 0, a) ori_adj[idx] = np.multiply(a, inv_d) return ori_adj, p_d def deepfool(innormal_adj, ori_adj, idx, num_classes, degree, overshoot=0.02, max_iter=30): #innormal_adj: the perturbed adjacency matrix not normalized #ori_adj: the normalized perturbed adjacency matrix model.eval() pred = model(features, ori_adj)[idx] pred = pred.detach().cpu().numpy() I = pred.argsort()[::-1] I = I[0:num_classes] label = I[0] f_i = np.array(pred).flatten() k_i = int(np.argmax(f_i)) w = np.zeros(ori_adj.shape[0]) r_tot = np.zeros(ori_adj.size(0)) # pert_adj = ori_adj pert_adj = ori_adj.detach().cpu().numpy() pert_adj_tensor = ori_adj degree_idx = degree loop_i = 0 
# print ('the correct class', label) while k_i == label and loop_i < max_iter: pert = np.inf # gradients = calculate_grad(pert_adj_tensor, idx, num_classes) gradients = calculate_grad(pert_adj_tensor, idx, I) for i in range(1, num_classes): # set new w_k and new f_k w_k = gradients[i, :] - gradients[0, :] f_k = f_i[I[i]] - f_i[I[0]] pert_k = abs(f_k)/np.linalg.norm(w_k.flatten()) # print ('num_classes', num_classes) # print ('pert_k', pert_k) # print ('w_k', w_k) # determine which w_k to use if pert_k < pert: pert = pert_k w = w_k #this is for the polblogs # if sum(w) == 0: # break # compute r_i and r_tot r_i = pert * w / np.linalg.norm(w) # r_i = convert_to_v(innormal_adj, r_i, idx) #x_change = A'_change / (1-2A) r_tot = r_tot + r_i pert_adj, _ = normalize_add_perturb(pert_adj, idx, (1+overshoot)*r_tot, degree_idx) ################## pert_adj = np.clip(pert_adj, 0, 1) ######################## # pert_adj, _ = normalize(pert_adj + np.eye(ori_adj.shape[0])) loop_i += 1 # compute new label pert_adj_tensor = torch.from_numpy(pert_adj.astype(np.float32)) pert_adj_tensor = pert_adj_tensor.cuda() f_i = np.array(model(features, pert_adj_tensor)[idx].detach().cpu().numpy()).flatten() k_i = int(np.argmax(f_i)) # print ('degree', degree[idx]) # print ('original conn', ori_adj[idx]) # print ('r_tot', r_tot) # print ('perturbed conn row', pert_adj[idx]) # print ('output', f_i) # print ('the predict class', k_i) if k_i != label: print ('attack succeeds') r_tot = (1+overshoot)*r_tot # print ('the r_tot', r_tot) r_tot = convert_to_v(innormal_adj, r_tot, degree_idx, idx) return r_tot, loop_i def universal_attack(attack_epoch, max_epoch): model.eval() delta = 0.1 fooling_rate = 0.0 overshoot = 0.02 max_iter_df = 10 v = np.zeros(tmp_adj.shape[0]).astype(np.float32) # stdv = 1./math.sqrt(tmp_adj.shape[0]) # v = np.random.uniform(-stdv, stdv, tmp_adj.shape[0]) cur_foolingrate = 0.0 epoch = 0 early_stop = 0 results = [] folder_path = op.join("./", "perturbation_results") if not 
op.exists(folder_path): os.mkdir(folder_path) while fooling_rate < 1 - delta and epoch < max_epoch: epoch += 1 train_idx = idx_train.cpu().numpy() np.random.shuffle(train_idx) ############################################### print ('deepfooling...') attack_time = time.time() for k in train_idx: print ('deepfool node',k) #add v to see if the attack succeeds innormal_x_p = add_perturb(tmp_adj, k, v) ##################whether to use filtering # innormal_x_p = np.where(innormal_x_p<0.5, 0, 1) x_p, degree_p = normalize(innormal_x_p + np.eye(tmp_adj.shape[0])) #A' = A + I x_p = torch.from_numpy(x_p.astype(np.float32)) x_p = x_p.cuda() output = model(features, x_p) if int(torch.argmax(output[k])) == int(torch.argmax(ori_output[k])): dr, iter = deepfool(innormal_x_p, x_p, k, num_classes, degree_p[k]) if iter < max_iter_df-1: v = v + dr # Project on l_p ball v = proj_lp(v) # print ('L1 norm ov v', torch.norm(v, p=1)) # print ('L2 norm ov v', torch.norm(v, p=2)) else: print ('cant attack this node') # print ('the prediction of k node', int(torch.argmax(output[k]))) # print ('the true label', int(labels[k])) else: print ('attack succeeds') print ('the deepfooling time cost is', time.time()-attack_time) ################################################### # v = np.random.rand(tmp_adj.shape[0]) print ('the perturbation matrix is', v) print ('calculate the whole attack success rate over the train nodes') res = [] v = np.where(v>0.5, 1, 0) for k in train_idx: print ('test node', k) innormal_x_p = add_perturb(tmp_adj, k, v) # innormal_x_p = np.where(innormal_x_p<0.5, 0, 1) x_p, degree_p = normalize(innormal_x_p + np.eye(tmp_adj.shape[0])) x_p = torch.from_numpy(x_p.astype(np.float32)) x_p = x_p.cuda() output = model(features, x_p) if int(torch.argmax(output[k])) == int(torch.argmax(ori_output[k])): res.append(0) else: res.append(1) fooling_rate = float(sum(res)/len(res)) print ('the current train fooling rates are', fooling_rate) if fooling_rate > cur_foolingrate: cur_foolingrate = 
fooling_rate file_path = op.join(folder_path, '{1}_xi{2}_epoch100/perturbation_{1}_{0}.txt'.format(attack_epoch, args.dataset, args.radius)) with open(file_path) as f: for i in v: f.write(str(i) + '\n') results.append(fooling_rate) if epoch > 3: if fooling_rate == results[-2]: early_stop += 1 else: early_stop = 0 if early_stop == 15: break return cur_foolingrate train_foolrate = [] for i in range(0,10): fool_rate = universal_attack(i, 100) train_foolrate.append(fool_rate) print ('the final train fool rate', train_foolrate) ```
github_jupyter
# Understanding Structured Point Clouds (SPCs) Structured Point Clouds (SPC) is a differentiable, GPU-compatible, spatial-data structure which efficiently organizes 3D geometrically sparse information in a very compressed manner. ![alt text](../samples/spcTeapot.png "Structured Point Cloud Hierarchy") <b> When should you use it? </b> * The SPC data structure is very general, which makes it <mark>a suitable building block for a variety of applications</mark>. * Examples include: * [Representation & rendering of implicit 3D surfaces](https://nv-tlabs.github.io/nglod/) * Convolutions on voxels, meshes and point clouds * and more.. SPCs are easily convertible from point clouds and meshes, and can be optimized to represent encoded neural implicit fields. <b> In this tutorial you will learn to: </b> 1. Construct a SPC from triangular meshes and point clouds. 2. Visualize the SPC using ray-tracing functionality. 3. Become familiar with the internals of kaolin's SPC data structure Practitioners are encouraged to view the [documentation](https://kaolin.readthedocs.io/en/latest/modules/kaolin.ops.spc.html?highlight=spc#kaolin-ops-spc) for additional details about the internal workings of this data structure. <br> This tutorial is best run locally to observe the full output. ## Setup This tutorial assumes a minimal version of [kaolin v0.10.0](https://kaolin.readthedocs.io/en/latest/notes/installation.html). 
<br> In addition, the following libraries are required for this tutorial: ``` !pip install -q matplotlib !pip install -q termcolor !pip install -q ipywidgets from PIL import Image import torch import torch.nn.functional as F import numpy as np from matplotlib import pyplot as plt import ipywidgets as widgets from termcolor import colored import kaolin as kal from spc_formatting import describe_octree, color_by_level ``` To study the mechanics of the SPC structure, we'll need some auxilary functions (you may skip for now): <br> ``` def describe_tensor(torch_tensor, tensor_label, with_shape, with_content): if with_shape: print(f'"{tensor_label}" is a {torch_tensor.dtype} tensor of size {tuple(torch_tensor.shape)}') if with_content: print(f'Raw Content: \n{torch_tensor.cpu().numpy()}') def convert_texture_to_torch_sample_format(texture): """ Convert to (1, C, Tex-H, Tex-W) format """ return texture.unsqueeze(0).type(sampled_uvs.dtype).permute(0, 3, 1, 2) ``` ### Preliminaries: Load Mesh and sample as Point Cloud Throughout this tutorial we'll be using a triangular mesh as an example. <br> First, we import the mesh using kaolin: ``` # Path to some .obj file with textures mesh_path = "../samples/colored_sphere.obj" mesh = kal.io.obj.import_mesh(mesh_path, with_materials=True) print(f'Loaded mesh with {len(mesh.vertices)} vertices, {len(mesh.faces)} faces and {len(mesh.materials)} materials.') ``` Next, we'll oversample the mesh faces to make sure our SPC structure is densely populated and avoids "holes" at the highest resolution level. Note that our mesh face-vertices are mapped to some texture coordinates. Luckily, kaolin has a `sample_points` function that will take care of interpolating these coordinates for us. 
The sampled vertices will be returned along with the interpolated uv coordinates as well: ``` # Sample points over the mesh surface num_samples = 1000000 # Load the uv coordinates per face-vertex like "features" per face-vertex, # which sample_points will interpolate for new sample points. # mesh.uvs is a tensor of uv coordinates of shape (#num_uvs, 2), which we consider as "features" here # mesh.face_uvs_idx is a tensor of shape (#faces, 3), indexing which feature to use per-face-per-vertex # Therefore, face_features will be of shape (#faces, 3, 2) face_features = mesh.uvs[mesh.face_uvs_idx] # Kaolin assumes an exact batch format, we make sure to convert from: # (V, 3) to (1, V, 3) # (F, 3, 2) to (1, F, 3, 2) # where 1 is the batch size batched_vertices = mesh.vertices.unsqueeze(0) batched_face_features = face_features.unsqueeze(0) # sample_points is faster on cuda device batched_vertices = batched_vertices.cuda() faces = mesh.faces.cuda() batched_face_features = batched_face_features.cuda() sampled_verts, _, sampled_uvs = kal.ops.mesh.trianglemesh.sample_points(batched_vertices, faces, num_samples=num_samples, face_features=batched_face_features) print(f'Sampled {sampled_verts.shape[1]} points over the mesh surface:') print(f'sampled_verts is a tensor with batch size {sampled_verts.shape[0]},', f'with {sampled_verts.shape[1]} points of {sampled_verts.shape[2]}D coordinates.') print(f'sampled_uvs is a tensor with batch size {sampled_uvs.shape[0]},', f'representing the corresponding {sampled_uvs.shape[1]} {sampled_uvs.shape[2]}D UV coordinates.') ``` To finish our setup, we'll want to use the UV coordinates to perform texture sampling and obtain the RGB color of each point we have: ``` # Convert texture to sample-compatible format diffuse_color = mesh.materials[0]['map_Kd'] # Assumes a shape with a single material texture_maps = convert_texture_to_torch_sample_format(diffuse_color) # (1, C, Th, Tw) texture_maps = texture_maps.cuda() # Sample colors according to 
uv-coordinates sampled_uvs = kal.render.mesh.utils.texture_mapping(texture_coordinates=sampled_uvs, texture_maps=texture_maps, mode='nearest') # Unbatch vertices = sampled_verts.squeeze(0) vertex_colors = sampled_uvs.squeeze(0) # Normalize to [0,1] vertex_colors /= 255 print(f'vertices is a tensor of {vertices.shape}') print(f'vertex_colors is a tensor of {vertices.shape}') ``` ## 1. Create & Visualize SPC ### Create the SPC We start by converting our Point Cloud of continuous 3D coordinates to a Structured Point Cloud. <br> `unbatched_pointcloud_to_spc` will return a `Spc` object, a data class holding all Structured Point Cloud related information. <br> At the core of this object, points are kept in quantized coordinates using a compressed octree. <br> The returned object contains multiple low-level data structures which we'll explore in details in the next section. For now keep in mind that its important fields: `octree`, `features`, `point_hierarchy`, `pyramid` and `prefix`, represent our data structure. When constructing a `Spc` object, the resolution of quantized coordinates can be controlled by the octree `level` arg, such that: $resolution=2^{level}$ ``` # Our SPC will contain a hierarchy of multiple levels level = 3 spc = kal.ops.conversions.pointcloud.unbatched_pointcloud_to_spc(vertices, level, features=vertex_colors) ``` ### Set-up the camera The SPC data structure can be efficiently visualized using ray-tracing ops. <br> Note that SPC also supports differentiable rendering. In this tutorial we'll limit our demonstration to rendering this data structure efficiently. <br> Differentiable ray-tracing is beyond the scope of this guide, and will be covered in future tutorials. To begin our ray tracing implementation, we'll first need to set up our camera view and [generate some rays](https://www.scratchapixel.com/lessons/3d-basic-rendering/ray-tracing-generating-camera-rays). 
<br> We'll assume a pinhole camera model, and use the `look_at` function, which sets up a camera view originating at position `camera_from`, looking towards `camera_to`. <br> `width`, `height`, `mode` and `fov` will determine the dimensions of our view. ``` def _normalized_grid(width, height, device='cuda'): """Returns grid[x,y] -> coordinates for a normalized window. Args: width, height (int): grid resolution """ # These are normalized coordinates # i.e. equivalent to 2.0 * (fragCoord / iResolution.xy) - 1.0 window_x = torch.linspace(-1, 1, steps=width, device=device) * (width / height) window_y = torch.linspace(1,- 1, steps=height, device=device) coord = torch.stack(torch.meshgrid(window_x, window_y)).permute(1,2,0) return coord def look_at(camera_from, camera_to, width, height, mode='persp', fov=90.0, device='cuda'): """Vectorized look-at function, returns an array of ray origins and directions URL: https://www.scratchapixel.com/lessons/mathematics-physics-for-computer-graphics/lookat-function """ camera_origin = torch.FloatTensor(camera_from).to(device) camera_view = F.normalize(torch.FloatTensor(camera_to).to(device) - camera_origin, dim=0) camera_right = F.normalize(torch.cross(camera_view, torch.FloatTensor([0,1,0]).to(device)), dim=0) camera_up = F.normalize(torch.cross(camera_right, camera_view), dim=0) coord = _normalized_grid(width, height, device=device) ray_origin = camera_right * coord[...,0,np.newaxis] * np.tan(np.radians(fov/2)) + \ camera_up * coord[...,1,np.newaxis] * np.tan(np.radians(fov/2)) + \ camera_origin + camera_view ray_origin = ray_origin.reshape(-1, 3) ray_offset = camera_view.unsqueeze(0).repeat(ray_origin.shape[0], 1) if mode == 'ortho': # Orthographic camera ray_dir = F.normalize(ray_offset, dim=-1) elif mode == 'persp': # Perspective camera ray_dir = F.normalize(ray_origin - camera_origin, dim=-1) ray_origin = camera_origin.repeat(ray_dir.shape[0], 1) else: raise ValueError('Invalid camera mode!') return ray_origin, ray_dir ``` Now 
generate some rays using the functions we've just created: ``` # ray_o and ray_d ~ torch.Tensor (width x height, 3) # represent rays origin and direction vectors ray_o, ray_d = look_at(camera_from=[-2.5,2.5,-2.5], camera_to=[0,0,0], width=1024, height=1024, mode='persp', fov=30, device='cuda') print(f'Total of {ray_o.shape[0]} rays generated.') ``` ### Render We're now ready to perform the actual ray tracing. <br> kaolin will "shoot" the rays for us, and perform an efficient intersection test between each ray and cell within the SPC structure. <br> In kaolin terminology, <b>nuggets</b> are "ray-cell intersections" (or rather "ray-point" intersections). <b>nuggets </b> are of represented by a structure of two tensors: `nugs_ridx` and `nugs_pidx`, <br>which form together pairs of `(index_to_ray, index_to_points)`. <br> Both tensors are 1-dimensional tensors of shape (`#num_intersection`,). ``` octree, features = spc.octrees, spc.features point_hierarchy, pyramid, prefix = spc.point_hierarchies, spc.pyramids[0], spc.exsum nugs_ridx, nugs_pidx, depth = kal.render.spc.unbatched_raytrace(octree, point_hierarchy, pyramid, prefix, ray_o, ray_d, level) print(f'Total of {nugs_ridx.shape[0]} nuggets were traced.\n') ``` Since we're assuming here our surface is opaque, for each ray, we only care about the <b>nugget</b> closest to the camera. <br> Note that per "ray-pack", the returned <b>nuggets</b> are already sorted by depth. <br> The method below returns a boolean mask which specifies which <b>nuggets</b> represent a "first-hit". ``` masked_nugs = kal.render.spc.mark_pack_boundaries(nugs_ridx) nugs_ridx = nugs_ridx[masked_nugs] nugs_pidx = nugs_pidx[masked_nugs] ``` Finally, for each ray that hit the surface, a corresponding "first-hit" nugget exists. <br> ``` # 1. We initialize an empty canvas. image = torch.ones_like(ray_o) # 2. We'll query all first-hit nuggets to obtain their corresponding point-id (which cell they hit in the SPC). 
ridx = nugs_ridx.long() pidx = nugs_pidx.long() - pyramid[1,level] # 3. We'll query the features auxiliary structure to obtain the color. # 4. We set each ray value as the corresponding nugget color. image[ridx] = features[pidx] image = image.reshape(1024, 1024, 3) ``` Putting it all together, we write our complete `render()` function and display the trace using matplotlib: ``` import matplotlib.pyplot as plt def render(level): """ Create & render an image """ spc = kal.ops.conversions.pointcloud.unbatched_pointcloud_to_spc(vertices, level, vertex_colors) octree, features, point_hierarchy, pyramid, prefix = spc.octrees, spc.features, spc.point_hierarchies, spc.pyramids[0], spc.exsum nugs_ridx, nugs_pidx, depth = kal.render.spc.unbatched_raytrace(octree, point_hierarchy, pyramid, prefix, ray_o, ray_d, level) masked_nugs = kal.render.spc.mark_pack_boundaries(nugs_ridx) nugs_ridx = nugs_ridx[masked_nugs] nugs_pidx = nugs_pidx[masked_nugs] ridx = nugs_ridx.long() pidx = nugs_pidx.long() - pyramid[1,level] image = torch.ones_like(ray_o) image[ridx] = features[pidx] image = image.reshape(1024, 1024, 3) return image fig = plt.figure(figsize=(20,10)) # Render left image of level 3 SPC image1 = render(level=3) image1 = image1.cpu().numpy().transpose(1,0,2) ax = fig.add_subplot(1, 2, 1) ax.set_title("level 3", fontsize=26) ax.axis('off') plt.imshow(image1) # Render right image of level 8 SPC image2 = render(level=8) image2 = image2.cpu().numpy().transpose(1,0,2) ax = fig.add_subplot(1, 2, 2) ax.set_title("level 8", fontsize=26) ax.axis('off') plt.imshow(image2) plt.show() ``` Finally, putting it all together, we may also construct the following interactive demo: ``` def update_demo(widget_spc_level): image = render(widget_spc_level) plt.figure(figsize=(10,10)) plt.axis('off') plt.imshow(image.cpu().numpy().transpose(1,0,2)) plt.show() def show_interactive_demo(max_level=10): start_value = min(7, max_level) widget_spc_level = widgets.IntSlider(value=start_value, min=1,
max=max_level, step=1, orientation='vertical', description='<h5>SPC Level:</h5>', disabled=False, layout=widgets.Layout(height='100%',)) out = widgets.interactive_output(update_demo, {'widget_spc_level': widget_spc_level}) display(widgets.HBox([widgets.VBox([widget_spc_level]), out])) show_interactive_demo() ``` ## 2. SPC internals In this section we'll explore the various components that make up the [SPC](https://kaolin.readthedocs.io/en/latest/modules/kaolin.ops.spc.html?highlight=spc#structured-point-clouds) we've just created. <br> We'll learn how data is stored, and how to view stored data. ### Boilerplate code Let's rebuild our SPC object with fewer levels, that will make the internals easier to study. <br> You may customize the number of levels and compare how the output changes. ``` level = 3 spc = kal.ops.conversions.pointcloud.unbatched_pointcloud_to_spc(vertices, level, features=vertex_colors) ``` Ok, let's see what we've got. ### octree The first field we'll look into, `octrees`, keeps the entire geometric structure in a compressed manner. <br> This is a huge advantage, as this structure is now small enough to fit our sparse data, which makes it very efficient. 
``` octree = spc.octrees describe_tensor(torch_tensor=octree, tensor_label='octree', with_shape=True, with_content=True) print(f'\n"octrees" represents a hierarchy of {len(octree)} octree nodes.') print(f"Let's have a look at the binary representation and what it means:\n") describe_octree(octree, level) text_out = widgets.Output(layout={'border': '0.2px dashed black'}) with text_out: print('How to read the content of octrees?') print('- Each entry represents a single octree of 8 cells --> 8 bits.') print('- The bit position determines the cell index, in Morton Order.') print('- The bit value determines if the cell is occupied or not.') print(f'- If a cell is occupied, an additional octree may be generated in the next level, up till level {level}.') print('For example, an entry of 00000001 is a single level octree, where only the bottom-left most cell is occupied.') display(widgets.HBox([text_out])) ``` ##### Order of octants within partitioned cells ![alt text](../samples/octants.png "Octants") Notice the field is named in plural. That's because kaolin can batch multiple instances of octrees together within the same object. <br> ``` print(spc.batch_size) ``` Pay attention that `octrees` uses [packed representation](https://kaolin.readthedocs.io/en/latest/modules/kaolin.ops.batch.html?highlight=packed#packed), meaning, there is no explicit batch dimension. 
<br> Instead, we track the length of each octree instance in a separate field: ``` octrees_lengths = spc.lengths describe_tensor(torch_tensor=octrees_lengths, tensor_label='lengths', with_shape=True, with_content=True) text_out = widgets.Output(layout={'border': '0.2px dashed black'}) with text_out: print('How to read the content of lengths?') print(f'- This Spc stores a batch of {len(spc.lengths)} octrees.') print(f'- The first octree is represented by {spc.lengths[0]} non-leaf cells.') print(f'- Therefore the information of the first octree is kept in bytes 0-{spc.lengths[0]-1} of the octrees field.') display(widgets.HBox([text_out])) ``` Advanced users who prefer a non object-oriented lower-level api can also use the following functionality which `kal.ops.conversions.pointcloud.unbatched_pointcloud_to_spc` employs under the hood: ``` from kaolin.ops.spc.points import quantize_points, points_to_morton, morton_to_points, unbatched_points_to_octree # Construct a batch of 2 octrees. For brevity, we'll use the same ocuupancy data for both. # 1. Convert continous to quantized coordinates # 2. Build the octrees points1 = quantize_points(vertices.contiguous(), level=2) octree1 = unbatched_points_to_octree(points1, level=2) points2 = quantize_points(vertices.contiguous(), level=3) octree2 = unbatched_points_to_octree(points2, level=3) # Batch 2 octrees together. For packed representations, this is just concatenation. 
octrees = torch.cat((octree1, octree2), dim=0) lengths = torch.tensor([len(octree1), len(octree2)], dtype=torch.int32) describe_tensor(torch_tensor=octrees, tensor_label='octrees', with_shape=True, with_content=True) print('') describe_tensor(torch_tensor=lengths, tensor_label='lengths', with_shape=True, with_content=True) ``` These structures form the bare minimum required to shift back to high-level api and construct a Spc object: ``` kal.rep.spc.Spc(octrees, lengths) ``` ### features So far we've lookied into how Structured Point Clouds keep track of occupancy. <br> Next we'll study how they keep track of features. The `features` field contains features information per cell. ``` features = spc.features def paint_features(features): plt.figure(figsize=(10,10)) plt.axis('off') plt.imshow(features.cpu().numpy()[None]) plt.show() print('In this tutorial, cell features are RGB colors:') describe_tensor(torch_tensor=features, tensor_label='features', with_shape=True, with_content=False) paint_features(features) text_out = widgets.Output(layout={'border': '0.2px dashed black'}) with text_out: print('How to read the content of features?') print(f'- We keep features only for leaf cells, a total of {features.shape[0]}.') print(f'- The number of leaf cells can be obtained by summarizing the "1" bits at level {spc.max_level},\n' \ ' the last level of the octree.') print(f'- The dimensionality of each attribute is {features.shape[1]} (e.g: RGB channels)') print('\nReminder - the highest level of occupancy octree is:') describe_octree(spc.octrees, level, limit_levels=[spc.max_level]) display(widgets.HBox([text_out])) ``` ### pyramid & exsum Since the occupancy information is [compressed](https://kaolin.readthedocs.io/en/latest/modules/kaolin.ops.spc.html?highlight=spc#octree) and [packed](https://kaolin.readthedocs.io/en/latest/modules/kaolin.ops.batch.html?highlight=packed#packed), accessing level-specific information consistently involves cumulative summarization of the 
number of "1" bits. <br> It makes sense to calculate this information once and then cache it. <br> The `pyramid` field does exactly that: it keeps summarizes the number of occupied cells per level, and their cumsum, for fast level-indexing. ``` # A pyramid is kept per octree in the batch. # We'll study the pyramid of the first and only entry in the batch. pyramid = spc.pyramids[0] describe_tensor(torch_tensor=pyramid, tensor_label='pyramid', with_shape=True, with_content=True) out_left = widgets.Output(layout={'border': '0.2px dashed black'}) out_right = widgets.Output(layout={'border': '0.2px dashed black'}) print('\nHow to read the content of pyramids?') with out_left: print('"pyramid" summarizes the number of occupied \ncells per level, and their cumulative sum:\n') for i in range(pyramid.shape[-1]): if i ==0: print(f'Root node (implicitly defined):') elif i+1 < pyramid.shape[-1]: print(f'Level #{i}:') else: print(f'Final entry for total cumsum:') print(f'\thas {pyramid[0,i]} occupied cells') print(f'\tstart idx (cumsum excluding current level): {pyramid[1,i]}') with out_right: print(f'"octrees" represents a hierarchy of {len(spc.octrees)} octree nodes.') print(f"Each bit represents a cell occupancy:\n") describe_octree(octree, level) display(widgets.HBox([out_left, out_right])) ``` Similarly, kaolin keeps a complementary field, `exsum`, which tracks the cumulative summarization of bits per-octree to fast access parent-child information between levels: ``` exsum = spc.exsum describe_tensor(torch_tensor=exsum, tensor_label='exsum', with_shape=True, with_content=True) out_left = widgets.Output(layout={'border': '0.2px dashed black'}) out_right = widgets.Output(layout={'border': '0.2px dashed black'}) print('\nHow to read the content of exsum?') with out_left: print('"exsum" summarizes the cumulative number of occupied \ncells per octree, e.g: exclusive sum of "1" bits:\n') for i in range(exsum.shape[-1]): print(f'Cells in Octree #{i} start from cell idx: 
{exsum[i]}') with out_right: print(f'"octrees" represents a hierarchy of {len(octree)} octree nodes.') print(f"Each bit represents a cell occupancy:\n") describe_octree(octree, level) display(widgets.HBox([out_left, out_right])) ``` When using Spc objects, pyramids are implicitly created the first time they are needed so you don't have to worry about them. <br> For advanced users, the low-level api allows their explicit creation through `scan_octrees`: ``` lengths = torch.tensor([len(octrees)], dtype=torch.int32) max_level, pyramid, exsum = kal.ops.spc.spc.scan_octrees(octree, lengths) print('max_level:') print(max_level) print('\npyramid:') print(pyramid) print('\nexsum:') print(exsum) ``` ### point_hierarchies `point_hierarchies` is an auxilary field, which holds the *sparse* coordinates of each point / occupied cell within the octree, for easier access. Sparse coordinates are packed for all cells on all levels combined. ``` describe_tensor(torch_tensor=spc.point_hierarchies, tensor_label='point_hierarchies', with_shape=True, with_content=False) ``` We can use the information stored in the pyramids field to color the coordinates by level: ``` out_left = widgets.Output(layout={'border': '0.2px dashed black'}) out_right = widgets.Output(layout={'border': '0.2px dashed black', 'width': '60%'}) max_points_to_display = 17 # To avoid clutter with out_left: level_idx =0 point_idx = 0 remaining_cells_per_level = spc.pyramids[0,0].cpu().numpy().tolist() for coord in spc.point_hierarchies: if not remaining_cells_per_level[level_idx]: level_idx += 1 point_idx = 0 else: remaining_cells_per_level[level_idx] -= 1 if point_idx == max_points_to_display: print(colored(f'skipping more..', level_color)) elif point_idx < max_points_to_display: level_color = color_by_level(level_idx - 1) print(colored(f'Level #{level_idx}, Point #{point_idx}, ' \ f'Coords: {coord.cpu().numpy().tolist()}', level_color)) point_idx += 1 with out_right: print('How to read the content of 
point_hierarchies?') print(f'- Each cell / point is represented by {spc.point_hierarchies.shape[-1]} indices (xyz).') print('- Sparse coordinates are absolute: \n they are defined relative to the octree origin.') print('- Compare the point coordinates with the demo below.\n\n Remember: unoccupied cells are not displayed!') show_interactive_demo(max_level=spc.max_level) display(widgets.HBox([out_left, out_right])) ``` ## Where to go from here Structured Point Clouds support other useful operators which were not covered by this tutorial: 1. [Convolutions](https://kaolin.readthedocs.io/en/latest/modules/kaolin.ops.spc.html?highlight=SPC#kaolin.ops.spc.Conv3d) 2. [Querying points by location](https://kaolin.readthedocs.io/en/latest/modules/kaolin.ops.spc.html?highlight=SPC#kaolin.ops.spc.unbatched_query) 3. [Differential ray-tracing ops](https://kaolin.readthedocs.io/en/latest/modules/kaolin.render.spc.html#kaolin-render-spc)
github_jupyter
# T81-558: Applications of Deep Neural Networks * Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), School of Engineering and Applied Science, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx) * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). **Module 9 Assignment: Kaggle Submission** **Student Name: Your Name** # Assignment Instructions For this assignment you will begin by loading a pretrained neural network that I provide here: [transfer_9.h5](https://data.heatonresearch.com/data/t81-558/networks/transfer_9.h5). You will demonstrate your ability to transfer several layers from this neural network to create a new neural network to be used for feature engineering. The **transfer_9.h5** neural network is composed of the following four layers: ``` Model: "sequential_7" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_11 (Dense) (None, 25) 225 _________________________________________________________________ dense_12 (Dense) (None, 10) 260 _________________________________________________________________ dense_13 (Dense) (None, 3) 33 _________________________________________________________________ dense_14 (Dense) (None, 1) 4 ================================================================= Total params: 522 Trainable params: 522 Non-trainable params: 0 ``` You should only use the first three layers. The final dense layer should be removed, exposing the (None, 3) shaped layer as the new output layer. This is a 3-neuron layer. The output from these 3 layers will become your 3 engineered features. Complete the following tasks: * Load the Keras neural network **transfer_9.h5**. Note that you will need to download it to either your hard drive or GDrive (if you're using Google CoLab). Keras does not allow loading of a neural network across HTTP. 
* Create a new neural network with only the first 3 layers, drop the (None, 1) shaped layer.
* Load the dataset [transfer_data.csv](https://data.heatonresearch.com/data/t81-558/datasets/transfer_data.csv).
* Use all columns as input, but do not use *id* as input.  You will need to save the *id* column to build your submission.
* Do not zscore or transform the input columns.
* Submit the output from the (None, 3) shaped layer, along with the corresponding *id* column.  The three output neurons should create columns named *a*, *b*, and *c*.

The submit file will look something like:

|id|a|b|c|
|-|-|-|-|
|1|2.3602087|1.4411213|0|
|2|0.067718446|1.0037427|0.52129996|
|3|0.74778837|1.0647631|0.052594826|
|4|1.0594225|1.1211816|0|
|...|...|...|...|

# Assignment Submit Function

You will submit the 10 programming assignments electronically.  The following submit function can be used to do this.  My server will perform a basic check of each assignment and let you know if it sees any basic problems.

**It is unlikely that you should need to modify this function.**

```
import base64
import os
import numpy as np
import pandas as pd
import requests

# This function submits an assignment.  You can submit an assignment as much as you like, only the final
# submission counts.  The parameters are as follows:
# data - Pandas dataframe output.
# key - Your student key that was emailed to you.
# no - The assignment class number, should be 1 through 10.
# source_file - The full path to your Python or IPYNB file.  This must have "_class1" as part of its name.
# .             The number must match your assignment number.  For example "_class2" for class assignment #2.
def submit(data,key,no,source_file=None):
    """Submit one assignment to the grading server.

    Base64-encodes both the submission CSV (built from ``data``) and the
    source file itself, POSTs them to the Heaton Research
    assignment-submit API, and prints the server's verdict.

    data        - Pandas DataFrame holding the assignment output to submit.
    key         - Student API key (emailed at the start of the semester).
    no          - Assignment number; the source filename must contain
                  '_class{no}'.
    source_file - Path to the .py or .ipynb being submitted.  Falls back to
                  __file__ when running as a plain script; must be passed
                  explicitly from a Jupyter notebook.

    Raises Exception if no filename can be determined, the filename lacks
    the expected '_class{no}' suffix, or the extension is not .py/.ipynb.
    """
    if source_file is None and '__file__' not in globals():
        raise Exception('Must specify a filename when a Jupyter notebook.')
    if source_file is None:
        source_file = __file__
    suffix = '_class{}'.format(no)
    if suffix not in source_file:
        raise Exception('{} must be part of the filename.'.format(suffix))
    with open(source_file, "rb") as image_file:
        encoded_python = base64.b64encode(image_file.read()).decode('ascii')
    ext = os.path.splitext(source_file)[-1].lower()
    if ext not in ['.ipynb','.py']:
        raise Exception("Source file is {} must be .py or .ipynb".format(ext))
    # Both the CSV output and the (base64) source file travel inline in one
    # JSON payload; the x-api-key header authenticates the student.
    r = requests.post("https://api.heatonresearch.com/assignment-submit",
        headers={'x-api-key':key},
        json={'csv':base64.b64encode(data.to_csv(index=False).encode('ascii')).decode("ascii"),
        'assignment': no, 'ext':ext, 'py':encoded_python})
    if r.status_code == 200:
        print("Success: {}".format(r.text))
    else:
        print("Failure: {}".format(r.text))
```

# Google CoLab Instructions

If you are using Google CoLab, it will be necessary to mount your GDrive so that you can send your notebook during the submit process.  Running the following code will map your GDrive to /content/drive.

```
from google.colab import drive
drive.mount('/content/drive')
!ls /content/drive/My\ Drive/Colab\ Notebooks
```

# Assignment #9 Sample Code

The following code provides a starting point for this assignment.

```
import os
import pandas as pd
from scipy.stats import zscore
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras.models import load_model
import pandas as pd
import io
import requests
import numpy as np
from sklearn import metrics
from sklearn.model_selection import KFold
import sklearn
from sklearn.linear_model import Lasso

# This is your student key that I emailed to you at the beginning of the semester.
key = "PPboscDL2M94HCbkbvfOLakXXNy3dh5x2VV1Mlpm"  # This is an example key and will not work.
# You must also identify your source file. (modify for your local setup) # file='/content/drive/My Drive/Colab Notebooks/assignment_yourname_class9.ipynb' # Google CoLab # file='C:\\Users\\jeffh\\projects\\t81_558_deep_learning\\assignments\\assignment_yourname_class9.ipynb' # Windows file='/Users/jheaton/projects/t81_558_deep_learning/assignments/assignment_yourname_class9.ipynb' # Mac/Linux # Begin assignment model = load_model("/Users/jheaton/Downloads/transfer_9.h5") # modify to where you stored it df = pd.read_csv("https://data.heatonresearch.com/data/t81-558/datasets/transfer_data.csv") submit(source_file=file,data=df_submit,key=key,no=9) ```
github_jupyter
``` import tensorflow as tf import numpy as np from glob import glob from deeplab import DeepLabV3Plus from tensorflow.keras import backend as K from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint print('TensorFlow', tf.__version__) H, W = 512, 512 batch_size = 24 train_images = sorted(glob('resized_images/*')) train_masks = sorted(glob('resized_masks/*')) val_images = sorted(glob('validation_data/images/*')) val_masks = sorted(glob('validation_data/masks/*')) print(f'Found {len(train_images)} training images') print(f'Found {len(train_masks)} training masks') print(f'Found {len(val_images)} validation images') print(f'Found {len(val_masks)} validation masks') for i in range(len(train_masks)): assert train_images[i].split('/')[-1].split('.')[0] == train_masks[i].split('/')[-1].split('.')[0] for i in range(len(val_masks)): assert val_images[i].split('/')[-1].split('.')[0] == val_masks[i].split('/')[-1].split('.')[0] def random_scale(image, mask, min_scale=0.3, max_scale=1.5): random_scale = tf.random.uniform(shape=[1], minval=min_scale, maxval=max_scale) dims = tf.cast(tf.shape(image), dtype=tf.float32) new_dims = tf.cast(random_scale * dims[:2], dtype=tf.int32) scaled_image = tf.image.resize(image, size=new_dims, method='bilinear') scaled_mask = tf.image.resize(mask, size=new_dims, method='nearest') return scaled_image, scaled_mask def pad_inputs(image, mask, crop_height=H, crop_width=H, ignore_value=255, pad_value=0): dims = tf.cast(tf.shape(image), dtype=tf.float32) h_pad = tf.maximum(1 + crop_height - dims[0], 0) w_pad = tf.maximum(1 + crop_width - dims[1], 0) padded_image = tf.pad(image, paddings=[[0, h_pad], [0, w_pad], [0, 0]], constant_values=pad_value) padded_mask = tf.pad(mask, paddings=[[0, h_pad], [0, w_pad], [0, 0]], mode='CONSTANT', constant_values=ignore_value) return padded_image, padded_mask def random_crop(image, mask, crop_height=512, crop_width=512): image_dims = tf.shape(image) offset_h = tf.random.uniform(shape=(1,), 
maxval=image_dims[0]-crop_height, dtype=tf.int32)[0] offset_w = tf.random.uniform(shape=(1,), maxval=image_dims[1]-crop_height, dtype=tf.int32)[0] image = tf.image.crop_to_bounding_box(image, offset_height=offset_h, offset_width=offset_w, target_height=crop_height, target_width=crop_height) mask = tf.image.crop_to_bounding_box(mask, offset_height=offset_h, offset_width=offset_w, target_height=crop_height, target_width=crop_height) return image, mask def random_flip(image, mask): flip = tf.random.uniform(shape=[1,], minval=0, maxval=2, dtype=tf.int32)[0] image = tf.case([ (tf.greater(flip , 0), lambda : tf.image.flip_left_right(image)) ], default=lambda : image) mask = tf.case([ (tf.greater(flip , 0), lambda : tf.image.flip_left_right(mask)) ], default=lambda : mask) return image, mask def load_image(image_path, mask=False): img = tf.io.read_file(image_path) if mask: img = tf.image.decode_image(img, channels=1) img.set_shape([None, None, 1]) else: img = tf.image.decode_image(img, channels=3) img.set_shape([None, None, 3]) return img @tf.function() def preprocess_inputs(image_path, mask_path): with tf.device('/cpu:0'): image = load_image(image_path) mask = load_image(mask_path, mask=True) mask = tf.cast(mask > 0, dtype=tf.uint8) image, mask = random_scale(image, mask) image, mask = pad_inputs(image, mask) image, mask = random_crop(image, mask) image, mask = random_flip(image, mask) image = image[:, :, ::-1] - tf.constant([103.939, 116.779, 123.68]) return image, mask train_dataset = tf.data.Dataset.from_tensor_slices((train_images, train_masks)) train_dataset = train_dataset.shuffle(1024) train_dataset = train_dataset.map(map_func=preprocess_inputs, num_parallel_calls=tf.data.experimental.AUTOTUNE) train_dataset = train_dataset.batch(batch_size=batch_size, drop_remainder=True) train_dataset = train_dataset.repeat() train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE) val_dataset = tf.data.Dataset.from_tensor_slices((val_images, val_masks)) 
val_dataset = val_dataset.map(map_func=preprocess_inputs, num_parallel_calls=tf.data.experimental.AUTOTUNE) val_dataset = val_dataset.batch(batch_size=batch_size, drop_remainder=True) val_dataset= val_dataset.repeat() val_dataset= val_dataset.prefetch(tf.data.experimental.AUTOTUNE) @tf.function() def dice_coef(y_true, y_pred): mask = tf.equal(y_true, 255) mask = tf.logical_not(mask) y_true = tf.boolean_mask(y_true, mask) y_pred = tf.boolean_mask(y_pred, mask) y_true_f = K.flatten(y_true) y_pred = K.cast(y_pred, 'float32') y_pred_f = K.cast(K.greater(K.flatten(y_pred), 0.5), 'float32') intersection = y_true_f * y_pred_f score = 2. * K.sum(intersection) / (K.sum(y_true_f) + K.sum(y_pred_f)) return score @tf.function() def loss(y_true, y_pred): mask = tf.equal(y_true, 255) mask = tf.logical_not(mask) y_true = tf.boolean_mask(y_true, mask) y_pred = tf.boolean_mask(y_pred, mask) return tf.losses.binary_crossentropy(y_true, y_pred) strategy = tf.distribute.MirroredStrategy() with strategy.scope(): model = DeepLabV3Plus(H, W) for layer in model.layers: if isinstance(layer, tf.keras.layers.BatchNormalization): layer.momentum = 0.9997 layer.epsilon = 1e-5 elif isinstance(layer, tf.keras.layers.Conv2D): layer.kernel_regularizer = tf.keras.regularizers.l2(1e-4) model.compile(loss=loss, optimizer=tf.keras.optimizers.Adam(learning_rate=3e-4), metrics=['accuracy', dice_coef]) tb = TensorBoard(log_dir='logs', write_graph=True, update_freq='batch') mc = ModelCheckpoint(filepath='top_weights.h5', monitor='val_dice_coef', mode='max', save_best_only='True', save_weights_only='True', verbose=1) callbacks = [mc, tb] model.fit(train_dataset, steps_per_epoch=len(train_images) // batch_size, epochs=10, validation_data=val_dataset, validation_steps=len(val_images) // batch_size, callbacks=callbacks) model.save_weights('last_epoch.h5') ```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # TensorBoard Scalars: Logging training metrics in Keras <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tensorboard/scalars_and_keras"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorboard/blob/master/docs/scalars_and_keras.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/tensorboard/blob/master/docs/scalars_and_keras.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> ## Overview Machine learning invariably involves understanding key metrics such as loss and how they change as training progresses. These metrics can help you understand if you're [overfitting](https://en.wikipedia.org/wiki/Overfitting), for example, or if you're unnecessarily training for too long. You may want to compare these metrics across different training runs to help debug and improve your model. TensorBoard's **Scalars Dashboard** allows you to visualize these metrics using a simple API with very little effort. 
This tutorial presents very basic examples to help you learn how to use these APIs with TensorBoard when developing your Keras model. You will learn how to use the Keras TensorBoard callback and TensorFlow Summary APIs to visualize default and custom scalars. ## Setup ``` # Load the TensorBoard notebook extension. %load_ext tensorboard from datetime import datetime from packaging import version import tensorflow as tf from tensorflow import keras import numpy as np print("TensorFlow version: ", tf.__version__) assert version.parse(tf.__version__).release[0] >= 2, \ "This notebook requires TensorFlow 2.0 or above." ``` ## Set up data for a simple regression You're now going to use [Keras](https://www.tensorflow.org/guide/keras) to calculate a regression, i.e., find the best line of fit for a paired data set. (While using neural networks and gradient descent is [overkill for this kind of problem](https://stats.stackexchange.com/questions/160179/do-we-need-gradient-descent-to-find-the-coefficients-of-a-linear-regression-mode), it does make for a very easy to understand example.) You're going to use TensorBoard to observe how training and test **loss** change across epochs. Hopefully, you'll see training and test loss decrease over time and then remain steady. First, generate 1000 data points roughly along the line *y = 0.5x + 2*. Split these data points into training and test sets. Your hope is that the neural net learns this relationship. ``` data_size = 1000 # 80% of the data is for training. train_pct = 0.8 train_size = int(data_size * train_pct) # Create some input data between -1 and 1 and randomize it. x = np.linspace(-1, 1, data_size) np.random.shuffle(x) # Generate the output data. # y = 0.5x + 2 + noise y = 0.5 * x + 2 + np.random.normal(0, 0.05, (data_size, )) # Split into test and train pairs. 
x_train, y_train = x[:train_size], y[:train_size] x_test, y_test = x[train_size:], y[train_size:] ``` ## Training the model and logging loss You're now ready to define, train and evaluate your model. To log the *loss* scalar as you train, you'll do the following: 1. Create the Keras [TensorBoard callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard) 2. Specify a log directory 3. Pass the TensorBoard callback to Keras' [Model.fit()](https://www.tensorflow.org/api_docs/python/tf/keras/models/Model#fit). TensorBoard reads log data from the log directory hierarchy. In this notebook, the root log directory is ```logs/scalars```, suffixed by a timestamped subdirectory. The timestamped subdirectory enables you to easily identify and select training runs as you use TensorBoard and iterate on your model. ``` logdir = "logs/scalars/" + datetime.now().strftime("%Y%m%d-%H%M%S") tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir) model = keras.models.Sequential([ keras.layers.Dense(16, input_dim=1), keras.layers.Dense(1), ]) model.compile( loss='mse', # keras.losses.mean_squared_error optimizer=keras.optimizers.SGD(lr=0.2), ) print("Training ... With default parameters, this takes less than 10 seconds.") training_history = model.fit( x_train, # input y_train, # output batch_size=train_size, verbose=0, # Suppress chatty output; use Tensorboard instead epochs=100, validation_data=(x_test, y_test), callbacks=[tensorboard_callback], ) print("Average test loss: ", np.average(training_history.history['loss'])) ``` ## Examining loss using TensorBoard Now, start TensorBoard, specifying the root log directory you used above. Wait a few seconds for TensorBoard's UI to spin up. 
``` %tensorboard --logdir logs/scalars ``` <img class="tfo-display-only-on-site" src="https://github.com/tensorflow/tensorboard/blob/master/docs/images/scalars_loss.png?raw=1"/> You may see TensorBoard display the message "No dashboards are active for the current data set". That's because initial logging data hasn't been saved yet. As training progresses, the Keras model will start logging data. TensorBoard will periodically refresh and show you your scalar metrics. If you're impatient, you can tap the Refresh arrow at the top right. As you watch the training progress, note how both training and validation loss rapidly decrease, and then remain stable. In fact, you could have stopped training after 25 epochs, because the training didn't improve much after that point. Hover over the graph to see specific data points. You can also try zooming in with your mouse, or selecting part of them to view more detail. Notice the "Runs" selector on the left. A "run" represents a set of logs from a round of training, in this case the result of Model.fit(). Developers typically have many, many runs, as they experiment and develop their model over time. Use the Runs selector to choose specific runs, or choose from only training or validation. Comparing runs will help you evaluate which version of your code is solving your problem better. Ok, TensorBoard's loss graph demonstrates that the loss consistently decreased for both training and validation and then stabilized. That means that the model's metrics are likely very good! Now see how the model actually behaves in real life. Given the input data (60, 25, 2), the line *y = 0.5x + 2* should yield (32, 14.5, 3). Does the model agree? ``` print(model.predict([60, 25, 2])) # True values to compare predictions against: # [[32.0] # [14.5] # [ 3.0]] ``` Not bad! ## Logging custom scalars What if you want to log custom values, such as a [dynamic learning rate](https://www.jeremyjordan.me/nn-learning-rate/)? 
To do that, you need to use the TensorFlow Summary API. Retrain the regression model and log a custom learning rate. Here's how: 1. Create a file writer, using ```tf.summary.create_file_writer()```. 2. Define a custom learning rate function. This will be passed to the Keras [LearningRateScheduler](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/LearningRateScheduler) callback. 3. Inside the learning rate function, use ```tf.summary.scalar()``` to log the custom learning rate. 4. Pass the LearningRateScheduler callback to Model.fit(). In general, to log a custom scalar, you need to use ```tf.summary.scalar()``` with a file writer. The file writer is responsible for writing data for this run to the specified directory and is implicitly used when you use the ```tf.summary.scalar()```. ``` logdir = "logs/scalars/" + datetime.now().strftime("%Y%m%d-%H%M%S") file_writer = tf.summary.create_file_writer(logdir + "/metrics") file_writer.set_as_default() def lr_schedule(epoch): """ Returns a custom learning rate that decreases as epochs progress. """ learning_rate = 0.2 if epoch > 10: learning_rate = 0.02 if epoch > 20: learning_rate = 0.01 if epoch > 50: learning_rate = 0.005 tf.summary.scalar('learning rate', data=learning_rate, step=epoch) return learning_rate lr_callback = keras.callbacks.LearningRateScheduler(lr_schedule) tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir) model = keras.models.Sequential([ keras.layers.Dense(16, input_dim=1), keras.layers.Dense(1), ]) model.compile( loss='mse', # keras.losses.mean_squared_error optimizer=keras.optimizers.SGD(), ) training_history = model.fit( x_train, # input y_train, # output batch_size=train_size, verbose=0, # Suppress chatty output; use Tensorboard instead epochs=100, validation_data=(x_test, y_test), callbacks=[tensorboard_callback, lr_callback], ) ``` Let's look at TensorBoard again. 
``` %tensorboard --logdir logs/scalars ``` <img class="tfo-display-only-on-site" src="https://github.com/tensorflow/tensorboard/blob/master/docs/images/scalars_custom_lr.png?raw=1"/> Using the "Runs" selector on the left, notice that you have a ```<timestamp>/metrics``` run. Selecting this run displays a "learning rate" graph that allows you to verify the progression of the learning rate during this run. You can also compare this run's training and validation loss curves against your earlier runs. How does this model do? ``` print(model.predict([60, 25, 2])) # True values to compare predictions against: # [[32.0] # [14.5] # [ 3.0]] ```
github_jupyter
# "[ALGO&DS] Reverse a Linked List"
> "How to reverse a linked list both iteratively and recursively?"

- toc:false
- branch: master
- badges: false
- comments: true
- author: Peiyi Hung
- categories: [category, learning, algorithms]

# Introduction

Reversing a linked list is a classic problem; by solving it we can get familiar with the basic operations on a linked list. I had a hard time understanding this problem at first, so I decided to write a blog post explaining how it can be addressed, to really understand it. This problem can be solved recursively and iteratively. I will explain how to solve this problem in these two ways and give a Python implementation of each. You can try out the code in this post on [Leetcode](https://leetcode.com/problems/reverse-linked-list/).

# The Problem

The problem we want to solve is reversing a linked list. Here it is:

> Given the head of a singly linked list, reverse the list, and return the reversed list.

If you want to reverse an array, you can just use two pointers and keep swapping the values at these pointers. However, a linked list does not give you the ability to access an element by its index, so we cannot use the same strategy to reverse a linked list.

A naive method to solve this problem is:

1. Find the last element of the linked list by scanning the whole list, store this node in another list, and remove this node.
2. Keep doing this until the linked list is empty, and we get a reversed linked list.

This method takes $O(n^2)$ time and $O(n)$ extra space, which is inefficient. In the next two sections, I will explain two methods taking $O(n)$ time.

# Recursively

First, let's solve this problem recursively. I will present the entire code and explain each step in detail.
```
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def reverseList(head: ListNode) -> ListNode:
    """Reverse a singly linked list recursively and return the new head."""
    # base case
    if head is None or head.next is None:
        return head
    # recursive case
    p = reverseList(head.next)
    head.next.next = head
    head.next = None
    return p
```
A linked list is linked `ListNode`. We can access the value in a node by `ListNode.val` and the next node by `ListNode.next`. So there's a custom class in the code:
```
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
```
Since we want to solve this problem recursively, we have to discuss the base case and the recurrence relation. The base case is that your linked list is empty or only contains one node. If this is the case, you could just return the `head` because they are reversed. This is done in this code:
```
if head is None or head.next is None:
    return head
```
In the recursive case, we can first revserve the linked list after `head`:
```
p = reverseList(head.next)
```
assuming `reverseList` can really do that. Now `p` points to the last element of this linked list, the whole list except `head` is reversed and the next element of `head` is the last element of this reversed linked list. What we should do is to set the next element of `head.next` to `head` and chop off the "next" element of `head`. Here's the code:
```
head.next.next = head
head.next = None
```
By doing this, we can reverse a linked list recursively.

# Iteratively

We can reverse a linked list iteratively by this code:
```
def reverseList(head: ListNode) -> ListNode:
    """Reverse a singly linked list iteratively and return the new head."""
    prev = None
    curr = head
    while curr:
        temp = curr.next  # keep a handle on the not-yet-reversed remainder
        curr.next = prev
        prev = curr
        curr = temp
    return prev
```
In the iterative case, we keep reverse one node in the linked list. `prev` points to the head of the reversed part, and `curr` refers to the one of the part that have not been reversed. How do we reverse a node?
If we want to reverse a node `curr`, we have to set the next of `curr` to `prev` and the new `prev` to `curr`. We do this by:
```
curr.next = prev
prev = curr
```
However, if we did only this, we would lose the pointer to the next element that has not been reversed. Thus, before we reverse `curr`, we should temporarily store where `curr.next` is. After we reverse a node, the "unreversed" part starts at `temp`, so we assign `temp` to `curr`. That is, `temp = curr.next` and `curr = temp`.

If `curr` reaches the end of the linked list, then we have reversed all elements and we should return `prev`, which points to the head of the reversed list.
github_jupyter
Cat Dog Classification === ``` import tensorflow as tf import os import re import numpy as np import zipfile import matplotlib.pyplot as plt from tensorflow.contrib import learn from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.contrib.learn import RunConfig as run_config slim = tf.contrib.slim ``` ### Utility function Image reader, Input pipeline, etc. ``` CAT = 0 DOG = 1 IS_LOW_MEMORY_MODE = True cwd = os.getcwd() np.random.seed(2124) def prepare_file(): file_list = ['train', 'test'] valid = True for i in range(len(file_list)): filename = file_list[i] + '.zip' dest_filename = os.path.join(cwd, 'data', filename) if not os.path.exists(dest_filename): print('Please download ' + filename + ' and put on src/data folder') url = "https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition/download/" print(url + filename) valid = False continue images_path = os.path.join(cwd, 'data', file_list[i]) zip = zipfile.ZipFile(dest_filename) if not os.path.exists(images_path): print('Extracting...') zip.extractall(os.path.join(cwd, 'data')) return valid def read_image_label_list(folder_dir): dir_list = os.listdir(os.path.join(cwd, folder_dir)) filenames = [] labels = [] for i, d in enumerate(dir_list): if re.search("train", folder_dir): if re.search("cat", d): labels.append(CAT) else: labels.append(DOG) else: labels.append(-1) filenames.append(os.path.join(cwd, folder_dir, d)) return filenames, labels def read_images_from_disk(input_queue): filename = input_queue[0] label = input_queue[1] file_contents = tf.read_file(filename) image = tf.image.decode_image(file_contents, channels=3) image.set_shape([None, None, 3]) return image, label def gen_input_fn(image_list, label_list, batch_size, shuffle): def input_fn(): images = tf.convert_to_tensor(image_list, dtype=tf.string) labels = tf.convert_to_tensor(label_list, dtype=tf.int32) input_queue = tf.train.slice_input_producer( [images, labels], capacity=batch_size * 5, shuffle=shuffle, 
name="file_input_queue" ) image, label = read_images_from_disk(input_queue) image = tf.image.resize_images(image, (224, 224), tf.image.ResizeMethod.NEAREST_NEIGHBOR) image_batch, label_batch = tf.train.batch( [image, label], batch_size=batch_size, num_threads=1, name="batch_queue", capacity=batch_size * 10, allow_smaller_final_batch = False ) return ( tf.identity(image_batch, name="features"), tf.identity(label_batch, name="label") ) return input_fn def train_valid_input_fn(data_dir, train_batch_size, valid_batch_size=None): img, labels = read_image_label_list(data_dir) img = np.array(img) labels = np.array(labels) data_size = img.shape[0] print("Data size: " + str(data_size)) split = int(0.7 * data_size) random_seq = np.random.permutation(data_size) img = img[random_seq] labels = labels[random_seq] if valid_batch_size == None: valid_batch_size = train_batch_size return ( gen_input_fn(img[0:split], labels[0:split], train_batch_size, shuffle = True), gen_input_fn(img[split:], labels[split:], valid_batch_size, shuffle = False) ) def test_input_fn(data_dir, batch_size): image_list, label_list = read_image_label_list(data_dir) return gen_input_fn(image_list, label_list, batch_size, shuffle = False), image_list ``` ### Preview Data Check correctness of data ``` if prepare_file(): print "Files are ready \o/" def plot_img(data, label=None): plt.ion() plt.figure() plt.imshow(data) if label is not None: plt.title(label) def preview_img(): img_preview = tf.Graph() with img_preview.as_default(): tensor_train, _ = train_valid_input_fn('data/train', 5) result = tf.tuple(tensor_train()) with tf.Session(graph=img_preview) as sess: sess.run(tf.global_variables_initializer()) coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) images, labels = sess.run(result) for i in range(len(images)): plot_img(images[i], str(labels[i])) coord.request_stop() coord.join(threads) sess.close() preview_img() ``` ### Define Model Create a model for using in Estimator 
``` def catdog_model(inputs, is_training): with tf.variable_scope('catdog', values=[inputs]): with slim.arg_scope( [slim.conv2d, slim.fully_connected], activation_fn=tf.nn.relu6, weights_initializer=tf.truncated_normal_initializer(0.0, 0.01)): net = inputs if IS_LOW_MEMORY_MODE == False: net = slim.repeat(net, 2, slim.conv2d, 64, [3, 3], scope='conv1') net = slim.max_pool2d(net, [2, 2], scope='pool1') net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2') net = slim.max_pool2d(net, [2, 2], scope='pool2') net = slim.repeat(net, 4, slim.conv2d, 256, [3, 3], scope='conv3') net = slim.max_pool2d(net, [2, 2], scope='pool3') net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv4') net = slim.max_pool2d(net, [2, 2], scope='pool4') net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv5') net = slim.max_pool2d(net, [2, 2], scope='pool5') net = tf.reshape(net, [-1, 7 * 7 * 512]) net = slim.fully_connected(net, 2048, scope='fc6') net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout6') net = slim.fully_connected(net, 2048, scope='fc7') net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout7') net = slim.fully_connected(net, 2, activation_fn=None, scope='fc8') else: # Model for my Mac T_T net = tf.image.resize_images(net, (72, 72), tf.image.ResizeMethod.NEAREST_NEIGHBOR) net = slim.repeat(net, 1, slim.conv2d, 64, [3, 3], scope='conv1') net = slim.max_pool2d(net, [2, 2], scope='pool1') net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2') net = slim.max_pool2d(net, [2, 2], scope='pool2') net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3') net = slim.max_pool2d(net, [2, 2], scope='pool3') net = tf.reshape(net, [-1, 9 * 9 * 256]) net = slim.fully_connected(net, 1024, scope='fc4') net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout4') net = slim.fully_connected(net, 1024, scope='fc5') net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout5') net = 
slim.fully_connected(net, 2, activation_fn=None, scope='fc6') return net def catdog_model_fn(features, labels, mode, params): is_training = False if mode == learn.ModeKeys.TRAIN: is_training = True output = catdog_model(features, is_training) log_loss = None train_op = None eval_metric_ops = None softmax_predictions = tf.nn.softmax(output) if mode != learn.ModeKeys.INFER: onehot_labels = tf.one_hot( tf.cast(labels, tf.int32), depth = 2 ) log_loss = tf.identity( tf.losses.log_loss( onehot_labels, tf.nn.softmax(output), reduction = tf.losses.Reduction.MEAN ), name = "log_loss_tensor" ) eval_metric_ops = { "log_loss": log_loss } if mode == learn.ModeKeys.TRAIN: train_op = tf.contrib.layers.optimize_loss( loss = log_loss, global_step = tf.contrib.framework.get_global_step(), learning_rate = params['learning_rate'], optimizer = "Adam" ) predictions = { 'predict': softmax_predictions } return model_fn.ModelFnOps( mode = mode, predictions = predictions, loss = log_loss, train_op = train_op, eval_metric_ops = eval_metric_ops ) ``` Define model classifier ``` def feature_engineering_fn(features, labels): features = tf.to_float(features) features = tf.map_fn(tf.image.per_image_standardization, features) return features, labels tf.logging.set_verbosity(tf.logging.ERROR) model_path = '_model/catdog_low' if IS_LOW_MEMORY_MODE else '_model/catdog_vgg' classifier = learn.Estimator( model_fn = catdog_model_fn, model_dir = model_path + '_v2', config = run_config( save_summary_steps = 10, keep_checkpoint_max = 3, save_checkpoints_steps = 75 ), feature_engineering_fn = feature_engineering_fn, params = { 'learning_rate': 0.01 } ) train_input_fn, validate_input_fn = train_valid_input_fn('data/train', 32, 64) logging_hook = tf.train.LoggingTensorHook( tensors = { 'log_loss': 'log_loss_tensor' }, every_n_iter = 3 ) validation_monitor = tf.contrib.learn.monitors.ValidationMonitor( input_fn = validate_input_fn, eval_steps = 30, every_n_steps = 100, name = 'Validatation' ) ``` Let its 
trainnnn !!! ``` tf.logging.set_verbosity(tf.logging.INFO) classifier.fit( input_fn = train_input_fn, steps = 100, monitors = [logging_hook, validation_monitor] ) classifier.evaluate( input_fn = validate_input_fn, steps = 75 ) ``` Let's predicttt !!! ``` test_fn, image_test_list = test_input_fn('data/test', 32) test_n = len(image_test_list) print("Test size: %d" % test_n) result_file = open(os.path.join(cwd, 'result/result.txt'), 'w+') result_file.write('id,label\n') predictions = classifier.predict(input_fn = test_fn, as_iterable=True) for i, p in enumerate(predictions): if i >= test_n: break id = image_test_list[i].split("/")[-1] id = id.split(".")[0] if i % 100 == 0: print("Predict %d %s: %f" % (i, image_test_list[i], p["predict"][1])) result_file.write("%s,%f\n" % (id, p["predict"][1])) result_file.flush() result_file.close() print('Finish!!') ```
github_jupyter
``` knitr::opts_chunk$set(cache=TRUE) knitr::opts_chunk$set(warning = FALSE) ``` # RML Notes + [RML Package Homepage](aka.ms/RML) The `MicrosoftRML` (or `RML` for short) package is a state-of-the-art package of machine learning algorithms developed by Microsoft's Algorithms Development team and Microsoft Research. It provides a suite of _transformers_ and _learners_ that make it easy to analyze high-dimensional datasets, such as those arising from text datasets. ## Installation Instructions + If you have corpnet access, review the installation instructions [here](https://microsoft.sharepoint.com/teams/TLC/_layouts/15/start.aspx#/SitePages/RML_Install.aspx). # Using RML The `MicrosoftRML` package provides new, highly performant implementations of machine learning algorithms for classification, regression, and anamoly detection, that are especially well-equipped for handling large datasets. In addition to these fast learning algorithms (called _learners_), the `RML` package also provides _transformers_, for feature engineering. We outline the various learners and transformers in the following sections. ## Transformers The _transformers_ in the `RML` package are labelled with the prefix `mt`. These can be used inside any of the `mxTransforms` calls of the _learners_ we describe in the following section. We outline most of the transformers in the table below; _transformer_ | Use | Additional Parameters | ------------ | :--------: | :-------: | `mtText` | bag of counts of n-grams | `ngramLength` | `mtCat` | create separate variables for each variable string | `maxNumTerms` | `mtCatHash` | same as `mtCat` but with hashing| `hashBits`| `mtWordBag` | bag of counts of n-grams | `ngramLength` | `mtWordHashBag` | same as `mtWordBag` but with hashing | `hashBits` | `mtConcat` | concatenation of multiple text columns into a single vector| none| The hash equivalents of the text transforms use hashing to create dictionaries rather than counting. 
Hashing is typically more performant because it does not require an initial pass over the data to determine the dictionary, and therefore can be more performant than `mtCat`, which could run out of memory because of huge dictionary size. However, caution must be taken in specifying the number of _hashBits_: if too small, collisions may occur; if too large, you may end up with lots of redundant features. ## Learners In addition to the fast feature engineering functions listed in the table above, `RML` adds a number of new learning algorithms for regression, clasification and anamoly detection. The algorithms we'll take a look at today are listed in the table below, along with some of their important parameters: _learner_ | Use | Additional Parameters | ------------ | :--------: | -----------: | `mxFastForest` | fast random forest | `nTree` | `mxFastTree` | fast decision tree | `numBins` | `mxLogisticReg` | elastic-net logistic regression | `l1Weight`, `l2Weight` | `mxFastLinear` | SDCA linear binary classifer and regression | `l1Weight`, `l2Weight` | `mxNeuralNet` | classification and regression neural networks, with GPU acceleration | `acceleeration`, `numHiddenNodes`, `optimizer`| `mxOneClassSvm` | binary support vector machine | `kernel` | # Getting Started with RML ``` packageVersion("RevoScaleR") packageVersion("MicrosoftML") ``` If you are missing either of the above packages, please go back and refer to the installation instructions. # Natural Language Processing with `RML` Let's take a look at using `RML` to estimate a model that would be very hard to do with `RevoScaleR`. In particular, there are virtually no functionality in `RevoScaleR` for handling large text data. We will use `RML` to transform text data into useful features that we can use in a logistic regression learner. In order to deal with the high cardinality of text data, we will use the penalized regression models in `RML`. 
## IMDB Data For this example, we will analyze IMDB movies reviews and the sentiment associated with the review. The data are available [here](http://ai.stanford.edu/~amaas/data/sentiment/). I've also saved the data on a public facing Azure Blob Container [here](http://alizaidi.blob.core.windows.net/training/aclImdb_v1.tar.gz). The data are saved as separate text files per review, and are separated into train and test sets, and further by positive and negative sentiments: Data Hierarchy + train - pos - neg + test - pos - neg Let's use the `readLines` function in R to convert these datasets into R `data.frames`. ``` # load imdb data --- cwd <- getwd() options(stringsAsFactors = FALSE) imdb_dir <- "/datadrive/aclImdb/" read_reviews <- function(path, sentiment) { reviews <- lapply(path, readLines) reviews <- as.vector(unlist(reviews)) reviews_df <- as.data.frame(matrix(reviews, ncol = 1)) reviews_df$sentiment <- sentiment names(reviews_df)[1] <- 'review' return(reviews_df) } setwd(imdb_dir) make_df <- function(path = "train") { pos_files <- list.files(paste(path, "pos", sep = "/"), full.names = TRUE) train_positive <- read_reviews(pos_files, 1) neg_files <- list.files(paste(path, "neg", sep = "/"), full.names = TRUE) train_negative <- read_reviews(neg_files, 0) train_df <- rbind(train_positive, train_negative) } # training sets ----------------------------------------------------------- train_df <- make_df("train") # test sets --------------------------------------------------------------- test_df <- make_df("test") setwd(cwd) ``` ### Applying Transformers to Create Text Features Our compiled `data.frame` of IMDB data reviews looks rather simple. It is is a `data.frame` of two columns, one containing the raw review, and the sescond containing the sentiment binary variable: positive or negative. By itself, the raw text data source isn't a very helpful feature variable for predicting the sentiment value. 
However, we can create/engineer a large amount of feature variables using the text column. As a first pass, we might even consider using the text data source as a collection of words, and try to use each word individually as it's own column. This will be the union of all the words that appear in any review, so will yield a very high cardinality/dimensionality feature matrix with large sparsity (i.e., any given review will only contain a small subset of all the words in the reviews "dictionary"). Next, we can use the `mxLogisticReg` function in RML. The `mxLogisticReg` function contains arguments for the hyperparameter weights for each of the penalty terms. Moreover, we will utilize a `mxTransforms` call to add a list of featurizers/transformers for engineering. While this feature engineering step might require multiple iterations and use cross-validation to pick the best choice, we will start with a text transformation and create _ngrams_ of length 3. This will create a _continguous_ collection of three words that can be then used as predictors. This is a simple method of thinking of possible interaction of words as possible predictors for our sentiment response. 
``` library(MicrosoftML) library(dplyr) train_sample <- train_df %>% sample_n(1000, replace = FALSE) system.time(logit_model <- logisticRegression(sentiment ~ reviewTran, data = train_sample, l1Weight = 0.05, l2Weight = 0.05, mlTransforms = list(featurizeText(vars = c(reviewTran = "review"), language = "English", stopwordsRemover = stopwordsDefault(), keepPunctuations = FALSE))) ) system.time(fast_linear <- mxFastLinear(sentiment ~ reviewTran, data = train_sample, l1Weight = 0.05, l2Weight = 0.05, mxTransforms = list(mtText(vars = c(reviewTran = "review"), language = "English", stopwordsRemover = maPredefinedStopwords(), keepPunctuations = FALSE, ngramLength = 3))) ) ``` ### Using the Pipeline API ``` library(magrittr) review_logit <- train_df %>% featurize(mtText(vars = c(reviewTran = "review"), stopwordsRemover = maPredefinedStopwords(), keepPunctuations = FALSE, ngramLength = 3)) %>% train(formula = sentiment ~ reviewTran, lr = LogisticReg(l2Weight = 0.05, l1Weight = 0.05)) %>% run ``` ### Testing the Logit Model In order to predict our classifer on test data, we will use the `mxPredict` function from the `RML` package. ``` predictions <- mxPredict(logit_model, data = test_df, extraVarsToWrite = "sentiment") roc_results <- rxRoc(actualVarName = "sentiment", predVarNames = "Probability.1", data = predictions) roc_results$predVarName <- factor(roc_results$predVarName) plot(roc_results) ``` ##### The Pipeline API Why not use pipes! The pipeline API is still a work in progress, and the example below is just to show some of it's features. The API will support modifying pipelines, and additional featurization modules. ``` options(stringsAsFactors = TRUE) predictions_pipeline <- logit_model %>% mxPredict(data = test_df, extraVarsToWrite = "sentiment") %>% rxRocCurve(actualVarName = "sentiment", predVarNames = "Probability.1", data = .) 
``` ### Testing the SDCA Model ``` predictions <- mxPredict(logit_model, data = test_df, extraVarsToWrite = "sentiment") roc_results <- rxRoc(actualVarName = "sentiment", predVarNames = "Probability.1", data = predictions) roc_results$predVarName <- factor(roc_results$predVarName) plot(roc_results) ``` ## Neural Networks Let's try to estimate another binary classifier from this dataset, but with a Neural Network architecture rather than a logistic regression model. In the following chunk, we call our neural network model, and set the optimizer to be a stochastic gradient descent optimizer with a learning rate of 0.2. Furthermore, we use the `type` argument to ensure we are learning a binary classifier. By default our network architecture will have 100 hidden nodes. ``` nn_sentiment <- mxNeuralNet(sentiment ~ reviewTran, data = train_df, type = "binary", mxTransforms = list(mtText(vars = c(reviewTran = "review"), stopwordsRemover = maPredefinedStopwords(), keepPunctuations = FALSE, ngramLength = 3)), optimizer = maOptimizerSgd(learningRate = 0.2)) ``` ### Scoring the Neural Net We can similary score our results from the neural network model ``` predictions <- mxPredict(nn_sentiment, data = test_df, extraVarsToWrite = "sentiment") roc_results <- rxRoc(actualVarName = "sentiment", predVarNames = "Probability.1", data = predictions) roc_results$predVarName <- factor(roc_results$predVarName) plot(roc_results) ```
github_jupyter
# Analysis of TRPO ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import scipy.stats as sts plt.close('all') delta = 0.2 def read_data(path): df = pd.read_csv(path + 'progress.csv') iterations = len(df) batch_size = df['EpLenMean'] sigma_1 = [] sigma_2 = [] theta = [] for i in range(iterations): policy_params = np.load(path + 'weights_' + str(i) + '.npy') #iws = np.load(path + 'iws_' + str(i) + '.npy') sigma_1.append(np.exp(policy_params[-2])) sigma_2.append(np.exp(policy_params[-1])) theta.append(policy_params[0]) df['Sigma1'] = sigma_1 df['Sigma2'] = sigma_2 df['Theta'] = theta df['CumAvgRew'] = np.cumsum(df['EpRewMean'])/iterations return df def plot_data(dfs, columns, bottom=-np.infty, top=np.infty, rng=None): fig = plt.figure() ax = fig.add_subplot(111) if type(dfs) is not list: dfs = [dfs] n_subplots = len(dfs) for i in range(n_subplots): df = dfs[i] if rng is not None: df = df[rng] ax.set_xlabel('Iteration') x = range(len(df)) for col in columns: y = np.clip(df[col], bottom, top) ax.plot(x, y, label=col) ax.legend() return fig def plot_ci(mean, std, conf, n_runs): fig = plt.figure() ax = fig.add_subplot(111) ax.plot(mean.index, mean) interval = sts.t.interval(conf, n_runs-1,loc=mean,scale=std/np.sqrt(n_runs)) ax.fill_between(mean.index, interval[0], interval[1], alpha=0.3) return fig ``` # LQG ## Setting: * Policy mean: linear w/o bias * Policy std: one logstd parameter * Available random seeds: 0, 27, 62, 315, 640 * Batch size: 100 * delta = 0.2 * Implementation: baselines * Task variant: ifqi * Horizon: 200 ### Performance (undiscounted) on 5 different random seeds **Available data** ``` common_path = '../results/trpo/lqg/seed_' seeds = [0, 27, 62, 315, 640] dfs = [] for s in seeds: dfs.append(read_data(common_path + str(s) + '/')) plot_data(dfs, ['EpRewMean']) plot_data(dfs, ['Theta']) n_iter = min(len(df) for df in dfs) n_runs = len(dfs) print('Number of runs:', n_runs) print('Number of iterations per run:', n_iter) 
print('Columns:', list(dfs[0])) concat_df = pd.concat(dfs, axis=1) mean_df = pd.concat(dfs, axis=1).groupby(by=concat_df.columns, axis=1).mean() std_df = pd.concat(dfs, axis=1).groupby(by=concat_df.columns, axis=1).std() conf = 0.95 print('Average performance with %i%% confidence intervals:' % (conf*100)) mean = mean_df['EpRewMean'] std = std_df['EpRewMean'] plot_ci(mean, std, conf, n_runs) cum_mean = mean_df['CumAvgRew'][len(mean_df)-1] cum_std = std_df['CumAvgRew'][len(mean_df)-1] interval = sts.t.interval(conf, n_runs-1,loc=cum_mean,scale=cum_std/np.sqrt(n_runs)) print('Average cumulative reward: %f, c.i. %s' % (cum_mean, interval)) ``` # Cartpole ## Setting: * Policy mean: linear with bias * Policy std: one logstd parameter for each action dimension * Available random seeds: 0, 27, 62, 315 * Batch size: 100 * delta = 0.2 * Implementation: baselines * Task variant: gym * Horizon: 200 ### Performance (undiscounted) on 4 different random seeds **Available data** ``` common_path = '../results/trpo/cartpole/seed_' seeds = [0, 27, 62, 315] dfs = [] for s in seeds: dfs.append(read_data(common_path + str(s) + '/')) plot_data(dfs, ['EpRewMean']) n_iter = min(len(df) for df in dfs) n_runs = len(dfs) print('Number of runs:', n_runs) print('Number of iterations per run:', n_iter) print('Columns:', list(dfs[0])) concat_df = pd.concat(dfs, axis=1) mean_df = pd.concat(dfs, axis=1).groupby(by=concat_df.columns, axis=1).mean() std_df = pd.concat(dfs, axis=1).groupby(by=concat_df.columns, axis=1).std() conf = 0.95 print('Average performance with %i%% confidence intervals:' % (conf*100)) mean = mean_df['EpRewMean'] std = std_df['EpRewMean'] plot_ci(mean, std, conf, n_runs) cum_mean = mean_df['CumAvgRew'][len(mean_df)-1] cum_std = std_df['CumAvgRew'][len(mean_df)-1] interval = sts.t.interval(conf, n_runs-1,loc=cum_mean,scale=cum_std/np.sqrt(n_runs)) print('Average cumulative reward: %f, c.i. 
%s' % (cum_mean, interval)) ``` # Swimmer ## Setting: * Policy mean: 64x64 tanh with biases * Policy std: one logstd parameter for each action dimension * Available random seeds: 0, 27, 62, 315, 640 * Batch size: 100 * delta = 0.2 * Implementation: baselines * Task variant: gym * Horizon: 500 ### Performance (undiscounted) on 5 different random seeds **Available data** ``` common_path = '../results/trpo/swimmer/seed_' seeds = [0, 27, 62, 315, 640] dfs = [] for s in seeds: dfs.append(read_data(common_path + str(s) + '/')) plot_data(dfs, ['EpRewMean']) n_iter = min(len(df) for df in dfs) n_runs = len(dfs) print('Number of runs:', n_runs) print('Number of iterations per run:', n_iter) print('Columns:', list(dfs[0])) concat_df = pd.concat(dfs, axis=1) mean_df = pd.concat(dfs, axis=1).groupby(by=concat_df.columns, axis=1).mean() std_df = pd.concat(dfs, axis=1).groupby(by=concat_df.columns, axis=1).std() conf = 0.95 print('Average performance with %i%% confidence intervals:' % (conf*100)) mean = mean_df['EpRewMean'] std = std_df['EpRewMean'] plot_ci(mean, std, conf, n_runs) cum_mean = mean_df['CumAvgRew'][len(mean_df)-1] cum_std = std_df['CumAvgRew'][len(mean_df)-1] interval = sts.t.interval(conf, n_runs-1,loc=cum_mean,scale=cum_std/np.sqrt(n_runs)) print('Average cumulative reward: %f, c.i. %s' % (cum_mean, interval)) ```
github_jupyter
# 準備 ``` # バージョン指定時にコメントアウト #!pip install torch==1.7.0 #!pip install torchvision==0.8.1 import torch import torchvision # バージョンの確認 print(torch.__version__) print(torchvision.__version__) # Google ドライブにマウント from google.colab import drive drive.mount('/content/gdrive') %cd '/content/gdrive/MyDrive/Colab Notebooks/gan_sample/chapter2' import os import numpy as np import torch import torch.nn as nn import torch.optim as optimizers import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader import torchvision import torchvision.transforms as transforms import matplotlib import matplotlib.pyplot as plt %matplotlib inline ``` # データセットの作成 ``` np.random.seed(1234) torch.manual_seed(1234) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # データの取得 root = os.path.join('data', 'mnist') transform = transforms.Compose([transforms.ToTensor(), lambda x: x.view(-1)]) mnist_train = \ torchvision.datasets.MNIST(root=root, download=True, train=True, transform=transform) mnist_test = \ torchvision.datasets.MNIST(root=root, download=True, train=False, transform=transform) train_dataloader = DataLoader(mnist_train, batch_size=100, shuffle=True) test_dataloader = DataLoader(mnist_test, batch_size=100, shuffle=False) ``` # ネットワークの定義 ``` class VAE(nn.Module): def __init__(self, device='cpu'): super().__init__() self.device = device self.encoder = Encoder(device=device) self.decoder = Decoder(device=device) def forward(self, x): # エンコーダ mean, var = self.encoder(x) # 潜在変数の作成 z = self.reparameterize(mean, var) # デコーダ y = self.decoder(z) #生成画像yと潜在変数zが返り値 return y, z # 潜在変数zの作成 def reparameterize(self, mean, var): # 標準正規分布の作成 eps = torch.randn(mean.size()).to(self.device) # 再パラメータ化トリック z = mean + torch.sqrt(var) * eps return z # 誤差の計算 def lower_bound(self, x): # 平均と分散のベクトルを計算 mean, var = self.encoder(x) # 平均と分散から潜在変数zを作成 z = self.reparameterize(mean, var) # 潜在変数zから生成画像を作成 y = self.decoder(z) # 再構成誤差 reconst = - torch.mean(torch.sum(x * torch.log(y) + (1 - 
x) * torch.log(1 - y), dim=1)) # 正則化 kl = - 1/2 * torch.mean(torch.sum(1 + torch.log(var) - mean**2 - var, dim=1)) # 再構成誤差 + 正則化 L = reconst + kl return L class Encoder(nn.Module): def __init__(self, device='cpu'): super().__init__() self.device = device self.l1 = nn.Linear(784, 200) self.l_mean = nn.Linear(200, 10) self.l_var = nn.Linear(200, 10) def forward(self, x): # 784次元から200次元 h = self.l1(x) # 活性化関数 h = torch.relu(h) # 200次元から10次元の平均 mean = self.l_mean(h) # 200次元から10次元の分散 var = self.l_var(h) # 活性化関数softplus var = F.softplus(var) return mean, var class Decoder(nn.Module): def __init__(self, device='cpu'): super().__init__() self.device = device self.l1 = nn.Linear(10, 200) self.out = nn.Linear(200, 784) def forward(self, x): # 10次元から200次元 h = self.l1(x) # 活性化関数 h = torch.relu(h) # 200次元から784次元 h = self.out(h) # シグモイド関数 y = torch.sigmoid(h) return y ``` # 学習の実行 ``` # モデルの設定 model = VAE(device=device).to(device) # 損失関数の設定 criterion = model.lower_bound # 最適化関数の設定 optimizer = optimizers.Adam(model.parameters()) print(model) epochs = 10 # エポックのループ for epoch in range(epochs): train_loss = 0. 
# バッチサイズのループ for (x, _) in train_dataloader: x = x.to(device) # 訓練モードへの切替 model.train() # 本物画像と生成画像の誤差計算 loss = criterion(x) # 勾配の初期化 optimizer.zero_grad() # 誤差の勾配計算 loss.backward() # パラメータの更新 optimizer.step() # 訓練誤差の更新 train_loss += loss.item() train_loss /= len(train_dataloader) print('Epoch: {}, Loss: {:.3f}'.format( epoch+1, train_loss )) ``` # 画像の生成 ``` # ノイズの作成数 batch_size=8 # デコーダ入力用に標準正規分布に従う10次元のノイズを作成 z = torch.randn(batch_size, 10, device = device) # 評価モードへの切替 model.eval() # デコーダにノイズzを入力 images = model.decoder(z) images = images.view(-1, 28, 28) images = images.squeeze().detach().cpu().numpy() for i, image in enumerate(images): plt.subplot(2, 4, i+1) plt.imshow(image, cmap='binary_r') plt.axis('off') plt.tight_layout() plt.show() fig = plt.figure(figsize=(10, 3)) model.eval() for x, t in test_dataloader: # 本物画像 for i, im in enumerate(x.view(-1, 28, 28).detach().numpy()[:10]): ax = fig.add_subplot(3, 10, i+1, xticks=[], yticks=[]) ax.imshow(im, 'gray') x = x.to(device) # 本物画像から生成画像 y, z = model(x) y = y.view(-1, 28, 28) for i, im in enumerate(y.cpu().detach().numpy()[:10]): ax = fig.add_subplot(3, 10, i+11, xticks=[], yticks=[]) ax.imshow(im, 'gray') # 1つ目の画像と2つ目の画像の潜在変数を連続的に変化 z1to0 = torch.cat([z[1] * (i * 0.1) + z[0] * ((9 - i) * 0.1) for i in range(10)]).reshape(10,10) y2 = model.decoder(z1to0).view(-1, 28, 28) for i, im in enumerate(y2.cpu().detach().numpy()): ax = fig.add_subplot(3, 10, i+21, xticks=[], yticks=[]) ax.imshow(im, 'gray') break ```
github_jupyter
___ <a href='http://www.pieriandata.com'><img src='../Pierian_Data_Logo.png'/></a> ___ <center><em>Copyright Pierian Data</em></center> <center><em>For more information, visit us at <a href='http://www.pieriandata.com'>www.pieriandata.com</a></em></center> # NumPy Exercises - Solutions Now that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks and then you'll be asked some more complicated questions. <div class="alert alert-danger" style="margin: 10px"><strong>IMPORTANT NOTE!</strong> Make sure you don't run the cells directly above the example output shown, <br>otherwise you will end up writing over the example output!</div> #### 1. Import NumPy as np ``` import numpy as np ``` #### 2. Create an array of 10 zeros ``` # CODE HERE # DON'T WRITE HERE np.zeros(10) ``` #### 3. Create an array of 10 ones ``` # DON'T WRITE HERE np.ones(10) ``` #### 4. Create an array of 10 fives ``` # DON'T WRITE HERE np.ones(10) * 5 ``` #### 5. Create an array of the integers from 10 to 50 ``` # DON'T WRITE HERE np.arange(10,51) ``` #### 6. Create an array of all the even integers from 10 to 50 ``` # DON'T WRITE HERE np.arange(10,51,2) ``` #### 7. Create a 3x3 matrix with values ranging from 0 to 8 ``` # DON'T WRITE HERE np.arange(9).reshape(3,3) ``` #### 8. Create a 3x3 identity matrix ``` # DON'T WRITE HERE np.eye(3) ``` #### 9. Use NumPy to generate a random number between 0 and 1<br><br>&emsp;NOTE: Your result's value should be different from the one shown below. ``` # DON'T WRITE HERE np.random.rand(1) ``` #### 10. Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution<br><br>&emsp;&ensp;NOTE: Your result's values should be different from the ones shown below. ``` # DON'T WRITE HERE np.random.randn(25) ``` #### 11. Create the following matrix: ``` # DON'T WRITE HERE np.arange(1,101).reshape(10,10) / 100 ``` #### 12. 
Create an array of 20 linearly spaced points between 0 and 1: ``` # DON'T WRITE HERE np.linspace(0,1,20) ``` ## Numpy Indexing and Selection Now you will be given a starting matrix (be sure to run the cell below!), and be asked to replicate the resulting matrix outputs: ``` # RUN THIS CELL - THIS IS OUR STARTING MATRIX mat = np.arange(1,26).reshape(5,5) mat ``` #### 13. Write code that reproduces the output shown below.<br><br>&emsp;&ensp;Be careful not to run the cell immediately above the output, otherwise you won't be able to see the output any more. ``` # CODE HERE # DON'T WRITE HERE mat[2:,1:] ``` #### 14. Write code that reproduces the output shown below. ``` # DON'T WRITE HERE mat[3,4] ``` #### 15. Write code that reproduces the output shown below. ``` # DON'T WRITE HERE mat[:3,1:2] ``` #### 16. Write code that reproduces the output shown below. ``` # DON'T WRITE HERE mat[4,:] ``` #### 17. Write code that reproduces the output shown below. ``` # DON'T WRITE HERE mat[3:5,:] ``` ## NumPy Operations #### 18. Get the sum of all the values in mat ``` # DON'T WRITE HERE mat.sum() ``` #### 19. Get the standard deviation of the values in mat ``` # DON'T WRITE HERE mat.std() ``` #### 20. Get the sum of all the columns in mat ``` # DON'T WRITE HERE mat.sum(axis=0) ``` ## Bonus Question We worked a lot with random data with numpy, but is there a way we can insure that we always get the same random numbers? What does the seed value mean? Does it matter what the actual number is? [Click Here for a Hint](https://www.google.com/search?q=numpy+random+seed) ``` np.random.seed(101) ``` # Great Job!
github_jupyter
``` # 기본 환경 로드 %run ./env.ipynb ``` # Fit Data Model ETL를 통해 생성된 데이터셋을 이용하여 훈련을 통해 데이터 모델을 생성합니다. DNN 모델과 비교를 위해 RandomForest 모델을 훈련해봅니다. ``` from utils import * sdate = get_env_sdate(default = "2018070108") path_base = get_env_path_base(default = "/root/mnt/dfs/notebooks-skp/mnist") path_data = get_env_path_date(default = "/root/mnt/dfs/data/mnist") print("sdate: {}".format(sdate)) print("path_base: {}".format(path_base)) print("path_data: {}".format(path_data)) # 데이터 로드 import os from sklearn.externals import joblib path_etl = os.path.join(path_data, "etl") path_etl_sdate = os.path.join(path_etl, sdate) path_train_xs = os.path.join(path_etl_sdate, "rf-train_xs.pkl") path_train_ys = os.path.join(path_etl_sdate, "rf-train_ys.pkl") np_train_xs = joblib.load(path_train_xs) np_train_ys = joblib.load(path_train_ys) print(np_train_xs.shape, np_train_ys.shape) path_test_xs = os.path.join(path_etl_sdate, "rf-test_xs.pkl") path_test_ys = os.path.join(path_etl_sdate, "rf-test_ys.pkl") np_test_xs = joblib.load(path_test_xs) np_test_ys = joblib.load(path_test_ys) print(np_test_xs.shape, np_test_ys.shape) dim_x = np_train_xs.shape[1] n_class = 10 from sklearn import model_selection from sklearn.ensemble import RandomForestClassifier # Create a random forest classifier. By convention, clf means 'classifier' clf = RandomForestClassifier(n_jobs=6) # Train the classifier to take the training features and learn how they relate # to the training y (the species) clf.fit(np_train_xs, np_train_ys) np_pred_ys = clf.predict(np_test_xs) from sklearn import metrics from sklearn.metrics import classification_report print("\n") print(classification_report(np_test_ys, np_pred_ys)) ``` 전반적으로 DNN 모델보다 낮은 결과를 보여줌. ``` # RandomForest를 통해 얻은 Importance Feature를 출력해봅니다. 
top = 20 importances = clf.feature_importances_ std = np.std([tree.feature_importances_ for tree in clf.estimators_], axis=0) indices = np.argsort(importances)[::-1][:top] # Print the feature ranking print("Feature ranking:") for i, f in enumerate(range(np_train_xs.shape[1])): print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]])) if (i >= top-1): break # Plot the feature importances of the forest from IPython.core.pylabtools import figsize figsize(14, 14) plt.figure() plt.title("Feature importances") plt.bar(range(top), importances[indices], color="r", yerr=std[indices], align="center") plt.xticks(range(top), indices) plt.xlim([-1, top]) plt.show() ``` 이미지에서 숫자 부분이 중간 부분에 위치하기 때문에 주요 feature 가 중앙 위치에서 뽑힌 것을 볼 수 있습니다.
github_jupyter
# Plotting kde objects ``` import scipy.stats as stats import matplotlib.pyplot as plt %matplotlib inline ``` # 1d kde ``` kde = stats.gaussian_kde(np.random.normal(loc=50, scale=5, size=100000)) x = np.arange(0, 100, 1) plt.plot(x, kde(x)) plt.show() ``` ## 2d kde ``` from scipy import stats def measure(n): "Measurement model, return two coupled measurements." m1 = np.random.normal(size=n) m2 = np.random.normal(scale=0.5, size=n) return m1+m2, m1-m2 m1, m2 = measure(2000) xmin = m1.min() xmax = m1.max() ymin = m2.min() ymax = m2.max() X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j] positions = np.vstack([X.ravel(), Y.ravel()]) values = np.vstack([m1, m2]) kernel = stats.gaussian_kde(values) Z = np.reshape(kernel(positions).T, X.shape) import numpy as np import matplotlib.pyplot as plt from scipy.stats import kde np.random.seed(1977) # Generate 200 correlated x,y points data = np.random.multivariate_normal([0, 0], [[1, 0.5], [0.5, 3]], 200) x, y = data.T nbins = 20 fig, axes = plt.subplots(ncols=2, nrows=2, sharex=True, sharey=True) axes[0, 0].set_title('Scatterplot') axes[0, 0].plot(x, y, 'ko') axes[0, 1].set_title('Hexbin plot') axes[0, 1].hexbin(x, y, gridsize=nbins) axes[1, 0].set_title('2D Histogram') axes[1, 0].hist2d(x, y, bins=nbins) # Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents k = kde.gaussian_kde(data.T) xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j] zi = k(np.vstack([xi.flatten(), yi.flatten()])) axes[1, 1].set_title('Gaussian KDE') axes[1, 1].pcolormesh(xi, yi, zi.reshape(xi.shape)) fig.tight_layout() plt.show() size = 1000 kde = stats.gaussian_kde( [np.random.normal(loc=40, scale=10, size=size), np.random.normal(loc=55, scale=3, size=size)] ) font = {'family' : 'normal', 'size' : 14} plt.rc('font', **font) start = 0 end = 100 step = 1 i = np.arange(start, end, step) nbins = len(i) xi,yi = np.mgrid[i.min():i.max():nbins*1j, i.min():i.max():nbins*1j] zi = kde(np.vstack([xi.flatten(), 
yi.flatten()])) fig = plt.figure(1) plt.pcolormesh(xi, yi, zi.reshape(xi.shape)) plt.title('2d-KDE') plt.xlabel('x') plt.ylabel('y') plt.show() #fig.savefig('/home/nick/test.png', bbox_inches='tight') size = 1000 kde = stats.gaussian_kde( [np.random.normal(loc=40, scale=10, size=size), np.random.normal(loc=55, scale=3, size=size)] ) f, axarr = plt.subplots(2) start = 0 end = 100 step = 1 i = np.arange(start, end, step) nbins = len(i) xi,yi = np.mgrid[i.min():i.max():nbins*1j, i.min():i.max():nbins*1j] zi = kde(np.vstack([xi.flatten(), yi.flatten()])) #fig = plt.figure(1) axarr[0].pcolormesh(xi, yi, zi.reshape(xi.shape)) #plt.title('2d-KDE') #plt.xlabel('x') #plt.ylabel('y') #plt.show() #fig.savefig('/home/nick/test.png', bbox_inches='tight') ``` # Plotting sandbox ``` plt.figure(1) plt.subplot(211) plt.plot(range(10), lw=10, alpha=0.1) plt.subplot(212) plt.plot(range(10), 'ro', alpha=0.5) plt.show() plt.subplot? x = np.arange(0, 10, 0.1) vals = kde.resample(size=100) plt.figure(1) plt.hist(vals[0,], 30) plt.plot(x, kde(x)) plt.show() ``` # KDE intersection ``` size = 1000 kde1 = stats.gaussian_kde( [np.random.normal(loc=40, scale=10, size=size), np.random.normal(loc=55, scale=3, size=size)] ) kde2 = stats.gaussian_kde( [np.random.normal(loc=55, scale=10, size=size), np.random.normal(loc=70, scale=3, size=size)] ) kde3 = stats.gaussian_kde( [np.random.normal(loc=40, scale=10, size=size), np.random.normal(loc=55, scale=3, size=size)] ) print kde1.integrate_kde(kde2) print kde1.integrate_kde(kde3) kde1 = stats.gaussian_kde(np.random.normal(loc=30, scale=10, size=size)) kde2 = stats.gaussian_kde(np.random.normal(loc=70, scale=10, size=size)) print kde1.integrate_kde(kde1) print kde1.integrate_kde(kde2) # calculating intersection def kde_intersect(kde1, kde2, start=0, end=100, step=0.1): # evalution grid x = np.arange(start,end,step) # calculate intersection densities pmin = np.min(np.c_[kde1(x),kde2(x)], axis=1) # integrate areas under curves total = 
kde1.integrate_box_1d(start,end) + kde2.integrate_box_1d(start,end) #total = np.trapz(y=kde1(x), x=x) + np.trapz(y=kde2(x), x=x) intersection = np.trapz(y=pmin,x=x) print 'kde1 max: {}'.format(np.max(kde1(x))) print 'kde2 max: {}'.format(np.max(kde2(x))) print 'pmin max: {}'.format(np.max(pmin)) print 'total: {}'.format(total) print 'int: {}'.format(intersection) # overlap coefficient return 2 * intersection / float(total) kde1 = stats.gaussian_kde(np.random.normal(loc=1.67, scale=0.01, size=size)) kde2 = stats.gaussian_kde(np.random.normal(loc=1.68, scale=0.01, size=size)) print kde_intersect(kde1, kde1) #print kde_intersect(kde1, kde2) # calculating intersection def kde_intersect(kde1, kde2, start=0, end=100, step=0.1): # evalution grid x = np.arange(start,end,step) # kde integrations int1 = kde1.integrate_box_1d(start,end) int2 = kde2.integrate_box_1d(start,end) # kde scaled evaluated values s1 = int1 / np.max(kde1(x)) * kde1(x) s2 = int2 / np.max(kde2(x)) * kde2(x) # calculate intersection densities pmin = np.min(np.c_[s1,s2], axis=1) # integrate areas under curves total = kde1.integrate_box_1d(start,end) + kde2.integrate_box_1d(start,end) intersection = np.trapz(y=pmin,x=x) print 'kde1 max: {}'.format(np.max(kde1(x))) print 'kde2 max: {}'.format(np.max(kde2(x))) print 'pmin max: {}'.format(np.max(pmin)) print 'total: {}'.format(total) print 'inter: {}'.format(intersection) # overlap coefficient return 2 * intersection / float(total) kde1 = stats.gaussian_kde(np.random.normal(loc=1.67, scale=0.01, size=size)) kde2 = stats.gaussian_kde(np.random.normal(loc=1.68, scale=0.01, size=size)) print kde_intersect(kde1, kde1) #print kde_intersect(kde1, kde2) # calculating BD shift as 1 - kde_intersection kde1 = stats.gaussian_kde(np.random.normal(loc=1.67, scale=0.01, size=size)) kde2 = stats.gaussian_kde(np.random.normal(loc=1.68, scale=0.01, size=size)) x = np.arange(1.6,1.76,0.001) plt.figure(1) plt.fill_between(x, kde1(x), color='b', alpha=0.3) plt.fill_between(x, 
kde2(x), color='r', alpha=0.3) plt.show() BD_shift = 1 - kde_intersect(kde1, kde2, start=0, end=2, step=0.01) print 'BD shift (1 - kde_intersection): {0:.3f}'.format(BD_shift) # calculating BD shift as 1 - kde_intersection kde1 = stats.gaussian_kde(np.random.normal(loc=1.67, scale=0.01, size=size)) kde2 = stats.gaussian_kde(np.random.normal(loc=1.695, scale=0.01, size=size)) x = np.arange(1.6,1.76,0.001) plt.figure(1) plt.fill_between(x, kde1(x), color='b', alpha=0.3) plt.fill_between(x, kde2(x), color='r', alpha=0.3) plt.show() BD_shift = 1 - kde_intersect(kde1, kde2, start=0, end=2, step=0.01) print 'BD shift (1 - kde_intersection): {0:.3f}'.format(BD_shift) KernelDensity(kernel='gaussian').fit(vals) ```
github_jupyter
## Understanding ROS Topics This tutorial introduces ROS topics as well as using the `rostopic` commandline tool. Source: [ROS Wiki](http://wiki.ros.org/ROS/Tutorials/UnderstandingTopics) Let's start by making sure that we have `roscore` running: ``` %%bash rosnode list ``` If an error is shown, you need to launch the `roscore` with: ``` %%bash --bg roscore ``` For this tutorial we will also use turtlesim. ``` %%bash --bg rosrun turtlesim turtlesim_node ``` We'll also need something to drive the turtle around with: ``` import rospy rospy.init_node('teleop_turtle') import widgets.teleop_buttons ``` ### Using `rqt_graph` `rqt_graph` creates a dynamic graph of what's going on in the system. ``` %%bash --bg rosrun rqt_graph rqt_graph ``` You will see something similar to: ![](http://wiki.ros.org/ROS/Tutorials/UnderstandingTopics?action=AttachFile&do=get&target=rqt_graph_turtle_key.png) As of ROS Hydro, the topic name `/command_velocity` was changed to `/cmd_vel`. If you place your mouse over `/turtle1/cmd_vel` it will highlight the ROS nodes (here blue and green) and topics (here red). As you can see, the `turtlesim` and `teleop_turtle` nodes are communicating on the topic named `/turtle1/cmd_vel`. ![](http://wiki.ros.org/ROS/Tutorials/UnderstandingTopics?action=AttachFile&do=get&target=rqt_graph_turtle_key2.png) ### Introducing `rostopic` The `rostopic` tool allows you to get information about ROS **topics**. You can use the help option to get the available sub-commands for `rostopic`: ``` %%bash rostopic -h ``` `rostopic list` returns a list of all topics currently subscribed to and published. ``` %%bash rostopic list -v ``` Communication on topics happens by sending ROS **messages** between nodes. For the publisher and subscriber to communicate, they must send and receive the same **type** of message. This means that a topic type is defined by the message type published on it. The type of the message sent on a topic can be determined using rostopic type. 
``` %%bash rostopic type /turtle1/cmd_vel ``` We can look at the details of the message using `rosmsg`: ``` %%bash rosmsg show geometry_msgs/Twist ``` `rostopic pub` publishes data on to a topic currently advertised. ``` %%bash rostopic pub -1 /turtle1/cmd_vel geometry_msgs/Twist -- \ '[2.0, 0.0, 0.0]' '[0.0, 0.0, 1.8]' ``` The previous command will send a single message to turtlesim telling it to move with a linear velocity of 2.0, and an angular velocity of 1.8. This is a pretty complicated example, so let's look at each argument in detail. * This command will publish messages to a given topic: `rostopic pub` * This option (dash-one) causes rostopic to only publish one message then exit: `-1` * This is the name of the topic to publish to: `/turtle1/cmd_vel` * This is the message type to use when publishing to the topic: `geometry_msgs/Twist` * This option (double-dash) tells the option parser that none of the following arguments is an option. This is required in cases where your arguments have a leading dash -, like negative numbers. `--` * As noted before, a geometry_msgs/Twist msg has two vectors of three floating point elements each: `linear` and `angular`. In this case, `'[2.0, 0.0, 0.0]'` becomes the linear value with x=2.0, y=0.0, and z=0.0, and `'[0.0, 0.0, 1.8]'` is the angular value with x=0.0, y=0.0, and z=1.8. These arguments are actually in YAML syntax, which is described more in the [YAML command line documentation](http://wiki.ros.org/ROS/YAMLCommandLine). `'[2.0, 0.0, 0.0]' '[0.0, 0.0, 1.8]'` You may have noticed that the turtle has stopped moving; this is because the turtle requires a steady stream of commands at 1 Hz to keep moving. Now that you understand how ROS topics work, let's look at how [services and parameters](UnderstandingServicesParams.ipynb) work.
github_jupyter
# Hierarchical Clustering **Hierarchical clustering** refers to a class of clustering methods that seek to build a **hierarchy** of clusters, in which some clusters contain others. In this assignment, we will explore a top-down approach, recursively bipartitioning the data using k-means. **Note to Amazon EC2 users**: To conserve memory, make sure to stop all the other notebooks before running this notebook. ## Import packages The following code block will check if you have the correct version of GraphLab Create. Any version later than 1.8.5 will do. To upgrade, read [this page](https://turi.com/download/upgrade-graphlab-create.html). ``` import graphlab import matplotlib.pyplot as plt import numpy as np import sys import os import time from scipy.sparse import csr_matrix from sklearn.cluster import KMeans from sklearn.metrics import pairwise_distances %matplotlib inline '''Check GraphLab Create version''' from distutils.version import StrictVersion assert (StrictVersion(graphlab.version) >= StrictVersion('1.8.5')), 'GraphLab Create must be version 1.8.5 or later.' ``` ## Load the Wikipedia dataset ``` wiki = graphlab.SFrame('people_wiki.gl/') ``` As we did in previous assignments, let's extract the TF-IDF features: ``` wiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['text']) ``` To run k-means on this dataset, we should convert the data matrix into a sparse matrix. ``` from em_utilities import sframe_to_scipy # converter # This will take about a minute or two. tf_idf, map_index_to_word = sframe_to_scipy(wiki, 'tf_idf') ``` To be consistent with the k-means assignment, let's normalize all vectors to have unit norm. ``` from sklearn.preprocessing import normalize tf_idf = normalize(tf_idf) ``` ## Bipartition the Wikipedia dataset using k-means Recall our workflow for clustering text data with k-means: 1. Load the dataframe containing a dataset, such as the Wikipedia text dataset. 2. Extract the data matrix from the dataframe. 3. 
Run k-means on the data matrix with some value of k. 4. Visualize the clustering results using the centroids, cluster assignments, and the original dataframe. We keep the original dataframe around because the data matrix does not keep auxiliary information (in the case of the text dataset, the title of each article). Let us modify the workflow to perform bipartitioning: 1. Load the dataframe containing a dataset, such as the Wikipedia text dataset. 2. Extract the data matrix from the dataframe. 3. Run k-means on the data matrix with k=2. 4. Divide the data matrix into two parts using the cluster assignments. 5. Divide the dataframe into two parts, again using the cluster assignments. This step is necessary to allow for visualization. 6. Visualize the bipartition of data. We'd like to be able to repeat Steps 3-6 multiple times to produce a **hierarchy** of clusters such as the following: ``` (root) | +------------+-------------+ | | Cluster Cluster +------+-----+ +------+-----+ | | | | Cluster Cluster Cluster Cluster ``` Each **parent cluster** is bipartitioned to produce two **child clusters**. At the very top is the **root cluster**, which consists of the entire dataset. Now we write a wrapper function to bipartition a given cluster using k-means. There are three variables that together comprise the cluster: * `dataframe`: a subset of the original dataframe that correspond to member rows of the cluster * `matrix`: same set of rows, stored in sparse matrix format * `centroid`: the centroid of the cluster (not applicable for the root cluster) Rather than passing around the three variables separately, we package them into a Python dictionary. The wrapper function takes a single dictionary (representing a parent cluster) and returns two dictionaries (representing the child clusters). 
``` def bipartition(cluster, maxiter=400, num_runs=4, seed=None): '''cluster: should be a dictionary containing the following keys * dataframe: original dataframe * matrix: same data, in matrix format * centroid: centroid for this particular cluster''' data_matrix = cluster['matrix'] dataframe = cluster['dataframe'] # Run k-means on the data matrix with k=2. We use scikit-learn here to simplify workflow. kmeans_model = KMeans(n_clusters=2, max_iter=maxiter, n_init=num_runs, random_state=seed, n_jobs=1) kmeans_model.fit(data_matrix) centroids, cluster_assignment = kmeans_model.cluster_centers_, kmeans_model.labels_ # Divide the data matrix into two parts using the cluster assignments. data_matrix_left_child, data_matrix_right_child = data_matrix[cluster_assignment==0], \ data_matrix[cluster_assignment==1] # Divide the dataframe into two parts, again using the cluster assignments. cluster_assignment_sa = graphlab.SArray(cluster_assignment) # minor format conversion dataframe_left_child, dataframe_right_child = dataframe[cluster_assignment_sa==0], \ dataframe[cluster_assignment_sa==1] # Package relevant variables for the child clusters cluster_left_child = {'matrix': data_matrix_left_child, 'dataframe': dataframe_left_child, 'centroid': centroids[0]} cluster_right_child = {'matrix': data_matrix_right_child, 'dataframe': dataframe_right_child, 'centroid': centroids[1]} return (cluster_left_child, cluster_right_child) ``` The following cell performs bipartitioning of the Wikipedia dataset. Allow 20-60 seconds to finish. Note. For the purpose of the assignment, we set an explicit seed (`seed=1`) to produce identical outputs for every run. In practical applications, you might want to use different random seeds for all runs. 
``` wiki_data = {'matrix': tf_idf, 'dataframe': wiki} # no 'centroid' for the root cluster left_child, right_child = bipartition(wiki_data, maxiter=100, num_runs=6, seed=1) ``` Let's examine the contents of one of the two clusters, which we call the `left_child`, referring to the tree visualization above. ``` left_child ``` And here is the content of the other cluster we named `right_child`. ``` right_child ``` ## Visualize the bipartition We provide you with a modified version of the visualization function from the k-means assignment. For each cluster, we print the top 5 words with highest TF-IDF weights in the centroid and display excerpts for the 8 nearest neighbors of the centroid. ``` def display_single_tf_idf_cluster(cluster, map_index_to_word): '''map_index_to_word: SFrame specifying the mapping betweeen words and column indices''' wiki_subset = cluster['dataframe'] tf_idf_subset = cluster['matrix'] centroid = cluster['centroid'] # Print top 5 words with largest TF-IDF weights in the cluster idx = centroid.argsort()[::-1] for i in xrange(5): print('{0:s}:{1:.3f}'.format(map_index_to_word['category'][idx[i]], centroid[idx[i]])), print('') # Compute distances from the centroid to all data points in the cluster. distances = pairwise_distances(tf_idf_subset, [centroid], metric='euclidean').flatten() # compute nearest neighbors of the centroid within the cluster. nearest_neighbors = distances.argsort() # For 8 nearest neighbors, print the title as well as first 180 characters of text. # Wrap the text at 80-character mark. 
for i in xrange(8): text = ' '.join(wiki_subset[nearest_neighbors[i]]['text'].split(None, 25)[0:25]) print('* {0:50s} {1:.5f}\n {2:s}\n {3:s}'.format(wiki_subset[nearest_neighbors[i]]['name'], distances[nearest_neighbors[i]], text[:90], text[90:180] if len(text) > 90 else '')) print('') ``` Let's visualize the two child clusters: ``` display_single_tf_idf_cluster(left_child, map_index_to_word) display_single_tf_idf_cluster(right_child, map_index_to_word) ``` The left cluster consists of athletes, whereas the right cluster consists of non-athletes. So far, we have a single-level hierarchy consisting of two clusters, as follows: ``` Wikipedia + | +--------------------------+--------------------+ | | + + Athletes Non-athletes ``` Is this hierarchy good enough? **When building a hierarchy of clusters, we must keep our particular application in mind.** For instance, we might want to build a **directory** for Wikipedia articles. A good directory would let you quickly narrow down your search to a small set of related articles. The categories of athletes and non-athletes are too general to facilitate efficient search. For this reason, we decide to build another level into our hierarchy of clusters with the goal of getting more specific cluster structure at the lower level. To that end, we subdivide both the `athletes` and `non-athletes` clusters. 
## Perform recursive bipartitioning ### Cluster of athletes To help identify the clusters we've built so far, let's give them easy-to-read aliases: ``` athletes = left_child non_athletes = right_child ``` Using the bipartition function, we produce two child clusters of the athlete cluster: ``` # Bipartition the cluster of athletes left_child_athletes, right_child_athletes = bipartition(athletes, maxiter=100, num_runs=6, seed=1) ``` The left child cluster mainly consists of baseball players: ``` display_single_tf_idf_cluster(left_child_athletes, map_index_to_word) ``` On the other hand, the right child cluster is a mix of players in association football, Austrailian rules football and ice hockey: ``` display_single_tf_idf_cluster(right_child_athletes, map_index_to_word) ``` Our hierarchy of clusters now looks like this: ``` Wikipedia + | +--------------------------+--------------------+ | | + + Athletes Non-athletes + | +-----------+--------+ | | | association football/ + Austrailian rules football/ baseball ice hockey ``` Should we keep subdividing the clusters? If so, which cluster should we subdivide? To answer this question, we again think about our application. Since we organize our directory by topics, it would be nice to have topics that are about as coarse as each other. For instance, if one cluster is about baseball, we expect some other clusters about football, basketball, volleyball, and so forth. That is, **we would like to achieve similar level of granularity for all clusters.** Notice that the right child cluster is more coarse than the left child cluster. The right cluster posseses a greater variety of topics than the left (ice hockey/association football/Austrialian football vs. baseball). So the right child cluster should be subdivided further to produce finer child clusters. 
Let's give the clusters aliases as well: ``` baseball = left_child_athletes ice_hockey_football = right_child_athletes ``` ### Cluster of ice hockey players and football players In answering the following quiz question, take a look at the topics represented in the top documents (those closest to the centroid), as well as the list of words with highest TF-IDF weights. Let us bipartition the cluster of ice hockey and football players. ``` left_child_ihs, right_child_ihs = bipartition(ice_hockey_football, maxiter=100, num_runs=6, seed=1) display_single_tf_idf_cluster(left_child_ihs, map_index_to_word) display_single_tf_idf_cluster(right_child_ihs, map_index_to_word) ``` ### 2. Which diagram best describes the hierarchy right after splitting the `ice_hockey_football` cluster? Refer to the quiz form for the diagrams. **Caution**. The granularity criteria is an imperfect heuristic and must be taken with a grain of salt. It takes a lot of manual intervention to obtain a good hierarchy of clusters. * **If a cluster is highly mixed, the top articles and words may not convey the full picture of the cluster.** Thus, we may be misled if we judge the purity of clusters solely by their top documents and words. * **Many interesting topics are hidden somewhere inside the clusters but do not appear in the visualization.** We may need to subdivide further to discover new topics. For instance, subdividing the `ice_hockey_football` cluster led to the appearance of runners and golfers. ### Cluster of non-athletes Now let us subdivide the cluster of non-athletes. ``` # Bipartition the cluster of non-athletes left_child_non_athletes, right_child_non_athletes = bipartition(non_athletes, maxiter=100, num_runs=6, seed=1) display_single_tf_idf_cluster(left_child_non_athletes, map_index_to_word) display_single_tf_idf_cluster(right_child_non_athletes, map_index_to_word) ``` Neither of the clusters show clear topics, apart from the genders. Let us divide them further. 
``` male_non_athletes = left_child_non_athletes female_non_athletes = right_child_non_athletes ``` ### 3. Let us bipartition the clusters `male_non_athletes` and `female_non_athletes`. Which diagram best describes the resulting hierarchy of clusters for the non-athletes? Refer to the quiz for the diagrams. **Note**. Use `maxiter=100, num_runs=6, seed=1` for consistency of output.
github_jupyter
``` import cranet from cranet import nn, optim from cranet.nn import functional as F from cranet.util import load_pickle from cranet.data import Dataset, DataLoader import numpy as np from matplotlib import pyplot as plt from sklearn.metrics import confusion_matrix import itertools import os print(cranet.__version__) class MnistDataset(Dataset): train_img = 'train-images-idx3-ubyte' train_lab = 'train-labels-idx1-ubyte' test_img = 't10k-images-idx3-ubyte' test_lab = 't10k-labels-idx1-ubyte' def __init__(self, root, mode, transform=None, transform_target=None): self.mode = mode self.transform = transform self.transform_target = transform_target self.images = [] self.labels = [] self._load_data(os.path.join(root, 'MNIST', 'raw')) def _load_data(self, data_dir): if self.mode == 'train': image_file = os.path.join(data_dir, self.train_img) label_file = os.path.join(data_dir, self.train_lab) elif self.mode == 'test': image_file = os.path.join(data_dir, self.test_img) label_file = os.path.join(data_dir, self.test_lab) else: raise RuntimeError('mode must be train or test') with open(image_file, 'rb') as f: f.read(4) # magic self.size = int.from_bytes(f.read(4), "big") r = int.from_bytes(f.read(4), "big") c = int.from_bytes(f.read(4), "big") for _ in range(self.size): mat = [] for i in range(r): mat.append([]) for j in range(c): mat[i].append(int.from_bytes(f.read(1), "big")) self.images.append(np.array(mat)) with open(label_file, 'rb') as f: f.read(4) # magic sz = int.from_bytes(f.read(4), "big") # size assert self.size == sz for _ in range(self.size): lab = np.array(int.from_bytes(f.read(1), "big")) self.labels.append(lab) def __len__(self): return self.size def __getitem__(self, idx): img = self.images[idx] lab = self.labels[idx] if self.transform is not None: img = self.transform(img) if self.transform_target is not None: lab = self.transform_target(lab) return img, lab def transform(img): return img.reshape(1, 28, 28) / 256 def transform_target(lab): return 
np.array([lab]) def trans(x): x = x.reshape(1, 28, 28) return cranet.as_tensor(x) def trans_lab(x): return cranet.as_tensor(x) train_ds = MnistDataset('data', 'train', trans, trans_lab) test_ds = MnistDataset('data', 'test', trans, trans_lab) train_ld = DataLoader(train_ds, 64) test_ld = DataLoader(test_ds, 1000) sample_image_batch, sample_label_batch = next(iter(train_ld)) sample_image = sample_image_batch.numpy()[0] sample_label = sample_label_batch.numpy()[0] plt.imshow(sample_image.reshape(28, 28)) print(sample_label) class Model(nn.Module): def __init__(self): super(Model, self).__init__() self.conv1 = nn.Conv2d(1, 16, 3, 1) self.conv2 = nn.Conv2d(16, 32, 3, 1) self.dropout1 = nn.Dropout(0.25) self.dropout2 = nn.Dropout(0.5) self.fc1 = nn.Linear(4608, 128) self.fc2 = nn.Linear(128, 10) def forward(self, x): x = self.conv1(x) x = F.relu(x) x = self.conv2(x) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.dropout1(x) x = F.flatten(x, 1) x = self.fc1(x) x = F.relu(x) x = self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x, dim=1) return output model = Model() print(model) optm = optim.Adam(model.parameters()) def train(model, train_loader, optimizer, epoch): model.train() for batch_idx, (inp, lab) in enumerate(train_loader): optimizer.zero_grad() out = model(inp) loss = F.nll_loss(out, lab) loss.backward() optimizer.step() loss_v = loss.item() if batch_idx % 100 == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * 64, len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), loss_v)) def test(model, loader, label=''): model.eval() loss = 0 correct = 0 with cranet.no_grad(): for inp, lab in loader: out = model(inp) loss += F.nll_loss(out, lab, reduction='sum').item() pre = out.numpy().argmax(axis=1) correct += (pre == lab.numpy()).sum().item() data_size = len(loader.dataset) loss /= data_size accu = correct / data_size print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( label, loss, correct, data_size, accu*100.)) return accu, loss train_loss = [] train_accu = [] test_loss = [] test_accu = [] for epoch in range(10): train(model, train_ld, optm, epoch) accu, loss = test(model, train_ld, 'Train') train_loss.append(loss) train_accu.append(accu) accu, loss = test(model, test_ld, 'Test') test_loss.append(loss) test_accu.append(accu) plt.figure() plt.title('loss') plt.plot(train_loss, label='train loss') plt.plot(test_loss, label='test loss') plt.legend() plt.show() plt.figure() plt.title("accuracy") plt.plot(train_accu, label='train_accu') plt.plot(test_accu, label='test_accu') plt.legend() plt.show() def eval(model, loader): pre_arr = [] tar_arr = [] for inp, tar in loader: out = model(inp) pre = out.numpy().argmax(axis=1) pre_arr.append(pre) tar_arr.append(tar.numpy()) pre_arr = np.concatenate(pre_arr) tar_arr = np.concatenate(tar_arr) return confusion_matrix(tar_arr, pre_arr) cm = eval(model, test_ld) def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=None, normalize=True): """ given a sklearn confusion matrix (cm), make a nice plot Arguments --------- cm: confusion matrix from sklearn.metrics.confusion_matrix target_names: given classification classes such as [0, 1, 2] the class names, for example: ['high', 'medium', 'low'] title: the text to display at the top of the matrix cmap: the gradient of the values displayed from matplotlib.pyplot.cm see http://matplotlib.org/examples/color/colormaps_reference.html plt.get_cmap('jet') or plt.cm.Blues normalize: If 
False, plot the raw numbers If True, plot the proportions Usage ----- plot_confusion_matrix(cm = cm, # confusion matrix created by # sklearn.metrics.confusion_matrix normalize = True, # show proportions target_names = y_labels_vals, # list of names of the classes title = best_estimator_name) # title of graph Citiation --------- http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html """ accuracy = np.trace(cm) / float(np.sum(cm)) misclass = 1 - accuracy if cmap is None: cmap = plt.get_cmap('Blues') plt.figure(figsize=(8, 6)) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() if target_names is not None: tick_marks = np.arange(len(target_names)) plt.xticks(tick_marks, target_names, rotation=45) plt.yticks(tick_marks, target_names) if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] thresh = cm.max() / 1.5 if normalize else cm.max() / 2 for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): if normalize: plt.text(j, i, "{:0.4f}".format(cm[i, j]), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") else: plt.text(j, i, "{:,}".format(cm[i, j]), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format( accuracy, misclass)) plt.show() plot_confusion_matrix(cm, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) ```
github_jupyter
# How to create gates from physical processes This tutorial shows how to use the `InterpolatedDenseOp` and `InterpolatedOpFactory` to create quick-to-evaluate operations by interpolating between the discrete points at which a more computationally-intensive process is performed. Often the computationally intensive process simulates the physics of a qubit gate, and would not practically work as a custom model operation because of the time required to evaluate it. In order to turn such physical processes into gates, you should implement a custom `PhysicalProcess` object and then use the `InterpolatedDenseOp` or `InterpolatedOpFactory` class to interpolate the values of the custom process on a set of pre-defined points. All the physics simulation is then done at the time of creating the interpolated operation (or factory), after which the object can be saved for later use. An `InterpolatedDenseOp` or `InterpolatedOpFactory` object can be evaluated at any parameter-space point within the ranges over which the initial interpolation was performed. All of this functionality is currently provided within the `pygsti.extras.interpygate` sub-package. This tutorial demonstrates how to set up a custom physical process and create an interpolated gate and factory object from it. We'll begin by some standard imports and by importing the `interpygate` sub-package. We get an MPI communicator if we can, as usually the physical simulation is performed using multiple processors. ``` import numpy as np from scipy.linalg import expm import pygsti import pygsti.extras.interpygate as interp try: from mpi4py import MPI comm = MPI.COMM_WORLD except ImportError: comm = None ``` ## Defining a physical process We create a physical process simulator by deriving from the `PhysicalProcess` class and implementing its `create_process_matrix` function. This is the computationally intensive method that generates a process matrix based on some set of parameters. 
Every physical process has a fixed number of parameters that define the space that will be interpolated over. The generated process matrix is expected to be in whatever basis the ultimate `Model` operations will be in - usually the Pauli-product basis specified by `"pp"` - and have a fixed shape. This shape, given by `process_shape` below, is almost always a square matrix of dimension $4^n$ where $n$ is the number of qubits. Specifying an auxiliary information shape (`aux_shape` below) and implementing the `create_aux_info` will allow additional (floating point) values that describe the process to be interpolated. Below we create a physical process that evolves a quantum state for some time (also a parameter) using a parameterized Lindbladian. Process tomography is used to construct a process matrix from the state evolution. The process has 6 parameters. ``` class ExampleProcess(interp.PhysicalProcess): def __init__(self): self.Hx = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, -1], [0, 0, 1, 0]], dtype='float') self.Hy = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0], [0, -1, 0, 0]], dtype='float') self.Hz = np.array([[0, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 0]], dtype='float') self.dephasing_generator = np.diag([0, -1, -1, 0]) self.decoherence_generator = np.diag([0, -1, -1, -1]) num_params = 6 # omega (0), phase (1), detuning (2), dephasing (3), decoherence (4), time (5) process_shape = (4, 4) super().__init__(num_params, process_shape, aux_shape=()) # our auxiliary information is a single float (None means no info) def advance(self, state, v): """ Evolves `state` in time """ state = np.array(state, dtype='complex') omega, phase, detuning, dephasing, decoherence, t = v #Here are all our parameters H = (omega * np.cos(phase) * self.Hx + omega * np.sin(phase) * self.Hy + detuning * self.Hz) L = dephasing * self.dephasing_generator + decoherence * self.decoherence_generator process = pygsti.tools.change_basis(expm((H + L) * t),'pp', 'col') state = 
interp.unvec(np.dot(process, interp.vec(np.outer(state, state.conj())))) return state def create_process_matrix(self, v, comm=None): def state_to_process_mxs(state): return self.advance(state, v) processes = interp.run_process_tomography(state_to_process_mxs, n_qubits=1, basis='pp', comm=comm) # returns None on all but root processor return np.array(processes) if (processes is not None) else None def create_aux_info(self, v, comm=None): omega, phase, detuning, dephasing, decoherence, t = v return t*omega # matches aux_shape=() above ``` We can call `create_process_matrix` to generate a process matrix at a given set of parameters. Below we compute the ideal "target" operation by choosing the parameters corresponding to no errors. ``` example_process = ExampleProcess() target_mx = example_process.create_process_matrix(np.array([1.0, 0.0, 0.0, 0.0, 0.0, np.pi/2]), comm=comm) target_op = pygsti.modelmembers.operations.StaticArbitraryOp(target_mx) print(target_op) ``` ### Making things more efficient We note that since our physical process is just an evolution in time, process matrices corresponding to different values of (just) the *time* parameter are especially easy to compute - a single evolution could compute, in one shot, the process matrices for an entire range of times. The `PhysicalProcess` class contains support for such "easy-to-compute" parameters via the `num_params_evaluated_as_group` argument to its constructor. This argument defaults to 0, and specifies how many of the parameters, starting with the last one and working backward, should be evaluated within the same function call. If `num_params_evaluated_as_group` is set higher than 0, the derived class must implement the `create_process_matrices` and (optionally) `create_aux_infos` methods instead of `create_process_matrix` and `create_aux_info`. 
These methods take an additional `grouped_v` argument that contains *arrays* of values for the final `num_params_evaluated_as_group` parameters, and are expected return arrays of process matrices with corresponding shape (i.e., there is a leading index in the retured values for each "grouped" parameter). We demonstrate this more complex usage below, where values for our final *time* argument are handled all at once. ``` class ExampleProcess_GroupTime(interp.PhysicalProcess): def __init__(self): self.Hx = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, -1], [0, 0, 1, 0]], dtype='float') self.Hy = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0], [0, -1, 0, 0]], dtype='float') self.Hz = np.array([[0, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 0]], dtype='float') self.dephasing_generator = np.diag([0, -1, -1, 0]) self.decoherence_generator = np.diag([0, -1, -1, -1]) num_params = 6 # omega (0), phase (1), detuning (2), dephasing (3), decoherence (4), time (5) process_shape = (4, 4) super().__init__(num_params, process_shape, aux_shape=(), # a single float num_params_evaluated_as_group=1) # time values can be evaluated all at once def advance(self, state, v, times): state = np.array(state, dtype='complex') omega, phase, detuning, dephasing, decoherence = v H = (omega * np.cos(phase) * self.Hx + omega * np.sin(phase) * self.Hy + detuning * self.Hz) L = dephasing * self.dephasing_generator + decoherence * self.decoherence_generator processes = [pygsti.tools.change_basis(expm((H + L) * t),'pp', 'col') for t in times] states = [interp.unvec(np.dot(process, interp.vec(np.outer(state, state.conj())))) for process in processes] return states def create_process_matrices(self, v, grouped_v, comm=None): assert(len(grouped_v) == 1) # we expect a single "grouped" parameter times = grouped_v[0] def state_to_process_mxs(state): return self.advance(state, v, times) processes = interp.run_process_tomography(state_to_process_mxs, n_qubits=1, basis='pp', time_dependent=True, 
comm=comm) return np.array(processes) if (processes is not None) else None def create_aux_infos(self, v, grouped_v, comm=None): omega, phase, detuning, dephasing, decoherence = v times = grouped_v[0] return np.array([t*omega for t in times], 'd') ``` We can similarly create a target operation from this physical process, but now we must specify a list of times. ``` example_process = ExampleProcess_GroupTime() target_mx = example_process.create_process_matrices(np.array([1.0, 0.0, 0.0, 0.0, 0.0]), [[np.pi/2]], comm=comm)[0] target_op = pygsti.modelmembers.operations.StaticArbitraryOp(target_mx) print(target_op) ``` ## Creating an interpolated operation (gate) Now that we've done the hard work of creating the physical process, it's easy to create an operator that evaluates the physical process on a grid of points and interpolates between them. The resulting `InterpolatedDenseOp` can be evaluated (i.e. `from_vector` can be invoked) at any point within the range being interpolated. The parameters of the resulting `InterpolatedDenseOp` are the same as those of the underlying `PhysicalProcess`, and ranges are specified using either a *(min, max, num_points)* tuple or an array of values. Below we use only 2 points in most directions so it doesn't take too long to run. Creating the object also requires a target operation, for which we use `target_op` as defined above. This is required because internally it is the *error generator* rather than the process matrix itself that is interpolated. The target operation can be parameterized by any contiguous subset of the physical process's parameters, starting with the first one. In our example, `target_op` is a `StaticArbitraryOp` and so takes 0 parameters. This should be interpreted as the "first 0 parameters of our example process". 
``` param_ranges = ([(0.9, 1.1, 2), # omega (-.1, .1, 2), # phase (-.1, .1, 2), # detuning (0, 0.1, 2), # dephasing (0, 0.1, 2), # decoherence np.linspace(np.pi / 2, np.pi / 2 + .5, 10) # time ]) interp_op = interp.InterpolatedDenseOp.create_by_interpolating_physical_process( target_op, example_process, param_ranges, comm=comm) ``` The created `interp_op` can then be evaluated (quickly) at points in parameter space. ``` interp_op.from_vector([1.1, 0.01, 0.01, 0.055, 0.055, 1.59]) interp_op.to_dense() ``` The auxiliary information can be retrieved from any interpolated operator via its `aux_info` attribute. ``` interp_op.aux_info ``` ## Creating an interpolated operation factory Operation factories in pyGSTi take "arguments" provided by in-circuit labels and produce operations. For example, the value of the rotation angle might be specified over a continuous interval by the algorithm being run, rather than being noise parameter that is fit to data when a model is optimized (e.g. in GST). The `InterpolatedOpFactory` object interpolates a physical process, similar to `InterpolatedDenseOp`, but allows the user to divide the parameters of the physical process into *factory arguments* and *operation parameters*. The first group is meant to range over different intended (target) operations, and the latter group is meant to be unkonwn quantities determined by fitting a model to data. To create an `InterpolatedOpFactory`, we must first create a custom factory class that creates the target operation corresponding to a given set of arguments. As in the case of `InterpolatedDenseOp`, the target operations can be parameterized by any contiguous subset of the factory's parameters, starting with the first one. We choose to make a factory that takes as arguments the *time* and *omega* physical process parameters. 
``` class TargetOpFactory(pygsti.modelmembers.operations.OpFactory): def __init__(self): self.process = ExampleProcess_GroupTime() pygsti.modelmembers.operations.OpFactory.__init__(self, state_space=1, evotype="densitymx") def create_object(self, args=None, sslbls=None): assert(sslbls is None) assert(len(args) == 2) # t (time), omega t, omega = args mx = self.process.create_process_matrices(np.array([omega, 0.0, 0.0, 0.0, 0.0]), [[t]], comm=None)[0] #mx = self.process.create_process_matrix(np.array([omega, 0.0, 0.0, 0.0, 0.0, t]), comm=None) # Use this if using our initial ExampleProcess above. return pygsti.modelmembers.operations.StaticArbitraryOp(mx) ``` We can then create an `InterpolatedOpFactory` similarly to how we created an `InterpolatedDenseOp` except now we separately specify factory argument and optimization parameter ranges, and specify which of the underlying physical process's parameters are turned into factory arguments (`arg_indices` below). ``` arg_ranges = [np.linspace(np.pi / 2, np.pi / 2 + .5, 10), # time (0.9, 1.1, 2) # omega ] param_ranges = [(-.1, .1, 2), # phase (-.1, .1, 2), # detuning (0, 0.1, 2), # dephasing (0, 0.1, 2) # decoherence ] arg_indices = [5, 0] #indices for time and omega within ExampleProcess_GroupTime's parameters opfactory = interp.InterpolatedOpFactory.create_by_interpolating_physical_process( TargetOpFactory(), example_process, arg_ranges, param_ranges, arg_indices, comm=comm) ``` Note that the factory has only 4 parameters (whereas the physical process and the interpolated operator we made above have 6). This is because 2 of the physical process parameters have been turned into factory arguments. 
``` print(opfactory.num_params) print(interp_op.num_params) print(example_process.num_params) ``` We can use the factory to create an `InterpolatedDenseOp` operation at a given *time* and *omega* pair: ``` opfactory.from_vector(np.array([0.01, 0.01, 0.055, 0.055])) op = opfactory.create_op((1.59, 1.1)) op.to_dense() op.aux_info ```
github_jupyter
# Description * This is the one used # Setting variables ``` import os ## What is the base directory? baseDir = '/home/sam/notebooks/hemp_microbiome/data/ITS_OTUs/' ## What directory do you want to work in and keep all subsequent files in? workDir = os.path.join(baseDir, "OTU_binning") ## Where is your final QC'ed and ITSxed sequence fasta file? #qcFinal = os.path.join(workDir, 'QC', 'ITSx', 'main.rename.fasta') qcFinal = os.path.join(baseDir, 'QC', 'finalQC.fasta') ## What database do you want to use for UCHIME chimera filtering? UC_db = '/home/sam/databases/UNITE/uchime_reference_dataset_28.06.2017/ITS1_ITS2_datasets/uchime_reference_dataset_ITS1_28.06.2017.fasta' ## What database do you want to base your taxonomy on when using USEARCH SINTAX? #US_dbDir = '~/databases/UNITE/utax_reference_dataset_01.12.2017.fasta' #US_dbSeqs = os.path.join(US_dbDir, 'refdb.udb') US_dbDir = '/home/sam/databases/UNITE/sintaxDB' US_dbSeqs = os.path.join(US_dbDir, 'utax_reference_dataset_01.12.2017.fasta') nprocs = 10 ``` # Init ``` import sys import glob import pandas as pd from Bio import SeqIO import multiprocessing import uuid %load_ext rpy2.ipython %%R library(ggplot2) library(dplyr) library(tidyr) library(gridExtra) if not os.path.isdir(workDir): print ("Working directory does not exist. Making it now") os.makedirs(workDir) %cd $workDir ``` ### Symlinking qc'ed seq file ``` !ln -s $qcFinal . 
qcFinal = os.path.split(qcFinal)[1] !ls -thlc $qcFinal ``` # Full Derepication sequences ``` FullDerepFile = os.path.splitext(qcFinal)[0] + '.unique.fasta' FullDerepUC = os.path.splitext(qcFinal)[0] + '.unique.uc' !vsearch --threads $nprocs \ --derep_fulllength $qcFinal \ --minuniquesize 2 \ --sizein \ --sizeout \ --fasta_width 0 \ --uc $FullDerepUC \ --output $FullDerepFile ret = !grep -c ">" $FullDerepFile print ('Number of unique sequences: {}'.format(ret[0])) !head -n 10 $FullDerepFile !head -n 10 $FullDerepUC ``` # Keep just ITS1 region This removes mainly part of the 5.8S ``` def ITSx_func(filename): workername = str(multiprocessing.Process()) workername = str(uuid.uuid3(uuid.NAMESPACE_DNS, workername)) ITS1_file = workername + '.ITS1.partial_output.fasta' noDet_file = workername + '.no_detections.partial_output.fasta' subfastafile = workername + '.sub.fasta' ITSx_output = workername + '.ITSx_output' ITSx_output_ITS1 = ITSx_output + '.ITS1.fasta' ITSx_output_noDet = ITSx_output + '_no_detections.fasta' remove_output = ITSx_output + '*' !touch $ITS1_file !touch $noDet_file fasta_sequences = SeqIO.parse(open(filename),'fasta') for fasta in fasta_sequences: SeqIO.write(fasta, subfastafile, "fasta") !ITSx -i $subfastafile \ -o $ITSx_output \ --save_regions ITS1 \ --cpu 1 \ --preserve T \ --reset F \ -t all \ --heuristics \ --table F \ --silent T \ --detailed_results F \ --graphical F !cat $ITSx_output_ITS1 >> $ITS1_file !cat $ITSx_output_noDet >> $noDet_file !rm $remove_output !rm $subfastafile ITSxdir = os.path.join(workDir, "ITSx_output") if not os.path.isdir(ITSxdir): print ("ITSx directory does not exist. 
Making it now") os.makedirs(ITSxdir) %cd $ITSxdir FullDerepFile = os.path.join(workDir, FullDerepFile) !fasta-splitter.pl \ --n-parts $nprocs \ $FullDerepFile partialfastalist = [f for f in os.listdir(ITSxdir) if 'finalQC.unique.part-' in f] pool = multiprocessing.Pool(processes=nprocs) if __name__ == '__main__': # calculate the chunk size as an integer pool.map(ITSx_func, partialfastalist) pool.close() pool.terminate() pool.join() !cat *.no_detections.partial_output.fasta > no_ITS_detections.fasta !cat *.ITS1.partial_output.fasta > ITS1_sequences.fasta #!rm *.no_detections.partial_output.fasta #!rm *.ITS1.partial_output.fasta !grep -c ">" ITS1_sequences.fasta !head ITS1_sequences.fasta ``` # Add N's to the end of the sequence for help with clustering ``` %cd $workDir ITSx_final = os.path.join(ITSxdir, 'ITS1_sequences.fasta') !python2 /home/sam/repo/ITS_trim.py \ -F $ITSx_final \ -d $workDir \ -f fasta ``` # Precluster sequences at 98% ``` trimmedUniqITSx = os.path.join(workDir, 'globtrim.fasta') qcFinalUniqPreC = os.path.splitext(qcFinal)[0] + '.preclustered.fasta' PreCucfile = os.path.splitext(qcFinal)[0] + '.preclustered.uc' !vsearch --threads $nprocs\ --cluster_size $trimmedUniqITSx \ --id 0.98 \ --strand plus \ --sizein \ --sizeout \ --fasta_width 0 \ --uc $PreCucfile \ --centroids $qcFinalUniqPreC ret = !grep -c ">" $qcFinalUniqPreC print ('Number of 98% clustered sequences: {}'.format(ret[0])) !tail -n 10 $qcFinalUniqPreC ``` # Filtering chimeras ### De novo chimera filtering ``` noChimera1 = os.path.splitext(qcFinal)[0] + '.denovo.nonchimeras.fasta' !vsearch --threads $nprocs \ --uchime_denovo $qcFinalUniqPreC \ --sizein \ --sizeout \ --fasta_width 0 \ --nonchimeras $noChimera1 ret = !grep -c ">" $noChimera1 print ('Number of sequences after de novo chimera detection: {}'.format(ret[0])) ``` ### Chimera filtering based on references ``` noChimera2 = os.path.splitext(qcFinal)[0] + '.ref.nonchimeras.fasta' !vsearch --threads $nprocs \ --uchime_ref $noChimera1 
\ --db $UC_db \ --sizein \ --sizeout \ --fasta_width 0 \ --nonchimeras $noChimera2 ret = !grep -c ">" $noChimera2 print ('Number of sequences after reference based chimera detection: {}'.format(ret[0])) !head $noChimera2 ``` # Readmapping and Reinflating Reinflating from full dereplication ``` print(trimmedUniqITSx) print(PreCucfile) print(noChimera2) readmap1 = 'final.nonchimeras.derep.fasta' !perl /home/sam/repo/map.pl \ $trimmedUniqITSx \ $PreCucfile \ $noChimera2 > $readmap1 ret = !grep -c ">" $readmap1 print ('Number of unique non-chimeric sequences: {}'.format(ret[0])) !head $readmap1 ``` Reinflating from per sample dereplication. Do this in the in Reinflation.ipynb ``` readmap2 = 'final.nochimeras.fasta' ret = !grep -c ">" $readmap2 print ('Number of total non-chimeric sequences: {}'.format(ret[0])) !head $readmap2 ``` # Cluster OTUs at 97% ``` otu_file = 'final.otu.fasta' otu_ucfile = 'final.otu.uc' otutabout_file = 'final.otutab.txt' !vsearch --threads $nprocs \ --cluster_size $readmap2 \ --id 0.97 \ --strand plus \ --fasta_width 0 \ --uc $otu_ucfile \ --relabel OTU_ \ --centroids $otu_file \ --otutabout $otutabout_file ret = !grep -c ">" $otu_file print ('Number of final OTUs: {}'.format(ret[0])) !head $otu_file !tail $otu_file # Remove N's otu_file_noN = 'otu_noN.fasta' !sed -e 's/n//g' $otu_file > $otu_file_noN !head $otu_file_noN !head $otutabout_file ``` # Adding taxonomy ### Taxonomy with vsearch sintax ``` !head $US_dbSeqs taxfile = "otu_tax_Vsintax_unite.txt" !/home/sam/repo/vsearch-2.8.0/bin/vsearch --sintax $otu_file_noN \ --db $US_dbSeqs \ --tabbedout $taxfile \ --sintax_cutoff 0.8 \ --threads $nprocs !head $taxfile !grep -c "d:Fungi" $taxfile !grep -c "OTU" $taxfile # make sure no existing biome file otu_biom_file = 'otu_table.biom' otuTax = os.path.join('sintax_output', 'sintax_table_unite_97_otu_reformatted.txt') otu_biom_tax_file = 'otu_table_wtax.biom' if os.path.isfile(otu_biom_tax_file): os.unlink(otu_biom_tax_file) # bio add-metadata cmd 
= """biom add-metadata -i {} -o {} \ --observation-metadata-fp {} \ --sc-separated taxonomy \ --float-fields consensus \ --int-fields numhits \ --observation-header OTUID,taxonomy,consensus,numhits""" cmd = cmd.format(otu_biom_file, otu_biom_tax_file, otuTax) !$cmd ``` # Final files ``` print ('OTU table: {}'.format(otutabout_file)) print ('OTU file: {}'.format(otu_file_noN)) print ('Taxonomy file: {}'.format(taxfile)) # Number of OTUs !grep -c "OTU_" $otutabout_file !grep -c ">" $otu_file_noN !grep -c "OTU_" $taxfile # total number of samples sys.stdout.write('Total number of samples: ') !head -n 1 $otutabout_file | perl -pe 's/[^\t]+\t//; s/\t/\n/g' | wc -l ``` # Playground This is where code testing happens ``` !head -n 2 $qcFinalUniq ```
github_jupyter
# <center> <img src="https://gitlab.com/ibm/skills-network/courses/placeholder101/-/raw/master/labs/module%201/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" /> </center> # **Space X Falcon 9 First Stage Landing Prediction** ## Lab 2: Data wrangling Estimated time needed: **60** minutes In this lab, we will perform some Exploratory Data Analysis (EDA) to find some patterns in the data and determine what would be the label for training supervised models. In the data set, there are several different cases where the booster did not land successfully. Sometimes a landing was attempted but failed due to an accident; for example, <code>True Ocean</code> means the mission outcome was successfully landed to a specific region of the ocean while <code>False Ocean</code> means the mission outcome was unsuccessfully landed to a specific region of the ocean. <code>True RTLS</code> means the mission outcome was successfully landed to a ground pad <code>False RTLS</code> means the mission outcome was unsuccessfully landed to a ground pad.<code>True ASDS</code> means the mission outcome was successfully landed on a drone ship <code>False ASDS</code> means the mission outcome was unsuccessfully landed on a drone ship. In this lab we will mainly convert those outcomes into Training Labels with `1` means the booster successfully landed `0` means it was unsuccessful. 
Falcon 9 first stage will land successfully ![](https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DS0701EN-SkillsNetwork/api/Images/landing\_1.gif) Several examples of an unsuccessful landing are shown here: ![](https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DS0701EN-SkillsNetwork/api/Images/crash.gif) ## Objectives Perform exploratory Data Analysis and determine Training Labels * Exploratory Data Analysis * Determine Training Labels *** ## Import Libraries and Define Auxiliary Functions We will import the following libraries. ``` # Pandas is a software library written for the Python programming language for data manipulation and analysis. import pandas as pd #NumPy is a library for the Python programming language, adding support for large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays import numpy as np ``` ### Data Analysis Load Space X dataset, from last section. 
``` df=pd.read_csv("https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/datasets/dataset_part_1.csv") ``` Identify and calculate the percentage of the missing values in each attribute ``` df.isnull().sum()/df.count()*100 ``` Identify which columns are numerical and categorical: ``` df.dtypes ``` ### TASK 1: Calculate the number of launches on each site The data contains several Space X launch facilities: <a href='https://en.wikipedia.org/wiki/List_of_Cape_Canaveral_and_Merritt_Island_launch_sites?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01'>Cape Canaveral Space</a> Launch Complex 40 <b>VAFB SLC 4E </b> , Vandenberg Air Force Base Space Launch Complex 4E <b>(SLC-4E)</b>, Kennedy Space Center Launch Complex 39A <b>KSC LC 39A </b>.The location of each Launch Is placed in the column <code>LaunchSite</code> Next, let's see the number of launches for each site. Use the method <code>value_counts()</code> on the column <code>LaunchSite</code> to determine the number of launches on each site: ``` # Apply value_counts() on column LaunchSite df["LaunchSite"].value_counts() ``` Each launch aims to an dedicated orbit, and here are some common orbit types: * <b>LEO</b>: Low Earth orbit (LEO)is an Earth-centred orbit with an altitude of 2,000 km (1,200 mi) or less (approximately one-third of the radius of Earth),\[1] or with at least 11.25 periods per day (an orbital period of 128 minutes or less) and an eccentricity less than 0.25.\[2] Most of the manmade objects in outer space are in LEO <a href='https://en.wikipedia.org/wiki/Low_Earth_orbit?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01'>\[1]</a>. 
* <b>VLEO</b>: Very Low Earth Orbits (VLEO) can be defined as the orbits with a mean altitude below 450 km. Operating in these orbits can provide a number of benefits to Earth observation spacecraft as the spacecraft operates closer to the observation<a href='https://www.researchgate.net/publication/271499606_Very_Low_Earth_Orbit_mission_concepts_for_Earth_Observation_Benefits_and_challenges?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01'>\[2]</a>. * <b>GTO</b> A geosynchronous orbit is a high Earth orbit that allows satellites to match Earth's rotation. Located at 22,236 miles (35,786 kilometers) above Earth's equator, this position is a valuable spot for monitoring weather, communications and surveillance. Because the satellite orbits at the same speed that the Earth is turning, the satellite seems to stay in place over a single longitude, though it may drift north to south,” NASA wrote on its Earth Observatory website <a href="https://www.space.com/29222-geosynchronous-orbit.html?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01" >\[3] </a>. * <b>SSO (or SO)</b>: It is a Sun-synchronous orbit also called a heliosynchronous orbit is a nearly polar orbit around a planet, in which the satellite passes over any given point of the planet's surface at the same local mean solar time <a href="https://en.wikipedia.org/wiki/Sun-synchronous_orbit?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01">\[4] <a>. 
* <b>ES-L1 </b>:At the Lagrange points the gravitational forces of the two large bodies cancel out in such a way that a small object placed in orbit there is in equilibrium relative to the center of mass of the large bodies. L1 is one such point between the sun and the earth <a href="https://en.wikipedia.org/wiki/Lagrange_point?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01#L1_point">\[5]</a> . * <b>HEO</b> A highly elliptical orbit, is an elliptic orbit with high eccentricity, usually referring to one around Earth <a href="https://en.wikipedia.org/wiki/Highly_elliptical_orbit?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01">\[6]</a>. * <b> ISS </b> A modular space station (habitable artificial satellite) in low Earth orbit. It is a multinational collaborative project between five participating space agencies: NASA (United States), Roscosmos (Russia), JAXA (Japan), ESA (Europe), and CSA (Canada)<a href="https://en.wikipedia.org/wiki/International_Space_Station?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01"> \[7] </a> * <b> MEO </b> Geocentric orbits ranging in altitude from 2,000 km (1,200 mi) to just below geosynchronous orbit at 35,786 kilometers (22,236 mi). Also known as an intermediate circular orbit. 
These are "most commonly at 20,200 kilometers (12,600 mi), or 20,650 kilometers (12,830 mi), with an orbital period of 12 hours <a href="https://en.wikipedia.org/wiki/List_of_orbits?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01"> \[8] </a> * <b> HEO </b> Geocentric orbits above the altitude of geosynchronous orbit (35,786 km or 22,236 mi) <a href="https://en.wikipedia.org/wiki/List_of_orbits?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01"> \[9] </a> * <b> GEO </b> It is a circular geosynchronous orbit 35,786 kilometres (22,236 miles) above Earth's equator and following the direction of Earth's rotation <a href="https://en.wikipedia.org/wiki/Geostationary_orbit?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01"> \[10] </a> * <b> PO </b> It is one type of satellites in which a satellite passes above or nearly above both poles of the body being orbited (usually a planet such as the Earth <a href="https://en.wikipedia.org/wiki/Polar_orbit?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01"> \[11] </a> some are shown in the following plot: ![](https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DS0701EN-SkillsNetwork/api/Images/Orbits.png) ### TASK 2: Calculate the number and occurrence of each orbit Use the method <code>.value_counts()</code> to determine the number and occurrence of each orbit in the column <code>Orbit</code> ``` # Apply value_counts on Orbit column df["Orbit"].value_counts("Orbit") 
### TASK 3: Calculate the number and occurrence of mission outcome per orbit type
If the value is zero, the first stage did not land successfully; one means the first stage landed Successfully ``` df['Class']= landing_class df[['Class']].head(8) df.head(5) ``` We can use the following line of code to determine the success rate: ``` df["Class"].mean() ``` We can now export it to a CSV for the next section,but to make the answers consistent, in the next lab we will provide data in a pre-selected date range. ``` df.to_csv("dataset_part_2.csv", index=False) ``` <code>df.to_csv("dataset_part\_2.csv", index=False)</code> ## Authors <a href="https://www.linkedin.com/in/joseph-s-50398b136/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01">Joseph Santarcangelo</a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD. <a href="https://www.linkedin.com/in/nayefaboutayoun/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01">Nayef Abou Tayoun</a> is a Data Scientist at IBM and pursuing a Master of Management in Artificial intelligence degree at Queen's University. ## Change Log | Date (YYYY-MM-DD) | Version | Changed By | Change Description | | ----------------- | ------- | ---------- | ----------------------- | | 2020-09-20 | 1.0 | Joseph | Modified Multiple Areas | | 2020-11-04 | 1.1. | Nayef | updating the input data | | 2021-05-026 | 1.1. | Joseph | updating the input data | Copyright © 2021 IBM Corporation. All rights reserved.
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Estimators <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/beta/guide/estimators"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/estimators/index.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/estimators/index.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/guide/estimators/index.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This document introduces `tf.estimator`—a high-level TensorFlow API that greatly simplifies machine learning programming. Estimators encapsulate the following actions: * training * evaluation * prediction * export for serving You may either use the pre-made Estimators we provide or write your own custom Estimators. All Estimators—whether pre-made or custom—are classes based on the `tf.estimator.Estimator` class. 
For a quick example try [Estimator tutorials](../../tutorials/estimators/linear.ipynb). For an overview of the API design, see the [white paper](https://arxiv.org/abs/1708.02637). Note: TensorFlow also includes a deprecated `Estimator` class at `tf.contrib.learn.Estimator`, which you should not use. ## Estimator advantages Estimators provide the following benefits: * You can run Estimator-based models on a local host or on a distributed multi-server environment without changing your model. Furthermore, you can run Estimator-based models on CPUs, GPUs, or TPUs without recoding your model. * Estimators simplify sharing implementations between model developers. * You can develop a state of the art model with high-level intuitive code. In short, it is generally much easier to create models with Estimators than with the low-level TensorFlow APIs. * Estimators are themselves built on `tf.keras.layers`, which simplifies customization. * Estimators build the graph for you. * Estimators provide a safe distributed training loop that controls how and when to: * build the graph * initialize variables * load data * handle exceptions * create checkpoint files and recover from failures * save summaries for TensorBoard When writing an application with Estimators, you must separate the data input pipeline from the model. This separation simplifies experiments with different data sets. ## Pre-made Estimators Pre-made Estimators enable you to work at a much higher conceptual level than the base TensorFlow APIs. You no longer have to worry about creating the computational graph or sessions since Estimators handle all the "plumbing" for you. Furthermore, pre-made Estimators let you experiment with different model architectures by making only minimal code changes. `tf.estimator.DNNClassifier`, for example, is a pre-made Estimator class that trains classification models based on dense, feed-forward neural networks. 
### Structure of a pre-made Estimators program A TensorFlow program relying on a pre-made Estimator typically consists of the following four steps: #### 1. Write one or more dataset importing functions. For example, you might create one function to import the training set and another function to import the test set. Each dataset importing function must return two objects: * a dictionary in which the keys are feature names and the values are Tensors (or SparseTensors) containing the corresponding feature data * a Tensor containing one or more labels For example, the following code illustrates the basic skeleton for an input function: ``` def input_fn(dataset): ... # manipulate dataset, extracting the feature dict and the label return feature_dict, label ``` See [data guide](../../guide/data.md) for details. #### 2. Define the feature columns. Each `tf.feature_column` identifies a feature name, its type, and any input pre-processing. For example, the following snippet creates three feature columns that hold integer or floating-point data. The first two feature columns simply identify the feature's name and type. The third feature column also specifies a lambda the program will invoke to scale the raw data: ``` # Define three numeric feature columns. population = tf.feature_column.numeric_column('population') crime_rate = tf.feature_column.numeric_column('crime_rate') median_education = tf.feature_column.numeric_column( 'median_education', normalizer_fn=lambda x: x - global_education_mean) ``` For further information, it is recommended to check this [tutorial](https://www.tensorflow.org/beta/tutorials/keras/feature_columns). #### 3. Instantiate the relevant pre-made Estimator. For example, here's a sample instantiation of a pre-made Estimator named `LinearClassifier`: ``` # Instantiate an estimator, passing the feature columns. 
estimator = tf.estimator.LinearClassifier( feature_columns=[population, crime_rate, median_education]) ``` For further information, it is recommended to check this [tutorial](https://www.tensorflow.org/beta/tutorials/estimators/linear). #### 4. Call a training, evaluation, or inference method. For example, all Estimators provide a `train` method, which trains a model. ``` # `input_fn` is the function created in Step 1 estimator.train(input_fn=my_training_set, steps=2000) ``` You can see an example of this below. ### Benefits of pre-made Estimators Pre-made Estimators encode best practices, providing the following benefits: * Best practices for determining where different parts of the computational graph should run, implementing strategies on a single machine or on a cluster. * Best practices for event (summary) writing and universally useful summaries. If you don't use pre-made Estimators, you must implement the preceding features yourself. ## Custom Estimators The heart of every Estimator—whether pre-made or custom—is its *model function*, which is a method that builds graphs for training, evaluation, and prediction. When you are using a pre-made Estimator, someone else has already implemented the model function. When relying on a custom Estimator, you must write the model function yourself. ## Recommended workflow 1. Assuming a suitable pre-made Estimator exists, use it to build your first model and use its results to establish a baseline. 2. Build and test your overall pipeline, including the integrity and reliability of your data with this pre-made Estimator. 3. If suitable alternative pre-made Estimators are available, run experiments to determine which pre-made Estimator produces the best results. 4. Possibly, further improve your model by building your own custom Estimator. ``` from __future__ import absolute_import, division, print_function, unicode_literals try: # %tensorflow_version only exists in Colab. 
%tensorflow_version 2.x except Exception: pass import tensorflow as tf import tensorflow_datasets as tfds tfds.disable_progress_bar() ``` ## Create an Estimator from a Keras model You can convert existing Keras models to Estimators with `tf.keras.estimator.model_to_estimator`. Doing so enables your Keras model to access Estimator's strengths, such as distributed training. Instantiate a Keras MobileNet V2 model and compile the model with the optimizer, loss, and metrics to train with: ``` keras_mobilenet_v2 = tf.keras.applications.MobileNetV2( input_shape=(160, 160, 3), include_top=False) estimator_model = tf.keras.Sequential([ keras_mobilenet_v2, tf.keras.layers.Flatten(), tf.keras.layers.Dense(1, activation='softmax') ]) # Compile the model estimator_model.compile( optimizer='adam', loss='binary_crossentropy', metric='accuracy') ``` Create an `Estimator` from the compiled Keras model. The initial model state of the Keras model is preserved in the created `Estimator`: ``` est_mobilenet_v2 = tf.keras.estimator.model_to_estimator(keras_model=estimator_model) ``` Treat the derived `Estimator` as you would with any other `Estimator`. ``` IMG_SIZE = 160 # All images will be resized to 160x160 def preprocess(image, label): image = tf.cast(image, tf.float32) image = (image/127.5) - 1 image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE)) return image, label def train_input_fn(batch_size): data = tfds.load('cats_vs_dogs', as_supervised=True) train_data = data['train'] train_data = train_data.map(preprocess).shuffle(500).batch(batch_size) return train_data ``` To train, call Estimator's train function: ``` est_mobilenet_v2.train(input_fn=lambda: train_input_fn(32), steps=500) ``` Similarly, to evaluate, call the Estimator's evaluate function: ``` est_mobilenet_v2.evaluate(input_fn=lambda: train_input_fn(32), steps=10) ``` For more details, please refer to the documentation for `tf.keras.estimator.model_to_estimator`.
github_jupyter
# PIO Programming Resources: * [RP2040 Datasheet Section 3.4](https://datasheets.raspberrypi.com/rp2040/rp2040-datasheet.pdf) Life with David ## Setting multiple pins from Python ``` %serialconnect from machine import Pin import time from rp2 import PIO, StateMachine, asm_pio # decorator to translate to PIO machine code @asm_pio( out_init = (rp2.PIO.OUT_LOW,) * 8, # initialize 8 consecutive pins out_shiftdir = rp2.PIO.SHIFT_RIGHT) # output lsb bits first def parallel_prog(): pull(block) # pull data from Tx FIFO. Wait for data out(pins, 8) # send 8 bits from OSR to pins # create an instance of the state machine sm = StateMachine(0, parallel_prog, freq=1000000, out_base=Pin(0)) # start the state machine sm.active(1) for n in range(256): sm.put(n) time.sleep(0.01) ``` ## Writing Stepper Steps to pins ``` %serialconnect from machine import Pin import time from rp2 import PIO, StateMachine, asm_pio # decorator to translate to PIO machine code @asm_pio( out_init = (rp2.PIO.OUT_LOW,) * 4, # initialize 8 consecutive pins out_shiftdir = rp2.PIO.SHIFT_RIGHT) # output lsb bits first def stepper_step(): pull(block) # pull data from Tx FIFO. 
Wait for data out(pins, 4) # send 4 bits from OSR to pins
For the first step, we create a counter that accepts an integer from the FIFO buffer, then blinks an led a fixed number of times. ``` %serialconnect from machine import Pin import time import rp2 from rp2 import PIO, StateMachine, asm_pio @asm_pio(out_init = rp2.PIO.OUT_LOW) def count_blink(): pull(block) # wait for data on Tx FIFO set(pins, 1) set(x, osr) # create an instance of the state machine sm0 = StateMachine(0, count_blink, freq=2000, out_base=Pin(25)) # start the state machine sm0.active(1) sm0.put(1) time.sleep(2) sm0.active(0) %serialconnect from machine import Pin import time from rp2 import PIO, StateMachine, asm_pio # decorator to translate to PIO machine code @asm_pio( out_init = (rp2.PIO.OUT_LOW,) * 4, # initialize 8 consecutive pins out_shiftdir = rp2.PIO.SHIFT_RIGHT) # output lsb bits first def stepper_step(): pull(block) # pull data from Tx FIFO. Wait for data out(pins, 4) # send 8 bits from OSR to pins # create an instance of the state machine sm = StateMachine(0, stepper_step, freq=1000000, out_base=Pin(0)) # start the state machine sm.active(1) step_sequence = [8, 12, 4, 6, 2, 3, 1, 9] for n in range(1000): sm.put(step_sequence[n % len(step_sequence)]) time.sleep(0.01) ```
github_jupyter
# Задание 1.1 - Метод К-ближайших соседей (K-neariest neighbor classifier) В первом задании вы реализуете один из простейших алгоритмов машинного обучения - классификатор на основе метода K-ближайших соседей. Мы применим его к задачам - бинарной классификации (то есть, только двум классам) - многоклассовой классификации (то есть, нескольким классам) Так как методу необходим гиперпараметр (hyperparameter) - количество соседей, мы выберем его на основе кросс-валидации (cross-validation). Наша основная задача - научиться пользоваться numpy и представлять вычисления в векторном виде, а также ознакомиться с основными метриками, важными для задачи классификации. Перед выполнением задания: - запустите файл `download_data.sh`, чтобы скачать данные, которые мы будем использовать для тренировки - установите все необходимые библиотеки, запустив `pip install -r requirements.txt` (если раньше не работали с `pip`, вам сюда - https://pip.pypa.io/en/stable/quickstart/) Если вы раньше не работали с numpy, вам может помочь tutorial. Например этот: http://cs231n.github.io/python-numpy-tutorial/ ``` import numpy as np import matplotlib.pyplot as plt from tqdm import tqdm_notebook %matplotlib inline %load_ext autoreload %autoreload 2 from dataset import load_svhn from knn import KNN from metrics import binary_classification_metrics, multiclass_accuracy ``` # Загрузим и визуализируем данные В задании уже дана функция `load_svhn`, загружающая данные с диска. Она возвращает данные для тренировки и для тестирования как numpy arrays. Мы будем использовать цифры из датасета Street View House Numbers (SVHN, http://ufldl.stanford.edu/housenumbers/), чтобы решать задачу хоть сколько-нибудь сложнее MNIST. 
``` train_X, train_y, test_X, test_y = load_svhn("data", max_train=1000, max_test=100) samples_per_class = 5 # Number of samples per class to visualize plot_index = 1 for example_index in range(samples_per_class): for class_index in range(10): plt.subplot(5, 10, plot_index) image = train_X[train_y == class_index][example_index] plt.imshow(image.astype(np.uint8)) plt.axis('off') plot_index += 1 ``` # Сначала реализуем KNN для бинарной классификации В качестве задачи бинарной классификации мы натренируем модель, которая будет отличать цифру 0 от цифры 9. ``` binary_train_mask = (train_y == 0) | (train_y == 9) binary_train_X = train_X[binary_train_mask] binary_train_y = train_y[binary_train_mask] == 0 binary_test_mask = (test_y == 0) | (test_y == 9) binary_test_X = test_X[binary_test_mask] binary_test_y = test_y[binary_test_mask] == 0 binary_train_X = binary_train_X.reshape(binary_train_X.shape[0], -1) binary_test_X = binary_test_X.reshape(binary_test_X.shape[0], -1) knn_classifier = KNN(k=1) knn_classifier.fit(binary_train_X, binary_train_y) ``` ## Пришло время написать код! Последовательно реализуйте функции `compute_distances_two_loops`, `compute_distances_one_loop` и `compute_distances_no_loops` в файле `knn.py`. Эти функции строят массив расстояний между всеми векторами в тестовом наборе и в тренировочном наборе. В результате они должны построить массив размера `(num_test, num_train)`, где координата `[i][j]` соотвествует расстоянию между i-м вектором в test (`test[i]`) и j-м вектором в train (`train[j]`). **Обратите внимание** Для простоты реализации мы будем использовать в качестве расстояния меру L1 (ее еще называют [Manhattan distance](https://ru.wikipedia.org/wiki/%D0%A0%D0%B0%D1%81%D1%81%D1%82%D0%BE%D1%8F%D0%BD%D0%B8%D0%B5_%D0%B3%D0%BE%D1%80%D0%BE%D0%B4%D1%81%D0%BA%D0%B8%D1%85_%D0%BA%D0%B2%D0%B0%D1%80%D1%82%D0%B0%D0%BB%D0%BE%D0%B2)). 
![image.png](attachment:image.png) ``` dists = knn_classifier.compute_distances_two_loops(binary_test_X) assert np.isclose(dists[0, 10], np.sum(np.abs(binary_test_X[0] - binary_train_X[10]))) dists = knn_classifier.compute_distances_one_loop(binary_test_X) assert np.isclose(dists[0, 10], np.sum(np.abs(binary_test_X[0] - binary_train_X[10]))) dists = knn_classifier.compute_distances_no_loops(binary_test_X) assert np.isclose(dists[0, 10], np.sum(np.abs(binary_test_X[0] - binary_train_X[10]))) # Lets look at the performance difference %timeit knn_classifier.compute_distances_two_loops(binary_test_X) %timeit knn_classifier.compute_distances_one_loop(binary_test_X) %timeit knn_classifier.compute_distances_no_loops(binary_test_X) prediction = knn_classifier.predict(binary_test_X) precision, recall, f1, accuracy = binary_classification_metrics(prediction, binary_test_y) print("KNN with k = %s" % knn_classifier.k) print("Accuracy: %4.2f, Precision: %4.2f, Recall: %4.2f, F1: %4.2f" % (accuracy, precision, recall, f1)) knn_classifier_3 = KNN(k=3) knn_classifier_3.fit(binary_train_X, binary_train_y) prediction = knn_classifier_3.predict(binary_test_X) precision, recall, f1, accuracy = binary_classification_metrics(prediction, binary_test_y) print("KNN with k = %s" % knn_classifier_3.k) print("Accuracy: %4.2f, Precision: %4.2f, Recall: %4.2f, F1: %4.2f" % (accuracy, precision, recall, f1)) ``` # Кросс-валидация (cross-validation) Попробуем найти лучшее значение параметра k для алгоритма KNN! Для этого мы воспользуемся k-fold cross-validation (https://en.wikipedia.org/wiki/Cross-validation_(statistics)#k-fold_cross-validation). Мы разделим тренировочные данные на 5 фолдов (folds), и по очереди будем использовать каждый из них в качестве проверочных данных (validation data), а остальные -- в качестве тренировочных (training data). В качестве финальной оценки эффективности k мы усредним значения F1 score на всех фолдах. 
После этого мы просто выберем значение k с лучшим значением метрики. *Бонус*: есть ли другие варианты агрегировать F1 score по всем фолдам? Напишите плюсы и минусы в клетке ниже. ``` num_folds = 5 train_folds_X = [] train_folds_y = [] num_in_fold = binary_train_X.shape[0] // num_folds train_folds_X = np.array([binary_train_X[num_in_fold * i : num_in_fold * (i + 1)] for i in range(num_folds)]) train_folds_y = np.array([binary_train_y[num_in_fold * i : num_in_fold * (i + 1)] for i in range(num_folds)]) k_choices = [1, 2, 3, 5, 8, 10, 15, 20, 25, 50] k_to_f1 = {} # dict mapping k values to mean F1 scores (int -> float) for k in k_choices: f1_ = 0 for i in range(num_folds): train_X_ = np.vstack(train_folds_X[[j for j in range(num_folds) if j != i]]) train_y_ = np.hstack(train_folds_y[[j for j in range(num_folds) if j != i]]) test_X_ = train_folds_X[i] test_y_ = train_folds_y[i] knn_classifier_ = KNN(k) knn_classifier_.fit(train_X_, train_y_) prediction = knn_classifier_.predict(test_X_) f1_ += binary_classification_metrics(prediction, test_y_)[2] f1_ /= num_folds k_to_f1[k] = f1_ for key, val in sorted(k_to_f1.items(), key=lambda kv: kv[1], reverse=True): print('k = %d, f1 = %f' % (key, val)) ``` ### Проверим, как хорошо работает лучшее значение k на тестовых данных (test data) ``` best_k = 3 best_knn_classifier = KNN(k=best_k) best_knn_classifier.fit(binary_train_X, binary_train_y) prediction = best_knn_classifier.predict(binary_test_X) precision, recall, f1, accuracy = binary_classification_metrics(prediction, binary_test_y) print("Best KNN with k = %s" % best_k) print("Accuracy: %4.2f, Precision: %4.2f, Recall: %4.2f, F1: %4.2f" % (accuracy, precision, recall, f1)) ``` # Многоклассовая классификация (multi-class classification) Переходим к следующему этапу - классификации на каждую цифру. 
``` train_X = train_X.reshape(train_X.shape[0], -1) test_X = test_X.reshape(test_X.shape[0], -1) knn_classifier = KNN(k=1) knn_classifier.fit(train_X, train_y) predict = knn_classifier.predict(test_X, 1) accuracy = multiclass_accuracy(predict, test_y) print("Accuracy: %4.2f" % accuracy) ``` Снова кросс-валидация. Теперь нашей основной метрикой стала точность (accuracy), и ее мы тоже будем усреднять по всем фолдам. ``` num_folds = 5 train_folds_X = [] train_folds_y = [] num_in_fold = train_X.shape[0] // num_folds train_folds_X = np.array([train_X[num_in_fold * i : num_in_fold * (i + 1)] for i in range(num_folds)]) train_folds_y = np.array([train_y[num_in_fold * i : num_in_fold * (i + 1)] for i in range(num_folds)]) k_choices = [1, 2, 3, 5, 8, 10, 15, 20, 25, 50] k_to_accuracy = {} for k in tqdm_notebook(k_choices): accuracy_ = 0 for i in range(num_folds): train_X_ = np.vstack(train_folds_X[[j for j in range(num_folds) if j != i]]) train_y_ = np.hstack(train_folds_y[[j for j in range(num_folds) if j != i]]) test_X_ = train_folds_X[i] test_y_ = train_folds_y[i] knn_classifier_ = KNN(k) knn_classifier_.fit(train_X_, train_y_) prediction = knn_classifier_.predict(test_X_) accuracy_ += multiclass_accuracy(prediction, test_y_) accuracy_ /= num_folds k_to_accuracy[k] = accuracy_ for key, val in sorted(k_to_accuracy.items(), key=lambda kv: kv[1], reverse=True): print('k = %d, f1 = %f' % (key, val)) ``` ### Финальный тест - классификация на 10 классов на тестовой выборке (test data) Если все реализовано правильно, вы должны увидеть точность не менее **0.2**. ``` best_k = 1 best_knn_classifier = KNN(k=best_k) best_knn_classifier.fit(train_X, train_y) prediction = best_knn_classifier.predict(test_X) accuracy = multiclass_accuracy(prediction, test_y) print("Accuracy: %4.2f" % accuracy) ```
github_jupyter
# Plot3D Python Tutorial In this tutorial you will learn about the Plot3D NASA Standard and how to use NASA's Plot3D python library to read, write, find connectivity, split blocks, and find periodicity. ## About Plot3D Plot3D is a standard for defining a simple structured grid. This standard was developed in the 1980's [User Manual](https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=&cad=rja&uact=8&ved=2ahUKEwiLm_2Q8JjzAhUCB50JHTfFCM4QFnoECAMQAQ&url=https%3A%2F%2Fwww.grc.nasa.gov%2Fwww%2Fwinddocs%2Ftowne%2Fplotc%2Fplotc_p3d.html&usg=AOvVaw0iKPGjnhjiQA9AFZcFhkEE) To understand the plot3D standard, we must first start with the definition of an array. The figure below shows a box with 6 faces and 8 verticies represented by black dots. Now if you were to discretize a geometry into many of these boxes all connected to each other. You would have many x,y,z points. To organize things we arrange all this points into an array of x's, y'z, z's and we label them as capital X, Y, Z. ![Array](https://raw.githubusercontent.com/nasa/Plot3D_utilities/main/colab/images/array_definition.png) So what does this mean? how is this helpful. It depends on how to arrange the array. If you have a single dimensional array of x like x<sub>1</sub>,x<sub>2</sub>,x<sub>3</sub>, ..., x<sub>n</sub>. This isn't particularly useful because it's hard to split it in to faces - just try it with the simple box above. So what we do instead is represent x as a 3-dimensional array instead of a single dimension. For example x[0,0,0] or x<sub>0,0,0</sub> = some value. The image below shows how we arrange the x values of each of the vertices. ![Array](https://raw.githubusercontent.com/nasa/Plot3D_utilities/main/colab/images/array_definition2.png) With this new arrangement of x into a 3-dimensional array, x[i,j,k]. We can easily extract a face. For example the front face is defined by x[0,0,0] x[1,0,0], x[0,1,0], x[1,1,0]. Do you notice anything interesting from this array? The third index "k" is 0. 
**To define a face you simply set either i, j, or k to be a constant value.** For outer faces you would use KMIN or KMAX. Depending on the programming language the indicies may start at 1 or 0. In python we start at 0 and end at n-1. [More info on Python Arrays](https://www.w3schools.com/python/python_arrays.asp) # Environment Setup This step is relatively short. Run the code below to install plot3d ``` !pip install plot3d ``` # Reading and Writing a mesh file In simple words, a mesh is a collection of boxes arranged to form a shape. In this example we will explore reading a mesh in ASCII and saving it into a binary format. ## Step 1: Load the functions from the library ``` from plot3d import read_plot3D, write_plot3D,Block import pickle import pprint ``` ## Step 2: Download and read the mesh file The code below reads the plot3D into a variable called blocks. "blocks" is a variable representing an array of plot3D blocks. You can think of a block as a 6 sided cube but inside the cube you have multiple smaller cubes. Cubes can be stretched and wrapped such that two ends are connected. This is called an o-mesh. We will plot this in a later step. 
``` !wget https://nasa-public-data.s3.amazonaws.com/plot3d_utilities/PahtCascade-ASCII.xyz blocks = read_plot3D('PahtCascade-ASCII.xyz',binary=False) # Reading plot3D write_plot3D('PahtCascade-binary.xyz',blocks,binary=True) # Writing plot3D to binary file ``` ### Plotting the Mesh The function below shows how we can plot an outline of the mesh ``` import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import axes3d import numpy as np def plot_block_outline(block:Block,ax:axes3d): IMAX,JMAX,KMAX = block.X.shape X = block.X Y = block.Y Z = block.Z for i in [0,IMAX-1]: # Plots curves at constant I bounds for j in [0,JMAX-1]: x = X[i,j,:] y = Y[i,j,:] z = Z[i,j,:] ax.plot3D(x,y,z) for j in [0,JMAX-1]: # Plots curves at constant I bounds for k in [0,KMAX-1]: x = X[:,j,k] y = Y[:,j,k] z = Z[:,j,k] ax.plot3D(x,y,z) for i in [0,IMAX-1]: # Plots curves at constant I bounds for k in [0,KMAX-1]: x = X[i,:,k] y = Y[i,:,k] z = Z[i,:,k] ax.plot3D(x,y,z) ``` Try playing with this code to see if you can plot one block at a time. Also try changing the rotation ``` fig = plt.figure() ax = fig.add_subplot(111, projection='3d') plot_block_outline(blocks[0],ax) plot_block_outline(blocks[1],ax) ax.view_init(30,45) print("we have " + str(len(blocks)) + " blocks") ``` # Finding Connectivity Connectivity tells the solver how information should transfer between Faces. For example, lets look at the example above. We have 2 blocks `blocks[0]` and `blocks[1]` These are connected via the pressure side of the blade. One of the features of the plot3d library is the ability to find connected faces between blocks as well as all the connected points. ## Finding Connected Faces The function `connectivity` takes in a parameter for blocks. The output is a list of face matches between blocks along with the outer faces which are faces that do not have a connection to another block. 
``` from plot3d import connectivity, connectivity_fast face_matches, outer_faces_formatted = connectivity_fast(blocks) # Running this code will take a while depending on how fast google colab is on a given day. # or you can call `face_matches, outer_faces_formatted = connectivity_fast(blocks)` which works too but might be slower # Saving the results with open('connectivity.pickle','wb') as f: pickle.dump({"face_matches":face_matches, "outer_faces":outer_faces_formatted},f) ``` ### Representing connected faces `face_matches` contains matching face diagonals. The simpliest way to represent a face match is to use the following standard: Block[0]-Face lower corner represented as [IMIN,JMIN,KMIN] and Face Upper corner represented as [IMAX,JMAX,KMAX] **is matched to** Block[1]-Face lower corner represented as [IMIN,JMIN,KMIN] and Face Upper corner represented as [IMAX,JMAX,KMAX]. ![Face matching](https://github.com/nasa/Plot3D_utilities/blob/main/docs/_static/block_face_matching.png?raw=true) `face_matches[0]['match']` is a dataframe of connected points. This is available to you in case you want to use it. 
You can see that below when we print out the dictionary ``` with open('connectivity.pickle','rb') as f: data = pickle.load(f) face_matches = data['face_matches'] outer_faces = data['outer_faces'] face_matches pp = pprint.PrettyPrinter(indent=4) pp.pprint(face_matches[0]) ``` ## Plotting Connected Faces ``` from matplotlib import cm import numpy as np def select_multi_dimensional(T:np.ndarray,dim1:tuple,dim2:tuple, dim3:tuple): """Takes a block (T) and selects X,Y,Z from the block given a face's dimensions theres really no good way to do this in python Args: T (np.ndarray): arbitrary array so say a full matrix containing X dim1 (tuple): 20,50 this selects X in the i direction from i=20 to 50 dim2 (tuple): 40,60 this selects X in the j direction from j=40 to 60 dim3 (tuple): 10,20 this selects X in the k direction from k=10 to 20 Returns: np.ndarray: returns X or Y or Z given some range of I,J,K """ if dim1[0] == dim1[1]: return T[ dim1[0], dim2[0]:dim2[1]+1, dim3[0]:dim3[1]+1 ] if dim2[0] == dim2[1]: return T[ dim1[0]:dim1[1]+1, dim2[0], dim3[0]:dim3[1]+1 ] if dim3[0] == dim3[1]: return T[ dim1[0]:dim1[1]+1, dim2[0]:dim2[1]+1, dim3[0] ] return T[dim1[0]:dim1[1], dim2[0]:dim2[1], dim3[0]:dim3[1]] def plot_face(face_matches,blocks): for fm in face_matches: block_index1 = fm['block1']['block_index'] I1 = [fm['block1']['IMIN'],fm['block1']['IMAX']] # [ IMIN IMAX ] J1 = [fm['block1']['JMIN'],fm['block1']['JMAX']] # [ JMIN JMAX ] K1 = [fm['block1']['KMIN'],fm['block1']['KMAX']] # [ KMIN KMAX ] block_index2 = fm['block2']['block_index'] I2 = [fm['block2']['IMIN'],fm['block2']['IMAX']] # [ IMIN IMAX ] J2 = [fm['block2']['JMIN'],fm['block2']['JMAX']] # [ JMIN JMAX ] K2 = [fm['block2']['KMIN'],fm['block2']['KMAX']] # [ KMIN KMAX ] X1 = select_multi_dimensional(blocks[block_index1].X, (I1[0],I1[1]), (J1[0],J1[1]), (K1[0],K1[1])) Y1 = select_multi_dimensional(blocks[block_index1].Y, (I1[0],I1[1]), (J1[0],J1[1]), (K1[0],K1[1])) Z1 = 
select_multi_dimensional(blocks[block_index1].Z, (I1[0],I1[1]), (J1[0],J1[1]), (K1[0],K1[1])) X2 = select_multi_dimensional(blocks[block_index2].X, (I2[0],I2[1]), (J2[0],J2[1]), (K2[0],K2[1])) Y2 = select_multi_dimensional(blocks[block_index2].Y, (I2[0],I2[1]), (J2[0],J2[1]), (K2[0],K2[1])) Z2 = select_multi_dimensional(blocks[block_index2].Z, (I2[0],I2[1]), (J2[0],J2[1]), (K2[0],K2[1])) # return X1 surf1 = ax.plot_surface(X1, Y1, Z1, cmap=cm.coolwarm, linewidth=0, antialiased=True) surf2 = ax.plot_surface(X2, Y2, Z2, cmap=cm.coolwarm, linewidth=0, antialiased=True) fig = plt.figure() ax = fig.add_subplot(111, projection='3d') plot_block_outline(blocks[0],ax) plot_block_outline(blocks[1],ax) plot_face(face_matches,blocks) ax.view_init(30,45) ``` ## Periodic Faces Perodicity is a subset of connectivity. **It relates to how Faces of blocks are connected when rotated by an angle.** You can think of an apple pie and slice it up into equal slices. Say you put in a filling into one of those slices. The filling will splurge over to the other slices. This is kind of what perodicity means. Data goes into one slice, it is transfered into other slices. You can also think of the game Portal. In turbomachinery, simulating an entire turbine wheel requires many points, it is easier to break it into pie slices and apply periodicity/connectivity to the sides. ``` from plot3d import periodicity, Face # This step may take a while. It is looking for periodicity for all surfaces that have constant "k" periodic_surfaces, outer_faces_to_keep,periodic_faces,outer_faces = periodicity(blocks,outer_faces,face_matches,periodic_direction='k',rotation_axis='x',nblades=55) with open('connectivity-periodic.pickle','wb') as f: [m.pop('match',None) for m in face_matches] # Remove the dataframe pickle.dump({"face_matches":face_matches, "outer_faces":outer_faces_to_keep, "periodic_surfaces":periodic_surfaces},f) ``` ## Plotting Periodic Faces This function outputs 4 things 1. 
periodic_surfaces - this is list of all the surfaces/faces that match when rotated by an angle formatted as a dictionary. 2. outer_faces_to_keep - These are the list of outer faces that are not periodic formatted as a dictionary. 3. periodic_faces - is a list of `Face` objects that are connected to each other organized as a list of tuples: [Face1, Face2] where Face 1 will contain the block number and the diagonals [IMIN,JMIN,KMIN,IMAX,JMAX,KMAX]. Example: blk: 1 [168,0,0,268,100,0]. 4. outer_faces - This is a list of outer faces save as a list of Faces Try running the codes below to see how each of the variables is structured ``` periodic_faces outer_faces ``` Code below shows how to plot all the periodic surfaces. Matplotlib is not easy to use in colab environment. You can't really zoom or rotate. It is encouraged for you to use paraview to plot. There is a tutorial and examples in the docs https://nasa.github.io/Plot3D_utilities/docs/build/html/index.html ``` fig = plt.figure() ax = fig.add_subplot(111, projection='3d') plot_block_outline(blocks[0],ax) plot_block_outline(blocks[1],ax) plot_face(periodic_surfaces[0:1],blocks) ax.view_init(30,45) fig = plt.figure() ax = fig.add_subplot(111, projection='3d') plot_block_outline(blocks[0],ax) plot_block_outline(blocks[1],ax) plot_face(periodic_surfaces[2:],blocks) ax.view_init(30,45) ``` # Advance Topics ## Splitting the Blocks for Computational Efficiency When solving a plot3D block it is often useful to break it into smaller blocks of a certain size. This will improve the speed by splitting the blocks and allowing each CPU core to solve a part of the mesh. BUT we also need to maintain something called multi-grid. ### Multi-grid concept Mulit-grid is a concept where you take a gird say 4x4 and you solve it as a 2x2 then interpolate the results on to the larger grid. The idea of solving a coarse grid and translating the solution onto a finer grid allows you to reach a converged solution much faster. 
So that's the benefits, what are the requirements? To achieve multi-grid you need to have something called GCD - greatest common divisor. What does this even mean? If your grid/block is 257 x 101 x 33 in size this means that the largest divisor is 4. This means we can reduce the mesh about 2 times 257x101x33 (Fine) 129x51x17 (Coarse) (Coarse)/2 65x26x9 (Coarser) (fine)/4 Try to example below to find GCD of a grid ``` from math import gcd grid_size = [257,101,33] grid_size = [g-1 for g in grid_size] temp = gcd(grid_size[0],gcd(grid_size[1],grid_size[2])) print("Greatest common divisor is " + str(temp)) ``` ## Block split example ``` from plot3d import split_blocks, Direction blocks = read_plot3D('PahtCascade-ASCII.xyz',binary=False) # Reading plot3D blocks_split = split_blocks(blocks,300000, direction=Direction.i) write_plot3D('PahtCascade-Split.xyz',blocks_split,binary=True) ``` ### Connectivity using split blocks ``` face_matches, outer_faces_formatted = connectivity(blocks_split) with open('connectivity-block-split.pickle','wb') as f: pickle.dump({"face_matches":face_matches, "outer_faces":outer_faces_formatted},f) print("There are {0} face matches".format(len(face_matches))) print("There are {0} outer faces".format(len(outer_faces_formatted))) # Displaying face matches face_matches # Displaying outer_faces outer_faces_formatted ``` Plotting the Connectivity example ``` fig = plt.figure() ax = fig.add_subplot(111, projection='3d') [plot_block_outline(block,ax) for block in blocks_split] plot_face(face_matches,blocks_split) ax.view_init(30,45) ``` ### Periodicity using split blocks Periodicity using split blocks is a bit interesting. There's lots of partial face matches etc. 
Again, it's really helpful to visualize using paraview than python, but I'll try to plot it for you anyways ``` with open('connectivity-block-split.pickle','rb') as f: data = pickle.load(f) face_matches = data['face_matches'] outer_faces = data['outer_faces'] blocks_split = read_plot3D('PahtCascade-Split.xyz', binary = True, big_endian=True) periodic_surfaces, outer_faces_to_keep,periodic_faces,outer_faces = find_periodicity(blocks_split,outer_faces,periodic_direction='k',rotation_axis='x',nblades=55) with open('connectivity-block-split_v02.pickle','wb') as f: [m.pop('match',None) for m in face_matches] # Remove the dataframe pickle.dump({"face_matches":face_matches, "outer_faces":outer_faces_to_keep, "periodic_surfaces":periodic_surfaces},f) # Append periodic surfaces to face_matches face_matches.extend(periodic_surfaces) # Displaying periodic surfaces periodic_surfaces # Displaying face matches face_matches ```
github_jupyter
<small><small><i> All the IPython Notebooks in **[Python Natural Language Processing](https://github.com/milaan9/Python_Python_Natural_Language_Processing)** lecture series by **[Dr. Milaan Parmar](https://www.linkedin.com/in/milaanparmar/)** are available @ **[GitHub](https://github.com/milaan9)** </i></small></small> <a href="https://colab.research.google.com/github/milaan9/Python_Python_Natural_Language_Processing/blob/main/06_Named_Entity_Recognition.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # 06 Named Entity Recognition (NER) (also known as entity identification, entity chunking and entity extraction) is a subtask of information extraction that seeks to locate and classify named entities mentioned in unstructured text into pre-defined categories such as person names, organizations, locations, medical codes spaCy has an **'ner'** pipeline component that identifies token spans fitting a predetermined set of named entities. These are available as the **`ents`** property of a **`Doc`** object. https://spacy.io/usage/training#ner ``` # Perform standard imports import spacy nlp = spacy.load('en_core_web_sm') # Write a function to display basic entity info: def show_ents(doc): if doc.ents: for ent in doc.ents: print(ent.text+' - '+ent.label_+' - '+str(spacy.explain(ent.label_))) else: print('No named entities found.') doc = nlp(u'Hi, everyone welcome to Milaan Parmar CS tutorial on NPL') show_ents(doc) doc = nlp(u'May I go to England or Canada, next month to see the virus report?') show_ents(doc) ``` ## Entity annotations `Doc.ents` are token spans with their own set of annotations. 
<table> <tr><td>`ent.text`</td><td>The original entity text</td></tr> <tr><td>`ent.label`</td><td>The entity type's hash value</td></tr> <tr><td>`ent.label_`</td><td>The entity type's string description</td></tr> <tr><td>`ent.start`</td><td>The token span's *start* index position in the Doc</td></tr> <tr><td>`ent.end`</td><td>The token span's *stop* index position in the Doc</td></tr> <tr><td>`ent.start_char`</td><td>The entity text's *start* index position in the Doc</td></tr> <tr><td>`ent.end_char`</td><td>The entity text's *stop* index position in the Doc</td></tr> </table> ``` doc = nlp(u'Can I please borrow 500 dollars from Blake to buy some Microsoft stock?') for ent in doc.ents: print(ent.text, ent.start, ent.end, ent.start_char, ent.end_char, ent.label_) ``` ## NER Tags Tags are accessible through the `.label_` property of an entity. <table> <tr><th>TYPE</th><th>DESCRIPTION</th><th>EXAMPLE</th></tr> <tr><td>`PERSON`</td><td>People, including fictional.</td><td>*Fred Flintstone*</td></tr> <tr><td>`NORP`</td><td>Nationalities or religious or political groups.</td><td>*The Republican Party*</td></tr> <tr><td>`FAC`</td><td>Buildings, airports, highways, bridges, etc.</td><td>*Logan International Airport, The Golden Gate*</td></tr> <tr><td>`ORG`</td><td>Companies, agencies, institutions, etc.</td><td>*Microsoft, FBI, MIT*</td></tr> <tr><td>`GPE`</td><td>Countries, cities, states.</td><td>*France, UAR, Chicago, Idaho*</td></tr> <tr><td>`LOC`</td><td>Non-GPE locations, mountain ranges, bodies of water.</td><td>*Europe, Nile River, Midwest*</td></tr> <tr><td>`PRODUCT`</td><td>Objects, vehicles, foods, etc. (Not services.)</td><td>*Formula 1*</td></tr> <tr><td>`EVENT`</td><td>Named hurricanes, battles, wars, sports events, etc.</td><td>*Olympic Games*</td></tr> <tr><td>`WORK_OF_ART`</td><td>Titles of books, songs, etc.</td><td>*The Mona Lisa*</td></tr> <tr><td>`LAW`</td><td>Named documents made into laws.</td><td>*Roe v. 
Wade*</td></tr> <tr><td>`LANGUAGE`</td><td>Any named language.</td><td>*English*</td></tr> <tr><td>`DATE`</td><td>Absolute or relative dates or periods.</td><td>*20 July 1969*</td></tr> <tr><td>`TIME`</td><td>Times smaller than a day.</td><td>*Four hours*</td></tr> <tr><td>`PERCENT`</td><td>Percentage, including "%".</td><td>*Eighty percent*</td></tr> <tr><td>`MONEY`</td><td>Monetary values, including unit.</td><td>*Twenty Cents*</td></tr> <tr><td>`QUANTITY`</td><td>Measurements, as of weight or distance.</td><td>*Several kilometers, 55kg*</td></tr> <tr><td>`ORDINAL`</td><td>"first", "second", etc.</td><td>*9th, Ninth*</td></tr> <tr><td>`CARDINAL`</td><td>Numerals that do not fall under another type.</td><td>*2, Two, Fifty-two*</td></tr> </table> ___ ### **Adding a Named Entity to a Span** Normally we would have spaCy build a library of named entities by training it on several samples of text.<br>In this case, we only want to add one value: ``` doc = nlp(u'Arthur to build a U.K. factory for $6 million') show_ents(doc) ``` Add **Milaan** as **PERSON** ``` from spacy.tokens import Span # Get the hash value of the ORG entity label ORG = doc.vocab.strings[u'PERSON'] # Create a Span for the new entity new_ent = Span(doc, 0, 1, label=ORG) # Add the entity to the existing Doc object doc.ents = list(doc.ents) + [new_ent] ``` <font color=green>In the code above, the arguments passed to `Span()` are:</font> - `doc` - the name of the Doc object - `0` - the *start* index position of the span - `1` - the *stop* index position (exclusive) - `label=PERSON` - the label assigned to our entity ``` show_ents(doc) ``` ___ ## Adding Named Entities to All Matching Spans What if we want to tag *all* occurrences of "WORDS"? WE NEED TO use the PhraseMatcher to identify a series of spans in the Doc: ``` doc = nlp(u'Our company plans to introduce a new vacuum cleaner. 
' u'If successful, the vacuum cleaner will be our first product.') show_ents(doc) # Import PhraseMatcher and create a matcher object: from spacy.matcher import PhraseMatcher matcher = PhraseMatcher(nlp.vocab) # Create the desired phrase patterns: phrase_list = ['vacuum cleaner', 'vacuum-cleaner'] phrase_patterns = [nlp(text) for text in phrase_list] # Apply the patterns to our matcher object: matcher.add('newproduct', None, *phrase_patterns) # Apply the matcher to our Doc object: matches = matcher(doc) # See what matches occur: matches # Here we create Spans from each match, and create named entities from them: from spacy.tokens import Span PROD = doc.vocab.strings[u'PRODUCT'] new_ents = [Span(doc, match[1],match[2],label=PROD) for match in matches] doc.ents = list(doc.ents) + new_ents show_ents(doc) ``` ___ ## Counting Entities While spaCy may not have a built-in tool for counting entities, we can pass a conditional statement into a list comprehension: ``` doc = nlp(u'Originally priced at $29.50, the sweater was marked down to five dollars.') show_ents(doc) len([ent for ent in doc.ents if ent.label_=='MONEY']) spacy.__version__ doc = nlp(u'Originally priced at $29.50,\nthe sweater was marked down to five dollars.') show_ents(doc) ``` ### <font color=blue>However, there is a simple fix that can be added to the nlp pipeline:</font> https://spacy.io/usage/processing-pipelines ``` # Quick function to remove ents formed on whitespace: def remove_whitespace_entities(doc): doc.ents = [e for e in doc.ents if not e.text.isspace()] return doc # Insert this into the pipeline AFTER the ner component: nlp.add_pipe(remove_whitespace_entities, after='ner') # Rerun nlp on the text above, and show ents: doc = nlp(u'Originally priced at $29.50,\nthe sweater was marked down to five dollars.') show_ents(doc) ``` For more on **Named Entity Recognition** visit https://spacy.io/usage/linguistic-features#101 ___ ## Noun Chunks `Doc.noun_chunks` are *base noun phrases*: token spans that 
include the noun and words describing the noun. Noun chunks cannot be nested, cannot overlap, and do not involve prepositional phrases or relative clauses.<br> Where `Doc.ents` rely on the **ner** pipeline component, `Doc.noun_chunks` are provided by the **parser**. ### `noun_chunks` components: <table> <tr><td>`.text`</td><td>The original noun chunk text.</td></tr> <tr><td>`.root.text`</td><td>The original text of the word connecting the noun chunk to the rest of the parse.</td></tr> <tr><td>`.root.dep_`</td><td>Dependency relation connecting the root to its head.</td></tr> <tr><td>`.root.head.text`</td><td>The text of the root token's head.</td></tr> </table> ``` doc = nlp(u"Autonomous cars shift insurance liability toward manufacturers.") for chunk in doc.noun_chunks: print(chunk.text+' - '+chunk.root.text+' - '+chunk.root.dep_+' - '+chunk.root.head.text) ``` ### `Doc.noun_chunks` is a generator function Previously we mentioned that `Doc` objects do not retain a list of sentences, but they're available through the `Doc.sents` generator.<br>It's the same with `Doc.noun_chunks` - lists can be created if needed: ``` len(doc.noun_chunks) len(list(doc.noun_chunks)) ``` For more on **noun_chunks** visit https://spacy.io/usage/linguistic-features#noun-chunks
github_jupyter
Pix2Pix dataset: downloading and preprocessing ========================================= The pix2pix dataset has been released as part of the paper "Image-to-Image Translation with Conditional Adversarial Networks" [arxiv](https://arxiv.org/abs/1611.07004) and it contains five different datasets: cityscapes, edges2handbags, edges2shoes, facades, maps. All these datasets are a pixel to pixle conversion from a domain (e.g. satellite images) to another (map images). Downloading the dataset ------------------------------------ As first step we need to download one of the datasets. The following code has been readapted from [this repository](https://github.com/affinelayer/pix2pix-tensorflow) and allows downloading the dataset and extracting the compressed files into local folders. Here, we only download the smallest dataset, but you can uncomment one of the other url if you want to download a different one. The size of each dataset is reported on the right. ``` from urllib2 import urlopen import tarfile import tempfile #dataset_name = "cityscapes" #111 MB #dataset_name = "edges2handbags" #8.0 GB #dataset_name = "edges2shoes" #2.3 GB dataset_name = "facades" #31 MB #dataset_name = "maps" #253 MB url = "https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/" + dataset_name + ".tar.gz" #31 MB with tempfile.TemporaryFile() as tmp: print("Downloading dataset from: " + url) print("Done!") shutil.copyfileobj(urlopen(url), tmp) print("Extracting compressed file...") tmp.seek(0) tar = tarfile.open(fileobj=tmp) tar.extractall() tar.close() print("Done!") ``` Preprocessing in Numpy ------------------------------------ Once the dataset has been downloaded it is possible to preprocess it in order to have valid files that we can use in Tensorflow. This snippet generate Numpy files ready to load in memory via the `numpy.load()` method. 
``` import os from PIL import Image import numpy as np #dataset_name = "cityscapes" #dataset_name = "edges2handbags" #dataset_name = "edges2shoes" dataset_name = "facades" #dataset_name = "maps" np_path = "./" + dataset_name header = "./" + dataset_name + "/train/" dataset_features_list = list() dataset_labels_list = list() img_counter = 0 for filename in os.listdir(header): #print(filename) img_array = np.asarray(Image.open(header + filename)) width = img_array.shape[1] half_width = int(img_array.shape[1] / 2.0) first_img_array = img_array[:,0:half_width,:] second_img_array = img_array[:,half_width:width,:] dataset_features_list.append(first_img_array) dataset_labels_list.append(0) dataset_features_list.append(second_img_array) dataset_labels_list.append(1) img_counter += 1 dataset_features_matrix = np.array(dataset_features_list) dataset_labels_array = np.array(dataset_labels_list) print("Processed " + str(img_counter) + " images...") print("Dataset (features) shape: " + str(dataset_features_matrix.shape)) print("Dataset (labels) shape: " + str(dataset_labels_array.shape)) print("Saving numpy files...") np.save(np_path + "_features", dataset_features_matrix) np.save(np_path + "_labels", dataset_labels_array) print("Done!") ``` Loading the Numpy dataset ---------------------------------------- Once the numpy files have been stored it is possible to load them in memory and access the images. The following code load the numpy dataset and shows a random image with its label. 
``` from matplotlib import pyplot as plt %matplotlib inline #dataset_name = "cityscapes" #dataset_name = "edges2handbags" #dataset_name = "edges2shoes" dataset_name = "facades" #dataset_name = "maps" np_path = "./" + dataset_name features_matrix = np.load(np_path + "_features.npy") labels_array = np.load(np_path + "_labels.npy") dataset_size = features_matrix.shape[0] random_int = np.random.randint(dataset_size) image = features_matrix[random_int,:,:,:] label = labels_array[random_int] print("Label: " + str(label)) plt.imshow(image) plt.show() ```
github_jupyter
# Autograd: automatic differentiation The ``autograd`` package provides automatic differentiation for all operations on Tensors. It is a define-by-run framework, which means that your backprop is defined by how your code is run, and that every single iteration can be different. ``` #importing pytorch import torch ``` Create a tensor: ``` # Create a 2x2 tensor with gradient-accumulation capabilities x = torch.tensor([[1, 2], [3, 4]], requires_grad=True, dtype=torch.float32) print(x) ``` Do an operation on the tensor: ``` # Deduct 2 from all elements y = x - 2 print(y) ``` ``y`` was created as a result of an operation, so it has a ``grad_fn``. ``` print(y.grad_fn) # What's happening here? print(x.grad_fn) # Let's go further y.grad_fn y.grad_fn.next_functions[0][0] y.grad_fn.next_functions[0][0].variable # Do more operations on y z = y * y * 3 a = z.mean() # average print(z) print(a) ``` ## Gradients Let's backprop now `out.backward()` is equivalent to doing `out.backward(torch.tensor([1.0]))` ``` # Backprop a.backward() ``` Print gradients $\frac{\text{d}a}{\text{d}x}$. ``` # Compute it by hand BEFORE executing this print(x.grad) ``` You can do many crazy things with autograd! > With Great *Flexibility* Comes Great Responsibility ``` # Dynamic graphs x = torch.randn(3, requires_grad=True) y = x * 2 i = 0 while y.data.norm() < 1000: y = y * 2 i += 1 print(y) # If we don't run backward on a scalar we need to specify the grad_output gradients = torch.FloatTensor([0.1, 1.0, 0.0001]) y.backward(gradients) print(x.grad) # BEFORE executing this, can you tell what would you expect it to print? 
print(i) ``` ## Inference ``` # This variable decides the tensor's range below n = 3 # Both x and w that allows gradient accumulation x = torch.arange(1., n + 1, requires_grad=True) w = torch.ones(n, requires_grad=True) z = w @ x z.backward() print(x.grad, w.grad, sep='\n') # Only w that allows gradient accumulation x = torch.arange(1., n + 1) w = torch.ones(n, requires_grad=True) z = w @ x z.backward() print(x.grad, w.grad, sep='\n') x = torch.arange(1., n + 1) w = torch.ones(n, requires_grad=True) # Regardless of what you do in this context, all torch tensors will not have gradient accumulation with torch.no_grad(): z = w @ x try: z.backward() # PyTorch will throw an error here, since z has no grad accum. except RuntimeError as e: print('RuntimeError!!! >:[') print(e) ``` ## More stuff Documentation of the automatic differentiation package is at http://pytorch.org/docs/autograd.
github_jupyter
Preprocessing an input string is the basis needed to do higher-level operations such as tokenizing. ``` input_str = '༆ ཤི་བཀྲ་ཤིས་ tr བདེ་་ལེ གས། བཀྲ་ཤིས་བདེ་ལེགས་༡༢༣ཀཀ། མཐའི་རྒྱ་མཚོར་གནས་པའི་ཉས་ཆུ་འཐུང་།། །།' ``` # pybotextchunks.py This class is a wrapper around PyBoChunk (that it subclasses). Its main purpose is to provide the input to be fed into the trie within tokenizer.py Here is what it produces: ``` from pybo import PyBoTextChunks text_chunks = PyBoTextChunks(input_str) output = text_chunks.serve_syls_to_trie() print(f'The output is a {type(output)} of {type(output[0])} containing each {len(output[0])} elements.') print('\tex: ', output[0]) print(f'The first element is either a {type(output[0][0])} or a {type(output[1][0])} of ints') print('\tex:', output[0][0], 'or', output[1][0]) ``` See below for the second element, it is the output of PyBoChunk. ``` for n, trie_input in enumerate(output): print(f'{n+1}.\t {trie_input[0]}') ``` The first element provides the information to the trie that a given chunk is a syllable, and thus should be fed to the trie, or that it is not a syllable. In case it is a syllable, its elements are indices of the characters that constitute the syllable: ``` print('\t', input_str) for n, trie_input in enumerate(output): syl = trie_input[0] if syl: syllable = [input_str[s] for s in syl] + ['་'] print(f'{n+1}.\t{syllable}') else: print(f'{n+1}.') ``` Punctuation chunks are left aside, non-Tibetan parts also and the content of the syllable has been cleaned. In token 6, the double tsek has been normalized to a single tsek. In token 7, the space in the middle has been left aside. On top of this, the non-syllabic chunks give us a cue that any ongoing word has to end there. Now, this cleaned content can be fed in the trie, syllable after syllable, character after character. # pybochunk.py This class is powered by the chunking framework defined in BoChunk (that it subclasses). 
It implements the following chunking pipeline: - turn the input string into a sequence of alternating "bo" and "non-bo" chunks - turn the "bo" chunks into a sequence of alternating "punct" and "bo" chunks - turn the remaining "bo" chunks into a sequence of alternating "sym" and "bo" chunks - turn the remaining "bo" chunks into a sequence of alternating "num" and "bo" chunks - turn the remaining "bo" chunks into syllables - concatenate the chunks containing only spaces to the preceding one The last pipe is created by using the building blocks of BoChunk, but implemented here, since this treatment merges two chunks given a condition, while BoChunk is a chunking framework. ``` from pybo import PyBoChunk chunks = PyBoChunk(input_str) output = chunks.chunk() ``` The raw output: ``` for n, chunk in enumerate(output): print(f'{n+1}.\t{chunk}') ``` The last two elements in the tuple are: - the starting index of the current chunk in the input string - the length of the current chunk PyBoChunk provides a method to replace these two indices with the actual substring: ``` with_substrings = chunks.get_chunked(output) for n, chunk in enumerate(with_substrings): print(f'{n+1}.\t{chunk}') ``` I could do it manually, though: ``` chunk_str = input_str[output[0][1]:output[0][1]+output[0][2]] print(f'"{with_substrings[0][1]}" equals "{chunk_str}"') ``` Now, let's get human-readable values for the first int: ``` with_markers = chunks.get_markers(with_substrings) for n, chunk in enumerate(with_markers): print(f'{n+1}.\t{chunk}') ``` - punct: chunk containing Tibetan punctuation - syl: chunk containing a single syllable (necessarily Tibetan characters) - num: chunk containing Tibetan numerals - non-bo: chunk containing non-Tibetan characters Note that at this point, the only thing we have is substrings of the initial string with chunk markers. # bochunk.py BoChunk is a chunking framework built on top of BoString (see below). 
Its methods are used to create chunks – groups of characters that pertain to the same category. The produced output is a binary sequence of chunks matching or not a given condition. Although each chunking method only produces two kinds of chunks (those that match and those that don't), piped chunking allows to apply complex chunking patterns. ``` from pybo.bochunk import BoChunk bc = BoChunk(input_str) ``` ## 1. The available chunking methods #### Either Tibetan characters or not ``` bo_nonbo = bc.chunk_bo_chars() print(f'the output of chunking functions are {type(bo_nonbo)} each containing {len(bo_nonbo[0])} {type(bo_nonbo[0][0])}') for n, c in enumerate(bo_nonbo): print(f'{n+1}.\t{c}') ``` We have already seen this format in PyBoChunk, which actually only applies the chunking methods available in BoChunk in order to produce chunks corresponding to the expectations of a Tibetan reader. In a human-friendly format, it actually is: ``` for n, r in enumerate(bc.get_markers(bc.get_chunked(bo_nonbo))): print(f'{n+1}.\t{r[0]}\t\t"{r[1]}"') ``` We have indeed chunked the input string is a sequence of alternatively Tibetan and non-Tibetan chunks. #### Either Tibetan punctuation or not ``` punct_nonpunct = bc.chunk_punct() for n, p in enumerate(bc.get_markers(bc.get_chunked(punct_nonpunct))): print(f'{n+1}.\t{p[0]}\t\t"{p[1]}"') ``` Now, we have an alternating sequence of chunks that only cares about Tibetan punctuation. Note how "tr" is simply considered to be non-punct. #### Either Tibetan symbols or not ``` sym_nonsym = bc.chunk_symbol() for n, s in enumerate(bc.get_markers(bc.get_chunked(sym_nonsym))): print(f'{n+1}.\t{s[0]}\t\t"{s[1]}"') ``` ... there were no Tibetan symbols in the input string. 
#### Either Tibetan digits or not ``` num_nonnum = bc.chunk_number() for n, m in enumerate(bc.get_markers(bc.get_chunked(num_nonnum))): print(f'{n+1}.\t{m[0]}\t\t"{m[1]}"') ``` We have correctly identified the Tibetan digits in the input string #### Either spaces or not ``` space_nonspace = bc.chunk_spaces() for n, s in enumerate(bc.get_markers(bc.get_chunked(space_nonspace))): print(f'{n+1}.\t{s[0]}\t\t"{s[1]}"') ``` #### Syllabify ``` syl_nonsyl = bc.syllabify() for n, s in enumerate(bc.get_markers(bc.get_chunked(syl_nonsyl))): print(f'{n+1}.\t{s[0]}\t\t"{s[1]}"') ``` This is the only chunking method that doesn't create a binary sequence of chunks matching or not a given condition. Instead, it breaks up the input it receives into syllables that are only separated by tseks(both regular and non-breaking). For this reason, syllabify takes for granted that the input it receives is only Tibetan characters that can compose a syllable. When that was the case, for example in chunks 10 to 16, the syllabation is operated as expected. Spaces, on the other hand, are allowed within a syllable because they only serve to "beautify" a tibetan text by visually marking the end of a clause or that of a sentence after a punctuation. So we reproduce the behavior of a Tibetan reader, who will simply bypass a space encountered anywhere else. #### Extending the framework with additional methods All of these chunking methods follow a simple standardized format, making it extremely simple to create new ones to suit any specific needs. They are in turn built on top of BoString, which relies on the groups of characters that were put together to reproduce the intuition of a native Tibetan who is reading a text. So in case finer chunking abilities are required, finer character groups should be created in BoString. ## 2. 
Piped Chunking: Implementing complex chunking patterns Applying successively the chunking methods we have seen above in a specific order allows to design complex patterns for chunking a given string. We will exemplify this functionality by reproducing the chunking pipeline that PyBoChunk implements: a. turn the input string into a sequence of alternating "bo" / "non-bo" chunks b. turn the "bo" chunks into a sequence of alternating "punct" / "bo" chunks c. turn the remaining "bo" chunks into a sequence of alternating "sym" / "bo" chunks d. turn the remaining "bo" chunks into a sequence of alternating "num" / "bo" chunks e. turn the "bo" chunks into a sequence of syllables f. concatenate the chunks containing only spaces to the preceding one #### a. Either "bo" or "non-bo" ``` chunks = bc.chunk_bo_chars() print('\t', input_str, end='\n\n') for n, c in enumerate(bc.get_markers(bc.get_chunked(chunks))): print(f'{n+1}.\t{c[0]}\t\t"{c[1]}"') ``` The output is similar to what we had above. Yet this time, instead of rechunking the whole input string in "punct" or "non-punct", we will use piped chunking to only apply chunk_punct() to the "bo" chunks. Important: piped chunking is only possible when the content of the chunks is a tuple of ints. #### b. Either "punct" or "bo" ``` bc.pipe_chunk(chunks, bc.chunk_punct, to_chunk=bc.BO_MARKER, yes=bc.PUNCT_MARKER) print('\t', input_str, end='\n\n') for n, c in enumerate(bc.get_markers(bc.get_chunked(chunks))): print(f'{n+1}.\t{c[0]}\t\t"{c[1]}"') ``` pipe_chunk() takes as arguments: - chunks: the chunks produced by a previous chunking methods. They are expected to be a list of tuples containing each three ints. - bc.chunk_punct: the chunking method we want to apply on top of the existing chunks - to_chunk: the marker that identifies which chunks should be further processed by the new chunking method - yes: the marker to be used on the new chunks that match the new chunking method. 
(the chunks that don't match simply retain their previous marker) So to put it into simple English: "Within the existing chunks, I would like to take those marked as 'bo' and within them separate what is actual Tibetan text ('bo') from what is Tibetan punctuation ('punct')" #### c. Either "sym" or "bo" ``` bc.pipe_chunk(chunks, bc.chunk_symbol, to_chunk=bc.BO_MARKER, yes=bc.SYMBOL_MARKER) print('\t', input_str, end='\n\n') for n, c in enumerate(bc.get_markers(bc.get_chunked(chunks))): print(f'{n+1}.\t{c[0]}\t\t"{c[1]}"') ``` There were no symbols, so the chunks are unchanged. #### d. Either "num" or "bo" ``` bc.pipe_chunk(chunks, bc.chunk_number, to_chunk=bc.BO_MARKER, yes=bc.NUMBER_MARKER) print('\t', input_str, end='\n\n') for n, c in enumerate(bc.get_markers(bc.get_chunked(chunks))): print(f'{n+1}.\t{c[0]}\t\t"{c[1]}"') ``` "Within the existing chunks, I would like to take those marked as 'bo' and within them separate what is actual Tibetan text ('bo') from what is Tibetan digits ('num')" #### e. Syllabify "bo" into "syl" ``` bc.pipe_chunk(chunks, bc.syllabify, to_chunk=bc.BO_MARKER, yes=bc.SYL_MARKER) print('\t', input_str, end='\n\n') for n, c in enumerate(bc.get_markers(bc.get_chunked(chunks))): print(f'{n+1}.\t{c[0]}\t\t"{c[1]}"') ``` Here we are ! We have successfully preprocessed the input string into a sequence of identified content that we can confidently use for any further NLP processing. In order to have the exact same output as PyBoChunk, chunk 5 should be attached to chunk 4 and chunk 7 to chunk 6. Since this is implemented as a private method of PyBoChunk, we will stop this demonstration here. # bostring.py This is the foundational building block of all the preprocessing classes in pybo. The idea behind it is to reproduce the way a Tibetan reader will group characters in a given text when reading it. This is simply done by splitting in such groups all the Unicode characters found in the Tibetan Unicode tables. 
``` from pybo import BoString bs = BoString(input_str) ``` #### Character groups Here are the groups we identified: ``` print(f'Consonants:\n\t{list(bs.cons)}') print(f'The subscripted variants:\n\t{list(bs.sub_cons)}\n') print(f'Vowels:\n\t{list(bs.vow)}\n') print(f'Tseks (regular and non-breaking):\n\t{list(bs.tsek)}\n') print(f'Consonants specific to Sanskrit:\n\t{list(bs.skrt_cons)}\n') print(f'The subscripted variants of Sanskrit consonants:\n\t{list(bs.skrt_sub_cons)}\n') print(f'Vowels specific to Sanskrit:\n\t{list(bs.skrt_vow)}\n') print(f'Sanskrit long vowel marker:\n\t{list(bs.skrt_long_vow)}\n') print(f'Regular Tibetan punctuation:\n\t{list(bs.normal_punct)}\n') print(f'Special Tibetan punctuation:\n\t{list(bs.special_punct)}\n') print(f'Tibetan numerals:\n\t{list(bs.numerals)}\n') print(f'Markers found inside Tibetan syllables:\n\t{list(bs.in_syl_marks)}\n') print(f'Unicode Tibetan symbols:\n\t{list(bs.symbols)}\n') print(f'Non-Tibetan and non-Sanskrit characters in the Tibetan tables:\n\t{list(bs.non_bo_non_skrt)}\n') print(f'All the spaces found in the Unicode tables:\n\t{list(bs.spaces)}') ``` Characters specific to Sanskrit are were distinguished in order identify the unambiguously Sanskrit syllables. The long vowel marker because when it is present at the end of a syllable, no tsek is required, so it also behaves like a syllable separator, and we need to have that information while syllabifying Tibetan text. Tibetan regular punctuation was separated from the rest thinking this might be handy when we want to do things like normalizing the punctuation. Otherwise, we could simply have one group for punctuation. Markers found inside syllables needed to be identified in order to ignore it when we want to compare a word from the dictionary and the same word in its marked form. Symbols have all been put together because we don't see yet use-cases requiring finer groups. 
All the space characters from the Unicode tables have been added in order to ensure we cover all possibilities and the tab character as well, because it is sometimes used instead of a space in Tibetan text. We expect that as we start using pybo for more and more things, unforseen use-cases will appear and these groups will certainly need to be refined or adjusted, but it is pretty straight forward to do so. For example, the group of Tibetan consonants is encoded in the following class attributes: - `self.cons = "ཀཁགངཅཆཇཉཏཐདནཔཕབམཙཚཛཝཞཟའཡརལཤསཧཨཪ"` (the actual characters) - `self.CONS = 1` (the group marker) - `self.char_markers = {self.CONS: 'cons', (...)}` (a human-readable replacement for the group marker) #### BoString's output The only thing BoString does is to loop once over the string and fill the `BoString.base_structure` dict with `{index: group_marker}` items for every character. ``` print(bs.base_structure) ``` The same information in a human-readable form: ``` print(f'\n\t"{input_str}"\n') for idx, g in bs.base_structure.items(): character = input_str[idx] group = bs.char_markers[g] print(f'{idx+1}.\t{character}\t{group}') ``` What might come in handy is the ability to export the groups for a portion of the input string: ``` start = 2 end = 5 sub_str = bs.export_groups(start, end) # now fetching the human-readable markers sub_str = {idx: bs.char_markers[group] for idx, group in sub_str.items()} print(sub_str) ``` Note that the indices in `sub_str` are adjusted for the substring. In case we want to keep the original indices: ``` orig_indices = bs.export_groups(start, end, for_substring=False) print({idx: bs.char_markers[group] for idx, group in orig_indices.items()}) ```
github_jupyter
``` import pandas as pd from gensim.models import FastText from keras.preprocessing.text import Tokenizer, text_to_word_sequence from sklearn.feature_extraction.text import CountVectorizer from nltk.corpus import stopwords from sklearn.manifold import TSNE import os import tensorflow as tf import numpy as np import re from tensorflow.contrib.tensorboard.plugins import projector with open('../data/txt/texto_limpio.txt', 'r') as f: text =f.readlines() f.close() model = FastText.load_fasttext_format('fasttext_word_vectors_embeddings.bin') text= [re.sub('\d+', '<NUM>', t) for t in text] cv= CountVectorizer(stop_words=stopwords.words('spanish')) vectorized_text = cv.fit_transform(text) sum_words = vectorized_text.sum(axis=0) words_freq = [(word, sum_words[0, idx]) for word, idx in cv.vocabulary_.items()] words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True) # top= list(map(lambda x: x[0],words_freq))[0:10000] words_freq_df = pd.DataFrame(words_freq) words_freq_df.columns = ['label','freq'] ``` Uso la similitud dentro del embedding para eliminar todos los token residuales del html ``` rm1 = [x[0] for x in model.wv.similar_by_word('font',topn=50)] rm2 = [x[0] for x in model.wv.similar_by_word('margin',topn=50)] rm3 = [x[0] for x in model.wv.similar_by_word('indexof',topn=50)] rm4 = [x[0] for x in model.wv.similar_by_word('pathname',topn=50)] rm5 = [x[0] for x in model.wv.similar_by_word('msofootnotereference',topn=50)] rm6 = [x[0] for x in model.wv.similar_by_word('align',topn=50)] rm7 = [x[0] for x in model.wv.similar_by_word('previous',topn=50)] rm8 = [x[0] for x in model.wv.similar_by_word('html',topn=50)] borrar = set(rm1).union(set(rm2)).union(set(rm3)).union(set(rm4)).union(set(rm5)).union(set(rm6)).union(set(rm7)).union(set(rm8)) len(borrar) words_freq_df = words_freq_df[~words_freq_df.label.isin(borrar)] top_df = words_freq_df[0:10000] top_df.to_csv('log/metadata.tsv',sep='\t',header=True,index=False) # # write labels # with open('log/metadata.tsv', 
'w') as f: # for word in top: # f.write(word + '\n') embedding = np.array(list(map(lambda x: model.wv[x], top_df.label.values))) # setup a TensorFlow session tf.reset_default_graph() sess = tf.InteractiveSession() X = tf.Variable([0.0], name='embedding') place = tf.placeholder(tf.float32, shape=embedding.shape) set_x = tf.assign(X, place, validate_shape=False) sess.run(tf.global_variables_initializer()) sess.run(set_x, feed_dict={place: embedding}) # create a TensorFlow summary writer summary_writer = tf.summary.FileWriter('log', sess.graph) config = projector.ProjectorConfig() embedding_conf = config.embeddings.add() embedding_conf.tensor_name = 'embedding:0' embedding_conf.metadata_path = os.path.join('metadata.tsv') projector.visualize_embeddings(summary_writer, config) # save the model saver = tf.train.Saver() saver.save(sess, os.path.join('log', "model.ckpt")) embedding_df = pd.DataFrame(embedding) embedding_df.to_csv('tensors.tsv',sep='\t',header=False,index=False) ```
github_jupyter
<a href="https://colab.research.google.com/github/lululxvi/deepxde/blob/master/examples/Lorenz_inverse_forced_Colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Description This notebook aims at the identification of the parameters of the modified Lorenz attractor (with exogenous input) Built upon: * Lorenz attractor example from DeepXDE (Lu's code) * https://github.com/lululxvi/deepxde/issues/79 * kind help from Lu, greatly acknowledged # Install lib and imports ``` !pip install deepxde from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import numpy as np import requests import io import matplotlib.pyplot as plt import deepxde as dde from deepxde.backend import tf import scipy as sp import scipy.interpolate as interp from scipy.integrate import odeint ``` # Generate data ``` # true values, see p. 15 in https://arxiv.org/abs/1907.04502 C1true = 10 C2true = 15 C3true = 8 / 3 # time points maxtime = 3 time = np.linspace(0, maxtime, 200) ex_input = 10 * np.sin(2 * np.pi * time) # exogenous input # interpolate time / lift vectors (for using exogenous variable without fixed time stamps) def ex_func(t): spline = sp.interpolate.Rbf( time, ex_input, function="thin_plate", smooth=0, episilon=0 ) # return spline(t[:,0:]) return spline(t) # function that returns dy/dt def LorezODE(x, t): # Modified Lorenz system (with exogenous input). 
x1, x2, x3 = x dxdt = [ C1true * (x2 - x1), x1 * (C2true - x3) - x2, x1 * x2 - C3true * x3 + ex_func(t), ] return dxdt # initial condition x0 = [-8, 7, 27] # solve ODE x = odeint(LorezODE, x0, time) # plot results plt.plot(time, x, time, ex_input) plt.xlabel("time") plt.ylabel("x(t)") plt.show() time = time.reshape(-1, 1) time.shape ``` # Perform identification ``` # parameters to be identified C1 = tf.Variable(1.0) C2 = tf.Variable(1.0) C3 = tf.Variable(1.0) # interpolate time / lift vectors (for using exogenous variable without fixed time stamps) def ex_func2(t): spline = sp.interpolate.Rbf( time, ex_input, function="thin_plate", smooth=0, episilon=0 ) return spline(t[:, 0:]) # return spline(t) # define system ODEs def Lorenz_system(x, y, ex): """Modified Lorenz system (with exogenous input). dy1/dx = 10 * (y2 - y1) dy2/dx = y1 * (28 - y3) - y2 dy3/dx = y1 * y2 - 8/3 * y3 + u """ y1, y2, y3 = y[:, 0:1], y[:, 1:2], y[:, 2:] dy1_x = dde.grad.jacobian(y, x, i=0) dy2_x = dde.grad.jacobian(y, x, i=1) dy3_x = dde.grad.jacobian(y, x, i=2) return [ dy1_x - C1 * (y2 - y1), dy2_x - y1 * (C2 - y3) + y2, dy3_x - y1 * y2 + C3 * y3 - ex, # dy3_x - y1 * y2 + C3 * y3 - 10*tf.math.sin(2*np.pi*x), ] def boundary(_, on_initial): return on_initial # define time domain geom = dde.geometry.TimeDomain(0, maxtime) # Initial conditions ic1 = dde.IC(geom, lambda X: x0[0], boundary, component=0) ic2 = dde.IC(geom, lambda X: x0[1], boundary, component=1) ic3 = dde.IC(geom, lambda X: x0[2], boundary, component=2) # Get the training data observe_t, ob_y = time, x # boundary conditions observe_y0 = dde.PointSetBC(observe_t, ob_y[:, 0:1], component=0) observe_y1 = dde.PointSetBC(observe_t, ob_y[:, 1:2], component=1) observe_y2 = dde.PointSetBC(observe_t, ob_y[:, 2:3], component=2) # define data object data = dde.data.PDE( geom, Lorenz_system, [ic1, ic2, ic3, observe_y0, observe_y1, observe_y2], num_domain=400, num_boundary=2, anchors=observe_t, auxiliary_var_function=ex_func2, ) 
plt.plot(observe_t, ob_y) plt.xlabel("Time") plt.legend(["x", "y", "z"]) plt.title("Training data") plt.show() # define FNN architecture and compile net = dde.maps.FNN([1] + [40] * 3 + [3], "tanh", "Glorot uniform") model = dde.Model(data, net) model.compile("adam", lr=0.001) # callbacks for storing results fnamevar = "variables.dat" variable = dde.callbacks.VariableValue([C1, C2, C3], period=1, filename=fnamevar) losshistory, train_state = model.train(epochs=60000, callbacks=[variable]) ``` Plots ``` # reopen saved data using callbacks in fnamevar lines = open(fnamevar, "r").readlines() # read output data in fnamevar (this line is a long story...) Chat = np.array( [ np.fromstring( min(re.findall(re.escape("[") + "(.*?)" + re.escape("]"), line), key=len), sep=",", ) for line in lines ] ) l, c = Chat.shape plt.plot(range(l), Chat[:, 0], "r-") plt.plot(range(l), Chat[:, 1], "k-") plt.plot(range(l), Chat[:, 2], "g-") plt.plot(range(l), np.ones(Chat[:, 0].shape) * C1true, "r--") plt.plot(range(l), np.ones(Chat[:, 1].shape) * C2true, "k--") plt.plot(range(l), np.ones(Chat[:, 2].shape) * C3true, "g--") plt.legend(["C1hat", "C2hat", "C3hat", "True C1", "True C2", "True C3"], loc="right") plt.xlabel("Epoch") plt.show() yhat = model.predict(observe_t) plt.plot(observe_t, ob_y, "-", observe_t, yhat, "--") plt.xlabel("Time") plt.legend(["x", "y", "z", "xh", "yh", "zh"]) plt.title("Training data") plt.show() ```
github_jupyter
``` import numpy as np import pandas as pd import sklearn import torch import torch.nn as nn import matplotlib.pyplot as plt from torch.autograd import Variable from torch.utils.data import Dataset, DataLoader infile = '../ChronoLSTM_1d_bin3/DATA_Linear/xvyw1beta9.5gammax1.0gammay1.0epsln1.0sgma1.0A1.0x01.122w0.8B0.15a1.0_h0.01_mix1.txt' input_x, _=np.loadtxt(infile, unpack=True, max_rows=100000) device = torch.device("cpu") num_bins=3 sm_length=20 def running_mean(x, N): """Use convolution to do running average.""" return np.convolve(x, np.ones((N,))/N, mode='valid') def find_nearest(key_arr, target): """key_arr: array-like, storing keys. target: the value which we want to be closest to.""" idx=np.abs(key_arr-target).argmin() return idx def Rm_peaks_steps(traj): global threshold """ Remove sudden changes in the trajectory such as peaks and small steps. In this method, I used gradient to identify the changes. If two nonzero gradients are too close (< threshold), we shall take this range as noise. """ traj=np.array(traj) grad_traj=np.gradient(traj) # gradient of trajectory idx_grad=np.where(grad_traj!=0)[0] threshold=20 idx0=idx_grad[0] for idx in idx_grad: window=idx-idx0 if window <= 1: # neighbor continue elif window > 1 and window <= threshold: traj[idx0:idx0+window//2+1]=traj[idx0] traj[idx0+window//2+1:idx+1]=traj[idx+1] idx0=idx elif window > threshold: idx0=idx return traj X = [1.5, 0, -1.5] input_x = running_mean(input_x, sm_length) # smooothen data. idx_x = map(lambda x: find_nearest(X, x), input_x) # convert to three bins. 
idx_2d=list(idx_x) # list(zip(idx_x, idx_y)) idx_2d = Rm_peaks_steps(idx_2d) # remove peaks and short steps text = idx_2d all_combs = [i for i in range(num_bins)] vocab=sorted(all_combs) # Creating a mapping from unique characters to indices char2idx = {u:i for i, u in enumerate(vocab)} idx2char = np.array(vocab) text_as_int = np.array([char2idx[c] for c in text]) class seq_data(Dataset): def __init__(self, traj, seq_length, shift): self.traj = traj self.seq_length = seq_length self.shift = shift def __len__(self): return self.traj[self.shift:].shape[0]//self.seq_length def __getitem__(self, idx): x = self.traj[:-self.shift][idx*self.seq_length:idx*self.seq_length+self.seq_length] y = self.traj[self.shift:][idx*self.seq_length:idx*self.seq_length+self.seq_length] return x, y class NLP(nn.Module): def __init__(self, input_dim, embedding_dim, rnn_units): super(NLP, self).__init__() self.input_dim = input_dim self.embedding_dim = embedding_dim self.hidden_dim = rnn_units self.embedding = nn.Embedding(self.input_dim, self.embedding_dim) self.lstm = nn.LSTM(self.embedding_dim, self.hidden_dim, batch_first=True) self.linear = nn.Linear(self.hidden_dim, self.input_dim) def forward(self, input): batch_size = input.shape[0] embedding_out = self.embedding(input) lstm_in = embedding_out.view(batch_size, input.shape[1], self.embedding_dim) lstm_out, hidden = self.lstm(lstm_in) y_pred = self.linear(lstm_out) return y_pred embedding = nn.Embedding(vocab_size, embedding_dim) embedding_out = embedding(batch_X_train) embedding_out.shape lstm = nn.LSTM(embedding_dim, rnn_units, batch_first=True) lstm_in = embedding_out.view(batch_X_train.shape[0], batch_X_train.shape[1], embedding_dim) lstm_out = lstm(lstm_in) lstm_out[1][0].shape w_ii, w_if, w_ic, w_io = list(model.modules())[2].weight_ih_l0.chunk(4, 0) w_hi, w_hf, w_hc, w_ho = list(model.modules())[2].weight_hh_l0.chunk(4, 0) b_hi, b_hf, b_hc, b_ho = list(model.modules())[2].bias_ih_l0.chunk(4,0) model.hidden[0][0][0][:10] EPOCHS 
= 20 sequence_len = 100 shift=1 batch_size=64 dataset = seq_data(text_as_int, 100, 1) dataset = DataLoader(dataset, batch_size=64, shuffle=True, drop_last=True) # Length of the vocabulary in chars vocab_size = len(vocab) # The embedding dimension embedding_dim = 8 # Number of RNN units rnn_units = 32 # Batch size batch_size=64 model = NLP(vocab_size, embedding_dim, rnn_units).to(device) print(model) loss_fn = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=0.001) for epoch in range(EPOCHS): for batch_X_train, batch_Y_train in dataset: batch_X_train = batch_X_train.to(device) batch_Y_train = batch_Y_train.to(device) y_pred = model(batch_X_train) y=batch_Y_train.to(device) loss = loss_fn(y_pred.view(-1, vocab_size), y.view(-1)) optimizer.zero_grad() loss.backward() optimizer.step() print(epoch, loss.item()) ``` # Save weights ``` PATH = 'test_weight' torch.save(model.state_dict(), PATH) ``` # Reset model to batch_size=1 for prediction ``` model = NLP(vocab_size, embedding_dim, rnn_units, 1).to(device) model.load_state_dict(torch.load(PATH)) ``` # Prediction ``` def generate_text(start_string): input_eval = torch.tensor([char2idx[s] for s in start_string], device=device) text_generated = np.empty(1) for i in range(1000): input_eval = input_eval[np.newaxis, ...] # add a dimension for batch=1. prediction=model(input_eval) logits=prediction p=torch.nn.functional.softmax(logits, dim=-1) # take first batch predicted_id=torch.multinomial(p[0,-1], 1) input_eval = predicted_id text_generated = np.vstack((text_generated, idx2char[predicted_id].tolist())) return text_generated text = idx_2d[:1000] prediction=generate_text(text) infile = 'prediction2' prediction=np.loadtxt(infile) import matplotlib.pyplot as plt plt.plot(prediction[1:2000]) plt.show() plt.plot(text_as_int[1:2000]) np.where(prediction==0)[0].shape np.where(prediction==1)[0].shape np.where(prediction==2)[0].shape ```
github_jupyter
# Setup ``` import pandas as pd import numpy as np import jsonlines import seaborn as sns %matplotlib inline %config InlineBackend.figure_format = 'retina' import torch.nn as nn import torch import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils import torch_optimizer as optim import os from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" from importlib import reload pd.set_option('display.max_rows', 500) pd.set_option('display.float_format', '{:0.3f}'.format) pd.set_option('display.max_columns', 500) pd.set_option('display.width', 1000) pd.options.display.width = 0 import warnings import torchvision warnings.filterwarnings('ignore') from facebook_hateful_memes_detector.utils.globals import set_global, get_global set_global("cache_dir", "/home/ahemf/cache/cache") set_global("dataloader_workers", 4) set_global("use_autocast", True) set_global("models_dir", "/home/ahemf/cache/") from facebook_hateful_memes_detector.utils import read_json_lines_into_df, in_notebook, set_device, my_collate, clean_memory get_global("cache_dir") from facebook_hateful_memes_detector.models import Fasttext1DCNNModel, MultiImageMultiTextAttentionEarlyFusionModel, LangFeaturesModel, AlbertClassifer from facebook_hateful_memes_detector.preprocessing import TextImageDataset, get_datasets, get_image2torchvision_transforms, TextAugment from facebook_hateful_memes_detector.preprocessing import DefinedRotation, QuadrantCut, ImageAugment from facebook_hateful_memes_detector.training import * import facebook_hateful_memes_detector reload(facebook_hateful_memes_detector) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") set_device(device) device from transformers import pipeline from transformers import AutoTokenizer, AutoModelForQuestionAnswering import torch from transformers import AutoModelWithLMHead, AutoTokenizer from transformers import pipeline from 
transformers import AutoModelWithLMHead, AutoTokenizer from transformers import MarianMTModel, MarianTokenizer from tqdm.auto import tqdm, trange from transformers import MarianMTModel, MarianTokenizer data = get_datasets(data_dir="/home/ahemf/cache/data/", train_text_transform=None, train_image_transform=None, test_text_transform=None, test_image_transform=None, train_torchvision_pre_image_transform=None, test_torchvision_pre_image_transform=None, cache_images=False, use_images=True, dev=False, test_dev=True, keep_original_text=True, keep_original_image=True, keep_processed_image=True, keep_torchvision_image=False, train_mixup_config=None) data["test"]["label"] = -1 data['test_unseen']["label"] = -1 dev_unseen = data['dev_unseen'].copy() data['dev_unseen']["label"] = -1 df = pd.concat((data["train"], data['dev_unseen'], data["test"], data['test_unseen'])) df = pd.read_csv("/home/ahemf/cache/new_items.csv") def build_translator(lang_models, model_type="huggingface"): if model_type=="huggingface": forward_model, backward_model = lang_models["fwd"], lang_models["inv"] tokenizer = MarianTokenizer.from_pretrained(forward_model) model = MarianMTModel.from_pretrained(forward_model) model = model.to(get_device()) model = model.eval() state = dict(fwd=(tokenizer, model)) tokenizer = MarianTokenizer.from_pretrained(backward_model) model = MarianMTModel.from_pretrained(backward_model) model = model.to(get_device()) model = model.eval() state["inv"] = (tokenizer, model) elif model_type=="pytorch": forward_model, backward_model = lang_models["fwd"], lang_models["inv"] if "fwd_checkpoint_file" in lang_models: model = torch.hub.load('pytorch/fairseq', forward_model, tokenizer='moses', bpe='fastbpe', checkpoint_file=lang_models["fwd_checkpoint_file"]) else: model = torch.hub.load('pytorch/fairseq', forward_model, tokenizer='moses', bpe='fastbpe') if "inv_checkpoint_file" in lang_models: backward_model = torch.hub.load('pytorch/fairseq', backward_model, tokenizer='moses', 
bpe='fastbpe', checkpoint_file=lang_models["inv_checkpoint_file"]) else: backward_model = torch.hub.load('pytorch/fairseq', backward_model, tokenizer='moses', bpe='fastbpe') model = model.to(get_device()) model = model.eval() backward_model = backward_model.to(get_device()) backward_model = backward_model.eval() state = dict(fwd=model, inv=backward_model) def translate(text): texts = [text] if model_type=="huggingface": fwd_tokenizer, fwd_model = state["fwd"] inv_tokenizer, inv_model = state["inv"] lang_codes = fwd_tokenizer.supported_language_codes if "ROMANCE" in forward_model: lang_codes = ['>>fr<<', '>>es<<', '>>it<<', '>>pt<<', '>>ro<<', '>>ca<<', '>>gl<<', '>>la<<', '>>wa<<', '>>fur<<', '>>oc<<', '>>sc<<', '>>an<<', '>>frp<<',] better_lang_codes = ['>>fr<<', '>>es<<', '>>it<<', '>>pt<<', '>>ca<<', '>>fur<<', '>>oc<<', '>>sc<<', '>>an<<', '>>frp<<'] lang_codes = better_lang_codes if "CELTIC" in forward_model: lang_codes = ['>>ga<<'] if len(lang_codes) > 0: texts = [t for text in texts for t in [lang+" "+text for lang in lang_codes]] batch = fwd_tokenizer.prepare_translation_batch(texts) for k, v in batch.items(): if isinstance(v, torch.Tensor): v = v.to(get_device()) batch[k] = v translated = fwd_model.generate(**batch) fwd_translations = [fwd_tokenizer.decode(t, skip_special_tokens=True) for t in translated] inv_batch = inv_tokenizer.prepare_translation_batch(fwd_translations) for k, v in inv_batch.items(): if isinstance(v, torch.Tensor): v = v.to(get_device()) inv_batch[k] = v translated = inv_model.generate(**inv_batch) tgt_text = [inv_tokenizer.decode(t, skip_special_tokens=True) for t in translated] clean_memory() return tgt_text elif model_type=="pytorch": intermediate = state["fwd"].translate(text) res = state["inv"].translate(intermediate) clean_memory() return [res] return translate fox = "The quick brown fox jumps over the lazy dog." cats = "The cat sat on the front porch sipping a pint of milk." text = 'have you ever studied the history of the jews? 
did you know that they have always banded together as a tribe, infiltrated governments.' text_long = 'have you ever studied the history of the jews? did you know that they have always banded together as a tribe, infiltrated governments, monopolized the financial systems of nations instigated wars and intentionally created chaos in societies? the jews have mass murdered millions of non- jews over the centuries they have seized control of the media so you will never find out study the history of the jews!' hg_en_ru = dict(fwd='Helsinki-NLP/opus-mt-en-ru', inv='Helsinki-NLP/opus-mt-ru-en') hg_en_de = dict(fwd='Helsinki-NLP/opus-mt-en-de', inv='Helsinki-NLP/opus-mt-de-en') hg_en_celtic = dict(fwd='Helsinki-NLP/opus-mt-en-CELTIC', inv='sshleifer/opus-mt-CELTIC-en') hg_en_romance = dict(fwd='Helsinki-NLP/opus-mt-en-ROMANCE', inv='Helsinki-NLP/opus-mt-ROMANCE-en') fox = "The quick brown fox jumps over the lazy dog." cats = "The cat sat on the front porch sipping a pint of milk." text = 'have you ever studied the history of the jews? did you know that they have always banded together as a tribe, infiltrated governments.' text_long = 'have you ever studied the history of the jews? did you know that they have always banded together as a tribe, infiltrated governments, monopolized the financial systems of nations instigated wars and intentionally created chaos in societies? the jews have mass murdered millions of non- jews over the centuries they have seized control of the media so you will never find out study the history of the jews!' 
translate = build_translator(hg_en_ru) translate(fox) translate(cats) translate(text) pt_en_de_1 = dict(fwd='transformer.wmt19.en-de.single_model', inv='transformer.wmt19.de-en.single_model') pt_en_de_2 = dict(fwd='transformer.wmt19.en-de.single_model', inv='transformer.wmt19.de-en', inv_checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt') pt_en_de_3 = dict(fwd='transformer.wmt19.en-de', fwd_checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt', inv='transformer.wmt19.de-en.single_model') pt_en_de_4 = dict(fwd='transformer.wmt19.en-de', fwd_checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt', inv='transformer.wmt19.de-en', inv_checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt') pt_en_de_5 = dict(fwd='transformer.wmt16.en-de', inv='transformer.wmt19.de-en.single_model') pt_en_de_6 = dict(fwd='transformer.wmt16.en-de', inv='transformer.wmt19.de-en', inv_checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt') pt_en_de_7 = dict(fwd='conv.wmt17.en-de', inv='transformer.wmt19.de-en.single_model') pt_en_de_8 = dict(fwd='conv.wmt17.en-de', inv='transformer.wmt19.de-en', inv_checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt') pt_en_ru = dict(fwd='transformer.wmt19.en-ru.single_model', inv='transformer.wmt19.ru-en.single_model') translate = build_translator(pt_en_de_5, model_type="pytorch") translate(fox) translate(cats) translate(text) ``` # DAB ``` results = [] translate = build_translator(pt_en_de_6, model_type="pytorch") for row in tqdm(df.iterrows(), total=df.shape[0]): keys = row[1].index.values values = row[1].values d = dict(zip(keys, values)) t = translate(d["text"]) if isinstance(t, (list, tuple)): r = [(d["id"],ts) for ts in t] results.extend(r) else: results.append((d["id"], t)) results[1] # flattened = [] # for identifier, translations in results: # flattened.extend([(identifier, t) for t in translations]) len(results) pd.DataFrame(results, columns=["id", "text"]).to_csv(os.path.join(get_global("models_dir"),"new_items_10.csv"), 
index=False) ``` # Combine ``` df = pd.read_csv("/home/ahemf/cache/new_items.csv", engine="python") translated_cols = list(set(df.columns) - {'text'}) df_translated = df[translated_cols] df = df[["id", "text"]] dabs = [] for c in translated_cols: if c!="id": dabs.append(df_translated[["id", c]].rename(columns={c:"text"})) # dabs = [] for i in range(1, 14): db = pd.read_csv(os.path.join(get_global("models_dir"),"new_items_%s.csv"%i)) dabs.append(db) dabs = pd.concat((dabs)) dabs.shape dabs.text = dabs.text.astype(str) df.shape len(set(list(dabs.text.apply(lambda x: x.strip().lower())))) len(set(list(dabs.text.apply(lambda x: x.lower().strip())))) df.shape len(set(list(df.text.apply(lambda x: x.strip().lower())))) len(set(list(df.text.apply(lambda x: x.lower().strip())))) dabs.head() from collections import defaultdict id2textset = defaultdict(set) original_pairs = [] for row in df.iterrows(): idx = row[0] identifier, text = row[1] if text.lower().strip() in id2textset[identifier]: continue else: id2textset[identifier].add(text.lower().strip()) for row in dabs.iterrows(): idx = row[0] identifier, text = row[1] if text.lower().strip() in id2textset[identifier]: continue else: id2textset[identifier].add(text.lower().strip()) original_pairs.append((identifier, text)) len(original_pairs) dab = pd.DataFrame(original_pairs, columns=["id", "text"]) dab.to_csv(os.path.join(get_global("models_dir"),"new_dab.csv"), index=False) fdab = pd.read_csv("/home/ahemf/cache/fdab.csv", engine="python") fdab.shape fdab = pd.concat((fdab, dab)) fdab.head() fdab.shape fdab.to_csv("/home/ahemf/cache/fdab.csv", index=False) dab.groupby(["id"]).count().min() dab.id.nunique() dab.head().values df.head() df.to_csv("id2text.csv", index=False) !pwd !head -n5 text.csv ``` # Dedup ``` dab = pd.read_csv("/home/ahemf/cache/fdab.csv", engine="python") dab.shape df.shape from collections import defaultdict id2textset = defaultdict(set) original_pairs = [] for row in df[["id", "text"]].iterrows(): idx = 
row[0] identifier, text = row[1] if text.lower().strip() in id2textset[identifier]: continue else: id2textset[identifier].add(text.lower().strip()) for row in dab.iterrows(): idx = row[0] identifier, text = row[1] if text.lower().strip() in id2textset[identifier]: continue else: id2textset[identifier].add(text.lower().strip()) original_pairs.append((identifier, text)) len(original_pairs) from collections import defaultdict id2wordset = defaultdict(set) original_pairs = [] for row in df[["id", "text"]].iterrows(): idx = row[0] identifier, text = row[1] id2wordset[identifier].update(set(text.lower().strip().split())) for row in dab.iterrows(): idx = row[0] identifier, text = row[1] wsset = id2wordset[identifier] csset = set(text.lower().strip().split()) if len(csset - wsset)<1: continue else: id2wordset[identifier].update(csset) original_pairs.append((identifier, text)) len(original_pairs) dab = pd.DataFrame(original_pairs, columns=["id", "text"]) dab.to_csv("/home/ahemf/cache/hard_dab.csv", index=False) dab = dab.sort_values(["id"]) df = df.sort_values(["id"]) dab.head()['text'].values df.head(1)['text'].values ```
github_jupyter
# Lambda distribution (Vs Reff) ``` import matplotlib.pyplot as plt import pickle import numpy as np ## fucntions def load_pickle(fname): with open(fname, 'rb') as f: return pickle.load(f) def plot_lambda(catalog, i_early, i_late, i_bad, fn_out='./'): import matplotlib.pyplot as plt plt.ioff() f = plt.figure() ax = f.add_subplot(111) #for i, val in enumerate(lambdar_arr): for i in i_early: a = np.asarray(catalog['lambda_arr'][i]) ax.plot(a, 'r-', alpha=0.5) # Red = Early for i in i_late: ax.plot(catalog['lambda_arr'][i], 'b-', alpha=0.3) # Red = Early #plt.xlabel() # in the unit of Reff ax.set_title(r"$\lambda _{R}$") ax.set_ylabel(r"$\lambda _{R}$") ax.set_xlabel("["+ r'$R/R_{eff}$'+"]") ax.set_xlim(right=9) ax.set_ylim([0,1]) ax.set_xticks([0, 4.5, 9]) ax.set_xticklabels(["0", "0.5", "1"]) plt.savefig(fn_out) plt.close() def aexp2zred(aexp): return [1.0/a - 1.0 for a in aexp] def zred2aexp(zred): return [1.0/(1.0 + z) for z in zred] def lbt2aexp(lts): import astropy.units as u from astropy.cosmology import WMAP7, z_at_value zreds = [z_at_value(WMAP7.lookback_time, ll * u.Gyr) for ll in lts] return [1.0/(1+z) for z in zreds] def density_map(x, y, sort=True): from scipy.stats import gaussian_kde xy = np.vstack([x,y]) z = gaussian_kde(xy)(xy) z /= max(z) idx = z.argsort() xx, yy = x[idx], y[idx] z = z[idx] return xx, yy, z ``` ## I like this! ``` clusters = ['05427', '36413', '39990', '01605', '10002', '36415', '04466', '74010'][0:5] lr_points = 5 # number of points int 1 Reff. 
nreff = 3 nbins = 20 def lambda_den_map(clusters, exclude, nout=187, lr_points = 5, nreff=3, nbins=20, density_kernel=False): print(" nout:", nout, "lr_points:", lr_points, "nreff:", nreff, "nbins:", nbins) points = np.arange(lr_points * nreff) x_ticks_label = ["0", "1", "2", "3", "4"][0:nreff] x_tick_pos = [0] [x_tick_pos.append((i+1)*lr_points) for i in range(nreff)] # Need a compiled array of lambda_arr fig, axs = plt.subplots(2,2, sharey=True)#, sharex=True) mass_cut_l = [2e9, 2e9, 1e10, 1e11] mass_cut_r = [1e13,1e10, 1e11, 1e13] yticks_ok=[0.0, 0.2, 0.4, 0.6, 0.8] lambda_range=[0.0, 0.8] snout = str(nout) for imass in range(4): # Count number of galaxies ngood=0 for iclu, cluster in enumerate(clusters): wdir = '/home/hoseung/Work/data/' + cluster catalog = load_pickle(wdir + '/catalog_GM/' + 'catalog' + snout + '.pickle') #i_good = np.where((catalog['mstar'] > mass_cut_l[imass]) & (catalog['mstar'] < mass_cut_r[imass]))[0] i_good = (catalog['mstar'] > mass_cut_l[imass]) & (catalog['mstar'] < mass_cut_r[imass]) for i, gal in enumerate(catalog['id']): if gal in exclude[iclu]: i_good[i] = False #ngood += len(i_good) ngood += sum(i_good) ax = axs.ravel()[imass] all_lr = np.zeros((len(points), ngood)) # compile data ngood=0 for iclu, cluster in enumerate(clusters): wdir = '/home/hoseung/Work/data/' + cluster catalog = load_pickle(wdir + '/catalog_GM/' + 'catalog' + snout + '.pickle') #i_good = np.where((catalog['mstar'] > mass_cut_l[imass]) & (catalog['mstar'] < mass_cut_r[imass]))[0] i_good = (catalog['mstar'] > mass_cut_l[imass]) & (catalog['mstar'] < mass_cut_r[imass]) for i, gal in enumerate(catalog['id']): if gal in exclude[iclu]: i_good[i] = False ind_good = np.arange(len(i_good))[i_good] for i, i_gal in enumerate(ind_good): all_lr[:,ngood + i] = catalog['lambda_arr'][i_gal][:len(points)] #ngood +=len(i_good) ngood += sum(i_good) # Draw density maps if density_kernel: xpoints = np.tile(points, ngood) xx,yy,z = density_map(xpoints,all_lr.transpose().ravel()) 
im = ax.scatter(xx, yy, c=z, s=150, edgecolor='') ax.set_xlim([-0.5, nreff*lr_points]) ax.set_ylim([-0.1,0.9]) #x_tick_pos = "" #ax.set_xticks([0,lr_points-1,2*lr_points - 1]) #x_ticks_label = ["0", "1", "2"] # Correct. by default, rscale_lambda = 2.0 #ax.set_xticklabels(labels = [z for z in x_ticks_label]) #ax.set_xlabel(r"$R/R_{eff}$") ax.set_title(r"{:.1e} $< M_\ast <$ {:.1e}".format(mass_cut_l[imass], mass_cut_r[imass])) ax.text(1,0.65, "# gals:" + str(ngood)) # data coordinates else: den_map = np.zeros((nbins, len(points))) for i in range(len(points)): den_map[:,i], ypoints = np.histogram(all_lr[i,:], bins=nbins, range=lambda_range) #den_map[:,i] /= den_map[:,i].max() # normalize each bin. den_map /= den_map.max() im = ax.imshow(den_map, origin="lower", cmap="Blues", interpolation="none" , extent=[0,lr_points * nreff,0,nbins], aspect='auto') #ax.set_xlim([-1.5, lr_points*nreff]) ax.set_ylim([-0.5,nbins]) ax.set_title(r"{:.1e} $< M_\ast <$ {:.1e}".format(mass_cut_l[imass], mass_cut_r[imass])) ax.text(2,17, "# gals:" + str(ngood)) # data coordinates #ax.set_yticks([np.where(ypoints == yy)[0] for yy in [0.0, 0.2, 0.4, 0.6, 0.8]]) # 0.0, 0.2, 0.4, 0.6, 0.8 #ax.set_yticklabels([str(yy) for yy in yticks_ok]) if density_kernel: for j in range(2): for i in range(2): axs[j,i].set_xticks(x_tick_pos) axs[j,i].set_xticklabels(labels = [z for z in x_ticks_label]) axs[1,i].set_xlabel(r"$R/R_{eff}$") axs[i,0].set_ylabel("$\lambda _R$") #axs[i,j].set_yticks([np.where(ypoints == yy)[0] for yy in np.arange(lambda_range[0], lambda_range[1])]) # 0.0, 0.2, 0.4, 0.6, 0.8 axs[i,j].set_yticks([ly for ly in [0.0, 0.2, 0.4, 0.6, 0.8]]) axs[i,j].set_yticklabels([str(yy) for yy in yticks_ok]) else: for j in range(2): for i in range(2): axs[j,i].set_xticks(x_tick_pos) axs[j,i].set_xticklabels(labels = [z for z in x_ticks_label]) axs[1,i].set_xlabel(r"$R/R_{eff}$") axs[i,0].set_ylabel("$\lambda _R$") #axs[i,j].set_yticks([np.where(ypoints == yy)[0] for yy in np.arange(lambda_range[0], 
lambda_range[1])]) # 0.0, 0.2, 0.4, 0.6, 0.8 axs[i,j].set_yticks([ nbins * ly for ly in [0.0, 0.2, 0.4, 0.6, 0.8]]) axs[i,j].set_yticklabels([str(yy) for yy in yticks_ok]) # Add + mark at 0.5, 1.0, 2.0Reff #fig.tight_layout() cax = fig.add_axes([0.86, 0.1, 0.03, 0.8]) # [left corner x, left corner y, x width, y width] plt.colorbar(im, cax=cax, label='normalized denisty') plt.subplots_adjust(left=0.1, bottom=None, right=0.8, top=None, wspace=0.05, hspace=0.22) #left = 0.125 # the left side of the subplots of the figure #right = 0.9 # the right side of the subplots of the figure #bottom = 0.1 # the bottom of the subplots of the figure #top = 0.9 # the top of the subplots of the figure #wspace = 0.2 # the amount of width reserved for blank space between subplots #hspace = 0.5 # the amount of height reserved for white space between subplots plt.show() #lambda_den_map(clusters) exclude=[[],[],[],[],[1],[],[]] lambda_den_map(["05427", "36413", "39990", "28928", "01605", "36415", "10002"], exclude, nout=187, lr_points = lr_points, density_kernel=True) ``` High resolution run seems to have more galaxies. check mass function. ``` a=np.array([]) clusters = [5427, 36415, 39990, 1605, 10002, 36413, 4466, 74010][0:5] # 74010 is unreliable. # 36413 왜 안 돌아가나..? #exclude_gals = [[], # [], # [], # [123,155,], # [2694,4684,5448,5885,5906,6967,6981,7047,7072,7151,7612], # []] lr_points = 10 # number of points int 1 Reff. nreff = 3 points = np.arange(lr_points * nreff) x_ticks_label = ["0", "1", "2", "3", "4"][0:nreff] x_tick_pos = [0] [x_tick_pos.append((i+1)*lr_points) for i in range(nreff)] # Need a compiled array of lambda_arr fig, axs = plt.subplots(2,2, sharey=True, sharex=True) mass_cut_l = [0, 5e9, 1e10, 1e11, 1e12] mass_cut_r = [1e13,1e10, 1e11, 1e12, 1e13] #titles = #["All galaxies from all clusters", # " {} $< M_{*} <$ {}".format(mass_cut_l[imass], mass_cut_r[imass])] for imass in range(4): ax = axs.ravel()[imass] all_lr = np.zeros(0) xpos = [] # why list?? 
ypos = [] zpos = [] clur = [] for i, cluster in enumerate(clusters): wdir = '/home/hoseung/Work/data/' + str(cluster).zfill(5) catalog = load_pickle(wdir + '/catalog_GM/' + 'catalog187.pickle') i_good = np.where((catalog['mstar'] > mass_cut_l[imass]) & (catalog['mstar'] < mass_cut_r[imass]))[0] for ij, j in enumerate(i_good): all_lr = np.concatenate((all_lr, catalog['lambda_r'][j])) # some catalog has L_arr up to 2Rvir. # Draw density maps # x values xpoints = np.tile(points, len(all_lr)) # Gaussian_kde measures 2D density. But in this case x-axis and y-axis are two independent parameters #(not like x position and y position). So instead, measure 1-D histogram at each x point (R/Reff). xx, yy, z = density_map(xpoints[:all_lr.shape[0]], all_lr.ravel(), ax) ax.scatter(xx, yy, c=z, s=50, edgecolor='') ax.set_xlim([-0.5,2*lr_points]) ax.set_ylim([0,0.8]) ax.set_title(r"{:.1e} $< M_\ast <$ {:.1e}".format(mass_cut_l[imass], mass_cut_r[imass])) axs[1,0].set_xticks(x_tick_pos)#[0,lr_points-1,2*lr_points - 1]) axs[1,0].set_xticklabels(labels = [z for z in x_ticks_label]) axs[1,0].set_xlabel(r"$R/R_{eff}$") axs[1,1].set_xticks(x_tick_pos)#[0,lr_points-1,2*lr_points - 1]) axs[1,1].set_xticklabels(labels = [z for z in x_ticks_label]) axs[1,1].set_xlabel(r"$R/R_{eff}$") axs[0,0].set_ylabel("$\lambda _R$") axs[1,0].set_ylabel("$\lambda _R$") # Add + mark at 0.5, 1.0, 2.0Reff plt.show() plt.close() ``` #### Seaborn heat map looks better than imshow. (BTW, you can use pcolomesh (X,Y,Z) instead of imshow(map)) Choose a suitable color map from Seaborn color map templates. 
``` #plt.clf() fig, ax = plt.subplots(1) import seaborn.apionly as sns # reset rc params to defaults sns.reset_orig() #cmap = sns.diverging_palette(220, 10, as_cmap=True) sns.heatmap(den_map, cmap="Blues", square=True, xticklabels=5, yticklabels=5, linewidths=.2, cbar_kws={"shrink": .5}, ax=ax) plt.gca().invert_yaxis() plt.show() # as a line plt.close() fig, ax = plt.subplots(len(clusters)) for i, cluster in enumerate(clusters): wdir = '/home/hoseung/Work/data/' + str(cluster).zfill(5) catalog = load_pickle(wdir + '/catalog_GM/' + 'catalog187.pickle') #i_early = np.where(catalog['mstar'] > 5e11)[0] i_early = np.where((catalog['mstar'] > 1e10) & (catalog['mstar'] < 1e11))[0] for j in i_early: ax[i].plot(points, catalog['lambda_arr'][j][:2 *lr_points], c='grey', alpha=0.3) ax[i].set_xlim([-0.5,2*lr_points]) ax[i].set_ylim([0,0.8]) x_tick_pos = "" ax[i].set_xticks([0,lr_points -1, 2*lr_points - 1]) x_ticks_label = ["0", "1", "2"] # Correct. by default, rscale_lambda = 2.0 ax[i].set_xticklabels(labels = [z for z in x_ticks_label]) ax[i].set_xlabel(r"$R/R_{eff}$") plt.show() len(catalog['lambda_arr'][j]) final_gals = list(cat['final_gal']) # exclude disky galaxies for bad_gal in exclude_gals[i]: final_gals.remove(bad_gal) ngals = len(final_gals) mstar = np.zeros((ngals, nnouts)) l_r = np.zeros((ngals, nnouts)) ```
github_jupyter
<a href="https://colab.research.google.com/github/apergo-ai/CRASS-data-set/blob/main/OpenAIinterface_master.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import numpy as np import io import pandas as pd import random import re import sys !pip install openai import openai class color: PURPLE = '\033[95m' CYAN = '\033[96m' DARKCYAN = '\033[36m' BLUE = '\033[94m' GREEN = '\033[92m' YELLOW = '\033[93m' RED = '\033[91m' BOLD = '\033[1m' UNDERLINE = '\033[4m' END = '\033[0m' ## Load the CRASS dataset url = 'https://raw.githubusercontent.com/apergo-ai/CRASS-data-set/main/CRASS_FTM_main_data_set.csv' gpt3array = pd.read_csv(url, encoding='latin1',sep=";") gpt3arraylength = len(gpt3array) gpt3arraytransposed = gpt3array.T ## Access API using your credentials openai.api_key = # Enter your API key engineselection = "ada" # use one of the available OpenAI engines based on your preference by editing the line (possibilitiesare: davinci, curie, babbage, ada) ## This function is used to test the question answering capabilities of GPT-3 in CRASS OSM (Open Scoring Mode) in zero-shot fashion. def gpt3createanswerfunction(): gpt3arraycounter = 0 while gpt3arraycounter < 1: #choose how many answers you would like to fetch inputtext = str(gpt3arraytransposed[gpt3arraycounter].PCTID) + ". " + gpt3arraytransposed[gpt3arraycounter].Premise + " " + gpt3arraytransposed[gpt3arraycounter].QCC rawresponse=openai.Completion.create( engine=engineselection, prompt=inputtext, max_tokens=15) #print(rawresponse) responsetext = rawresponse.choices[0].text.strip() gpt3arraytransposed[gpt3arraycounter].gpt3response = responsetext #print(gpt3arraytransposed[gpt3arraycounter]) gpt3arraycounter += 1 return responsetext ## This function can be used to generate some examples in the style of the CRASS dataset. 
def gpt3createquestionfunction(gpt3array, gpt3arraytransposed, gpt3arraylength): newarraylength = gpt3arraylength numberofnewexamples = 0 inputexamplecounter = 1 inputtext = "" while numberofnewexamples < 1: while inputexamplecounter < 10: randomexample = random.randrange(100) inputtext = inputtext + "\n Premise " + str(inputexamplecounter) + ": " + gpt3arraytransposed[randomexample].Premise + "\n Question " + str(inputexamplecounter) + ": " + gpt3arraytransposed[randomexample].QCC + "\n Answer " + str(inputexamplecounter) + ": " + gpt3arraytransposed[randomexample].CorrectAnswer inputexamplecounter += 1 rawresponse=openai.Completion.create( engine=engineselection, prompt=inputtext, temperature=0.9, stop="Premise 11", max_tokens=45) #print(rawresponse) responsetext = rawresponse.choices[0].text.strip() if (responsetext.find('Premise 10') != -1): responsetext = re.split('Premise 10:|Question 10:|Answer 10:', responsetext) #Sometimes it can happen that GPT-3 generates unexpected text without the necessary markers. newpremise = responsetext[1].strip() newqcc = responsetext[2].strip() newanswer = responsetext[3].strip() else: print(color.BOLD + color.RED + 'Unexpected GPT-3 output, please start function again.' + color.END) sys.exit(1) newarraylength += 1 gpt3array.loc[gpt3arraylength] = [newarraylength,'1',newpremise,newqcc,newanswer,'','','',''] gpt3arraylength +=1 numberofnewexamples += 1 return gpt3array # calls the 'create-answer' function gpt3newanswers = gpt3createanswerfunction() # calls the 'create-question' function gpt3newitems = gpt3createquestionfunction(gpt3array, gpt3arraytransposed, gpt3arraylength) #Save Data to a target csv df = pd.DataFrame(gpt3newitems) df.to_csv('output.csv',sep=';', encoding='utf-8',index=False) ```
github_jupyter
# Precipitation exercises *** ## <font color=steelblue>Exercise 3 - Double-mass curve</font> <font color=steelblue>Perform a double-mass curve analysis with the data in sheet *Exercise_003* from file *RainfallData.xlsx*.</font> ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() sns.set_context('notebook') from scipy.optimize import curve_fit ``` ### Import data ``` # Importar los datos data3 = pd.read_excel('../data/RainfallData.xlsx', sheet_name='Exercise_003', skiprows=0, index_col=0) # name of the gages gages = data3.columns # calculate the mean across stations data3['AVG'] = data3.mean(axis=1) data3.head() ``` ### Double-mass curves We are going to plot simultaneously the double-mass curve for all the stations, so we can start identifying stations that may have problems. To plot several plots in the same figure, we will use the function `subplots` in `Matplotlib`. ``` fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(12, 8), sharex=True, sharey=True) for (gage, ax) in zip(gages, axes.flatten()): # line of slope 1 ax.plot((0, 800), (0, 800), ':k', label='1:1 line') # double-mass curve ax.plot(data3.AVG.cumsum(), data3[gage].cumsum(), '.-', label='data') ax.set_title('gage ' + gage) ax.legend() axes[1, 2].axis('off'); ``` From the plot we are certain that the series in gage C is correct, but there might be problems in the rest of the gages. ### Identify errors The double-mass curve must represent a linear regression with no intercept. We will create a function representing this linear regression which we will use in the following steps. ``` def linear_reg(x, m): """Linear regression with no intecept y = m * x Input: ------ x: float. Independet value m: float. Slope of the linear regression Output: ------- y: float. 
Regressed value""" y = m * x return y ``` #### Gage A To identify errors, we will have to fit the linear regression with no intercept to both the series before and after a specific year; if the diference in the fitted slope for those two series exceed an error threshold, we identify that year as a break point in the double-mass curve. We will iterate this process for each year and set a error threshold (or tolerance) to find all the possible break points in the series. ``` # define the gage gage = 'A' # define the error threshold error = .2 for year in data3.index[3:-3]: # fit the regression from 1978 onwards m1 = curve_fit(linear_reg, data3.loc[:year, 'AVG'].cumsum(), data3.loc[:year, gage].cumsum())[0][0] # fit the regression from 1978 onwards m2 = curve_fit(linear_reg, data3.loc[year:, 'AVG'].cumsum(), data3.loc[year:, gage].cumsum())[0][0] ## correction factor #factor = m1 / m2 #if (factor < 1 - error) | (factor > 1. + error): if abs(m1 - m2) > error: print('{0} m1 = {1:.3f} m2 = {2:.3f} factor = {3:.3f}'.format(year, m1, m2, factor)) ``` There are no errors in the series of gage A. #### All gages Simply changing the name of the gage in the previous section we can repeat the process. Let's create a function and the run it in a a loop. ``` def identify_errors(dataGage, dataAVG, error=.1): """Identify possible break points in the double-mass curve Parameters: ----------- dataGage: series. Annual series for the gage to be checked dataAVG: series. Annual series of the mean across gages in a region error: float. Error threshold Output: ------- It will print the years with a difference in slopes higher than 'error', alon with the values of the slopes. 
""" for year in dataGage.index[3:-3]: # fit the regression from 1978 onwards m1 = curve_fit(linear_reg, dataAVG.loc[:year].cumsum(), dataGage.loc[:year].cumsum())[0][0] # fit the regression from 1978 onwards m2 = curve_fit(linear_reg, dataAVG.loc[year:].cumsum(), dataGage.loc[year:].cumsum())[0][0] ## correction factor #factor = m1 / m2 #if (factor < 1 - error) | (factor > 1. + error): if abs(m1 - m2) > error: print('{0} m1 = {1:.3f} m2 = {2:.3f}'.format(year, m1, m2)) for gage in gages: print('Gage ', gage) identify_errors(data3['AVG'], data3[gage], error=.1) print() ``` We have identified errors in gages B, D and E. This was an automatic search to discard correct stations. Now, we have to analyse one by one these three stations that might have errors. ### Correct errors #### Gage B ##### Analyse the series We have identified anomalies in the years between 1929 and 1939. It will probably mean that there are two break points in the double mass curve. Let's look at the double mass curve and the specific points representing those two years. ``` # set gage and year corresponding to the break in the line gage = 'B' breaks = [1929, 1939] # visualize plt.figure(figsize=(5, 5)) plt.axis('equal') plt.plot((0, 800), (0, 800), '--k') plt.plot(data3.AVG.cumsum(), data3[gage].cumsum(), '.-', label='original') plt.plot(data3.AVG.cumsum().loc[breaks], data3[gage].cumsum().loc[breaks], '.', label='breaks') plt.legend(); ``` At a glance, we can identify three periods. There is period at the beginning of the series with a higher than usual slope; this period seem so extend until 1930 (not 1929 as we had identified). There is aperiod at the end of the series with a lower than usual slope; this period seems to start in 1938 (not 1939 as we had identified). We will reset the break points and calculate the slope of the regression to check it. 
```
# reset the break points
breaks = [1930, 1938]

# fit the regression until the first break
m1 = curve_fit(linear_reg, data3.loc[:breaks[0], 'AVG'].cumsum(),
               data3.loc[:breaks[0], gage].cumsum())[0][0]
# fit the regression from the first to the second break
m2 = curve_fit(linear_reg, data3.loc[breaks[0]:breaks[1], 'AVG'].cumsum(),
               data3.loc[breaks[0]:breaks[1], gage].cumsum())[0][0]
# fit the regression from the second break on
m3 = curve_fit(linear_reg, data3.loc[breaks[1]:, 'AVG'].cumsum(),
               data3.loc[breaks[1]:, gage].cumsum())[0][0]

print('m1 = {0:.3f} m2 = {1:.3f} m3 = {2:.3f}'.format(m1, m2, m3))
```

As expected, there are three different slopes in the series. We will assume that the correct data is that from 1930 to 1937, because it is the longest period of the three and its slope is closer to 1. Therefore, we have to calculate the correction factors for two periods: before 1930 and after 1937; with these factors we can correct the series.

##### Correct the series

```
# correction factors: rescale each faulty period to the slope of the trusted one
factor12 = m2 / m1
factor23 = m2 / m3
factor12, factor23

# copy of the original series
data3['B_'] = data3[gage].copy()
# correct period before the first break
data3.loc[:breaks[0], 'B_'] *= factor12
# correct period after the second break
data3.loc[breaks[1]:, 'B_'] *= factor23

plt.figure(figsize=(5, 5))
plt.axis('equal')
plt.plot((0, 800), (0, 800), '--k')
plt.plot(data3.AVG.cumsum(), data3[gage].cumsum(), '.-', label='original')
plt.plot(data3.AVG.cumsum(), data3['B_'].cumsum(), '.-', label='corrected')
plt.legend();
```

Now we can check again for errors in the corrected series.

```
# check again for errors
identify_errors(data3.B_, data3.AVG)
```

There aren't any more errors, so we've done correcting data from gage B.

#### Gage D
##### Analyse the series
We found a break point in year 1930.
```
# set gage and year corresponding to the break in the line
gage = 'D'
breaks = [1930]

# visualize
plt.figure(figsize=(5, 5))
plt.axis('equal')
plt.plot((0, 800), (0, 800), '--k')
plt.plot(data3.AVG.cumsum(), data3[gage].cumsum(), '.-', label='original')
plt.plot(data3.AVG.cumsum().loc[breaks], data3[gage].cumsum().loc[breaks], '.', label='breaks')
plt.legend();

# fit the regression until the break
m1 = curve_fit(linear_reg, data3.loc[:breaks[0], 'AVG'].cumsum(),
               data3.loc[:breaks[0], gage].cumsum())[0][0]
# fit the regression after the break
m2 = curve_fit(linear_reg, data3.loc[breaks[0]:, 'AVG'].cumsum(),
               data3.loc[breaks[0]:, gage].cumsum())[0][0]

print('m1 = {0:.3f} m2 = {1:.3f}'.format(m1, m2))
```

This case is simpler than the previous and we easily spot the break point in 1930. The period before 1930 has a slope closer to 1, so we will assume that this is the correct part of the series.

##### Correct the series

```
# correction factor
factor = m1 / m2
factor

# copy of the original series
data3[gage + '_'] = data3[gage].copy()
# correct period after the break
data3.loc[breaks[0]:, gage + '_'] *= factor

plt.figure(figsize=(5, 5))
plt.axis('equal')
plt.plot((0, 800), (0, 800), '--k')
plt.plot(data3.AVG.cumsum(), data3[gage].cumsum(), '.-', label='original')
plt.plot(data3.AVG.cumsum(), data3[gage + '_'].cumsum(), '.-', label='corrected')
plt.legend();

# check again for errors
identify_errors(data3[gage + '_'], data3.AVG, error=.1)
```

We identify two more possible break points in the corrected series. Both might indicate that the last section of the series has a higher slope than the initial. Let's correct the series from 1935 on, and this may solve the second break point in 1937.
```
# second pass on the already-corrected D series
gage = 'D_'
breaks = [1935]

# fit the regression until the break
m1 = curve_fit(linear_reg, data3.loc[:breaks[0], 'AVG'].cumsum(),
               data3.loc[:breaks[0], gage].cumsum())[0][0]
# fit the regression after the break
m2 = curve_fit(linear_reg, data3.loc[breaks[0]:, 'AVG'].cumsum(),
               data3.loc[breaks[0]:, gage].cumsum())[0][0]

print('m1 = {0:.3f} m2 = {1:.3f}'.format(m1, m2))

# correction factor
factor = m1 / m2
factor

# copy of the original series (creates column 'D__')
data3[gage + '_'] = data3[gage].copy()
# correct period after the break
data3.loc[breaks[0]:, gage + '_'] *= factor

plt.figure(figsize=(5, 5))
plt.axis('equal')
plt.plot((0, 800), (0, 800), '--k')
plt.plot(data3.AVG.cumsum(), data3[gage].cumsum(), '.-', label='original')
plt.plot(data3.AVG.cumsum(), data3[gage + '_'].cumsum(), '.-', label='corrected')
plt.legend();

# check again for errors
identify_errors(data3[gage + '_'], data3.AVG, error=.1)
```

#### Gage E
##### Analyse the series
The series in gage E has a similar behaviour to series B. There is an anomaly in the series between 1929 and 1938, indicating that there might be two break points in the double-mass curve.
```
# set gage and year corresponding to the break in the line
gage = 'E'
breaks = [1929, 1938]

# visualize
plt.figure(figsize=(5, 5))
plt.axis('equal')
plt.plot((0, 800), (0, 800), '--k')
plt.plot(data3.AVG.cumsum(), data3[gage].cumsum(), '.-', label='original')
plt.plot(data3.AVG.cumsum().loc[breaks], data3[gage].cumsum().loc[breaks], '.', label='1929')
plt.legend();

# fit the regression until the first break
m1 = curve_fit(linear_reg, data3.loc[:breaks[0], 'AVG'].cumsum(),
               data3.loc[:breaks[0], gage].cumsum())[0][0]
# fit the regression from the first to the second break
m2 = curve_fit(linear_reg, data3.loc[breaks[0]:breaks[1], 'AVG'].cumsum(),
               data3.loc[breaks[0]:breaks[1], gage].cumsum())[0][0]
# fit the regression from the second break on
m3 = curve_fit(linear_reg, data3.loc[breaks[1]:, 'AVG'].cumsum(),
               data3.loc[breaks[1]:, gage].cumsum())[0][0]

print('m1 = {0:.3f} m2 = {1:.3f} m3 = {2:.3f}'.format(m1, m2, m3))
```

There seems to be only one break in the line between the first and the second period. The slopes in the second and third periods are that close that, most probably, there isn't a change from 1938 on. Apart from that, the break in the line seems to be stronger in 1930 than in 1929, so we will change the breaks to only include 1930. We will assume that the period to be corrected is that before 1930.
```
breaks = [1930]

# fit the regression until the first break
m1 = curve_fit(linear_reg, data3.loc[:breaks[0], 'AVG'].cumsum(),
               data3.loc[:breaks[0], gage].cumsum())[0][0]
# fit the regression from the first break
m2 = curve_fit(linear_reg, data3.loc[breaks[0]:, 'AVG'].cumsum(),
               data3.loc[breaks[0]:, gage].cumsum())[0][0]

m1, m2
```

##### Correct the series

```
# correction factor
factor = m2 / m1
factor

# copy of the original series
data3['E_'] = data3[gage].copy()
# correct period before the first break
data3.loc[:breaks[0], 'E_'] *= factor

plt.figure(figsize=(5, 5))
plt.axis('equal')
plt.plot((0, 800), (0, 800), '--k')
plt.plot(data3.AVG.cumsum(), data3[gage].cumsum(), '.-', label='original')
plt.plot(data3.AVG.cumsum(), data3[gage + '_'].cumsum(), '.-', label='corrected')
plt.legend();

# check again for errors
identify_errors(data3[gage + '_'], data3.AVG)
```

We don't identify any more errors, so the assumption that the slopes of the second and third period were close enough was correct.

#### Redraw the double-mass plot

```
# recalculate the average using the corrected columns
gages = ['A', 'B_', 'C', 'D__', 'E_']
data3['AVG_'] = data3[gages].mean(axis=1)

fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(12, 8), sharex=True, sharey=True)
for (gage, ax) in zip(gages, axes.flatten()):
    ax.plot((0, 800), (0, 800), ':k')
    # double-mass curve
    ax.plot(data3.AVG_.cumsum(), data3[gage].cumsum(), '.-', label='corrected')
    ax.set_title('gage ' + gage)
axes[1, 2].axis('off');

# save figure
plt.savefig('../output/Ex3_double-mass curve.png', dpi=300)

# export corrected series
data3_ = data3.loc[:, gages]
data3_.columns = ['A', 'B', 'C', 'D', 'E']
data3_.to_csv('../output/Ex3_corrected series.csv', float_format='%.2f')
```
github_jupyter
``` # !wget https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master/en_ewt-ud-dev.conllu # !wget https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master/en_ewt-ud-train.conllu # !wget https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master/en_ewt-ud-test.conllu # !wget https://storage.googleapis.com/xlnet/released_models/cased_L-12_H-768_A-12.zip -O xlnet.zip # !unzip xlnet.zip import os os.environ['CUDA_VISIBLE_DEVICES'] = '1' tag2idx = {'PAD': 0, 'X': 1} tag_idx = 2 import sentencepiece as spm from prepro_utils import preprocess_text, encode_ids sp_model = spm.SentencePieceProcessor() sp_model.Load('xlnet_cased_L-12_H-768_A-12/spiece.model') def tokenize_fn(text): text = preprocess_text(text, lower= False) return encode_ids(sp_model, text) SEG_ID_A = 0 SEG_ID_B = 1 SEG_ID_CLS = 2 SEG_ID_SEP = 3 SEG_ID_PAD = 4 special_symbols = { "<unk>" : 0, "<s>" : 1, "</s>" : 2, "<cls>" : 3, "<sep>" : 4, "<pad>" : 5, "<mask>" : 6, "<eod>" : 7, "<eop>" : 8, } VOCAB_SIZE = 32000 UNK_ID = special_symbols["<unk>"] CLS_ID = special_symbols["<cls>"] SEP_ID = special_symbols["<sep>"] MASK_ID = special_symbols["<mask>"] EOD_ID = special_symbols["<eod>"] def process_corpus(corpus, until = None): global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx sentences, words, depends, labels, pos, sequences = [], [], [], [], [], [] temp_sentence, temp_word, temp_depend, temp_label, temp_pos = [], [], [], [], [] segments, masks = [], [] first_time = True for sentence in corpus: try: if len(sentence): if sentence[0] == '#': continue if first_time: print(sentence) first_time = False sentence = sentence.split('\t') if sentence[7] not in tag2idx: tag2idx[sentence[7]] = tag_idx tag_idx += 1 temp_word.append(sentence[1]) temp_depend.append(int(sentence[6]) + 1) temp_label.append(tag2idx[sentence[7]]) temp_sentence.append(sentence[1]) temp_pos.append(sentence[3]) else: if len(temp_sentence) < 2 or len(temp_word) != 
len(temp_label): temp_word = [] temp_depend = [] temp_label = [] temp_sentence = [] temp_pos = [] continue bert_tokens = [] labels_ = [] depends_ = [] seq_ = [] for no, orig_token in enumerate(temp_word): t = tokenize_fn(orig_token) labels_.append(temp_label[no]) depends_.append(temp_depend[no]) bert_tokens.extend(t) labels_.extend([1] * (len(t) - 1)) depends_.extend([0] * (len(t) - 1)) seq_.append(no + 1) bert_tokens.extend([4, 3]) labels_.extend([0, 0]) depends_.extend([0, 0]) segment = [0] * (len(bert_tokens) - 1) + [SEG_ID_CLS] input_mask = [0] * len(segment) words.append(bert_tokens) depends.append(depends_) labels.append(labels_) sentences.append(temp_sentence) pos.append(temp_pos) sequences.append(seq_) segments.append(segment) masks.append(input_mask) temp_word = [] temp_depend = [] temp_label = [] temp_sentence = [] temp_pos = [] except Exception as e: print(e, sentence) return sentences[:-1], words[:-1], depends[:-1], labels[:-1], pos[:-1], sequences[:-1], segments[:-1], masks[:-1] with open('en_ewt-ud-dev.conllu') as fopen: dev = fopen.read().split('\n') sentences_dev, words_dev, depends_dev, labels_dev, _, seq_dev, segments_dev, masks_dev = process_corpus(dev) with open('en_ewt-ud-test.conllu') as fopen: test = fopen.read().split('\n') sentences_test, words_test, depends_test, labels_test, _, seq_test, segments_test, masks_test = process_corpus(test) sentences_test.extend(sentences_dev) words_test.extend(words_dev) depends_test.extend(depends_dev) labels_test.extend(labels_dev) seq_test.extend(seq_dev) segments_test.extend(segments_dev) masks_test.extend(masks_dev) with open('en_ewt-ud-train.conllu') as fopen: train = fopen.read().split('\n') sentences_train, words_train, depends_train, labels_train, _, _, segments_train, masks_train = process_corpus(train) len(sentences_train), len(sentences_test) idx2tag = {v:k for k, v in tag2idx.items()} train_X = words_train train_Y = labels_train train_depends = depends_train test_X = words_test test_Y = 
labels_test test_depends = depends_test import xlnet import model_utils import tensorflow as tf import numpy as np kwargs = dict( is_training=True, use_tpu=False, use_bfloat16=False, dropout=0.1, dropatt=0.1, init='normal', init_range=0.1, init_std=0.05, clamp_len=-1) xlnet_parameters = xlnet.RunConfig(**kwargs) xlnet_config = xlnet.XLNetConfig(json_path='xlnet_cased_L-12_H-768_A-12/xlnet_config.json') epoch = 15 batch_size = 32 warmup_proportion = 0.1 num_train_steps = int(len(train_X) / batch_size * epoch) num_warmup_steps = int(num_train_steps * warmup_proportion) print(num_train_steps, num_warmup_steps) training_parameters = dict( decay_method = 'poly', train_steps = num_train_steps, learning_rate = 2e-5, warmup_steps = num_warmup_steps, min_lr_ratio = 0.0, weight_decay = 0.00, adam_epsilon = 1e-8, num_core_per_host = 1, lr_layer_decay_rate = 1, use_tpu=False, use_bfloat16=False, dropout=0.0, dropatt=0.0, init='normal', init_range=0.1, init_std=0.02, clip = 1.0, clamp_len=-1,) class Parameter: def __init__(self, decay_method, warmup_steps, weight_decay, adam_epsilon, num_core_per_host, lr_layer_decay_rate, use_tpu, learning_rate, train_steps, min_lr_ratio, clip, **kwargs): self.decay_method = decay_method self.warmup_steps = warmup_steps self.weight_decay = weight_decay self.adam_epsilon = adam_epsilon self.num_core_per_host = num_core_per_host self.lr_layer_decay_rate = lr_layer_decay_rate self.use_tpu = use_tpu self.learning_rate = learning_rate self.train_steps = train_steps self.min_lr_ratio = min_lr_ratio self.clip = clip training_parameters = Parameter(**training_parameters) class BiAAttention: def __init__(self, input_size_encoder, input_size_decoder, num_labels): self.input_size_encoder = input_size_encoder self.input_size_decoder = input_size_decoder self.num_labels = num_labels self.W_d = tf.get_variable("W_d", shape=[self.num_labels, self.input_size_decoder], initializer=tf.contrib.layers.xavier_initializer()) self.W_e = tf.get_variable("W_e", 
shape=[self.num_labels, self.input_size_encoder], initializer=tf.contrib.layers.xavier_initializer()) self.U = tf.get_variable("U", shape=[self.num_labels, self.input_size_decoder, self.input_size_encoder], initializer=tf.contrib.layers.xavier_initializer()) def forward(self, input_d, input_e, mask_d=None, mask_e=None): batch = tf.shape(input_d)[0] length_decoder = tf.shape(input_d)[1] length_encoder = tf.shape(input_e)[1] out_d = tf.expand_dims(tf.matmul(self.W_d, tf.transpose(input_d, [0, 2, 1])), 3) out_e = tf.expand_dims(tf.matmul(self.W_e, tf.transpose(input_e, [0, 2, 1])), 2) output = tf.matmul(tf.expand_dims(input_d, 1), self.U) output = tf.matmul(output, tf.transpose(tf.expand_dims(input_e, 1), [0, 1, 3, 2])) output = output + out_d + out_e if mask_d is not None: d = tf.expand_dims(tf.expand_dims(mask_d, 1), 3) e = tf.expand_dims(tf.expand_dims(mask_e, 1), 2) output = output * d * e return output class BiLinear: def __init__(self, left_features, right_features, out_features): self.left_features = left_features self.right_features = right_features self.out_features = out_features self.U = tf.get_variable("U-bi", shape=[out_features, left_features, right_features], initializer=tf.contrib.layers.xavier_initializer()) self.W_l = tf.get_variable("Wl", shape=[out_features, left_features], initializer=tf.contrib.layers.xavier_initializer()) self.W_r = tf.get_variable("Wr", shape=[out_features, right_features], initializer=tf.contrib.layers.xavier_initializer()) def forward(self, input_left, input_right): left_size = tf.shape(input_left) output_shape = tf.concat([left_size[:-1], [self.out_features]], axis = 0) batch = tf.cast(tf.reduce_prod(left_size[:-1]), tf.int32) input_left = tf.reshape(input_left, (batch, self.left_features)) input_right = tf.reshape(input_right, (batch, self.right_features)) tiled = tf.tile(tf.expand_dims(input_left, axis = 0), (self.out_features,1,1)) output = tf.transpose(tf.reduce_sum(tf.matmul(tiled, self.U), axis = 2)) output = output + 
tf.matmul(input_left, tf.transpose(self.W_l))\ + tf.matmul(input_right, tf.transpose(self.W_r)) return tf.reshape(output, output_shape) class Attention: def __init__(self, word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, hidden_size, encoder_layers, num_labels, arc_space, type_space): def cells(size, reuse=False): return tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(),reuse=reuse) self.word_embedd = tf.Variable(tf.random_uniform([num_words, word_dim], -1, 1)) self.char_embedd = tf.Variable(tf.random_uniform([num_chars, char_dim], -1, 1)) self.conv1d = tf.layers.Conv1D(num_filters, kernel_size, 1, padding='VALID') self.num_labels = num_labels self.encoder = tf.nn.rnn_cell.MultiRNNCell([cells(hidden_size) for _ in range(encoder_layers)]) def encode(self, input_word, input_char): word = tf.nn.embedding_lookup(self.word_embedd, input_word) char = tf.nn.embedding_lookup(self.char_embedd, input_char) b = tf.shape(char)[0] wl = tf.shape(char)[1] cl = tf.shape(char)[2] d = char.shape[3] char = tf.reshape(char, [b * wl, cl, d]) char = tf.reduce_max(self.conv1d(char), axis = 1) char = tf.nn.tanh(char) d = char.shape[-1] char = tf.reshape(char, [b, wl, d]) src_encoding = tf.concat([word, char], axis=2) output, hn = tf.nn.dynamic_rnn(self.encoder, src_encoding, dtype = tf.float32, scope = 'encoder') arc_h = tf.nn.elu(self.arc_h(output)) arc_c = tf.nn.elu(self.arc_c(output)) type_h = tf.nn.elu(self.type_h(output)) type_c = tf.nn.elu(self.type_c(output)) return (arc_h, arc_c), (type_h, type_c), hn def forward(self, input_word, input_char, mask): arcs, types, _ = self.encode(input_word, input_char) out_arc = tf.squeeze(self.attention.forward(arcs[0], arcs[1], mask_d=mask, mask_e=mask), axis = 1) return out_arc, types, mask def loss(self, input_word, input_char, mask, heads, types): out_arc, out_type, _ = self.forward(input_word, input_char, mask) type_h, type_c = out_type batch = tf.shape(out_arc)[0] max_len = tf.shape(out_arc)[1] 
batch_index = tf.range(0, batch) t = tf.transpose(heads) broadcasted = tf.broadcast_to(batch_index, tf.shape(t)) concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0), tf.expand_dims(t, axis = 0)], axis = 0)) type_h = tf.gather_nd(type_h, concatenated) out_type = self.bilinear.forward(type_h, type_c) minus_inf = -1e8 minus_mask = (1 - mask) * minus_inf out_arc = out_arc + tf.expand_dims(minus_mask, axis = 2) + tf.expand_dims(minus_mask, axis = 1) loss_arc = tf.nn.log_softmax(out_arc, dim=1) loss_type = tf.nn.log_softmax(out_type, dim=2) loss_arc = loss_arc * tf.expand_dims(mask, axis = 2) * tf.expand_dims(mask, axis = 1) loss_type = loss_type * tf.expand_dims(mask, axis = 2) num = tf.reduce_sum(mask) - tf.cast(batch, tf.float32) child_index = tf.tile(tf.expand_dims(tf.range(0, max_len), 1), [1, batch]) t = tf.transpose(heads) broadcasted = tf.broadcast_to(batch_index, tf.shape(t)) concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0), tf.expand_dims(t, axis = 0), tf.expand_dims(child_index, axis = 0)], axis = 0)) loss_arc = tf.gather_nd(loss_arc, concatenated) loss_arc = tf.transpose(loss_arc, [1, 0]) t = tf.transpose(types) broadcasted = tf.broadcast_to(batch_index, tf.shape(t)) concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0), tf.expand_dims(child_index, axis = 0), tf.expand_dims(t, axis = 0)], axis = 0)) loss_type = tf.gather_nd(loss_type, concatenated) loss_type = tf.transpose(loss_type, [1, 0]) return tf.reduce_sum(-loss_arc) / num, tf.reduce_sum(-loss_type) / num def decode(self, input_word, input_char, mask, leading_symbolic=0): out_arc, out_type, _ = self.forward(input_word, input_char, mask) batch = tf.shape(out_arc)[0] max_len = tf.shape(out_arc)[1] sec_max_len = tf.shape(out_arc)[2] out_arc = out_arc + tf.linalg.diag(tf.fill([max_len], -np.inf)) minus_mask = tf.expand_dims(tf.cast(1 - mask, tf.bool), axis = 2) minus_mask = tf.tile(minus_mask, [1, 1, sec_max_len]) out_arc = 
tf.where(minus_mask, tf.fill(tf.shape(out_arc), -np.inf), out_arc) heads = tf.argmax(out_arc, axis = 1) type_h, type_c = out_type batch = tf.shape(type_h)[0] max_len = tf.shape(type_h)[1] batch_index = tf.range(0, batch) t = tf.cast(tf.transpose(heads), tf.int32) broadcasted = tf.broadcast_to(batch_index, tf.shape(t)) concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0), tf.expand_dims(t, axis = 0)], axis = 0)) type_h = tf.gather_nd(type_h, concatenated) out_type = self.bilinear.forward(type_h, type_c) out_type = out_type[:, :, leading_symbolic:] types = tf.argmax(out_type, axis = 2) return heads, types class Model: def __init__( self, learning_rate, hidden_size_word, cov = 0.0): self.words = tf.placeholder(tf.int32, (None, None)) self.segment_ids = tf.placeholder(tf.int32, [None, None]) self.input_masks = tf.placeholder(tf.float32, [None, None]) self.heads = tf.placeholder(tf.int32, (None, None)) self.types = tf.placeholder(tf.int32, (None, None)) self.mask = tf.cast(tf.math.not_equal(self.words, 0), tf.float32) self.maxlen = tf.shape(self.words)[1] self.lengths = tf.count_nonzero(self.words, 1) mask = self.mask heads = self.heads types = self.types self.arc_h = tf.layers.Dense(hidden_size_word) self.arc_c = tf.layers.Dense(hidden_size_word) self.attention = BiAAttention(hidden_size_word, hidden_size_word, 1) self.type_h = tf.layers.Dense(hidden_size_word) self.type_c = tf.layers.Dense(hidden_size_word) self.bilinear = BiLinear(hidden_size_word, hidden_size_word, len(tag2idx)) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=xlnet_parameters, input_ids=tf.transpose(self.words, [1, 0]), seg_ids=tf.transpose(self.segment_ids, [1, 0]), input_mask=tf.transpose(self.input_masks, [1, 0])) output_layer = xlnet_model.get_sequence_output() output_layer = tf.transpose(output_layer, [1, 0, 2]) arc_h = tf.nn.elu(self.arc_h(output_layer)) arc_c = tf.nn.elu(self.arc_c(output_layer)) type_h = tf.nn.elu(self.type_h(output_layer)) 
        type_c = tf.nn.elu(self.type_c(output_layer))
        # Arc scores from biaffine attention, squeezed to [batch, len, len].
        # NOTE(review): both arguments are arc_h, so arc_c computed above is
        # never used -- typically this would be forward(arc_h, arc_c); verify
        # against the original notebook.
        out_arc = tf.squeeze(self.attention.forward(arc_h, arc_h, mask_d=self.mask, mask_e=self.mask), axis = 1)
        batch = tf.shape(out_arc)[0]
        max_len = tf.shape(out_arc)[1]
        sec_max_len = tf.shape(out_arc)[2]
        batch_index = tf.range(0, batch)
        # --- greedy decode path (mirrors decode() of the earlier parser class) ---
        decode_arc = out_arc + tf.linalg.diag(tf.fill([max_len], -np.inf))
        minus_mask = tf.expand_dims(tf.cast(1 - mask, tf.bool), axis = 2)
        minus_mask = tf.tile(minus_mask, [1, 1, sec_max_len])
        decode_arc = tf.where(minus_mask, tf.fill(tf.shape(decode_arc), -np.inf), decode_arc)
        self.heads_seq = tf.argmax(decode_arc, axis = 1)
        t = tf.cast(tf.transpose(self.heads_seq), tf.int32)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0), tf.expand_dims(t, axis = 0)], axis = 0))
        type_h = tf.gather_nd(type_h, concatenated)
        out_type = self.bilinear.forward(type_h, type_c)
        self.tags_seq = tf.argmax(out_type, axis = 2)
        # --- loss path (uses gold heads/types) ---
        batch = tf.shape(out_arc)[0]
        max_len = tf.shape(out_arc)[1]
        batch_index = tf.range(0, batch)
        t = tf.transpose(heads)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0), tf.expand_dims(t, axis = 0)], axis = 0))
        # NOTE(review): type_h was already overwritten by the decode-path gather
        # above, so this second gather indexes the gathered tensor, not the raw
        # per-token representations -- verify this is intended.
        type_h = tf.gather_nd(type_h, concatenated)
        out_type = self.bilinear.forward(type_h, type_c)
        # Mask padded positions before the softmaxes.
        minus_inf = -1e8
        minus_mask = (1 - mask) * minus_inf
        out_arc = out_arc + tf.expand_dims(minus_mask, axis = 2) + tf.expand_dims(minus_mask, axis = 1)
        # NOTE(review): `dim=` is the deprecated TF1 spelling of `axis=`.
        loss_arc = tf.nn.log_softmax(out_arc, dim=1)
        loss_type = tf.nn.log_softmax(out_type, dim=2)
        loss_arc = loss_arc * tf.expand_dims(mask, axis = 2) * tf.expand_dims(mask, axis = 1)
        loss_type = loss_type * tf.expand_dims(mask, axis = 2)
        # Token count minus one position per sentence (presumably the root).
        num = tf.reduce_sum(mask) - tf.cast(batch, tf.float32)
        child_index = tf.tile(tf.expand_dims(tf.range(0, max_len), 1), [1, batch])
        # Gold-head arc log-probs per child token.
        t = tf.transpose(heads)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0), tf.expand_dims(t, axis = 0), tf.expand_dims(child_index, axis = 0)], axis = 0))
        loss_arc = tf.gather_nd(loss_arc, concatenated)
        loss_arc = tf.transpose(loss_arc, [1, 0])
        # Gold-type log-probs per token.
        t = tf.transpose(types)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0), tf.expand_dims(child_index, axis = 0), tf.expand_dims(t, axis = 0)], axis = 0))
        loss_type = tf.gather_nd(loss_type, concatenated)
        loss_type = tf.transpose(loss_type, [1, 0])
        # Combined mean NLL of arcs and types.
        self.cost = (tf.reduce_sum(-loss_arc) / num) + (tf.reduce_sum(-loss_type) / num)
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate = learning_rate
        ).minimize(self.cost)
        # Token-level accuracies over non-padded positions only.
        mask = tf.sequence_mask(self.lengths, maxlen = self.maxlen)
        self.prediction = tf.boolean_mask(self.tags_seq, mask)
        mask_label = tf.boolean_mask(self.types, mask)
        correct_pred = tf.equal(tf.cast(self.prediction, tf.int32), mask_label)
        correct_index = tf.cast(correct_pred, tf.float32)  # NOTE(review): unused
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        self.prediction = tf.cast(tf.boolean_mask(self.heads_seq, mask), tf.int32)
        mask_label = tf.boolean_mask(self.heads, mask)
        correct_pred = tf.equal(self.prediction, mask_label)
        correct_index = tf.cast(correct_pred, tf.float32)  # NOTE(review): unused
        self.accuracy_depends = tf.reduce_mean(tf.cast(correct_pred, tf.float32))


tf.reset_default_graph()
sess = tf.InteractiveSession()
learning_rate = 2e-5
hidden_size_word = 128
model = Model(learning_rate, hidden_size_word)
sess.run(tf.global_variables_initializer())

import collections
import re


def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
    """Compute the union of the current variables and checkpoint variables."""
    assignment_map = {}
    initialized_variable_names = {}
    name_to_variable = collections.OrderedDict()
    for var in tvars:
        name = var.name
        # Strip the trailing ':<device index>' suffix from variable names.
        m = re.match('^(.*):\\d+$', name)
        if m is not None:
            name = m.group(1)
        name_to_variable[name] = var
    init_vars = tf.train.list_variables(init_checkpoint)
    assignment_map = collections.OrderedDict()
    for x in init_vars:
        (name, var) = (x[0], x[1])
        if name not in name_to_variable:
            continue
        assignment_map[name] = name_to_variable[name]
        initialized_variable_names[name] = 1
        initialized_variable_names[name + ':0'] = 1
    return (assignment_map, initialized_variable_names)


tvars = tf.trainable_variables()
checkpoint = 'xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt'
assignment_map, initialized_variable_names = get_assignment_map_from_checkpoint(tvars, checkpoint)
saver = tf.train.Saver(var_list = assignment_map)
saver.restore(sess, checkpoint)

from tensorflow.keras.preprocessing.sequence import pad_sequences

# Sanity-check the graph on the first five training examples.
# train_X / train_Y / train_depends / segments_train / masks_train come from
# earlier notebook cells -- not visible in this chunk.
batch_x = train_X[:5]
batch_x = pad_sequences(batch_x,padding='post')
batch_y = train_Y[:5]
batch_y = pad_sequences(batch_y,padding='post')
batch_depends = train_depends[:5]
batch_depends = pad_sequences(batch_depends,padding='post')
batch_segments = segments_train[:5]
batch_segments = pad_sequences(batch_segments, padding='post', value = 4)
batch_masks = masks_train[:5]
batch_masks = pad_sequences(batch_masks, padding='post', value = 1)
sess.run([model.accuracy, model.accuracy_depends, model.cost],
         feed_dict = {model.words: batch_x,
                      model.types: batch_y,
                      model.heads: batch_depends,
                      model.segment_ids: batch_segments,
                      model.input_masks: batch_masks})
tags_seq, heads = sess.run(
    [model.tags_seq, model.heads_seq],
    feed_dict = {
        model.words: batch_x,
        model.segment_ids: batch_segments,
        model.input_masks: batch_masks
    },
)
tags_seq[0], heads[0], batch_depends[0]  # notebook cell output

from tqdm import tqdm

# `epoch` and `batch_size` are defined in an earlier notebook cell -- TODO confirm.
for e in range(epoch):
    train_acc, train_loss = [], []
    test_acc, test_loss = [], []
    train_acc_depends, test_acc_depends = [], []
    pbar = tqdm(
        range(0, len(train_X), batch_size), desc = 'train minibatch loop'
    )
    for i in pbar:
        index = min(i + batch_size, len(train_X))
        batch_x = train_X[i: index]
        batch_x = pad_sequences(batch_x,padding='post')
        batch_y = train_Y[i: index]
        batch_y = pad_sequences(batch_y,padding='post')
# (continues the training loop begun in the previous cell of this flattened export)
        batch_depends = train_depends[i: index]
        batch_depends = pad_sequences(batch_depends,padding='post')
        batch_segments = segments_train[i: index]
        batch_segments = pad_sequences(batch_segments, padding='post', value = 4)
        batch_masks = masks_train[i: index]
        batch_masks = pad_sequences(batch_masks, padding='post', value = 1)
        # One optimization step; also fetch the metrics for the progress bar.
        acc_depends, acc, cost, _ = sess.run(
            [model.accuracy_depends, model.accuracy, model.cost, model.optimizer],
            feed_dict = {
                model.words: batch_x,
                model.types: batch_y,
                model.heads: batch_depends,
                model.segment_ids: batch_segments,
                model.input_masks: batch_masks
            },
        )
        train_loss.append(cost)
        train_acc.append(acc)
        train_acc_depends.append(acc_depends)
        pbar.set_postfix(cost = cost, accuracy = acc, accuracy_depends = acc_depends)
    pbar = tqdm(
        range(0, len(test_X), batch_size), desc = 'test minibatch loop'
    )
    for i in pbar:
        index = min(i + batch_size, len(test_X))
        batch_x = test_X[i: index]
        batch_x = pad_sequences(batch_x,padding='post')
        batch_y = test_Y[i: index]
        batch_y = pad_sequences(batch_y,padding='post')
        batch_depends = test_depends[i: index]
        batch_depends = pad_sequences(batch_depends,padding='post')
        batch_segments = segments_test[i: index]
        batch_segments = pad_sequences(batch_segments, padding='post', value = 4)
        batch_masks = masks_test[i: index]
        batch_masks = pad_sequences(batch_masks, padding='post', value = 1)
        # No optimizer op here: evaluation only.
        acc_depends, acc, cost = sess.run(
            [model.accuracy_depends, model.accuracy, model.cost],
            feed_dict = {
                model.words: batch_x,
                model.types: batch_y,
                model.heads: batch_depends,
                model.segment_ids: batch_segments,
                model.input_masks: batch_masks
            },
        )
        test_loss.append(cost)
        test_acc.append(acc)
        test_acc_depends.append(acc_depends)
        pbar.set_postfix(cost = cost, accuracy = acc, accuracy_depends = acc_depends)
    print(
        'epoch: %d, training loss: %f, training acc: %f, training depends: %f, valid loss: %f, valid acc: %f, valid depends: %f\n'
        % (e, np.mean(train_loss), np.mean(train_acc), np.mean(train_acc_depends),
           np.mean(test_loss), np.mean(test_acc), np.mean(test_acc_depends) ))

# Predict on the last batch left over from the loop above.
tags_seq, heads = sess.run(
    [model.tags_seq, model.heads_seq],
    feed_dict = {
        model.words: batch_x,
        model.segment_ids: batch_segments,
        model.input_masks: batch_masks
    },
)
tags_seq[0], heads[0] - 1, batch_depends[0] - 1  # notebook cell output


def evaluate(heads_pred, types_pred, heads, types, lengths, symbolic_root=False, symbolic_end=False):
    # Unlabeled/labeled attachment accuracy and root accuracy over a padded batch.
    # NOTE(review): the local `batch_size` shadows the module-level one;
    # ucomplete_match/lcomplete_match are accumulated but never returned;
    # raises ZeroDivisionError if no gold head in the batch equals 0
    # (total_root == 0) -- verify against callers.
    batch_size, _ = heads_pred.shape
    ucorr = 0.
    lcorr = 0.
    total = 0.
    ucomplete_match = 0.
    lcomplete_match = 0.
    corr_root = 0.
    total_root = 0.
    # Optionally skip a symbolic root token at the front / end token at the back.
    start = 1 if symbolic_root else 0
    end = 1 if symbolic_end else 0
    for i in range(batch_size):
        ucm = 1.
        lcm = 1.
        for j in range(start, lengths[i] - end):
            total += 1
            if heads[i, j] == heads_pred[i, j]:
                ucorr += 1
                if types[i, j] == types_pred[i, j]:
                    lcorr += 1
                else:
                    lcm = 0
            else:
                ucm = 0
                lcm = 0
            if heads[i, j] == 0:
                total_root += 1
                corr_root += 1 if heads_pred[i, j] == 0 else 0
        ucomplete_match += ucm
        lcomplete_match += lcm
    return ucorr / total, lcorr / total, corr_root / total_root


arcs, types, roots = [], [], []
for i in range(0, len(test_X), batch_size):
    index = min(i + batch_size, len(test_X))
    batch_x = test_X[i: index]
    batch_x = pad_sequences(batch_x,padding='post')
    batch_y = test_Y[i: index]
    batch_y = pad_sequences(batch_y,padding='post')
    batch_depends = test_depends[i: index]
    batch_depends = pad_sequences(batch_depends,padding='post')
    batch_segments = segments_test[i: index]
    batch_segments = pad_sequences(batch_segments, padding='post', value = 4)
    batch_masks = masks_test[i: index]
    batch_masks = pad_sequences(batch_masks, padding='post', value = 1)
    tags_seq, heads = sess.run(
        [model.tags_seq, model.heads_seq],
        feed_dict = {
            model.words: batch_x,
            model.segment_ids: batch_segments,
            model.input_masks: batch_masks
        },
    )
    # Both predicted and gold heads are shifted by -1 before comparison.
    arc_accuracy, type_accuracy, root_accuracy = evaluate(heads - 1, tags_seq, batch_depends - 1, batch_y, np.count_nonzero(batch_x, axis = 1))
    arcs.append(arc_accuracy)
    types.append(type_accuracy)
    roots.append(root_accuracy)

print('arc accuracy:', np.mean(arcs))
print('types accuracy:', np.mean(types))
print('root accuracy:', np.mean(roots))
```
github_jupyter
## Musterlösung zu Projektaufgabe Deskriptive Statistik und offene Fragen #### Grundlage: Datensatz der San Francisco Public Library, s.a. https://zbmed.github.io/2020-2021-ZK_Data_Librarian_Modul_3/organisation/dataset/ ##### Frage 1: Wie viele Senioren und Kinder sind Kunden der San Francisco Public Library? ##### Frage 2: Wie viele Nutzer möchten per Mail informiert werden? ##### Frage 3: Wie alt sind diese Nutzer durchschnittlich im Vergleich zu Nutzern, die per Post informiert werden möchten? ##### Frage 4: Wie viele Ausleihen werden im Mittel pro Altersgruppe und pro Jahr getätigt? Ist die Streuung zwischen den Gruppen gleich? ##### Frage 5: Welche Altersgruppe verlängert im Mittel wie oft? ##### Frage 6: Wie ist die Verteilung der Altersgruppen im Mission District ##### Frage 7: Erklärung von Normalisierung von Kreuztabellen (Kapitel 3 im Skript) ``` import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np %matplotlib inline sns.set() # das was wir brauchen in abgekürzter Form df = pd.read_csv( "../data/Library_Usage.csv", na_values="none" ) # Einlesen des Datensatzes in das neu definierte DataFrame df mit Überschreibung #fehlender Werte #df df.head() #Überblick über das DataFrame ``` # Frage 1: Wie viele Senioren und Kinder sind Kunden der San Francisco Public Library? Um diese Frage zu beantworten, kommen die Spalten "Patron Type Definition" oder "Age Range" in Frage, also schauen wir uns die Einträge (Merkmalsausprägungen) an: ``` df['Patron Type Definition'].value_counts() ``` Man sieht, dass es sich um nominale Werte handelt. ``` df['Age Range'].value_counts() ``` Man sieht, dass es sich um ordinale Werte handelt. In der Beschreibung des Datensatzes steht, dass sich die Spalte 'Age Range' nach dem Geburtsdatum richtet. D.h. es ist davon auszugehen, dass die Nutzer der Bibliothek zum Zeitpunkt der Bereitstellung des Datensatzes (2016) in die Kategorien von 'Age Range' einsortiert werden. 
In der Spalte 'Patron Type Definition' sind verschiedene Merkmalsausprägungen zu finden, die nicht unbedingt etwas mit dem Alter zu tun haben (z.B. VISITOR oder SPECIAL). Daher ist davon auszugehen, dass wir mit 'Age Range' am nähesten Fragestellungen zu Alter beantworten zu können. Richtige Angaben zu tatsächlichem Alter der Nutzer liegen bei diesem Datensatz nicht vor. An dieser Stelle ist also zu definieren, was Senioren und Kinder sind. Eine Möglichkeit ist, Senioren als Age Range = 65 to 74 years und Age Range = 75 years and over zu definieren, man könnte aber auch Age Range = 60 to 64 years dazunehmen. Für das weitere Vorgehen, definieren wir Senioren als über 65-jährige und Kinder als bis 19-jährige. Um die Frage 1 zu beantworten, reicht es also, die entsprechenden Einträge zu summieren. ``` kinder=df.loc[df['Age Range'] == "0 to 9 years"] #Hilfsvariable len(kinder) senioren=df.loc[ (df['Age Range'] == "65 to 74 years") | (df['Age Range'] == "75 years and over") ] #Hilfsvariable len(senioren) ``` # Antwort auf Frage 1: ``` print('Es sind ' + str(len(kinder)) + ' Kinder (bis 19 Jahre) und ' + str(len(senioren)) + ' Senioren (ab 65 Jahren) registriert.' ) ``` ----- # Frage 2: Wie viele Nutzer möchten per Mail informiert werden? ``` len(df.loc[(df['Notice Preference Definition'] == "email")]) ``` # Antwort auf Frage 2: ``` print(str(len(df.loc[(df['Notice Preference Definition'] == "email")])) + ' Nutzer möchten per Mail informiert werden.') ``` # Frage 3: Wie alt sind diese Nutzer durchschnittlich im Vergleich zu Nutzern, die per Post informiert werden möchten? Die relevante Spalte über die gewünschte Benachrichtigungsart schauen wir uns genauer an: ``` df['Notice Preference Definition'].value_counts() ``` Es handelt sich um ein nominales Merkmal. Auch hier stoßen wir auf eine Interpretationsfrage. Da der Datensatz kein Alter ausgibt, sondern nur Altersstufen (Age Range) bzw. 
Kategorien, die nicht immer etwas mit dem Alter zu tun haben sondern eher mit dem Nutzerstatus (Patron Type Definition), müssen wir überlegen, was ein Durchschnitt bedeuten kann. Age Range ist ein ordinales Merkmal, d.h. wir können keinen Erwartungswert berechnen, aber uns dennoch Häufigkeitstabellen anschauen.

Um diese beiden Merkmale (nominal und ordinal) zu verbinden, können wir uns einen Plot angucken, dafür definieren wir eine neue Variable indem wir nach den Merkmalen filtern, die uns interessieren:

```
# Helper variable for the plot, so that only the values 'email' and 'print'
# of 'Notice Preference Definition' are shown.
benachrichtigung=df.loc[
    (df['Notice Preference Definition'] == "email")
    | (df['Notice Preference Definition'] == "print")
]
# If data=df were chosen instead, the other notification categories would
# appear in the plot as well.
sns.catplot(x='Age Range', kind='count',
            hue='Notice Preference Definition',
            data=benachrichtigung, aspect=3,
            order=["0 to 9 years","10 to 19 years","20 to 24 years","25 to 34 years","35 to 44 years","45 to 54 years","55 to 59 years","60 to 64 years","65 to 74 years","75 years and over"])
```

Um besser die Zahlen zu verstehen, hilft uns eine Kreuztabelle:

```
pd.crosstab(
    benachrichtigung['Notice Preference Definition'],
    benachrichtigung['Age Range'],
    margins=True
)
# Note that we are looking at a subset of the dataset here, via the variable
# "benachrichtigung": rows whose 'Notice Preference Definition' is neither
# 'email' nor 'print' are excluded.
```

----

###### WICHTIG: Beachte, dass die Summe der Nutzer, die per Mail informiert werden hier 323778 ergibt. Wir haben aber bei Frage 2 festgestellt, dass eigentlich 323937 Nutzer per Mail informiert werden möchten. Dies liegt daran, dass offensichtlich in einigen Einträgen (Zeilen des Datensatzes) zwar im Feld "Notice Preference Definition" der Wert "email" steht, aber offensichtlich im Feld "Age Range" kein Eintrag steht.
In der Kreuztabelle werden die beiden Merkmale 'Age Range' und 'Notice Preference Definition' betrachtet und somit nur die Einträge, wo entsprechend beide Felder ausgefüllt sind. (Das ist übrigens unabhängig davon, ob man die Kreuztabelle über die "große" Variable "df" oder "benachrichtigung" berechnet, probiere es gerne aus!)

----

```
# Nun möchten wir das prozentual betrachten:
pd.crosstab(
    benachrichtigung['Notice Preference Definition'],
    benachrichtigung['Age Range'],
    margins=True,
    normalize=0
)
```

# Antwort auf Frage 3:

Die Frage können wir wie folgt beantworten. Von allen Nutzern, die per Mail informiert werden möchten, sind
- ca. 9% in der Altersklasse 0 bis 9 Jahre,
- ca. 12% in der Altersklasse 10 bis 19 Jahre,
...
- ca. 6% in der Altersklasse 65 bis 74 Jahre,
- ca. 2% in der Altersklasse 75 Jahre und älter.

Im Vergleich dazu sind von allen Nutzern, die per Post informiert werden möchten
- ca. 13% in der Altersklasse 0 bis 9 Jahre,
- ca. 17% in der Altersklasse 10 bis 19 Jahre,
...
- ca. 86% in der Altersklasse 65 bis 74 Jahre,
- ca. 65% in der Altersklasse 75 Jahre und älter.

# Frage 4: Wie viele Ausleihen werden im Mittel pro Altersgruppe und pro Jahr getätigt? Ist die Streuung zwischen den Gruppen gleich?

Für diese Fragestellung sind die Spalten 'Total Checkouts' und 'Age Range' relevant. Bei der ersten handelt es sich um ein metrisches Merkmal, die zweite ist ordinal. Wir definieren eine neue Spalte, die die durchschnittliche jährliche Ausleihe eines Nutzers ausgibt.
``` df['Circulation Active Year'] = pd.to_numeric(df['Circulation Active Year'], errors='coerce') df['Membership Duration Years'] = (df['Circulation Active Year'] - df['Year Patron Registered'])+1 # hier werden der Einfachheit ganze Jahre angenommen df["Average Checkouts per Year"] =(df['Total Checkouts']/df['Membership Duration Years']) import matplotlib.pyplot as plt %matplotlib inline sns.set() plt.figure(figsize=(16, 6)) my_order =['0 to 9 years', '10 to 19 years', '20 to 24 years', '25 to 34 years', '35 to 44 years', '45 to 54 years', '55 to 59 years', '60 to 64 years', '65 to 74 years', '75 years and over'] plt = sns.boxplot(y='Average Checkouts per Year', x='Age Range', data=df[df['Average Checkouts per Year']<df['Average Checkouts per Year'].quantile(0.95)], fliersize=0.5, whis=1.5, order =my_order) ``` # Antwort auf Frage 4: In der Grafik ist die Antwort ablesbar. Man sieht ganz schön, dass es kaum Zusammenhang zwischen der durschnittlichen Ausleihe pro Jahr und den Altersgruppen gibt. Lediglich die Altersgruppe zwischen 20 und 24 und 25 und 34 leihen sichtbar weniger Bücher aus als die anderen Altersgruppen. # Frage 5: Welche Altersgruppe verlängert im Mittel wie oft? 
# Antwort auf Frage 5 ``` Age_Range_0_9 = df.loc[df['Age Range'] == '0 to 9 years'] Age_Range_10_19 = df.loc[df['Age Range'] == '10 to 19 years'] Age_Range_20_24 = df.loc[df['Age Range'] == '20 to 24 years'] Age_Range_25_34 = df.loc[df['Age Range'] == '25 to 34 years'] Age_Range_35_44 = df.loc[df['Age Range'] == '35 to 44 years'] Age_Range_45_54 = df.loc[df['Age Range'] == '45 to 54 years'] Age_Range_55_59 = df.loc[df['Age Range'] == '55 to 59 years'] Age_Range_60_64 = df.loc[df['Age Range'] == '60 to 64 years'] Age_Range_65_74 = df.loc[df['Age Range'] == '65 to 74 years'] Age_Range_75 = df.loc[df['Age Range'] == '75 years and over'] # Hilfsvariablen print('Im Mittel werden in den Altersgruppen wie folgt Ausleihen getätigt:' ) print('0 bis 9-jährige: ' + str(Age_Range_0_9['Average Checkouts per Year'].mean())) print('10 bis 19-jährige: ' + str(Age_Range_10_19['Average Checkouts per Year'].mean())) print('...') print('65 bis 74-jährige: ' + str(Age_Range_65_74['Average Checkouts per Year'].mean())) print('ab 75-jährige: ' + str(Age_Range_75['Average Checkouts per Year'].mean())) ``` ---- # Frage 6: Wie ist die Verteilung der Altersgruppen im Mission District Für diese Frage sind die Variablen 'Home Library Definition' und 'Age Range' relevant. Die erstere ist nominal, die zweite ordinal. D.h. auch hier ist die Berechnung von Erwartungswert und Varianz nicht möglich. Für einen ersten graphischen Überblick können wir aber einfach zählen, wie viele Nutzer welcher Altersgruppe im Mission District sind. 
```
# Helper variable: only patrons whose home library is the Mission branch.
mission_district=df.loc[(df['Home Library Definition'] == "Mission")]
my_order =['0 to 9 years', '10 to 19 years', '20 to 24 years', '25 to 34 years',
           '35 to 44 years', '45 to 54 years', '55 to 59 years', '60 to 64 years',
           '65 to 74 years', '75 years and over']
sns.catplot(x='Age Range', kind="count", data=mission_district, order=my_order, aspect=3)
```

Die obere Grafik beantwortet die Frage schon ganz gut, dennoch können wir uns auch die genauen Werte über eine Kreuztabelle ausgeben lassen:

```
pd.crosstab(
    mission_district['Home Library Definition'],
    mission_district['Age Range'],
    margins=True,
    normalize=0
)
```

# Antwort auf Frage 6

Die Verteilung der Altersgruppen im Mission District ist wie folgt:
- ca. 11% der Nutzer sind zwischen 0 und 9 Jahren,
- ca. 16% der Nutzer sind zwischen 10 und 19 Jahren,
...
- ca. 4% der Nutzer sind zwischen 65 und 74 Jahren,
- ca. 1% der Nutzer sind 75 Jahre und älter.

----

# Frage 7: Erklärung von Normalisierung von Kreuztabellen (Kapitel 3 im Skript)

Wir nutzen das Beispiel von oben (Alterskategorien und Benachrichtigungsart aus Frage 3). Zuerst einmal eine nicht normalisierte Kreuztabelle:

```
# First the plain, non-normalized cross table (absolute counts).
pd.crosstab(
    benachrichtigung['Notice Preference Definition'],
    benachrichtigung['Age Range'],
    margins=True
)
# The order of the two columns passed to crosstab does not matter --
# feel free to try it out.

# Now we normalize by rows (normalize=0).
pd.crosstab(
    benachrichtigung['Notice Preference Definition'],
    benachrichtigung['Age Range'],
    margins=True,
    normalize=0
)
```

Hier sehen wir, dass jede Zeile in der Summe 1 ergibt. D.h. wir können die Tabelle prozentual nach Zeilen interpretieren. Also z.B. erste Zeile: Von allen Nutzern, die per Mail informiert werden möchten, befinden sich ca. 13% (0.128020 von 1) in der Altersgruppe 45 bis 54 Jahre.
```
# Als Vergleich normalisieren wir nach Spalten (normalize=1)
pd.crosstab(
    benachrichtigung['Notice Preference Definition'],
    benachrichtigung['Age Range'],
    margins=True,
    normalize=1
)
```

Hier sehen wir, dass jede Spalte in der Summe 1 ergibt. D.h. wir können die Tabelle prozentual nach Spalten interpretieren. Also z.B. erste Spalte: Von allen Nutzern, die zwischen 0 und 9 Jahren alt sind, möchten sich ca. 13% (0.130813 von 1) per Post informieren lassen.

----
github_jupyter
```
!pip install chart_studio

# NOTE(review): the two plotly imports below are duplicated verbatim in the
# original cell; harmless, but one pair could be removed.
import plotly.graph_objects as go
import plotly.offline as offline_py
import plotly.graph_objects as go
import plotly.offline as offline_py
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import plotly.figure_factory as ff
import numpy as np
%matplotlib inline
import pandas as pd

df = pd.read_csv("https://raw.githubusercontent.com/DSEI21000-S21/project-product-price-prediction/main/data/random_samples/stratified_sampling_data_by_price_whigh_sz50000_1619218354.csv")

# size of dataset
print('The size of the dataset is: {} \n'.format(df.shape))
# different data types in the dataset
print('The types of the dataset: {}'.format(df.dtypes))
df.head()
df.price.describe()

# most popular categories -- Women, electronics and men
x = df['c1'].value_counts().index.values.astype('str')[:15]
y = df['c1'].value_counts().values[:15]
# percentage labels for the bars
pct = [("%.2f"%(v*100))+"%" for v in (y/len(df))] [:15]
trace1 = go.Bar(x=x, y=y, text=pct)
layout = dict(title= 'Number of Items by Main Category',
              yaxis = dict(title='Count'),
              xaxis = dict(title='Brand'))
fig=dict(data=[trace1], layout=layout)
offline_py.iplot(fig)

# most popular brands -- Nike & PINK
x = df['brand_name'].value_counts().index.values.astype('str')[:15]
y = df['brand_name'].value_counts().values[:15]
pct = [("%.2f"%(v*100))+"%" for v in (y/len(df))] [:15]
colorscale = [[0, '#FAEE1C'], [0.33, '#F3558E'], [0.66, '#9C1DE7'], [1, '#581B98']]
trace1 = go.Bar(x=x, y=y, text=pct,
                marker=dict(color = y, colorscale=colorscale, showscale=True))
layout = dict(title= 'Number of Items by brand name',
              yaxis = dict(title='Count'),
              xaxis = dict(title='Brand'))
fig=dict(data=[trace1], layout=layout)
offline_py.iplot(fig)

# visualize which words have the highest frequencies within the top-1 category
description = df.item_description[df.c1 == 'women']
plt.subplots(figsize = (8,8))
wordcloud = WordCloud (
    background_color = 'white',
    width = 512,
    height = 384
).generate(' '.join(description))
plt.imshow(wordcloud)
# show the image
plt.axis('off')  # turn off the x and y axes
plt.title('Top Words -- Women')
plt.show()

# relationship between price and shipping
dataframe = df[df.brand_name == 'Nike'][:100]
datawomen = dataframe.loc[:, ['price', 'shipping']]
datawomen["index"] = np.arange(1,len(datawomen)+1)
fig = ff.create_scatterplotmatrix(datawomen, diag='box', index='index',colormap='Portland',
                                  colormap_type='cat', height=700, width=700)
offline_py.iplot(fig)
```
github_jupyter
Originaly taken from https://www.easy-tensorflow.com and adapted for the purpose of the course

# Imports
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
```

# Load the MNIST dataset

## Data dimenstion
```
from tensorflow.examples.tutorials.mnist import input_data

img_h = img_w = 28  # MNIST images are 28x28
img_size_flat = img_h * img_w  # 28x28=784, the total number of pixels
n_classes = 10  # Number of classes, one class per digit
n_channels = 1
```

## Helper functions to load the MNIST data
```
def load_data(mode='train'):
    """
    Function to (download and) load the MNIST data
    :param mode: train or test
    :return: images and the corresponding labels
    """
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    if mode == 'train':
        x_train, y_train, x_valid, y_valid = mnist.train.images, mnist.train.labels, \
                                             mnist.validation.images, mnist.validation.labels
        # reformat() reshapes the images to NHWC; its re-encoded labels are
        # discarded because the labels are already one-hot from
        # read_data_sets(one_hot=True).
        x_train, _ = reformat(x_train, y_train)
        x_valid, _ = reformat(x_valid, y_valid)
        return x_train, y_train, x_valid, y_valid
    elif mode == 'test':
        x_test, y_test = mnist.test.images, mnist.test.labels
        x_test, _ = reformat(x_test, y_test)
        return x_test, y_test


def reformat(x, y):
    """
    Reformats the data to the format acceptable for convolutional layers
    :param x: input array
    :param y: corresponding labels
    :return: reshaped input and labels
    """
    img_size, num_ch, num_class = int(np.sqrt(x.shape[-1])), 1, len(np.unique(np.argmax(y, 1)))
    dataset = x.reshape((-1, img_size, img_size, num_ch)).astype(np.float32)
    labels = (np.arange(num_class) == y[:, None]).astype(np.float32)
    return dataset, labels


def randomize(x, y):
    """ Randomizes the order of data samples and their corresponding labels"""
    permutation = np.random.permutation(y.shape[0])
    shuffled_x = x[permutation, :, :, :]
    shuffled_y = y[permutation]
    return shuffled_x, shuffled_y


def get_next_batch(x, y, start, end):
    # Slice one contiguous mini-batch [start, end) out of x and y.
    x_batch = x[start:end]
    y_batch = y[start:end]
    return x_batch, y_batch
```

## Load the data and display the sizes

Now we can use the defined helper function in "train" mode which loads the train and validation images and their corresponding labels. We'll also display their sizes:
```
x_train, y_train, x_valid, y_valid = load_data(mode='train')
print("Size of:")
print("- Training-set:\t\t{}".format(len(y_train)))
print("- Validation-set:\t{}".format(len(y_valid)))
```

# Hyperparameters
```
logs_path = "./logs"  # path to the folder that we want to save the logs for Tensorboard
lr = 0.001  # The optimization initial learning rate
epochs = 10  # Total number of training epochs
batch_size = 100  # Training batch size
display_freq = 100  # Frequency of displaying the training results
```

# Network configuration
```
# 1st Convolutional Layer
filter_size1 = 5  # Convolution filters are 5 x 5 pixels.
num_filters1 = 16  # There are 16 of these filters.
stride1 = 1  # The stride of the sliding window

# 2nd Convolutional Layer
filter_size2 = 5  # Convolution filters are 5 x 5 pixels.
num_filters2 = 32  # There are 32 of these filters.
stride2 = 1  # The stride of the sliding window

# Fully-connected layer.
h1 = 128  # Number of neurons in fully-connected layer.
```

# Create network helper functions

## Helper functions for creating new variables
```
# weight and bias wrappers
def weight_variable(shape):
    """
    Create a weight variable with appropriate initialization
    :param shape: weight shape
    :return: initialized weight variable
    """
    initer = tf.truncated_normal_initializer(stddev=0.01)
    # Named 'W' inside the caller's variable scope.
    return tf.get_variable('W',
                           dtype=tf.float32,
                           shape=shape,
                           initializer=initer)


def bias_variable(shape):
    """
    Create a bias variable with appropriate initialization
    :param shape: bias variable shape
    :return: initialized bias variable
    """
    initial = tf.constant(0., shape=shape, dtype=tf.float32)
    # Named 'b' inside the caller's variable scope.
    return tf.get_variable('b',
                           dtype=tf.float32,
                           initializer=initial)
```

## Helper-function for creating a new Convolutional Layer
```
def conv_layer(x, filter_size, num_filters, stride, name):
    """
    Create a 2D convolution layer
    :param x: input from previous layer
    :param filter_size: size of each filter
    :param num_filters: number of filters (or output feature maps)
    :param stride: filter stride
    :param name: layer name
    :return: The output array
    """
    with tf.variable_scope(name):
        num_in_channel = x.get_shape().as_list()[-1]
        shape = [filter_size, filter_size, num_in_channel, num_filters]
        W = weight_variable(shape=shape)
        tf.summary.histogram('weight', W)
        b = bias_variable(shape=[num_filters])
        tf.summary.histogram('bias', b)
        layer = tf.nn.conv2d(x, W,
                             strides=[1, stride, stride, 1],
                             padding="SAME")
        layer += b
        return tf.nn.relu(layer)
```

## Helper-function for creating a new Max-pooling Layer
```
def max_pool(x, ksize, stride, name):
    """
    Create a max pooling layer
    :param x: input to max-pooling layer
    :param ksize: size of the max-pooling filter
    :param stride: stride of the max-pooling filter
    :param name: layer name
    :return: The output array
    """
    return tf.nn.max_pool(x,
                          ksize=[1, ksize, ksize, 1],
                          strides=[1, stride, stride, 1],
                          padding="SAME",
                          name=name)
```

# Helper-function for flattening a layer
```
def
flatten_layer(layer):
    """
    Flattens the output of the convolutional layer to be fed into fully-connected layer
    :param layer: input array
    :return: flattened array
    """
    with tf.variable_scope('Flatten_layer'):
        layer_shape = layer.get_shape()
        # Multiply out the three spatial/channel dimensions (dims 1..3).
        num_features = layer_shape[1:4].num_elements()
        layer_flat = tf.reshape(layer, [-1, num_features])
    return layer_flat
```

## Helper-function for creating a new fully-connected Layer
```
def fc_layer(x, num_units, name, use_relu=True):
    """
    Create a fully-connected layer
    :param x: input from previous layer
    :param num_units: number of hidden units in the fully-connected layer
    :param name: layer name
    :param use_relu: boolean to add ReLU non-linearity (or not)
    :return: The output array
    """
    with tf.variable_scope(name):
        in_dim = x.get_shape()[1]
        W = weight_variable(shape=[in_dim, num_units])
        tf.summary.histogram('weight', W)
        b = bias_variable(shape=[num_units])
        tf.summary.histogram('bias', b)
        layer = tf.matmul(x, W)
        layer += b
        if use_relu:
            layer = tf.nn.relu(layer)
        return layer
```

# Network graph

## Placeholders for the inputs (x) and corresponding labels (y)
```
with tf.name_scope('Input'):
    x = tf.placeholder(tf.float32, shape=[None, img_h, img_w, n_channels], name='X')
    y = tf.placeholder(tf.float32, shape=[None, n_classes], name='Y')
```

## Create the network layers
```
# conv -> pool -> conv -> pool -> flatten -> fc -> logits
conv1 = conv_layer(x, filter_size1, num_filters1, stride1, name='conv1')
pool1 = max_pool(conv1, ksize=2, stride=2, name='pool1')
conv2 = conv_layer(pool1, filter_size2, num_filters2, stride2, name='conv2')
pool2 = max_pool(conv2, ksize=2, stride=2, name='pool2')
layer_flat = flatten_layer(pool2)
fc1 = fc_layer(layer_flat, h1, 'FC1', use_relu=True)
output_logits = fc_layer(fc1, n_classes, 'OUT', use_relu=False)
```

## Define the loss function, optimizer, accuracy, and predicted class
```
with tf.variable_scope('Train'):
    with tf.variable_scope('Loss'):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output_logits), name='loss')
        tf.summary.scalar('loss', loss)
    with tf.variable_scope('Optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=lr, name='Adam-op').minimize(loss)
    with tf.variable_scope('Accuracy'):
        correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name='correct_pred')
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
        tf.summary.scalar('accuracy', accuracy)
    with tf.variable_scope('Prediction'):
        cls_prediction = tf.argmax(output_logits, axis=1, name='predictions')
```

## Initialize all variables and merge the summaries
```
# Initialize the variables
init = tf.global_variables_initializer()
# Merge all summaries
merged = tf.summary.merge_all()
```

# Train
```
sess = tf.InteractiveSession()
sess.run(init)
global_step = 0
summary_writer = tf.summary.FileWriter(logs_path, sess.graph)
# Number of training iterations in each epoch
num_tr_iter = int(len(y_train) / batch_size)
for epoch in range(epochs):
    print('Training epoch: {}'.format(epoch + 1))
    # Reshuffle the training set at the start of every epoch.
    x_train, y_train = randomize(x_train, y_train)
    for iteration in range(num_tr_iter):
        global_step += 1
        start = iteration * batch_size
        end = (iteration + 1) * batch_size
        x_batch, y_batch = get_next_batch(x_train, y_train, start, end)
        # Run optimization op (backprop)
        feed_dict_batch = {x: x_batch, y: y_batch}
        sess.run(optimizer, feed_dict=feed_dict_batch)
        if iteration % display_freq == 0:
            # Calculate and display the batch loss and accuracy
            loss_batch, acc_batch, summary_tr = sess.run([loss, accuracy, merged],
                                                         feed_dict=feed_dict_batch)
            summary_writer.add_summary(summary_tr, global_step)
            print("iter {0:3d}:\t Loss={1:.2f},\tTraining Accuracy={2:.01%}".
                  format(iteration, loss_batch, acc_batch))

    # Run validation after every epoch
    feed_dict_valid = {x: x_valid, y: y_valid}
    loss_valid, acc_valid = sess.run([loss, accuracy], feed_dict=feed_dict_valid)
    print('---------------------------------------------------------')
    print("Epoch: {0}, validation loss: {1:.2f}, validation accuracy: {2:.01%}".
          format(epoch + 1, loss_valid, acc_valid))
    print('---------------------------------------------------------')
```

# Test
```
def plot_images(images, cls_true, cls_pred=None, title=None):
    """
    Create figure with 3x3 sub-plots.
    :param images: array of images to be plotted, (9, img_h*img_w)
    :param cls_true: corresponding true labels (9,)
    :param cls_pred: corresponding predicted labels (9,)
    """
    fig, axes = plt.subplots(3, 3, figsize=(9, 9))
    fig.subplots_adjust(hspace=0.3, wspace=0.3)
    for i, ax in enumerate(axes.flat):
        # Plot image.
        ax.imshow(np.squeeze(images[i]), cmap='binary')
        # Show true and predicted classes.
        if cls_pred is None:
            ax_title = "True: {0}".format(cls_true[i])
        else:
            ax_title = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])
        ax.set_title(ax_title)
        # Remove ticks from the plot.
        ax.set_xticks([])
        ax.set_yticks([])
    if title:
        plt.suptitle(title, size=20)
    plt.show(block=False)


def plot_example_errors(images, cls_true, cls_pred, title=None):
    """
    Function for plotting examples of images that have been mis-classified
    :param images: array of all images, (#imgs, img_h*img_w)
    :param cls_true: corresponding true labels, (#imgs,)
    :param cls_pred: corresponding predicted labels, (#imgs,)
    """
    # NOTE(review): this function is truncated in this chunk of the export;
    # its remainder lies beyond the visible source.
    # Negate the boolean array.
    incorrect = np.logical_not(np.equal(cls_pred, cls_true))
    # Get the images from the test-set that have been
    # incorrectly classified.
    incorrect_images = images[incorrect]
    # Get the true and predicted classes for those images.
    cls_pred = cls_pred[incorrect]
    cls_true = cls_true[incorrect]
    # Plot the first 9 images.
plot_images(images=incorrect_images[0:9], cls_true=cls_true[0:9], cls_pred=cls_pred[0:9], title=title) # Test the network when training is done x_test, y_test = load_data(mode='test') feed_dict_test = {x: x_test, y: y_test} loss_test, acc_test = sess.run([loss, accuracy], feed_dict=feed_dict_test) print('---------------------------------------------------------') print("Test loss: {0:.2f}, test accuracy: {1:.01%}".format(loss_test, acc_test)) print('---------------------------------------------------------') # Plot some of the correct and misclassified examples cls_pred = sess.run(cls_prediction, feed_dict=feed_dict_test) cls_true = np.argmax(y_test, axis=1) plot_images(x_test, cls_true, cls_pred, title='Correct Examples') plot_example_errors(x_test, cls_true, cls_pred, title='Misclassified Examples') plt.show() # close the session after you are done with testing sess.close() ``` At this step our coding is done. We can inspect more in our network using the Tensorboard open your terminal and move inside the notebookz folder in my case *C:\Dev\UpdateConference2019\notebooks* and type: ``` tensorboard --logdir=logs --host localhost ```
github_jupyter
# Is it reasonable to consider only largest mergers among many? Multiple mergers can occur at a time. Especially when merger takes long, another merger can begin before one ends. Then the effect of a merger can't be separated. In such case, I take the largest merger only. But how reliable is it? This script shows distribution of mergers and their mass ratio. 1) Multiple mergers at a snapshot are rare. 2) Multiple major mergers are even rarer. 3) Merger time overlap sometimes happen. 4) Merger time overlap among major ones are rare. 5) When the measurement window (~ 5-10 snapshots before and after) is considered, many more overlap will be added. !! The figure can be pickled and shown later quickly!. ``` def get_merger_info(main, atree, sat_root_idx, dist_gal_scale_in=1.0, dist_gal_scale_out=2.0): """ Returns merger mass ratio and beginning of the merger. nout_init_this_merger, mass_this_merger = get_merger_info() Assumes """ satellite = ctu.extract_main_tree(atree, sat_root_idx, no_subset=True) nout_min = max([min(main['nout']), min(satellite['nout'])]) i_main_ok = (main['nout'] >= nout_min) * (main['nout'] <= max(satellite["nout"])) i_sat_ok = (satellite['nout'] >= nout_min) satellite = satellite[i_sat_ok] # distances at all valid nouts. dd = np.sqrt(np.square(main["x"][i_main_ok] - satellite['x']) \ + np.square(main["y"][i_main_ok] - satellite['y']) \ + np.square(main["z"][i_main_ok] - satellite['z'])) * 1e3 rgal_tot = (main['rvir'][i_main_ok] + satellite['rvir']) #print(" Galaxy sizes : main {}, and the second {}, and the sum {}".format( # main['r'][i_main_ok], satellite['r'], rgal_tot)) #print(" dd :", dd) if sum(dist_gal_scale_in * rgal_tot > dd) > 0: # First close encounter is technically the beginning of merger, # but in practice that could be merely a flyby, # and whether they will merger soon or not is not known. # I can't call an encounter a merger if the encounter will end up merging in 100Gyrs. 
#nout_init_this = min(satellite['nout'][dist_gal_scale * rgal_tot < dd]) # First try # shouldn't go out 2Rgal. i_dist_bad = np.where(dist_gal_scale_out * rgal_tot < dd)[0] i_dist_ok = np.where(dist_gal_scale_in * rgal_tot > dd)[0] if len(i_dist_bad) > 0: i_dist_bad_last = min(i_dist_bad) i_dist_final = i_dist_ok[i_dist_ok < i_dist_bad_last] else: i_dist_final = i_dist_ok if len(i_dist_final) > 0: nout_init_this = satellite['nout'][min(i_dist_final)] # Second try mass_this = satellite['m'][satellite['nout'] == nout_init_this].squeeze() else: nout_init_this = -1 mass_this = 0 else: nout_init_this = -1 mass_this = 0 return nout_init_this, mass_this def find_all_meger(alltrees, idx_all, nout_ini=37, dist_gal_scale=2, min_mass_ratio = 0.01, verbose=False, do_plot = False): """ Parameters ---------- dist_gal_scale if two galaxies are closer than dist_gal_scale * (sum of raidus of the two), that epoch is the nout_init_merger. nout_ini blabla """ gal_list=[] mr_list=[] nout_list=[] nout_ini_list=[] # initial time when two halos(Galaxy stellar components in this case) overlap. 
for idx in idx_all: # full tree of a galaxy atree = ctu.extract_a_tree(alltrees.data, idx) # main progenitor tree main = ctu.extract_main_tree(atree, idx) x_nout = main['nout'].flatten() i_nout_ok = x_nout > nout_ini main = main[i_nout_ok] #x_nout = x_nout[i_nout_ok] pos = np.zeros((3,len(main))) pos[0,:] = main['x'] pos[1,:] = main['y'] pos[2,:] = main['z'] mass_ratios_this = []#np.zeros(len(main)) nout_inits_this = []#np.zeros(len(main)) nout_list_this = [] for i, nout in enumerate(main['nout']): # merger ratio i_prgs = np.where(atree['desc_id'] == main['id'][i])[0] #print(" {} Progenitors at nout = {}".format(len(i_prgs), nout)) # multiple prgs = merger if len(i_prgs) > 1: #if verbose: #print("{} {} Progenitors at nout = {}".format(idx, len(i_prgs), nout)) id_prgs = atree['id'][i_prgs] mass_prgs = atree['m'][i_prgs] m_r = mass_prgs / max(mass_prgs) sats = id_prgs[mass_prgs < max(mass_prgs)] mass_ratios_now=[] nout_inits_now=[] for this_sat in sats: n_i_t, mass_this_sat = get_merger_info(main, atree, this_sat, dist_gal_scale_in=dist_gal_scale, dist_gal_scale_out = 3.0) mass_ratio = mass_this_sat / max(mass_prgs) if mass_ratio > min_mass_ratio: nout_inits_now.append(n_i_t) mass_ratios_now.append(1./mass_ratio) nout_list_this.append(nout) nout_inits_this.append(nout_inits_now) mass_ratios_this.append(mass_ratios_now) #mr = 1./mass_ratios gal_list.append(idx) nout_list.append(nout_list_this) mr_list.append(mass_ratios_this) nout_ini_list.append(nout_inits_this) return gal_list, mr_list, nout_list, nout_ini_list import tree.ctutils as ctu import numpy as np from analysis.misc import load_cat # parameters used for lambda_arr clipping. 
ind_upper = 20 ind_lower = 20 sig_upper = 2.0 sig_lower = 2.0 nout_ini = 62 nout_fi = 187 verbose=True # In[4]: base = './' cdir = ['catalog/', 'easy/', 'catalog_GM/', "easy_final/"][3] cluster = ['05427', '05420', '29172', \ '29176', '10002', '36415', '06098', '39990', '36413','17891', '07206', '04466', '01605', '35663'][3]#[:-3] wdir = base + cluster + '/' alltrees = ctu.load_tree(wdir, is_gal=True) ad = alltrees.data tn = ad[ad['nout'] == nout_fi] cat = load_cat(wdir + cdir + 'catalog' + str(nout_fi) + '.pickle') #idx_all = [tn['id'][tn['Orig_halo_id'] == id_final][0] for id_final in cat['id']] idx_all = cat['idx'][cat["idx"] > 0].astype(int) # why idx are float??? gal_list, mr_list, nout_list, nout_init_list = \ find_all_meger(alltrees, idx_all, nout_ini=37, dist_gal_scale=1, min_mass_ratio = 0.001, verbose=False, do_plot = False) import matplotlib.pyplot as plt # simple scatter fig, ax = plt.subplots() for nout_list_this, mr_list_this in zip(nout_list, mr_list): for nout, mr in zip(nout_list_this, mr_list_this): for mm in mr: ax.scatter(nout, mm) try: ax.scatter(nout, mr[0], marker="*", color='r') except: #ax.scatter(nout, mr, marker="*", color='r') pass ax.set_yscale('log') ax.set_ylabel("Merger mass ratio") ax.set_xlabel("Nout") plt.show() import pickle # merging process overlap fig, ax = plt.subplots() # each galaxy for igal, (nout_init_this, nout_list_this, mr_list_this) in enumerate(zip(nout_init_list, nout_list, mr_list)): # each snapshot allnout_thisgal = [] allmm_thisgal = [] for nout_init, nout, mr in zip(nout_init_this, nout_list_this, mr_list_this): # each merger for ni, mm in zip(nout_init, mr): ax.plot([ni, nout], [mm,mm], 'y.-') #cm = ax.scatter(allnout_thisgal, igal * 10 + np.arange(len(allnout_thisgal)), # s=1e3/np.array(allmm_thisgal)) #c=np.array(allmm_thisgal), cmap="Greys", vmin=0, vmax=1e3) ax.set_ylabel("mergers (arbitrary value)") ax.set_xlabel("Nout") #plt.colorbar(cm) plt.show() # merging process overlap fig, ax = plt.subplots() # each 
galaxy for igal, (nout_init_this, nout_list_this, mr_list_this) in enumerate(zip(nout_init_list, nout_list, mr_list)): # each snapshot allnout_thisgal = [] allmm_thisgal = [] for nout_init, nout, mr in zip(nout_init_this, nout_list_this, mr_list_this): # each merger for ni, mm in zip(nout_init, mr): allnout_thisgal.append(nout) allnout_thisgal.append(ni) allmm_thisgal.append(mm) allmm_thisgal.append(1e3) # one marker per one merger, and minimize the other one. # ax.plot(allnout_thisgal, igal * 10 + np.arange(len(allnout_thisgal)), '-') cm = ax.scatter(allnout_thisgal, igal * 10 + np.arange(len(allnout_thisgal)), s=1e3/np.array(allmm_thisgal)) #c=np.array(allmm_thisgal), cmap="Greys", vmin=0, vmax=1e3) ax.set_ylabel("mergers (arbitrary value)") ax.set_xlabel("Nout") #plt.colorbar(cm) pickle.dump(plt.gcf(), open("merger_overlap_plot.pickle", "wb")) plt.show() ``` ### A figure can be saved! ``` # On a separate ipython kernel, the following will generate the same figure! import matplotlib.pyplot as plt import pickle fig = pickle.load(open("merger_overlap_plot.pickle", "rb")) plt.show(fig) ```
github_jupyter
``` import os import pandas as pd import matplotlib.pyplot as plt from keras.utils import np_utils from keras.models import Sequential from keras.callbacks import EarlyStopping, History, ModelCheckpoint from keras.layers.core import Flatten, Dense, Dropout, Reshape, Lambda from keras.layers.normalization import BatchNormalization from sklearn.preprocessing import LabelEncoder from keras.utils.np_utils import to_categorical from sklearn.metrics import log_loss from sklearn.model_selection import train_test_split import numpy as np train_features = np.load('train_features.npy') valid_features = np.load('valid_features.npy') train_dir = "new_train/" valid_dir = "new_valid/" classes = os.listdir(train_dir) # Get the labels train_labels = [] for c in classes: l = [c]*len(os.listdir(train_dir+c+'/')) train_labels.extend(l) valid_labels = [] for c in classes: l = [c]*len(os.listdir(valid_dir+c+'/')) valid_labels.extend(l) onehot_train = to_categorical(LabelEncoder().fit_transform(train_labels)) onehot_valid = to_categorical(LabelEncoder().fit_transform(valid_labels)) model = Sequential() model.add(Flatten(input_shape=train_features.shape[1:])) model.add(Dense(4096, activation='relu')) #model.add(BatchNormalization()) model.add(Dropout(0.2)) model.add(Dense(512, activation='relu')) #model.add(BatchNormalization()) model.add(Dropout(0.3)) model.add(Dense(8, activation='softmax')) model.compile(optimizer="adam",loss="categorical_crossentropy",metrics =["accuracy"]) callbacks = EarlyStopping(monitor='val_loss', patience=1, verbose=1, mode='auto') # autosave best Model best_model_file = "./batch_normalized_dropout_weights.h5" best_model = ModelCheckpoint(best_model_file, monitor='val_acc', verbose = 1, save_best_only = True) history = model.fit(train_features, onehot_train, batch_size=10, nb_epoch=10, validation_data=(valid_features,onehot_valid),shuffle=True,callbacks = [callbacks,best_model]) model.load_weights("fully_connected_dropout_weights.h5") # summarize history for 
accuracy plt.figure(figsize=(15, 5)) plt.subplot(1, 2, 1) plt.plot(history.history['acc']); plt.plot(history.history['val_acc']); plt.title('model accuracy'); plt.ylabel('accuracy'); plt.xlabel('epoch'); plt.legend(['train', 'valid'], loc='upper left'); # summarize history for loss plt.subplot(1, 2, 2) plt.plot(history.history['loss']); plt.plot(history.history['val_loss']); plt.title('model loss'); plt.ylabel('loss'); plt.xlabel('epoch'); plt.legend(['train', 'valid'], loc='upper left'); plt.show() test_features = np.load("test_features.npy") test_preds = model.predict_proba(test_features, verbose=1) test_preds[0:5] submission1 = pd.DataFrame(test_preds, columns= os.listdir(train_dir)) test_files = os.listdir("test_stg1/test_stg1/") submission1.insert(0, 'image', test_files) submission1.head() clipped_preds = np.clip(test_preds,(1-0.82)/7,0.82) submission2 = pd.DataFrame(clipped_preds, columns= os.listdir("train/train/")) submission2.insert(0, 'image', test_files) submission2.head() submission2.to_csv("fully_connected_dropout.csv",index = False) ```
github_jupyter
``` import pandas as pd import json import requests import numpy as np ``` # Points intersecction Procedure definition: ```sql DROP FUNCTION get_aqpoints(weights_raw TEXT, points_array TEXT); DROP TYPE weight_intersection; CREATE TYPE weight_intersection AS (points_id numeric, basinid numeric, the_geom geometry, water_risk numeric, water_stress numeric, interannual_variability numeric, seasonal_variability numeric, flood_occurrence numeric, drought_severity numeric, upstream_storage numeric, groundwater_stress numeric, return_flow_ratio numeric, upstream_protected_land numeric, media_coverage numeric, access_to_water numeric, threatened_amphibians numeric); CREATE OR REPLACE FUNCTION get_aqpoints(weights_raw TEXT, points_array TEXT) RETURNS SETOF weight_intersection as $$ DECLARE query1 TEXT; weights TEXT; weights_sum TEXT; BEGIN query1:='select array_to_string(array_agg(col::text ||''*''|| weights::text), ''+ '') as weights, sum(weights)::text as weights_sum from (select unnest(Array'|| weights_raw||') as weights, unnest(array[''bws_s'', ''wsv_s'', ''sv_s'', ''hfo_s'', ''dro_s'', ''stor_s'', ''gw_s'', ''wri_s'', ''eco_s_s'', ''mc_s'', ''wcg_s'', ''eco_v_s'']) as col) as tables where weights is not null'; EXECUTE query1 into weights, weights_sum; RETURN query EXECUTE 'with points as (SELECT st_setsrid(st_geomfromtext(points),4326) as the_geom, row_number() over () as points_id FROM unnest(Array'|| points_array ||') as points), ranges as (select basinid, average, min(average) over (), max(average) over (), bws_s, wsv_s, sv_s, hfo_s, dro_s, stor_s, gw_s, wri_s, eco_s_s, mc_s, wcg_s, eco_v_s from (SELECT basinid, ('|| weights ||')/('|| weights_sum ||') as average, bws_s, wsv_s, sv_s, hfo_s, dro_s, stor_s, gw_s, wri_s, eco_s_s, mc_s, wcg_s, eco_v_s FROM water_risk_weights) initial) select points_id::numeric, ranges.basinid::numeric, points.the_geom, (((average-min)/(max-min))*5)::numeric as water_risk, bws_s::numeric as baseline_water_stress, wsv_s::numeric as 
interannual_variability, sv_s::numeric as seasonal_variability, hfo_s::numeric as flood_occurrence, dro_s::numeric as drought_severity, stor_s::numeric as upstream_storage, gw_s::numeric as groundwater_stress, wri_s::numeric as return_flow_ratio, eco_s_s::numeric as upstream_protected_land, mc_s::numeric as media_coverage, wcg_s::numeric as access_to_water, eco_v_s::numeric as threatened_amphibians from ranges inner join wri_subcatchements on ranges.basinid=wri_subcatchements.basinid right join points on st_intersects(wri_subcatchements.the_geom, points.the_geom)'; END $$ language 'plpgsql'; ``` Sql Example: ```sql Select * from get_aqpoints('[4, 1, 0.5, 1, 1, 2, 2, 1, 0.5, 1, 2, 0.5]','[''POINT(84.8085584935 -14.20067639)'', ''POINT(54.0392656274 -70.8898132233)'', ''POINT(-28.5832686897 -7.71134965117)'', ''POINT(47.9458596199 82.5599787066)'', ''POINT(50.8126903314 -46.0154993389)'', ''POINT(-62.5229253542 -9.68983337791)'', ''POINT(-34.7977234627 84.9984574252)'', ''POINT(-80.1102876685 -33.9286081419)'', ''POINT(22.6686500117 -85.6713992254)'', ''POINT(44.359802466 -27.7294728889)'']') ``` Sql Template: ``` sql Select * from get_aqpoints({weights_scheeme},{points array}) ``` ``` payload = {'q': "Select * from get_aqpoints('[4, 1, 0.5, 1, 1, 2, 2, 1, 0.5, 1, 2, 0.5]','[''POINT(84.8085584935 -14.20067639)'', ''POINT(54.0392656274 -70.8898132233)'', ''POINT(-28.5832686897 -7.71134965117)'', ''POINT(47.9458596199 82.5599787066)'', ''POINT(50.8126903314 -46.0154993389)'', ''POINT(-62.5229253542 -9.68983337791)'', ''POINT(-34.7977234627 84.9984574252)'', ''POINT(-80.1102876685 -33.9286081419)'', ''POINT(22.6686500117 -85.6713992254)'', ''POINT(44.359802466 -27.7294728889)'']')"} r = requests.get('https://wri-01.carto.com/api/v2/sql', params=payload) tableStructure= pd.read_json(json.dumps(r.json()['rows']), orient='records') tableStructure.head(0) ``` # Anexo: Points stress test test points; 100; 500; 1000; 100000; ``` t = 180 * np.random.rand(1000000,2) - 90 ``` 
'POINT(-72.0 42.2)' ``` example1=[] for point in t: example1.append('\'\'POINT(' +str(point[0])+ ' ' + str(point[1]) +')\'\'') t10 = '['+', '.join(example1[:10])+']' t100 = '['+', '.join(example1[:100])+']' t1000 = '['+', '.join(example1[:1000])+']' t10000 = '['+', '.join(example1[:10000])+']' print(t10) print(len(t10)) print(len(t100)) print(len(t1000)) print(len(t10000)) payload = {'q': ""} payload['q']="Select * from get_aqpoints('[4, 1, 0.5, 1, 1, 2, 2, 1, 0.5, 1, 2, 0.5]',\'"+ t10 +"\')" r = requests.get('https://wri-01.carto.com/api/v2/sql', params=payload) tableStructure= pd.read_json(json.dumps(r.json()['rows']), orient='records') tableStructure.head(2) payload = {'q': ""} payload['q']="Select * from get_aqpoints('[4, 1, 0.5, 1, 1, 2, 2, 1, 0.5, 1, 2, 0.5]',\'"+ t100 +"\')" r = requests.get('https://wri-01.carto.com/api/v2/sql', params=payload) tableStructure= pd.read_json(json.dumps(r.json()['rows']), orient='records') tableStructure.head(0) payload = {'q': ""} payload['q']="Select * from get_aqpoints('[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]','[''Point(72.421875, 56.559482)'', ''Point(18.6328125, 10.8333059)'', ''Point(-109.6875, 42.03297)'']')" r = requests.post('https://wri-01.carto.com/api/v2/sql', data=payload) if r.status_code != 200: issue = json.loads(r.text) print(issue) else: tableStructure= pd.read_json(json.dumps(r.json()['rows']), orient='records') tableStructure.head(10) ```
github_jupyter
# Calculating the Bilingual Evaluation Understudy (BLEU) score: Ungraded Lab In this ungraded lab, we will implement a popular metric for evaluating the quality of machine-translated text: the BLEU score proposed by Kishore Papineni, et al. In their 2002 paper ["BLEU: a Method for Automatic Evaluation of Machine Translation"](https://www.aclweb.org/anthology/P02-1040.pdf), the BLEU score works by comparing "candidate" text to one or more "reference" translations. The result is better the closer the score is to 1. Let's see how to get this value in the following sections. # Part 1: BLEU Score ## 1.1 Importing the Libraries We will first start by importing the Python libraries we will use in the first part of this lab. For learning, we will implement our own version of the BLEU Score using Numpy. To verify that our implementation is correct, we will compare our results with those generated by the [SacreBLEU library](https://github.com/mjpost/sacrebleu). This package provides hassle-free computation of shareable, comparable, and reproducible BLEU scores. It also knows all the standard test sets and handles downloading, processing, and tokenization. ``` %%capture !pip3 install sacrebleu %%capture !wget https://raw.githubusercontent.com/martin-fabbri/colab-notebooks/master/deeplearning.ai/nlp/datasets/wmt19_can.txt !wget https://raw.githubusercontent.com/martin-fabbri/colab-notebooks/master/deeplearning.ai/nlp/datasets/wmt19_ref.txt !wget https://raw.githubusercontent.com/martin-fabbri/colab-notebooks/master/deeplearning.ai/nlp/datasets/wmt19_src.txt import math from collections import Counter import matplotlib.pyplot as plt import nltk import numpy as np import sacrebleu from nltk.util import ngrams nltk.download("punkt") !pip list | grep "nltk\|sacrebleu" ``` ## 1.2 Defining the BLEU Score You have seen the formula for calculating the BLEU score in this week's lectures. 
More formally, we can express the BLEU score as: $$BLEU = BP\Bigl(\prod_{i=1}^{4}precision_i\Bigr)^{(1/4)}$$ with the Brevity Penalty and precision defined as: $$BP = min\Bigl(1, e^{(1-({ref}/{cand}))}\Bigr)$$ $$precision_i = \frac {\sum_{snt \in{cand}}\sum_{i\in{snt}}min\Bigl(m^{i}_{cand}, m^{i}_{ref}\Bigr)}{w^{i}_{t}}$$ where: * $m^{i}_{cand}$, is the count of i-gram in candidate matching the reference translation. * $m^{i}_{ref}$, is the count of i-gram in the reference translation. * $w^{i}_{t}$, is the total number of i-grams in candidate translation. ## 1.3 Explaining the BLEU score ### Brevity Penalty (example): ``` ref_length = np.ones(100) can_length = np.linspace(1.5, 0.5, 100) x = ref_length / can_length y = 1 - x y = np.exp(y) y = np.minimum(np.ones(y.shape), y) # Code for in order to make the plot fig, ax = plt.subplots(1) lines = ax.plot(x, y) ax.set( xlabel="Ratio of the length of the reference to the candidate text", ylabel="Brevity Penalty", ) plt.show() ``` The brevity penalty penalizes generated translations that are too short compared to the closest reference length with an exponential decay. The brevity penalty compensates for the fact that the BLEU score has no recall term. ### N-Gram Precision (example): ``` data = {"1-gram": 0.8, "2-gram": 0.7, "3-gram": 0.6, "4-gram": 0.5} names = list(data.keys()) values = list(data.values()) fig, ax = plt.subplots(1) bars = ax.bar(names, values) ax.set(ylabel="N-gram precision") plt.show() ``` The n-gram precision counts how many unigrams, bigrams, trigrams, and four-grams (i=1,...,4) match their n-gram counterpart in the reference translations. This term acts as a precision metric. Unigrams account for adequacy while longer n-grams account for fluency of the translation. To avoid overcounting, the n-gram counts are clipped to the maximal n-gram count occurring in the reference ($m_{n}^{ref}$). Typically precision shows exponential decay with the with the degree of the n-gram. 
### N-gram BLEU score (example): ``` data = {"1-gram": 0.8, "2-gram": 0.77, "3-gram": 0.74, "4-gram": 0.71} names = list(data.keys()) values = list(data.values()) fig, ax = plt.subplots(1) bars = ax.bar(names, values) ax.set(ylabel="Modified N-gram precision") plt.show() ``` When the n-gram precision is multiplied by the BP, then the exponential decay of n-grams is almost fully compensated. The BLEU score corresponds to a geometric average of this modified n-gram precision. ## 1.4 Example Calculations of the BLEU score In this example we will have a reference translation and 2 candidates translations. We will tokenize all sentences using the NLTK package introduced in Course 2 of this NLP specialization. ``` reference = "The NASA Opportunity rover is battling a massive dust storm on planet Mars." candidate_1 = "The Opportunity rover is combating a big sandstorm on planet Mars." candidate_2 = "A NASA rover is fighting a massive storm on planet Mars." tokenized_ref = nltk.word_tokenize(reference.lower()) tokenized_cand_1 = nltk.word_tokenize(candidate_1.lower()) tokenized_cand_2 = nltk.word_tokenize(candidate_2.lower()) print(f"{reference} -> {tokenized_ref}") print("\n") print(f"{candidate_1} -> {tokenized_cand_1}") print("\n") print(f"{candidate_2} -> {tokenized_cand_2}") ``` ### STEP 1: Computing the Brevity Penalty ``` def brevity_penalty(candidate, reference): ref_length = len(reference) can_length = len(candidate) # Brevity Penalty if ref_length < can_length: # if reference length is less than candidate length BP = 1 # set BP = 1 else: penalty = 1 - (ref_length / can_length) # else set BP=exp(1-(ref_length/can_length)) BP = np.exp(penalty) return BP ``` ### STEP 2: Computing the Precision ``` def clipped_precision(candidate, reference): """ Clipped precision function given a original and a machine translated sentences """ clipped_precision_score = [] for i in range(1, 5): ref_n_gram = Counter(ngrams(reference,i)) cand_n_gram = Counter(ngrams(candidate,i)) c = 
sum(cand_n_gram.values()) for j in cand_n_gram: # for every n-gram up to 4 in candidate text if j in ref_n_gram: # check if it is in the reference n-gram if cand_n_gram[j] > ref_n_gram[j]: # if the count of the candidate n-gram is bigger # than the corresponding count in the reference n-gram, cand_n_gram[j] = ref_n_gram[j] # then set the count of the candidate n-gram to be equal # to the reference n-gram else: cand_n_gram[j] = 0 # else set the candidate n-gram equal to zero clipped_precision_score.append(sum(cand_n_gram.values())/c) weights =[0.25]*4 s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, clipped_precision_score)) s = math.exp(math.fsum(s)) return s ``` ### STEP 3: Computing the BLEU score ``` def bleu_score(candidate, reference): BP = brevity_penalty(candidate, reference) precision = clipped_precision(candidate, reference) return BP * precision ``` ### STEP 4: Testing with our Example Reference and Candidates Sentences ``` print( "Results reference versus candidate 1 our own code BLEU: ", round(bleu_score(tokenized_cand_1, tokenized_ref) * 100, 1), ) print( "Results reference versus candidate 2 our own code BLEU: ", round(bleu_score(tokenized_cand_2, tokenized_ref) * 100, 1), ) ``` ### STEP 5: Comparing the Results from our Code with the SacreBLEU Library ``` print( "Results reference versus candidate 1 sacrebleu library BLEU: ", round(sacrebleu.corpus_bleu(candidate_1, reference).score, 1), ) print( "Results reference versus candidate 2 sacrebleu library BLEU: ", round(sacrebleu.corpus_bleu(candidate_2, reference).score, 1), ) ``` # Part 2: BLEU computation on a corpus ## Loading Data Sets for Evaluation Using the BLEU Score In this section, we will show a simple pipeline for evaluating machine translated text. Due to storage and speed constraints, we will not be using our own model in this lab (you'll get to do that in the assignment!). 
Instead, we will be using [Google Translate](https://translate.google.com) to generate English to German translations and we will evaluate it against a known evaluation set. There are three files we will need: 1. A source text in English. In this lab, we will use the first 1671 words of the [wmt19](http://statmt.org/wmt19/translation-task.html) evaluation dataset downloaded via SacreBLEU. We just grabbed a subset because of limitations in the number of words that can be translated using Google Translate. 2. A reference translation to German of the corresponding first 1671 words from the original English text. This is also provided by SacreBLEU. 3. A candidate machine translation to German from the same 1671 words. This is generated by feeding the source text to a machine translation model. As mentioned above, we will use Google Translate to generate the translations in this file. With that, we can now compare the reference an candidate translation to get the BLEU Score. ``` # Loading the raw data wmt19_src = open("wmt19_src.txt", "rU") wmt19_src_1 = wmt19_src.read() wmt19_src.close() wmt19_ref = open("wmt19_ref.txt", "rU") wmt19_ref_1 = wmt19_ref.read() wmt19_ref.close() wmt19_can = open("wmt19_can.txt", "rU") wmt19_can_1 = wmt19_can.read() wmt19_can.close() tokenized_corpus_src = nltk.word_tokenize(wmt19_src_1.lower()) tokenized_corpus_ref = nltk.word_tokenize(wmt19_ref_1.lower()) tokenized_corpus_cand = nltk.word_tokenize(wmt19_can_1.lower()) print("English source text:") print("\n") print(f"{wmt19_src_1[0:170]} -> {tokenized_corpus_src[0:30]}") print("\n") print("German reference translation:") print("\n") print(f"{wmt19_ref_1[0:219]} -> {tokenized_corpus_ref[0:35]}") print("\n") print("German machine translation:") print("\n") print(f"{wmt19_can_1[0:199]} -> {tokenized_corpus_cand[0:29]}") print( "Results reference versus candidate 1 our own BLEU implementation: ", round(bleu_score(tokenized_corpus_cand, tokenized_corpus_ref) * 100, 1), ) print( "Results 
reference versus candidate 1 sacrebleu library BLEU: ", round(sacrebleu.corpus_bleu(wmt19_can_1, wmt19_ref_1).score, 1), ) ``` **BLEU Score Interpretation on a Corpus** |Score | Interpretation | |:---------:|:-------------------------------------------------------------:| | < 10 | Almost useless | | 10 - 19 | Hard to get the gist | | 20 - 29 | The gist is clear, but has significant grammatical errors | | 30 - 40 | Understandable to good translations | | 40 - 50 | High quality translations | | 50 - 60 | Very high quality, adequate, and fluent translations | | > 60 | Quality often better than human | From the table above (taken [here](https://cloud.google.com/translate/automl/docs/evaluate)), we can see the translation is high quality (*if you see "Hard to get the gist", please open your workspace, delete `wmt19_can.txt` and get the latest version via the Lab Help button*). Moreover, the results of our coded BLEU score are almost identical to those of the SacreBLEU package.
github_jupyter
# Lab 2: Inference in Graphical Models ### Machine Learning 2, 2016 * The lab exercises should be made in groups of two people. * The deadline is Sunday, April 24, 23:59. * Assignment should be sent to t.s.cohen at uva dot nl (Taco Cohen). The subject line of your email should be "[ML2_2016] lab#_lastname1\_lastname2". * Put your and your teammate's names in the body of the email * Attach the .IPYNB (IPython Notebook) file containing your code and answers. Naming of the file follows the same rule as the subject line. Notes on implementation: * You should write your code and answers in an IPython Notebook: http://ipython.org/notebook.html. If you have problems, please contact us. * Among the first lines of your notebook should be "%pylab inline". This imports all required modules, and your plots will appear inline. * NOTE: test your code and make sure we can run your notebook / scripts! ### Introduction In this assignment, we will implement the sum-product and max-sum algorithms for factor graphs over discrete variables. The relevant theory is covered in chapter 8 of Bishop's PRML book, in particular section 8.4. Read this chapter carefully before continuing! We will first implement sum-product and max-sum and apply it to a simple poly-tree structured factor graph for medical diagnosis. Then, we will implement a loopy version of the algorithms and use it for image denoising. For this assignment we recommend you stick to numpy ndarrays (constructed with np.array, np.zeros, np.ones, etc.) as opposed to numpy matrices, because arrays can be n-dimensional whereas matrices only work for 2d arrays. We need n-dimensional arrays in order to store conditional distributions with more than 1 conditioning variable. If you want to perform matrix multiplication on arrays, use the np.dot function; all infix operators including *, +, -, work element-wise on arrays.
## Part 1: The sum-product algorithm We will implement a datastructure to store a factor graph and to facilitate computations on this graph. Recall that a factor graph consists of two types of nodes, factors and variables. Below you will find some classes for these node types to get you started. Carefully inspect this code and make sure you understand what it does; you will have to build on it later. ``` %pylab inline np.set_printoptions(precision=4) class Node(object): """ Base-class for Nodes in a factor graph. Only instantiate sub-classes of Node. """ def __init__(self, name): # A name for this Node, for printing purposes self.name = name # Neighbours in the graph, identified with their index in this list. # i.e. self.neighbours contains neighbour 0 through len(self.neighbours) - 1. self.neighbours = [] # Reset the node-state (not the graph topology) self.reset() def reset(self): # Incoming messages; a dictionary mapping neighbours to messages. # That is, it maps Node -> np.ndarray. self.in_msgs = {} # A set of neighbours for which this node has pending messages. # We use a python set object so we don't have to worry about duplicates. self.pending = set([]) def add_neighbour(self, nb): self.neighbours.append(nb) def send_sp_msg(self, other): # To be implemented in subclass. raise Exception('Method send_sp_msg not implemented in base-class Node') def send_ms_msg(self, other): # To be implemented in subclass. raise Exception('Method send_ms_msg not implemented in base-class Node') def receive_msg(self, other, msg): # Store the incomming message, replacing previous messages from the same node self.in_msgs[other] = msg # TODO: add pending messages # self.pending.update(...) def __str__(self): # This is printed when using 'print node_instance' return self.name class Variable(Node): def __init__(self, name, num_states): """ Variable node constructor. Args: name: a name string for this node. Used for printing. num_states: the number of states this variable can take. 
Allowable states run from 0 through (num_states - 1). For example, for a binary variable num_states=2, and the allowable states are 0, 1. """ self.num_states = num_states # Call the base-class constructor super(Variable, self).__init__(name) def set_observed(self, observed_state): """ Set this variable to an observed state. Args: observed_state: an integer value in [0, self.num_states - 1]. """ # Observed state is represented as a 1-of-N variable # Could be 0.0 for sum-product, but log(0.0) = -inf so a tiny value is preferable for max-sum self.observed_state[:] = 0.000001 self.observed_state[observed_state] = 1.0 def set_latent(self): """ Erase an observed state for this variable and consider it latent again. """ # No state is preferred, so set all entries of observed_state to 1.0 # Using this representation we need not differentiate between observed and latent # variables when sending messages. self.observed_state[:] = 1.0 def reset(self): super(Variable, self).reset() self.observed_state = np.ones(self.num_states) def marginal(self, Z=None): """ Compute the marginal distribution of this Variable. It is assumed that message passing has completed when this function is called. Args: Z: an optional normalization constant can be passed in. If None is passed, Z is computed. Returns: marginal, Z. The first is a numpy array containing the normalized marginal distribution. Z is either equal to the input Z, or computed in this function (if Z=None was passed). """ # TODO: compute marginal return None, Z def send_sp_msg(self, other): # TODO: implement Variable -> Factor message for sum-product pass def send_ms_msg(self, other): # TODO: implement Variable -> Factor message for max-sum pass class Factor(Node): def __init__(self, name, f, neighbours): """ Factor node constructor. Args: name: a name string for this node. Used for printing f: a numpy.ndarray with N axes, where N is the number of neighbours. 
That is, the axes of f correspond to variables, and the index along that axes corresponds to a value of that variable. Each axis of the array should have as many entries as the corresponding neighbour variable has states. neighbours: a list of neighbouring Variables. Bi-directional connections are created. """ # Call the base-class constructor super(Factor, self).__init__(name) assert len(neighbours) == f.ndim, 'Factor function f should accept as many arguments as this Factor node has neighbours' for nb_ind in range(len(neighbours)): nb = neighbours[nb_ind] assert f.shape[nb_ind] == nb.num_states, 'The range of the factor function f is invalid for input %i %s' % (nb_ind, nb.name) self.add_neighbour(nb) nb.add_neighbour(self) self.f = f def send_sp_msg(self, other): # TODO: implement Factor -> Variable message for sum-product pass def send_ms_msg(self, other): # TODO: implement Factor -> Variable message for max-sum pass ``` ### 1.1 Instantiate network (10 points) Convert the directed graphical model ("Bayesian Network") shown below to a factor graph. Instantiate this graph by creating Variable and Factor instances and linking them according to the graph structure. To instantiate the factor graph, first create the Variable nodes and then create Factor nodes, passing a list of neighbour Variables to each Factor. Use the following prior and conditional probabilities. 
$$ p(\verb+Influenza+) = 0.05 \\\\ p(\verb+Smokes+) = 0.2 \\\\ $$ $$ p(\verb+SoreThroat+ = 1 | \verb+Influenza+ = 1) = 0.3 \\\\ p(\verb+SoreThroat+ = 1 | \verb+Influenza+ = 0) = 0.001 \\\\ p(\verb+Fever+ = 1| \verb+Influenza+ = 1) = 0.9 \\\\ p(\verb+Fever+ = 1| \verb+Influenza+ = 0) = 0.05 \\\\ p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 1, \verb+Smokes+ = 1) = 0.99 \\\\ p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 1, \verb+Smokes+ = 0) = 0.9 \\\\ p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 0, \verb+Smokes+ = 1) = 0.7 \\\\ p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 0, \verb+Smokes+ = 0) = 0.0001 \\\\ p(\verb+Coughing+ = 1| \verb+Bronchitis+ = 1) = 0.8 \\\\ p(\verb+Coughing+ = 1| \verb+Bronchitis+ = 0) = 0.07 \\\\ p(\verb+Wheezing+ = 1| \verb+Bronchitis+ = 1) = 0.6 \\\\ p(\verb+Wheezing+ = 1| \verb+Bronchitis+ = 0) = 0.001 \\\\ $$ ``` from IPython.core.display import Image Image(filename='bn.png') # Variables def init_variables(): I = Variable('Influenza', 2) S = Variable('Smokes', 2) ST = Variable('SoreThroat', 2) F = Variable('Fever', 2) B = Variable('Bronchitits', 2) C = Variable('Coughing', 2) W = Variable('Wheezing', 2) return I, S, ST, F, B, C, W ``` $$ p(\verb+SoreThroat+ = 1 | \verb+Influenza+ = 1) = 0.3 \\\\ p(\verb+SoreThroat+ = 1 | \verb+Influenza+ = 0) = 0.001 $$ ``` # Factor nodes def init_f1(I, ST): # Order: I, ST f1_weights = np.empty((2, 2)) f1_weights[1, 1] = 0.3 f1_weights[0, 1] = 0.001 f1_weights[:, 0] = 1 - f1_weights[:, 1] f1 = Factor('f1', f1_weights, [I, ST]) return f1 ``` $$ p(\verb+Fever+ = 1| \verb+Influenza+ = 1) = 0.9 \\\\ p(\verb+Fever+ = 1| \verb+Influenza+ = 0) = 0.05 $$ ``` def init_f2(I, F): # Order: I, F f2_weights = np.empty((2, 2)) f2_weights[1, 1] = 0.9 f2_weights[0, 1] = 0.05 f2_weights[:, 0] = 1 - f2_weights[:, 1] f2 = Factor('f2', f2_weights, [I, F]) return f2 ``` $$ p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 1, \verb+Smokes+ = 1) = 0.99 \\\\ p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 1, \verb+Smokes+ = 0) = 0.9 \\\\ 
p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 0, \verb+Smokes+ = 1) = 0.7 \\\\ p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 0, \verb+Smokes+ = 0) = 0.0001 $$ ``` def init_f3(I, S, B): # Order: I, S, B f3_weights = np.empty((2, 2, 2)) f3_weights[1, 1, 1] = 0.99 f3_weights[1, 0, 1] = 0.9 f3_weights[0, 1, 1] = 0.7 f3_weights[0, 0, 1] = 0.0001 f3_weights[:, :, 0] = 1 - f3_weights[:, :, 1] f3 = Factor('f3', f3_weights, [I, S, B]) return f3 ``` $$ p(\verb+Coughing+ = 1| \verb+Bronchitis+ = 1) = 0.8 \\\\ p(\verb+Coughing+ = 1| \verb+Bronchitis+ = 0) = 0.07 $$ ``` def init_f4(B, C): # Order: B, C f4_weights = np.empty((2, 2)) f4_weights[1, 1] = 0.8 f4_weights[0, 1] = 0.07 f4_weights[:, 0] = 1 - f4_weights[:, 1] f4 = Factor('f4', f4_weights, [B, C]) return f4 ``` $$ p(\verb+Wheezing+ = 1| \verb+Bronchitis+ = 1) = 0.6 \\\\ p(\verb+Wheezing+ = 1| \verb+Bronchitis+ = 0) = 0.001 $$ ``` def init_f5(B, W): # Order: B, W f5_weights = np.empty((2, 2)) f5_weights[1, 1] = 0.6 f5_weights[0, 1] = 0.001 f5_weights[:, 0] = 1 - f5_weights[:, 1] f5 = Factor('f5', f5_weights, [B, W]) return f5 ``` $$ p(\verb+Smokes+) = 0.2 $$ ``` def init_f6(S): f6_weights = np.array([0.8, 0.2]) f6 = Factor('f6', f6_weights, [S]) return f6 ``` $$ p(\verb+Influenza+) = 0.05 $$ ``` def init_f7(I): f7_weights = np.array([0.95, 0.05]) f7 = Factor('f7', f7_weights, [I]) return f7 def check_params_before_sending(self, other): in_nodes = filter(lambda neighbour: neighbour != other, self.neighbours) # Checks if all the information required to pass a message is present for in_node in in_nodes: if in_node not in self.in_msgs: raise ValueError('Message from %s is missing for the factor %s' % (in_node, self)) # A list of incoming messages in_msgs = map(lambda in_node: self.in_msgs[in_node], in_nodes) return in_nodes, in_msgs Node.check_params_before_sending = check_params_before_sending ``` ### 1.2 Factor to variable messages (20 points) Write a method `send_sp_msg(self, other)` for the Factor class, that checks if all 
the information required to pass a message to Variable `other` is present, computes the message and sends it to `other`. "Sending" here simply means calling the `receive_msg` function of the receiving node (we will implement this later). The message itself should be represented as a numpy array (np.array) whose length is equal to the number of states of the variable. An elegant and efficient solution can be obtained using the n-way outer product of vectors. This product takes n vectors $\mathbf{x}^{(1)}, \ldots, \mathbf{x}^{(n)}$ and computes a $n$-dimensional tensor (ndarray) whose element $i_0,i_1,...,i_n$ is given by $\prod_j \mathbf{x}^{(j)}_{i_j}$. In python, this is realized as `np.multiply.reduce(np.ix_(*vectors))` for a python list `vectors` of 1D numpy arrays. Try to figure out how this statement works -- it contains some useful functional programming techniques. Another function that you may find useful in computing the message is `np.tensordot`. ``` def send_sp_msg(self, other): assert isinstance(other,Variable) if other not in self.neighbours: raise Exception('The specified node is not a neighbour') factor_indexes = range(len(self.neighbours)) factor_indexes.remove(self.neighbours.index(other)) message_indexes = range(len(factor_indexes)) mes = [] # extracting messages that are later used in computations for ne in self.neighbours: if ne==other: continue if ne not in self.in_msgs: raise Exception('Some messages are not received') mes.append(self.in_msgs[ne]) mes = np.tensordot(self.f,np.multiply.reduce(np.ix_(*mes)),axes=(factor_indexes,message_indexes)) # sending the message self.send_sp_msg_proc(other,mes) ``` ### 1.3 Variable to factor messages (10 points) Write a method `send_sp_message(self, other)` for the Variable class, that checks if all the information required to pass a message to Variable var is present, computes the message and sends it to factor. 
``` def variable_send_sp_msg(self, other): in_nodes, in_msgs = self.check_params_before_sending(other) # Already with observed_state required for 1.7 # If it's unobserved then self.observed_state == vector of 1 and we # will marginalize over all possible values of the particular variable # Otherwise, for example, if it's always 1, then self.observed_state == [0, 1] # and we will set all 0 state to 0 out_msg = self.observed_state * np.multiply.reduce(in_msgs) # Sends a message to other other.receive_msg(self, out_msg) self.pending.remove(other) Variable.send_sp_msg = variable_send_sp_msg ``` ### 1.4 Compute marginal (10 points) Later in this assignment, we will implement message passing schemes to do inference. Once the message passing has completed, we will want to compute local marginals for each variable. Write the method `marginal` for the Variable class, that computes a marginal distribution over that node. ``` def variable_marginal(self, Z=None): # Already with observed_state required for 1.7 marginal = self.observed_state * np.multiply.reduce(self.in_msgs.values()) if Z == None: Z = np.sum(marginal) marginal /= Z return marginal, Z Variable.marginal = variable_marginal ``` ### 1.5 Receiving messages (10 points) In order to implement the loopy and non-loopy message passing algorithms, we need some way to determine which nodes are ready to send messages to which neighbours. To do this in a way that works for both loopy and non-loopy algorithms, we make use of the concept of "pending messages", which is explained in Bishop (8.4.7): "we will say that a (variable or factor) node a has a message pending on its link to a node b if node a has received any message on any of its other links since the last time it send (sic) a message to b. Thus, when a node receives a message on one of its links, this creates pending messages on all of its other links." 
Keep in mind that for the non-loopy algorithm, nodes may not have received any messages on some or all of their links. Therefore, before we say node a has a pending message for node b, we must check that node a has received all messages needed to compute the message that is to be sent to b. Modify the function `receive_msg`, so that it updates the self.pending variable as described above. The member self.pending is a set that is to be filled with Nodes to which self has pending messages. Modify the `send_msg` functions to remove pending messages as they are sent.

```
def node_receive_msg(self, other, msg):
    """Store a message arriving from `other` and update pending links.

    A neighbour becomes pending only when this node holds enough
    incoming messages to compute an outgoing message for it:
      - if we already have a message FROM that neighbour, we can only
        (re)send to it once messages are present on ALL links;
      - otherwise, we can send as soon as messages are present on every
        link EXCEPT the target neighbour's own link.
    """
    # Store the incoming message, replacing previous messages from the same node
    self.in_msgs[other] = msg
    print '%s receives message from %s: %s' % (self, other, msg)
    # Consider every link except the one the message just arrived on.
    for neighbour in set(self.neighbours) - {other}:
        if neighbour in self.in_msgs:
            # Target already sent us a message: re-sending to it needs
            # messages on all links.
            # If received all messages from neighbours
            if len(self.in_msgs) == len(self.neighbours):
                self.pending.update([neighbour])
        elif len(self.in_msgs) == len(self.neighbours) - 1:
            # Target has not sent anything, but every OTHER link has a
            # message, which is exactly what the outgoing message needs.
            # If all incoming messages received
            self.pending.update([neighbour])
# Monkey-patch onto the base class so both Variables and Factors use it.
Node.receive_msg = node_receive_msg
```

### 1.6 Inference Engine (10 points)
Write a function `sum_product(node_list)` that runs the sum-product message passing algorithm on a tree-structured factor graph with given nodes. The input parameter `node_list` is a list of all Node instances in the graph, which is assumed to be ordered correctly. That is, the list starts with a leaf node, which can always send a message. Subsequent nodes in `node_list` should be capable of sending a message when the pending messages of preceding nodes in the list have been sent. The sum-product algorithm then proceeds by passing over the list from beginning to end, sending all pending messages at the nodes it encounters. Then, in reverse order, the algorithm traverses the list again and again sends all pending messages at each node as it is encountered.
For this to work, you must initialize pending messages for all the leaf nodes, e.g. `influenza_prior.pending.add(influenza)`, where `influenza_prior` is a Factor node corresponding the the prior, `influenza` is a Variable node and the only connection of `influenza_prior` goes to `influenza`. ``` def apply_algorithm(node_list, func): for node in node_list: for other in list(node.pending): func(node, other) def sum_product(node, other): node.send_sp_msg(other) def configure_experiment(): variables = init_variables() I, S, ST, F, B, C, W = variables f1 = init_f1(I, ST) f2 = init_f2(I, F) f3 = init_f3(I, S, B) f4 = init_f4(B, C) f5 = init_f5(B, W) f6 = init_f6(S) f7 = init_f7(I) f6.pending.update([S]) f7.pending.update([I]) ST.pending.update([f1]) F.pending.update([f2]) C.pending.update([f4]) W.pending.update([f5]) return (I, S, ST, F, B, C, W), (f1, f2, f3, f4, f5, f6, f7) def print_marginals(variables): for variable in variables: marginal, Z = variable.marginal(None) print variable, marginal variables, factors = configure_experiment() I, S, ST, F, B, C, W = variables f1, f2, f3, f4, f5, f6, f7 = factors node_list = [f6, f7, W, C, F, f4, f5, S, f2, B, f3, I, f1, ST] print 'Forward pass' apply_algorithm(node_list, sum_product) ST.pending.update([f1]) print 'Backward pass' apply_algorithm(reversed(node_list), sum_product) print_marginals(variables) ``` ### 1.7 Observed variables and probabilistic queries (15 points) We will now use the inference engine to answer probabilistic queries. That is, we will set certain variables to observed values, and obtain the marginals over latent variables. We have already provided functions `set_observed` and `set_latent` that manage a member of Variable called `observed_state`. Modify the `Variable.send_msg` and `Variable.marginal` routines that you wrote before, to use `observed_state` so as to get the required marginals when some nodes are observed. 
```
# Rebuild the graph, then condition on Bronchitis = 1 before running
# the same forward/backward schedule used in 1.6.
variables, factors = configure_experiment()
I, S, ST, F, B, C, W = variables
f1, f2, f3, f4, f5, f6, f7 = factors
B.set_observed(1)
node_list = [f6, f7, W, C, F, f4, f5, S, f2, B, f3, I, f1, ST]
print 'Forward pass'
apply_algorithm(node_list, sum_product)
# ST is the last node of the forward pass; re-arm its link so the
# backward sweep starts from it.
ST.pending.update([f1])
print 'Backward pass'
apply_algorithm(reversed(node_list), sum_product)
print_marginals(variables)
```

### 1.8 Sum-product and MAP states (5 points)

A maximum a posteriori state (MAP-state) is an assignment of all latent variables that maximizes the probability of latent variables given observed variables:
$$
\mathbf{x}_{\verb+MAP+} = \arg\max _{\mathbf{x}} p(\mathbf{x} | \mathbf{y})
$$
Could we use the sum-product algorithm to obtain a MAP state? If yes, how? If no, why not?

## Part 2: The max-sum algorithm
Next, we implement the max-sum algorithm as described in section 8.4.5 of Bishop.

### 2.1 Factor to variable messages (10 points)

Implement the function `Factor.send_ms_msg` that sends Factor -> Variable messages for the max-sum algorithm. It is analogous to the `Factor.send_sp_msg` function you implemented before.

```
def factor_send_ms_msg(self, other):
    """Max-sum Factor -> Variable message.

    Works in the log domain: sums the incoming log-messages into an
    outer-sum tensor, adds log(f), and maximizes out every axis except
    the one belonging to `other`.
    """
    in_nodes, in_msgs = self.check_params_before_sending(other)
    factor_indexes = range(len(self.neighbours))
    # Excludes an index of the "other" node
    factor_indexes.remove(self.neighbours.index(other))
    # Replacing sum by max, product by sum
    # np.ix_ builds an open mesh of the remaining messages (in neighbour
    # order); add.reduce broadcasts it into the outer-sum tensor, and
    # expand_dims re-inserts a singleton axis at `other`'s position so
    # the tensor aligns with the axes of self.f.
    out_msg = np.expand_dims(np.add.reduce(np.ix_(*in_msgs)), self.neighbours.index(other))
    # Maximize over all axes except other's, then drop the singleton axes.
    out_msg = np.squeeze(np.apply_over_axes(np.amax, np.log(self.f) + out_msg, factor_indexes))
    # Sends a message to other
    other.receive_msg(self, out_msg)
    self.pending.remove(other)
Factor.send_ms_msg = factor_send_ms_msg
```

### 2.2 Variable to factor messages (10 points)
Implement the `Variable.send_ms_msg` function that sends Variable -> Factor messages for the max-sum algorithm.
``` def variable_send_ms_msg(self, other): in_nodes, in_msgs = self.check_params_before_sending(other) out_msg = np.log(self.observed_state) if len(in_msgs) > 0: out_msg += np.add.reduce(in_msgs) # Sends a message to other other.receive_msg(self, out_msg) self.pending.remove(other) Variable.send_ms_msg = variable_send_ms_msg ``` ### 2.3 Find a MAP state (10 points) Using the same message passing schedule we used for sum-product, implement the max-sum algorithm. For simplicity, we will ignore issues relating to non-unique maxima. So there is no need to implement backtracking; the MAP state is obtained by a per-node maximization (eq. 8.98 in Bishop). Make sure your algorithm works with both latent and observed variables. ``` def map_state(self): # Returns 0 state or 1 return np.argmax(np.add.reduce(self.in_msgs.values()) + np.log(self.observed_state)) Variable.map_state = map_state def max_sum(node, other): node.send_ms_msg(other) def print_map_states(variables): for variable in variables: map_state = variable.map_state() print variable, map_state variables, factors = configure_experiment() I, S, ST, F, B, C, W = variables f1, f2, f3, f4, f5, f6, f7 = factors B.set_observed(1) node_list = [f6, f7, W, C, F, f4, f5, S, f2, B, f3, I, f1, ST] print 'Forward pass' apply_algorithm(node_list, max_sum) ST.pending.update([f1]) print 'Backward pass' apply_algorithm(reversed(node_list), max_sum) print_map_states(variables) ``` ## Part 3: Image Denoising and Loopy BP Next, we will use a loopy version of max-sum to perform denoising on a binary image. The model itself is discussed in Bishop 8.3.3, but we will use loopy max-sum instead of Iterative Conditional Modes as Bishop does. The following code creates some toy data. `im` is a quite large binary image, `test_im` is a smaller synthetic binary image. Noisy versions are also provided. 
``` from pylab import imread, gray # Load the image and binarize im = np.mean(imread('dalmatian1.png'), axis=2) > 0.5 imshow(im) gray() # Add some noise noise = np.random.rand(*im.shape) > 0.9 noise_im = np.logical_xor(noise, im) figure() imshow(noise_im) test_im = np.zeros((10,10)) #test_im[5:8, 3:8] = 1.0 #test_im[5,5] = 1.0 figure() imshow(test_im) # Add some noise noise = np.random.rand(*test_im.shape) > 0.9 noise_test_im = np.logical_xor(noise, test_im) figure() imshow(noise_test_im) show() ``` ### 3.1 Construct factor graph (10 points) Convert the Markov Random Field (Bishop, fig. 8.31) to a factor graph and instantiate it. ``` from itertools import product def create_factor_graph(img): Y = np.empty(img.shape, dtype='object') X = np.empty(img.shape, dtype='object') fYX = np.empty(img.shape, dtype='object') fXR = np.empty((img.shape[0] - 1, img.shape[1] - 1), dtype='object') fXB = np.empty((img.shape[0] - 1, img.shape[1] - 1), dtype='object') init_prob = np.array([[0.8, 0.2], [0.2, 0.8]]) for y, x in product(range(img.shape[0]), range(img.shape[1])): Y[y, x] = Variable('y(%d,%d)' % (x, y), 2) Y[y, x].set_observed(img[y, x]) X[y, x] = Variable('x(%d,%d)' % (x, y), 2) fYX[y, x] = Factor('fXY(%d,%d)' % (x, y), init_prob, [Y[y, x], X[y, x]]) Y[y, x].pending.update([fYX[y, x]]) one_msg = np.ones(2) for y, x in product(range(img.shape[0] - 1), range(img.shape[1] - 1)): fXR[y, x] = Factor('fXR(%d,%d)' % (x, y), init_prob, [X[y, x], X[y, x + 1]]) fXB[y, x] = Factor('fXB(%d,%d)' % (x, y), init_prob, [X[y, x], X[y + 1, x]]) # Flooding schedule, simultaneously passing a message across every link in both direction # Bishop 8.4.7 X[y, x].in_msgs[fXR[y, x]] = one_msg X[y, x].in_msgs[fXB[y, x]] = one_msg X[y, x + 1].in_msgs[fXR[y, x]] = one_msg X[y + 1, x].in_msgs[fXB[y, x]] = one_msg return Y, X, fYX, fXR, fXB ``` ### 3.2 Loopy max-sum (10 points) Implement the loopy max-sum algorithm, by passing messages from randomly chosen nodes iteratively until no more pending messages 
are created or a maximum number of iterations is reached. Think of a good way to initialize the messages in the graph. ``` def denoise(img, niter=10): Y, X, fYX, fXR, fXB = create_factor_graph(img) for i in range(niter): fXX = np.hstack((fXR.flatten(), fXB.flatten())) np.random.shuffle(fXX) # Preordered, first observed variables, then factors between observed variables and # corresponding laten variables, then all latent variables and then factors between # latents in the random order. node_list = np.hstack((Y.flatten(), fYX.flatten(), X.flatten(), fXX)).tolist() apply_algorithm(node_list, max_sum) result = np.zeros_like(img) for y, x in product(range(img.shape[0]), range(img.shape[1])): result[y, x] = X[y, x].map_state() return result imshow(denoise(noise_test_im)) show() imshow(denoise(noise_im, niter=10)) show() ```
github_jupyter
# Advanced Ray - Overview © 2019-2020, Anyscale. All Rights Reserved ![Anyscale Academy](../images/AnyscaleAcademyLogo.png) This tutorial, part of [Anyscale Academy](https://anyscale.com/academy), picks up where the [Ray Crash Course](../ray-crash-course/00-Ray-Crash-Course-Overview.ipynb) left off. It explores tasks and actors in more detail, including profiling and debugging applications, and it explains Ray's internal architecture. See the instructions in the [README](../README.md) for setting up your environment to use this tutorial. Go [here](../Overview.ipynb) for an overview of all tutorials. > **Tip:** Recall that the [Ray Package Reference](https://docs.ray.io/en/latest/package-ref.html) in the [Ray Docs](https://docs.ray.io/en/latest/) is useful for exploring the API features we'll learn. ## Join Us at Ray Summit 2020! Join us for the [_free_ Ray Summit 2020 virtual conference](https://events.linuxfoundation.org/ray-summit/?utm_source=dean&utm_medium=embed&utm_campaign=ray_summit&utm_content=anyscale_academy), September 30 - October 1, 2020. We have an amazing lineup of luminar keynote speakers and breakout sessions on the Ray ecosystem, third-party Ray libraries, and applications of Ray in the real world. ![Ray Summit 2020](../images/raysummit-horizontal-white-banner-full.png) ## Advanced Ray Tutorial Guide | # | Lesson (Notebook) | Description | | :- | :-------------------------------------------------------- | :---------------------------------------- | | 00 | [Overview](00-Advanced-Ray-Overview.ipynb) | A _table of contents_ for this tutorial. | | 01 | [Ray Tasks Revisited](01-Ray-Tasks-Revisited.ipynb) | More exploration of `ray.wait()` usage patterns, task dependencies and their management, and task profiling techniques. | | 02 | [Ray Actors Revisited](02-Ray-Actors-Revisited.ipynb) | A more in-depth look at actor characteristics and profiling actor performance using the _Ray Dashboard_. 
| | 03 | [Ray Internals](03-Ray-Internals.ipynb) | Explores the architecture of Ray, task scheduling, the Object Store, etc. | In addition, exercise solutions for this tutorial can be found [here](solutions/Advanced-Ray-Solutions.ipynb). ## Getting Help * The [#tutorial channel](https://ray-distributed.slack.com/archives/C011ML23W5B) on the [Ray Slack](https://ray-distributed.slack.com). [Click here](https://forms.gle/9TSdDYUgxYs8SA9e8) to join. * [Email](mailto:academy@anyscale.com) Find an issue? Please report it! * [GitHub issues](https://github.com/anyscale/academy/issues) #### Give Us Feedback! Let us know what you like and don't like about this reinforcement learning and Ray RLlib tutorial. * [Survey](https://forms.gle/PKYkFvrAf5M3jL3B8)
github_jupyter
# Tris ## Les plus simples Implanter les trois tris classiques suivants, selon les algorithmes vus en cours. **Remarque : en python on peut échanger deux variables comme ça ** ``` a = 5 b = 6 a,b = b,a print a print b def triSelection(t): """ Tri sélection Sélectionne un élément minimal à chaque itération Input : - t un tableau """ pass # écrire le code t = [] triSelection(t) assert(t == []) t = [5,4,3,2,1] triSelection(t) assert(t == [1,2,3,4,5]) triSelection(t) assert(t == [1,2,3,4,5]) t = [1,5,2,13,4,2,4,13,4,5,7,6] triSelection(t) assert(t == [1, 2, 2, 4, 4, 4, 5, 5, 6, 7, 13, 13]) def triBulles(t): """ Tri Bulles Parcours autant de fois que nécessaire les couples de valeurs adjacentes Input : - t un tableau """ pass # écrire le code t = [] triBulles(t) assert(t == []) t = [5,4,3,2,1] triBulles(t) assert(t == [1,2,3,4,5]) triBulles(t) assert(t == [1,2,3,4,5]) t = [1,5,2,13,4,2,4,13,4,5,7,6] triBulles(t) assert(t == [1, 2, 2, 4, 4, 4, 5, 5, 6, 7, 13, 13]) def triInsertion(t): """ Tri Insertion Insère dans la partie gauche triée du tableau Input : - t un tableau """ pass # écrire le code t = [] triInsertion(t) assert(t == []) t = [5,4,3,2,1] triInsertion(t) assert(t == [1,2,3,4,5]) triInsertion(t) assert(t == [1,2,3,4,5]) t = [1,5,2,13,4,2,4,13,4,5,7,6] triInsertion(t) assert(t == [1, 2, 2, 4, 4, 4, 5, 5, 6, 7, 13, 13]) ``` Implantez les fonctions qui suivent en reprenant les mêmes algrorithmes que précédement. Cette fois chaque fonction doit retourner 2 valeurs : nombre de comparaisons d'éléments du tableau et nombre d'écriture dans le tableau. **Remarque 1 : ** Les fonctions python peuvent retourner plusieurs valeurs en même temps sous la forme de ***tuples*** ``` def maFonction(): return 1,2 v = maFonction() print v print v[0] print v[1] ``` **Remarque 2 :** L'inversion de deux valeurs du tableau $a,b = b, a$ effectue **2 écritures** dans le tableau. 
**Remarque 3 :** Il se peut que votre algorithme soit légèrement différent du mien et que le comptage (et les tests) aussi. Si vous modifiez les tests, assurez-vous de bien comprendre ce que vous comptez ! ``` def comptageTriSelection(t): """ Tri sélection Sélectionne le minimum à chaque tour Input : - t un tableau Output : a,b avec a le nombre de comparaisons et b le nombre d'écritures """ pass # écrire le code t = [] assert(comptageTriSelection(t) == (0,0)) assert(t == []) t = [5,4,3,2,1] assert(comptageTriSelection(t) == (10,8)) assert(t == [1,2,3,4,5]) assert(comptageTriSelection(t) == (10,8)) assert(t == [1,2,3,4,5]) t = [1,5,2,13,4,2,4,13,4,5,7,6] assert(comptageTriSelection(t) == (66,22)) assert(t == [1, 2, 2, 4, 4, 4, 5, 5, 6, 7, 13, 13]) def comptageTriBulles(t): """ Tri Bulles Parcours autant de fois que nécessaire les couples de valeurs adjacentes Input : - t un tableau Output : a,b avec a le nombre de comparaisons et b le nombre d'écritures """ pass # écrire le code t = [] assert(comptageTriBulles(t) == (0,0)) assert(t == []) t = [5,4,3,2,1] assert(comptageTriBulles(t) == (10,20)) assert(t == [1,2,3,4,5]) assert(comptageTriBulles(t) == (4,0)) assert(t == [1,2,3,4,5]) t = [1,5,2,13,4,2,4,13,4,5,7,6] assert(comptageTriBulles(t) == (38,36)) assert(t == [1, 2, 2, 4, 4, 4, 5, 5, 6, 7, 13, 13]) def comptageTriInsertion(t): """ Tri Insertion Insère successivement les éléments dans la partie gauche triée du tableau Input : - t un tableau Output : a,b avec a le nombre de comparaisons et b le nombre d'écritures Remarque : pour la boucle interne, on comptera exactement une comparaison de plus que d'entrée dans la boucle """ pass # écrire le code t = [] assert(comptageTriInsertion(t) == (0,0)) assert(t == []) t = [5,4,3,2,1] assert(comptageTriInsertion(t) == (14,14)) assert(t == [1,2,3,4,5]) assert(comptageTriInsertion(t) == (4,4)) assert(t == [1,2,3,4,5]) t = [1,5,2,13,4,2,4,13,4,5,7,6] assert(comptageTriInsertion(t) == (29,29)) assert(t == [1, 2, 2, 4, 4, 4, 5, 
5, 6, 7, 13, 13]) ``` ## Le tri rapide Commencez par implanter une fonction de pivot. ``` def pivot(t, deb, fin): """ Pivot sur un intervalle [deb, fin[ d'un tableau Agit sur les valeurs entre l'indice deb (inclus) et fin (exclus) Choisit la première valeur t[deb] du tableau comme pivot et réorganise le tableau telle que toutes les valeurs plus petites ou égales à t[deb] soit à gauche et toutes les valeurs plus grandes à droite. Input : - t un tableau - deb, l'indice de départ (inclus) - fin, l'indice de fin (exclus) Output : l'indice final du pivot """ pass # écrire le code t = [1,2,3,4,5] assert(pivot(t,0,5) == 0) assert(all(t[i] > t[0] for i in xrange(1,5))) t = [5,4,3,2,1] assert(pivot(t,0,5) == 4) assert(all(t[i] <= t[4] for i in xrange(4))) t = [4,5,2,13,4,2,4,13,4,5,7,6] assert(pivot(t,0,len(t)) == 5) assert(all(t[i] <= t[5] for i in xrange(5))) assert(all(t[i] > t[5] for i in xrange(6,len(t)))) ``` Implanter l'algorithme de tri rapide vu en classe en utilisant votre fonction de pivot ``` def triRapide(t,deb,fin): """ Tri Rapide Tri récursif reposant sur la fonction pivot Input : - t un tableau - deb l'indice de départ (inclus) - fin l'indice de fin (exclus) """ pass # écrire le code t = [] triRapide(t,0, len(t)) assert(t == []) t = [5,4,3,2,1] triRapide(t, 0, len(t)) assert(t == [1,2,3,4,5]) triRapide(t,0,len(t)) assert(t == [1,2,3,4,5]) t = [1,5,2,13,4,2,4,13,4,5,7,6] triRapide(t,0,len(t)) assert(t == [1, 2, 2, 4, 4, 4, 5, 5, 6, 7, 13, 13]) ``` De la même façon que pour les autres tris, comptez les comparaisons / écritures effectuées par le tri. Pour la fonction pivot, vous devrez donc retourner 3 valeurs : position du pivot, nombre de comparaisons, nombre d'écritures. 
On peut le faire de cette manière ``` def maFonction(): return 1,2,3 a,b,c = maFonction() print a print b print c def comptagePivot(t, deb, fin): """ Pivot sur une partie de tableau Agit sur les valeurs entre l'indice deb (inclus) et fin (exclus) Choisit la première valeur t[deb] du tableau comme pivot et réorganise le tableau telle que toutes les valeurs plus petites ou égales à t[dev] soit à gauche et toutes les valeurs plus grandes à droite. Input : - t un tableau - deb, l'indice de départ (inclus) - fin, l'indice de fin (exclus) Output : l'indice final du pivot, le nombre de comparaisons, le nombre d'écritures """ pass # écrire le code # ces tests ont été écrits pour l'algo du pivot du cours, # si votre algorithme est différent, vous obtiendrez des valeurs différentes t = [1,2,3,4,5] assert(comptagePivot(t,0,5) == (0,6,2)) t = [5,4,3,2,1] assert(comptagePivot(t,0,5) == (4,6,2)) t = [4,5,2,13,4,2,4,13,4,5,7,6] assert(comptagePivot(t,0,len(t)) == (5,13,6)) def comptageTriRapide(t,deb,fin): """ Tri Rapide Tri récursif reposant sur la fonction pivot Input : - t un tableau - deb l'indice de départ (inclus) - fin l'indice de fin (exclus) Output : a,b avec a le nombre de comparaisons et b le nombre d'écritures """ pass # écrire le code t = [] assert(comptageTriRapide(t,0, len(t)) == (0,0)) t = [5,4,3,2,1] assert(comptageTriRapide(t,0, len(t)) == (18,8)) assert(t == [1,2,3,4,5]) assert(comptageTriRapide(t,0, len(t)) == (18,8)) t = [1,5,2,13,4,2,4,13,4,5,7,6] assert(comptageTriRapide(t,0, len(t)) == (50,22)) ``` ## Comparaisons de tris Écrire une fonction qui prend en paramètre un tableau, le tri selon les différentes méthodes et retourne la plus efficace en nombre de comparaisons. 
** Attention : les tris se font sur place, il faut donc copier le tableau orginal à chaque fois avant d'appliquer le tri pour pouvoir comparer les algorithmes** ``` # rappel syntaxe copier un tableau t = [1,2,3] tcopie = list(t) tcopie[0] = 10 print t print tcopie def meilleurTriComparaison(t): """ Meilleur tri -- comparaisons Lance chacun des tris sur une copie du tableau t en comptant les comparaisons Input : - t un tableau Output : le tri le plus efficace en terme de comparaisons : "Selection", "Bulles", "Insertion" ou "Rapide" """ pass # écrire le code t = [1,2,3,4,5] assert(meilleurTriComparaison(t) in {"Bulles", "Insertion"}) t = [2,3,4,5,6,7,8,9,1] assert(meilleurTriComparaison(t) == "Insertion") t = [12,16,5,2,14,13,15,7,1,19,3,9,10,18,6,20] assert(meilleurTriComparaison(t) == "Rapide") def meilleurTriEcriture(t): """ Meilleur tri -- écritures Lance chacun des tris sur une copie du tableau t en comptant les écritures Input : - t un tableau Output : le tri le plus efficace en terme d'écritures : "Selection", "Bulles", "Insertion" ou "Rapide" """ pass # écrire le code t = [1,2,3,4,5] assert(meilleurTriEcriture(t) == "Bulles") # éventuellement insertion t = [12,16,5,2,14,13,15,7,1,19,3,9,10,18,6,20] assert(meilleurTriEcriture(t) == "Selection") ``` On va maintenant pouvoir observer l'efficacité des tris sur des tirages aléatoires. La fonction suivante permet de générer un tableau aléatoire de taille n. Tirer des tableaux de différentes tailles et observer quels tris sont les plus efficaces avec vos fonctions précédentes. ``` def randomTableau(n): """ Retourne un tableau aléatoire de taille n (chaque valeur est choisie uniformément sur [[0,n]]) """ import random return [random.randint(0,n) for i in xrange(n)] t = randomTableau(1000) print meilleurTriComparaison(t) print meilleurTriEcriture(t) ``` Implanter une fonction qui retourne un histogramme sous forme de dictionnaire python des tris qui ont été plus efficaces. 
Un dictionnaire python est un objet où l'on peut associer des valeurs à des clés. Par exemple : ``` d = {} d["Rapide"] = 3 d["Insertion"] = 2 d["Selection"] = 1 d["Bulles"] = 0 d ``` Le but de la fonction est de créer un dictionnaire où chaque valeur représente le nombre de fois où ce tri a été le plus rapide. ``` def histogrammeComparaisons(n, m): """ Histogramme -- comparaisons Input : - n, la taille des tableaux aléatoires - m, le nombre de tirages à effectuer Output : un dictionnaire python qui représente l'histogramme des tris les plus efficaces sur les m tirages """ pass # écrire le code histogrammeComparaisons(10,100) # entre rapide et insertion histogrammeComparaisons(100,100) # que du rapide histogrammeComparaisons(1000,10) # que du rapide def histogrammeEcritures(n, m): """ Histogramme -- ecritures Input : - n, la taille des tableaux aléatoires - m, le nombre de tirages à effectuer Output : un dictionnaire python qui représente l'histogramme des tris les plus efficaces sur les m tirages """ pass # écrire le code histogrammeEcritures(10,100) # entre rapide et selection histogrammeEcritures(100,100) # que du selection histogrammeEcritures(1000,10) # que du selection ``` On va maintenant utiliser une autre fonction aléatoire qui, cette fois, crée des tableaux "presque" triés, avec un paramètre de mélange. ``` def randomTableauPresqueTrie(n, k): """ Retourne un tableau ou chaque valeur d'indice i est choisie uniformément entre i - k, i + k Input : - n la taille du tableau - k le rayon possible pour chaque valeur """ import random return [random.randint(i-k,i+k) for i in xrange(n)] t = randomTableauPresqueTrie(10,0) t t = randomTableauPresqueTrie(10,5) t t = randomTableauPresqueTrie(10,10) t t = randomTableauPresqueTrie(10,100) t t = randomTableauPresqueTrie(100,10) print meilleurTriComparaison(t) print meilleurTriEcriture(t) ``` Ecrivez de nouvelles fonctions histogrammes en utilisant cette fonction et observez la différence ! 
```
def histogrammeComparaisonsPresqueTrie(n, k, m):
    """
    Histogram -- comparisons

    Input :
    - n, the size of the random arrays
    - k, the number of random inversions
    - m, the number of draws to perform
    Output : a python dictionary representing the histogram of the most
    efficient sorts over the m draws
    """
    pass # write your code here

histogrammeComparaisonsPresqueTrie(10,3,100)
histogrammeComparaisonsPresqueTrie(100,10,100)
histogrammeComparaisonsPresqueTrie(100,30,100)
histogrammeComparaisonsPresqueTrie(100,50,100)
```

Cela se traduit-il sur le temps de calcul ? Exécuter le code ci-dessous et expérimenter.

```
t = randomTableau(100)
tcopie = list(t)
time triRapide(tcopie, 0, len(tcopie))
tcopie = list(t)
time triInsertion(tcopie)

t = randomTableauPresqueTrie(100,10)
tcopie = list(t)
time triRapide(tcopie, 0, len(tcopie))
tcopie = list(t)
time triInsertion(tcopie)
```

Qu'en est-il de la fonction *sort* de python ? Comparer les vitesses d'exécutions de vos algorithmes sur des tableaux aléatoires triés et presque triés avec la fonction *sort*

```
t = randomTableau(100)
tcopie = list(t)
time tcopie.sort()
time triRapide(t, 0, len(t))
```

# Autour de Quicksort

## k selection

On veut trouver l'élément médian d'un tableau de $n$ valeurs.

**Rappel:** le médian d'un ensemble de valeur est une valeur permettant de couper l'ensemble en deux parties égales: l'une contenant tous les éléments inférieur, et les autres les éléments supérieurs.

```
def kSelectionNaif(tab, k):
    """
    kSelectionNaif

    Naive selection without using the pivot function.
    Algorithm: find the largest element, remove it, iterate k times, then
    return the largest remaining element.

    NOTE(review): despite the original wording ("le kième plus petit
    élément"), this routine actually returns the (k+1)-th LARGEST value of
    tab — equivalently the (len(tab)-k)-th smallest. The comparison test
    below (`kSelection(t, 50) == kSelectionNaif(tcopy, 50)` on a 100-element
    array) only agrees with kSelection because k == len(t) // 2 there.
    Also note that tab is mutated in place: its k largest values are removed.

    Input:
    - tab an array
    - k a strictly positive integer
    """
    for i in range(k):
        m = max(tab)
        tab.remove(m)
    return max(tab)

def kSelection(tab, k):
    """
    kSelection

    Uses the pivot function to find the k-th element of an array in
    increasing order.

    Input:
    - tab an array
    - k a strictly positive integer
    Output:
    - the k-th element of the array in increasing order
    """
    pass # write your code here

tab = [6,1,3,2,2,9,14]
assert(kSelection(tab, 1) == 1)
assert(kSelection(tab, 5) == 6)

t = randomTableau(100)
tcopy = t[:]
assert(kSelection(t, 50) == kSelectionNaif(tcopy, 50))
```

Comparer les deux algorithmes ci dessus. Lequel est le plus efficace?

```
t = randomTableau(2000)
tcopy = t[:]
time kSelection(t, 1000)
time kSelectionNaif(tcopy, 1000)
```

## Tri Fusion et listes chaînées

Comme on l'a vu en cours, le tri fusion a un défaut majeur : sur un tableau, le tri nécessite de l'allocation mémoire. Cependant, quand on travail sur des **listes chaînées**, ce n'est plus le cas. Ci dessous, la structure de liste du TP2 légèrement améliorée que l'on va réutiliser.
```
class Cellule:
    # One node of a singly linked list: a value plus a reference to the
    # next cell (None marks the end of the list).
    def __init__(self, valeur):
        self.valeur = valeur
        self.suivante = None

    def __repr__(self):
        return str(self.valeur)

class Liste:
    # Singly linked list keeping only a reference to its first cell
    # (self.premiere); an empty list has premiere == None.

    def __init__(self, tab = None):
        """
        Initialize a list (empty, or filled from an iterable).

        Input :
        - tab (optional) an iterable of values; cells are appended in
          iteration order, so the list preserves the order of tab.
        """
        self.premiere = None
        if not tab is None:
            c = None
            for v in tab:
                if c is None:
                    # First value becomes the head of the list.
                    self.premiere = Cellule(v)
                    c = self.premiere
                else:
                    # Chain each subsequent value after the last cell built.
                    c.suivante = Cellule(v)
                    c = c.suivante

    def __repr__(self):
        # Renders the list as "[v1 -> v2 -> ... -> vn]".
        s = "["
        c = self.premiere
        while not c is None:
            s += str(c)
            if c.suivante is not None:
                s+=" -> "
            c = c.suivante
        s+="]"
        return s

    def ajouteCelluleEnTete(self, c):
        """
        Add an existing cell at the head of the list (O(1)).

        Input :
        - c, a cell
        """
        c.suivante = self.premiere
        self.premiere = c

    def ajouteValeurEnTete(self, v):
        """
        Allocate a new cell holding value v and add it at the head of the list.

        Input :
        - v a value
        """
        self.ajouteCelluleEnTete(Cellule(v))

L = Liste([1,2,3])
L
L.ajouteValeurEnTete(4)
L
```

**Ecrire une fonction qui découpe une liste en deux à partir d'un indice donné et retourne la liste obenue avec les valeurs coupées**

Remarque : vous n'avez besoin de parcourir les valeurs qu'une seule fois et seulement jusqu'à $i$. **Dans tous les cas, la complexité ne doit pas dépasser O(n)**.
``` def coupeListe(L, i): """ Coupe une liste à l'indice i (exclus) Input : - L la liste à couper - i l'indice Output : La liste des valeurs d'indice >= i ou la liste vide si la taille de L est <= à i """ pass # écrire le code L = Liste([1,2,3,4,5]) L2 = coupeListe(L,0) assert(str(L) == "[]") assert(str(L2) == "[1 -> 2 -> 3 -> 4 -> 5]") L = Liste([1,2,3,4,5]) L2 = coupeListe(L,1) assert(str(L) == "[1]") assert(str(L2) == "[2 -> 3 -> 4 -> 5]") L = Liste([1,2,3,4,5]) L2 = coupeListe(L,3) assert(str(L) == "[1 -> 2 -> 3]") assert(str(L2) == "[4 -> 5]") L = Liste([1,2,3,4,5]) L2 = coupeListe(L,5) assert(str(L) == "[1 -> 2 -> 3 -> 4 -> 5]") assert(str(L2) == "[]") L = Liste([1,2,3,4,5]) L2 = coupeListe(L,7) assert(str(L) == "[1 -> 2 -> 3 -> 4 -> 5]") assert(str(L2) == "[]") ``` On va écrire un algorithme de fusion de deux listes chaînées triées. Pour des raisons pratiques, il est préférable de prendre en paramètre des **cellules** et non des listes. Cela permet d'écrire l'algorithme de façon récursive. Adapter l'algorithme *Fusion* vu en classe pour les tableau aux listes chaînées : on écrira un algorithme **récursif** qui prend en paramètre deux cellules. On considère ces cellules comme des têtes de listes triées. L'algortihme doit retourner la première cellule de la liste triée résultat. ``` def fusionCellules(c1,c2): """ Fusion de deux listes triées par cellules de têtes Input : - c1 la première cellule de la liste 1 (ou none si liste vide) - c2, la première celule de la liste 2 (ou none si liste vide) Output : La première cellule de la liste triée contenant toutes les cellules atteintes par c1 et c2 """ # écrire le code ici def fusionListes(L1,L2): """ Fusion de deux listes triées On utilise la fonction fusionCellules pour fusionner les deux listes, puis on rattache la nouvelle tete de liste à L1. Donc à la fin de la fonction, L1 contient l'ensemble de la liste fusionnée et L2 est vide. 
Input : - L1, la première liste - L2, la deuxième liste """ L1.premiere = fusionCellules(L1.premiere, L2.premiere) L2.premiere = None L1 = Liste([1,1,3,3,4,5]) L2 = Liste([2,4,4,5,6]) fusionListes(L1,L2) assert(str(L1) == "[1 -> 1 -> 2 -> 3 -> 3 -> 4 -> 4 -> 4 -> 5 -> 5 -> 6]") assert(str(L2) == "[]") L1 = Liste([1,1,3,3,4,5]) LVide = Liste() fusionListes(L1,LVide) assert(str(L1) == "[1 -> 1 -> 3 -> 3 -> 4 -> 5]") assert(str(LVide) == "[]") L1 = Liste([1,1,3,3,4,5]) LVide = Liste() L3 = fusionListes(LVide,L1) assert(str(LVide) == "[1 -> 1 -> 3 -> 3 -> 4 -> 5]") assert(str(L1) == "[]") ``` On a maintenant tous les outils pour écrire un tri fusion sur les listes. ``` def triFusion(L, n): """ Tri Fusion Input : - L une liste - n la taille de la liste """ pass # écrire le code L = Liste([5,1,4,2,4,3,2,4,8]) triFusion(L,9) assert(str(L) == "[1 -> 2 -> 2 -> 3 -> 4 -> 4 -> 4 -> 5 -> 8]") L = Liste() triFusion(L,0) assert(str(L) == "[]") L = Liste([1]) triFusion(L,0) assert(str(L) == "[1]") t = randomTableau(50) L1 = Liste(t) triFusion(L1,50) t.sort() L2 = Liste(t) assert(str(L1) == str(L2)) ``` ## Aller plus loin : optimisation du pivotage Retournons au tri rapide : comme vous l'avez remarqué, le choix du pivot est crucial dans le déroulement de l'algorithme de tri rapide. Si le pivot "tombe" trop près du bord de l'intervalle c'est une catastrophe... Si tout se passait bien on espérerait pouvoir couper en deux à chaque fois, ce qui donnerait un algorithme en $O(n\ ln\, n)$ (pourquoi ?) On voudrait donc que le pivot soit la valeur *médiane* du tableau (autant d'éléments plus petits que d'éléments plus grands). Il existe des stratégie pour augmenter les chances de trouver la médiane. Par exemple, pour le pivot, on pourrait choisir trois éléments dans le tableau et prendre le médian des trois. Écrire une telle fonction de pivotage et un tri rapide. Comparer avec votre première version. 
``` # 3 comparaisons: optimal def median3(a, b, c): """ Renvoie le median de trois éléments Input: - a, b, c trois valeurs Output: - le médiant de a, b, c """ pass # écrire le code assert(median3(1,2,3) == 2) assert(median3(3,1,2) == 2) assert(median3(2,3,1) == 2) def pivot3(t, deb, fin): pass # écrire le code """ Tri rapide utilisant l'algorithme pivot3 au lieu du pivot classique Input : - t un tableau - deb, indice du début de tableau (inclus) - fin, indice de fin de tableau (exclus) """ def triRapide3(t, deb, fin): pass # écrire le code ``` Tester les performances de votre nouvelle fonction et comparer avec la première version du tri rapide (en particulier, sur les tableaux triés). Proposer d'autres optimisations.
github_jupyter
Avani Gupta <br> Roll: 2019121004 # Excercise - Multi-class classification of MNIST using Perceptron In binary perceptron, where $\mathbf{y} \in \{-1, +1\}$, we used to update our weights only for wrongly classified examples. The multi-class perceptron is regarded as a generalization of binary perceptron. Learning through iteration is the same as the perceptron. Weighted inputs are passed through a multiclass signum activation function. If the predicted output label is the same as true label then weights are not updated. However, when predicted output label $\neq$ true label, then the wrongly classified input example is added to the weights of the correct label and subtracted from the weights of the incorrect label. Effectively, this amounts to ’rewarding’ the correct weight vector, ’punishing’ the misleading, incorrect weight vector, and leaving alone an other weight vectors. ``` from sklearn import datasets from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import random import numpy as np import seaborn as sns; sns.set(); import pandas as pd import math import gif import warnings warnings.filterwarnings('ignore') # Setting the seed to ensure reproducibility of experiments np.random.seed(11) # One-hot encoding of target label, Y def one_hot(a): b = -1 * np.ones((a.size, a.max()+1)) b[np.arange(a.size), a] = 1 return b # Loading digits datasets digits = datasets.load_digits() # One-hot encoding of target label, Y Y = digits.target Y = one_hot(Y) # Adding column of ones to absorb bias b of the hyperplane into X X = digits.data bias_ones = np.ones((len(X), 1)) X = np.hstack((X, bias_ones)) # Train-val-test data X_train_val, X_test, Y_train_val, Y_test = train_test_split(X, Y, shuffle=True, test_size = 0.2) X_train, X_val, Y_train, Y_val = train_test_split(X_train_val, Y_train_val, test_size = 0.12517) print("Training dataset: ", X_train.shape) print("Validation dataset: ", 
X_val.shape) print("Test dataset: ", X_test.shape) sns.reset_orig(); plt.gray() plt.matshow(digits.images[10]) plt.show(); ``` #### Write your code below tut notebook functions ``` # Defining signum activation function def signum(vec_w_x): """ signum activation for perceptron Parameters ------------ vec_w_x: ndarray Weighted inputs """ vec_w_x[vec_w_x >= 0] = 1 vec_w_x[vec_w_x < 0] = -1 return vec_w_x # multi-class signum def multi_class_signum(vec_w_x): """ Multiclass signum activation. Parameters ------------ vec_w_x: ndarray Weighted inputs """ flag = np.all(vec_w_x == 0) if flag: return vec_w_x else: num_examples, num_outputs = np.shape(vec_w_x) range_examples = np.array(range(0, num_examples)) zero_idxs = np.argwhere(np.all(vec_w_x == 0, axis=1)) non_zero_examples = np.delete(range_examples, zero_idxs[:, 0]) signum_vec_w_x = vec_w_x[non_zero_examples] maxvals = np.amax(signum_vec_w_x, axis=1) for i in range(num_examples): idx = np.argwhere(signum_vec_w_x == maxvals[i])[0] signum_vec_w_x[idx[0], idx[1]] = 1 non_maxvals_idxs = np.argwhere(signum_vec_w_x != 1) signum_vec_w_x[non_maxvals_idxs[:, 0], non_maxvals_idxs[:, 1]] = -1 vec_w_x[non_zero_examples] = signum_vec_w_x return vec_w_x # Evaluation for train, val, and test set. 
def get_accuracy(y_predicted, Y_input_set, num_datapoints): miscls_points = np.argwhere(np.any(y_predicted != Y_input_set, axis=1)) miscls_points = np.unique(miscls_points) accuracy = (1-len(miscls_points)/num_datapoints)*100 return accuracy def get_prediction(X_input_set, Y_input_set, weights, get_acc=True, model_type='perceptron', predict='no'): if len(Y_input_set) != 0: num_datapoints, num_categories = np.shape(Y_input_set) vec_w_transpose_x = np.dot(X_input_set, weights) if num_categories > 1: # Multi-class if model_type == 'perceptron': y_pred_out = multi_class_signum(vec_w_transpose_x) elif model_type == 'logreg': y_pred_out = softmax(X_input_set, vec_w_transpose_x, predict=predict) else: # Binary class if model_type == 'perceptron' or model_type == 'LinearDA': y_pred_out = signum(vec_w_transpose_x) elif model_type == 'logreg': y_pred_out = sigmoid(vec_w_transpose_x, predict=predict) # Both prediction and evaluation if get_acc: cls_acc = get_accuracy(y_pred_out, Y_input_set, num_datapoints) return cls_acc # Only prediction return y_pred_out # Perceptron training algorithm def train(X_train, Y_train, weights, learning_rate=1, total_epochs=100): """Training method for Perceptron. Parameters ----------- X_train: ndarray (num_examples(rows) vs num_features(columns)) Input dataset which perceptron will use to learn optimal weights Y_train: ndarray (num_examples(rows) vs class_labels(columns)) Class labels for input data weights: ndarray (num_features vs n_output) Weights used to train the network and predict on test set learning_rate: int Learning rate use to learn and update weights total_epochs: int Max number of epochs to train the perceptron model """ n_samples, _ = np.shape(X_train) history_weights = [] epoch = 1 # Number of missclassified points we would like to see in the train set. # While training, its value will change every epoch. If m==0, our training # error will be zero. m = 1 # If the most recent weights gave 0 misclassifications, break the loop. 
# Else continue until total_epochs is completed. while m != 0 and epoch <= total_epochs: m = 0 # Compute weighted inputs and predict class labels on training set. weights_transpose_x = np.dot(X_train, weights) weights_transpose_x = signum(weights_transpose_x) y_train_out = np.multiply(Y_train, weights_transpose_x) epoch += 1 # Collecting misclassified indexes and count them y_miscls_idxs = np.argwhere(y_train_out <= 0)[:, 0] y_miscls_idxs = np.unique(y_miscls_idxs) m = len(y_miscls_idxs) # Calculate gradients and update weights dweights = np.dot((X_train[y_miscls_idxs]).T, Y_train[y_miscls_idxs]) weights += (learning_rate/n_samples) * dweights weights = np.round(weights, decimals=4) # Append weights to visualize decision boundary later history_weights.append(weights) if m == 0 and epoch <= total_epochs: print("Training has stabilized with all points classified: ", epoch) else: print(f'Training completed at {epoch-1} epochs. {m} misclassified points remain.') return history_weights ``` My code ``` weights_arr = np.zeros((X_train.shape[1], Y_train.shape[1])) for i in range(Y_train.shape[1]): weights = np.zeros((X_train.shape[1], 1)) weights_arr[:, i:i+1] = train(X_train, Y_train[:, i].reshape((-1,1)), weights, 1, 10000)[-1].copy() def accuracy(X, Y, W): pred = X @ W Class_value = np.max(pred, axis=1, keepdims=True) pred = (pred == Class_value ) class1 = np.where(Y == 1, True, False) match = pred[class1] acc = np.mean(match) * 100 return acc train_acc = accuracy(X_train, Y_train, weights_arr) print("Train accuracy: ",train_acc) val_acc = accuracy(X_val, Y_val, weights_arr) print("Validation accuracy: ", val_acc) test_acc = accuracy(X_test, Y_test, weights_arr) print("Test accuracy: ", test_acc) ```
github_jupyter
# Descriptive stats for datasets and network localization ------------------- Author: Brin Rosenthal (sbrosenthal@ucsd.edu) ------------------- Notebook to calculate localization of dDNV sets, and measure number of patients with a dDNV in established disease genes, in a non-disease gene (non-recurrent), or no dDNVs. <a id='import'></a> ## Import packages ``` import numpy as np import matplotlib.pyplot as plt import seaborn as sns import networkx as nx import pandas as pd import random from IPython.display import display # latex rendering of text in graphs import matplotlib as mpl mpl.rc('text', usetex = False) mpl.rc('font', family = 'serif') from matplotlib import rcParams rcParams['font.family'] = 'sans-serif' rcParams['font.sans-serif'] = ['Arial'] sns.set_style('white') sns.set_style("ticks", {"xtick.major.size": 15, "ytick.major.size": 15}) plt.rcParams['svg.fonttype'] = 'none' import sys % matplotlib inline # need to have networkx version 1.11 nx.__version__ # import network localization methods sys.path.append('netprop_zscore_code/') import network_localization ``` [TOC](#toc) <a id='importData'></a> # Load the data- including ASD and CHD high confidence genes, and DNV sets ASD genes come from table 4 in http://www.cell.com/neuron/fulltext/S0896-6273(15)00773-4 ``` # ------- ASD high confidence genes (established disease genes) ------- HC_genes_temp = pd.read_csv('data/HC_genes/ASD_HC.tsv',sep='\t',index_col='Unnamed: 0') ASD_HC = [str(g[1:-1]).strip("'") for g in HC_genes_temp['seed_genes'].tolist()[0][1:-1].split(', ')] len(ASD_HC) # ------- High confidence CHD genes (established disease genes) ------- # Load congenital heart defect recurring DNVs (from https://www.nature.com/articles/ng.3970) HC_genes_temp = pd.read_csv('data/HC_genes/CHD_HC.tsv',sep='\t',index_col='Unnamed: 0') CHD_HC = [str(g[1:-1]).strip("'") for g in HC_genes_temp['seed_genes'].tolist()[0][1:-1].split(', ')] len(CHD_HC) # Load all NDD DNVs (from supp materials of CHD paper; from 
https://www.nature.com/articles/ng.3970) NDD_df = pd.read_excel('data/SSC/homsy_database_S08.xlsx',skiprows=1) ASD_DNV = NDD_df[NDD_df['Study']=='SSC'] # simons simplex data print('\nnumber total ASD damaging DNVs') print(len(ASD_DNV)) ASD_DNV_VC = ASD_DNV['Gene'].value_counts() # count recurrences ASD_DNV = list(np.unique(ASD_DNV['Gene'].tolist())) print('\nnumber unique ASD damaging DNVs:') print(len(ASD_DNV)) print('\nnumber recurrent ASD damaging DNVs') print(sum(ASD_DNV_VC>1)) # number recurrent ASD # Load the new cohort (Supp table 9 from https://www.nature.com/articles/ng.3970) CHD_new_df = pd.read_excel('data/CHD_databases_2017_paper/ng.3970-S3.xlsx',sheetname='S9',skiprows=1) CHD_new_df.index=CHD_new_df['Blinded ID'] # apply the same filtering as for old data CHD_new_df = CHD_new_df[(CHD_new_df['Variant_Class']!='syn')&(CHD_new_df['Variant_Class']!='mis')] print('number total CHD damaging DNVs:') print(len(CHD_new_df)) DNV_noNDD_full = CHD_new_df[CHD_new_df['NDD']=='No'] DNV_noNDD_full = DNV_noNDD_full['Gene'].unique().tolist() print('\nnumber damaging CHD DNVs without NDD:') print(len(DNV_noNDD_full)) DNV_withNDD_full = CHD_new_df[CHD_new_df['NDD']=='Yes'] DNV_withNDD_full = DNV_withNDD_full['Gene'].unique().tolist() print('\nnumber damaging CHD DNVs with NDD:') print(len(DNV_withNDD_full)) CHD_DNV = CHD_new_df['Gene'].unique().tolist() CHD_DNV_VC = CHD_new_df['Gene'].value_counts() print('\nnumber unique CHD damaging DNVs:') print(len(CHD_DNV)) print('\nnumber recurrent CHD damaging DNVs ') print(sum(CHD_DNV_VC>1)) # number recurrent CHD # Load control DNVs (from Database S3) ctrl_DNV_df = pd.read_excel('data/SSC/homsy_database_S03.xlsx',skiprows=1) # exclude synonymouse and non-damaging missense ctrl_DNV = ctrl_DNV_df[(ctrl_DNV_df['Variant Class']!='Synonymous')&(ctrl_DNV_df['Variant Class']!='Missense')] ctrl_DNV_VC = ctrl_DNV['Gene'].value_counts() print('number damaging control DNVs:') print(len(ctrl_DNV)) #ctrl_DNV = ctrl_DNV_df ctrl_DNV = 
list(np.unique(ctrl_DNV['Gene'].tolist())) print('\nnumber unique damaging control DNVs:') print(len(ctrl_DNV)) print('\nnumber recurrent damaging control DNVs:') print(sum(ctrl_DNV_VC>1)) ``` ## Define gene sets for use in rest of analysis ``` # pick out which gene sets to investigate genes_ASD = ASD_HC print(len(genes_ASD)) genes_CHD = CHD_HC print(len(genes_CHD)) ``` [TOC](#toc) <a id='plotHCgenes'></a> # Make figure 1: summary of HC genes in CHD and ASD (showing low recurrence) ``` ASD_df = NDD_df[(NDD_df['Study']=='SSC')] ASD_df.head() CHD_DNV_df = CHD_new_df CHD_DNV_df.head() ``` # Bar chart- how many DNVs within established disease genes compared to outside? ``` print(len(ASD_df)) display(ASD_df['Class (2)'].value_counts()) ASD_df.head() CHD_new_df.head() # display(CHD_DNV_for_merge['Class'].value_counts()) # print(len(CHD_DNV_for_merge)) # display(CHD_DNV_for_merge['Gene'].value_counts().head()) # CHD_DNV_for_merge.head() num_CHD_DNV_in_HC = sum(CHD_new_df['Gene'].isin(ASD_HC+CHD_HC)) print(num_CHD_DNV_in_HC) num_CHD_DNV_not_HC = sum(~CHD_new_df['Gene'].isin(ASD_HC+CHD_HC)) print(num_CHD_DNV_not_HC) num_CHD_no_DNV = 2645-(num_CHD_DNV_in_HC+num_CHD_DNV_not_HC) print(num_CHD_no_DNV) num_ASD_DNV_in_HC = sum(ASD_df['Gene'].isin(ASD_HC+CHD_HC)) print(num_ASD_DNV_in_HC) num_ASD_DNV_not_HC = sum(~ASD_df['Gene'].isin(ASD_HC+CHD_HC)) print(num_ASD_DNV_not_HC) num_ASD_no_DNV = 2759-(num_ASD_DNV_in_HC+num_ASD_DNV_not_HC) print(num_ASD_no_DNV) num_CHD_DNV_in_HC+num_CHD_DNV_not_HC # add a bar showing the number of patients without damaging DNVs sns.set_style('whitegrid',{'axes.grid':False}) sns.set_style("ticks", {"xtick.major.size": 15, "ytick.major.size": 15}) plt.rcParams['svg.fonttype'] = 'none' plt.figure(figsize=(3,3)) plt.bar([-.15,.85,1.85],np.divide([num_ASD_DNV_in_HC,num_ASD_DNV_not_HC,num_ASD_no_DNV],2759.0),width=.3, color='#9FEE9D',alpha=.7,edgecolor='k', label='ASD') 
plt.bar([.15,1.15,2.15],np.divide([num_CHD_DNV_in_HC,num_CHD_DNV_not_HC,num_CHD_no_DNV],2645.0),width=.3, color='#E69EE6',alpha=.7,edgecolor='k', label='CHD') plt.ylim([0,1]) plt.xticks([0,1,2],['dDNVs in \ndisease genes', 'dDNVs outside \ndisease genes','no dDNVs'],fontsize=14,rotation='vertical') plt.ylabel('fraction cohort',fontsize=16) plt.legend(loc='upper left',fontsize=14) # plt.savefig('../../manuscript/figures_1911/Figure1/Figure1_final assets/DNVs_in_out_disease_genes.png',dpi=300,bbox_inches='tight') # plt.savefig('../../manuscript/figures_1911/Figure1/Figure1_final assets/DNVs_in_out_disease_genes.svg',dpi=300,bbox_inches='tight') ``` [TOC](#toc) <a id='localization'></a> # Select focal interactome, calculate network localization of DNVs PCnet downloaded from ndex and parsed to networkx format https://ndexbio.org/#/network/f93f402c-86d4-11e7-a10d-0ac135e8bacf ``` # load the pre-computed interactome # PCnet downloaded from ndex and parsed to networkx format # https://ndexbio.org/#/network/f93f402c-86d4-11e7-a10d-0ac135e8bacf Gint = nx.read_gpickle('/Users/brinrosenthal/Documents/CCBB_tickets_data/PCnet/G_PCnet.gpickle') int_name = 'PCnet' print(len(Gint.nodes())) print(len(Gint.edges())) ``` ### How much overlap is there between disease genes and selected interactome? ``` print(len(np.intersect1d(list(ASD_HC),Gint.nodes()))) print(len(ASD_HC)) print(len(np.intersect1d(list(CHD_HC),Gint.nodes()))) print(len(CHD_HC)) ``` [TOC](#toc) <a id='disease_localization'></a> # How localized are the individual diseases? Two localization methods implemented here: 1. Largest connected component (following Menche et al) 2. 
Number shared edges in gene set (similar to method on STRING-DB) ## ASD localization ``` # ----- distributions for non-sampled case ----- # set numreps to 5000 for full run num_reps=100 ASD_numedges_list, ASD_numedges_rand, ASD_LCC_list, ASD_LCC_size_rand = network_localization.localization(Gint, focal_genes = ASD_DNV, num_reps = num_reps, sample_frac = 1.0, method = 'both', plot = False,print_counter=True) # plot distributions for non-sampled case sns.set_style('white') sns.set_style("ticks", {"xtick.major.size": 15, "ytick.major.size": 15}) plt.figure(figsize=(1.6,1.54)) plt.vlines(np.mean(ASD_numedges_list),ymin=0,ymax=0.004,color='r',lw=.5,label='ASD dDNVs') sns.kdeplot(ASD_numedges_rand,color='k',lw=.5,alpha=.5,shade=True,label='random') plt.legend(loc='upper left',fontsize=8) plt.ylabel('density',fontsize=8) plt.xlabel('number shared edges',fontsize=8) plt.xticks(fontsize=8) plt.yticks(fontsize=8) plt.ylim([0,.006]) # print the z-score and fdr ASD_NE_z = (np.mean(ASD_numedges_list)-np.mean(ASD_numedges_rand))/np.std(ASD_numedges_rand) from scipy.stats import norm ptemp = norm.sf(abs(ASD_NE_z)) plt.title('permutation p = '+("%.2g" % ptemp),fontsize=8) # plt.savefig('../../manuscript/figures_1911/Supplement/ASD_DNV_numedges_'+int_name+'_no_bootstrap'+str(num_reps)+'.png',dpi=300,bbox_inches='tight') # plt.savefig('../../manuscript/figures_1911/Supplement/ASD_DNV_numedges_'+int_name+'_no_bootstrap'+str(num_reps)+'.svg',dpi=300,bbox_inches='tight') ``` ## CHD localization ``` # ---- non-sampled case ---- # set numreps to 5000 for full run num_reps=100 CHD_numedges_list, CHD_numedges_rand, CHD_LCC_list, CHD_LCC_size_rand = network_localization.localization(Gint, focal_genes = CHD_DNV, num_reps = num_reps, sample_frac = 1.0, method = 'both', plot = False,print_counter=True) # plot distributions for non-sampled case sns.set_style('white') sns.set_style("ticks", {"xtick.major.size": 15, "ytick.major.size": 15}) plt.figure(figsize=(1.6,1.54)) 
plt.vlines(np.mean(CHD_numedges_list),ymin=0,ymax=0.004,color='r',lw=.5,label='CHD dDNVs') sns.kdeplot(CHD_numedges_rand,color='k',lw=.5,alpha=.5,shade=True,label='random') plt.legend(loc='upper left',fontsize=8) plt.ylabel('density',fontsize=8) plt.xlabel('number shared edges',fontsize=8) plt.xticks(fontsize=8) plt.yticks(fontsize=8) plt.ylim([0,.006]) # print the z-score and fdr CHD_NE_z = (np.mean(CHD_numedges_list)-np.mean(CHD_numedges_rand))/np.std(CHD_numedges_rand) from scipy.stats import norm ptemp = norm.sf(abs(CHD_NE_z)) plt.title('permutation p = '+("%.2g" % ptemp),fontsize=8) # plt.savefig('../../manuscript/figures_1911/Supplement/CHD_DNV_numedges_'+int_name+'_no_bootstrap'+str(num_reps)+'.png',dpi=300,bbox_inches='tight') # plt.savefig('../../manuscript/figures_1911/Supplement/CHD_DNV_numedges_'+int_name+'_no_bootstrap'+str(num_reps)+'.svg',dpi=300,bbox_inches='tight') ``` ## Control cohort localization ``` len(ctrl_DNV) # set numreps to 5000 for full run num_reps=100 CTRL_numedges_list, CTRL_numedges_rand, CTRL_LCC_list, CTRL_LCC_size_rand = network_localization.localization(Gint, focal_genes = ctrl_DNV, num_reps = num_reps, sample_frac = 1.0, method = 'both', plot = False,print_counter=True) # plot distributions for non-sampled case sns.set_style('white') sns.set_style("ticks", {"xtick.major.size": 15, "ytick.major.size": 15}) plt.figure(figsize=(1.6,1.54)) plt.vlines(np.mean(CTRL_numedges_list),ymin=0,ymax=0.02,color='r',lw=.5,label='CTRL dDNVs') sns.kdeplot(CTRL_numedges_rand,color='k',lw=.5,alpha=.5,shade=True,label='random') plt.legend(loc='upper left',fontsize=8) plt.ylabel('density',fontsize=8) plt.xlabel('number shared edges',fontsize=8) plt.xticks(fontsize=8) plt.yticks(fontsize=8) plt.ylim([0,.04]) # print the z-score and fdr CTRL_NE_z = (np.mean(CTRL_numedges_list)-np.mean(CTRL_numedges_rand))/np.std(CTRL_numedges_rand) from scipy.stats import norm ptemp = norm.sf(abs(CTRL_NE_z)) plt.title('permutation p = '+("%.2g" % ptemp),fontsize=8) # 
plt.savefig('../../manuscript/figures_1911/Supplement/CTRL_DNV_numedges_'+int_name+'_no_bootstrap'+str(num_reps)+'.png',dpi=300,bbox_inches='tight') # plt.savefig('../../manuscript/figures_1911/Supplement/CTRL_DNV_numedges_'+int_name+'_no_bootstrap'+str(num_reps)+'.svg',dpi=300,bbox_inches='tight') ``` [TOC](#toc)
github_jupyter
# Taller de Datos Abiertos **Nivel : Medio** Este Jupyter Notebook es para aprender conceptos básicos de la ciencia de datos utilizando la plataforma Datos Abiertos de Cali, Colombia. En este libro aprenderá cómo: - Recopilar datos de una API - Datos limpios - Visualizar y trazar datos - Crea un visual genial de datos <hr/> # 1. Obtener Los Datos - Queremos este conjunto de datos : [Homicidios comunes en el Municipio de Santiago de Cali](http://datos.cali.gov.co/dataset/homicidios-comunes-en-el-municipio-de-santiago-de-cali-segun-comuna-del-hecho-2001-2017/resource/670f2cd8-3b5f-4657-a136-577afefc38be) - Podemos usar la **API** para recopilar los datos. El *API Endpoint* es esto: http://datos.cali.gov.co/en/api/3/action/datastore_search?resource_id=670f2cd8-3b5f-4657-a136-577afefc38be ``` #Library para usar APIs import requests import json #API Endpoint (de la plataforma de Datos Abiertos) url = 'http://datos.cali.gov.co/en/api/3/action/datastore_search?resource_id=670f2cd8-3b5f-4657-a136-577afefc38be' ``` ### Usa la API y obtén una respuesta en json (indicio: mira el documentación de <a href='https://realpython.com/python-requests'>requests</a>) ``` #usa la api y obtén una respuesta response = requests.request("GET", url) #cambiar la respuesta en un objeto json #mira el objecto json ``` <hr/> # 2. 
Limpiar Los Datos ``` import pandas as pd ``` ### Crear un DataFrame con el objecto de json y limpialo - **Usa la <a href='https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html'>library para dataframes con pandas</a>** - En el fin, el DataFrame debería verse así : ``` #no usa esta csv, es solo para ver cómo deben verse sus datos cuando termine de limpiarlos pd.read_csv('datosLimpios.csv').head() # crear un 'DataFrame' objecto desde el json # puedes usar json['result']['records'] para ver la información que queremos ``` ### Limpiando los datos, necesitas hacer : - establece el indice a 'comuna' - omitir las filas de 'TOTAL' y 'sin especificar' - cambiar columnas para ser el año - cambiar el DataFrame para todo ser numérico - reemplazar los datos faltantes con -1 - ordenar el DataFrame ``` #LIMPIANDO LOS DATOS #Terminar la función: def limpiarLosDatos(datos): #establece el indice para ser el numero de comuna datos.set_index('No', inplace=True) #omitir filas innecesarias datosLimpios = datos.loc[["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22"]] #omitir columnas innecesarias datosLimpios.drop("_id", axis=1, inplace=True) #cambiar columnas para ser el año #cambiar los valores de columnas para que sean numéricos (puede usar pd.to_numeric) #reemplazar los datos faltantes con -1 #cambiar los valores para que sean numéricos (puede usar pd.to_numeric) #ordenar el dataframe por año return datosLimpios #usa la función para limpiar los datos #guarda la resulta en un variable ``` <hr/> # 3. Visualizar los Datos *(indicio: usa la plot() function para DataFrames en pandas)* ``` #con esto, podemos visualizar en el Jupyter notebook %matplotlib inline import matplotlib.pyplot as plt #primero, visualiza todos los datos (usa la función .T para transponer los datos para trazarlos correctamente.) 
#título y nombres de ejes plt.title('Homocidios en Comunas en Cali 2001 - 2016') plt.xlabel('año') plt.ylabel('homocidios') #la comuna con muchos más homicidios que las otras es la número 13, la visualizamos : #también, usa la función .T para transponer los datos y trazarlos correctamente ``` <hr/> # 4. Mapear los Datos Para este conjunto de datos, es bueno visualizar los homicidios a lo largo del tiempo, pero es aún más interesante ver dónde están en un mapa. Ahora usaremos la biblioteca de [geopandas](http://geopandas.org/) para crear un mapa de homicidios de Cali. ``` import geopandas as gpd caliShapeFileUrl = 'http://ws-idesc.cali.gov.co:8081/geoserver/idesc/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=idesc:mc_comunas&maxFeatures=50&outputFormat=SHAPE-ZIP' ``` **Usa la función [read_file()](http://geopandas.org/io.html) de geopandas para obtener un geoDataFrame con el shapefile de Cali:** ``` caliShape = gpd.read_file(caliShapeFileUrl) caliShape.head() ``` **Usa la función [plot()](http://geopandas.org/gallery/plotting_with_geoplot.html) de geopandas para ver Cali:** ### Siguiente *¡Chévere!* Ahora, tenemos un DataFrame con la información de homicidios por comuna por año en Cali, y también tenemos un DataFrame con las formas de las comunas de Cali en un mapa. ¡Unámoslos para hacer una visualización interesante! Vamos a utilizar la función [join()](https://pandas.pydata.org/pandas-docs/version/0.24.2/reference/api/pandas.DataFrame.join.html) de pandas para hacer esto. 
**Primero: reestructura el DataFrame de homicidios para que sea compatible con los datos del mapa:** - Cambia el nombre de tu DataFrame anterior a datosLimpios; puedes usar el siguiente código para ayudarte ``` #nombreAntes = datosLimpios datosLimpios.reset_index(inplace=True) datosLimpios = datosLimpios.apply(pd.to_numeric) #inplace=True es necesario: sin él, set_index no modifica el DataFrame datosLimpios.set_index('No', inplace=True) datosLimpios.index.names=['comuna'] datosLimpios.head() ``` **Segundo: reestructura el DataFrame del ShapeFile de Cali para tener 'comuna' como índice** ``` #establecer el índice para que sea el número de la comuna caliShape.set_index('comuna', inplace=True) ``` **Tercero: Usa la función [join()](https://pandas.pydata.org/pandas-docs/version/0.24.2/reference/api/pandas.DataFrame.join.html) de pandas para fusionar los DataFrames** ``` #Esta función es para hacer que el mapa se vea bonito (usa las variables globales fig y ax del notebook) def personalizaLaMapa(vmin, vmax): # quitar el eje ax.axis('off') # título ax.set_title('Número de homicidios en 2001 por distrito en Cali', \ fontdict={'fontsize': '25', 'fontweight' : '3'}) # fuente de los datos ax.annotate('Source: Datos Abiertos, Alcaldía de Cali', xy=(0.1, .08), xycoords='figure fraction', horizontalalignment='left', verticalalignment='top', fontsize=10, color='#555555') # Crear una barra de colores como la leyenda del mapa sm = plt.cm.ScalarMappable(cmap='Blues', norm=plt.Normalize(vmin=vmin, vmax=vmax)) sm._A = [] cbar = fig.colorbar(sm) ``` **Trata de usar la función [plot()](http://geopandas.org/gallery/plotting_with_geoplot.html) de geopandas para visualizar todo en el mapa de Cali:** - primero, solo por un año - usa la función 'personalizaLaMapa' para hacer que el mapa se vea bonito - después, guarda la figura en una imagen png con la función [savefig()](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.savefig.html) ``` #el año (year) que queremos ver year = 2001 vmin, vmax = 0, 250 #crear una figura en matplotlib fig, ax = plt.subplots(1, figsize=(10, 10)) #indicio: termina esto --> 
plot(column= , cmap='Blues', ax= ) # Personalización personalizaLaMapa(vmin, vmax) # esto guardará la figura como un png de alta resolución. También puedes guardarla como svg # usa la función savefig('nombre.png', dpi=300) ``` # ¡Buen Trabajo! ¡Muy genial! Utilizamos datos de la plataforma de [Datos Abiertos, Cali](http://datos.cali.gov.co/) ¡y creamos nuestra propia visualización! ¿Qué otros conjuntos de datos geniales podemos utilizar? ![Homicidios](https://media.giphy.com/media/fSRR6jTOJggap5KGS9/giphy.gif "homicidios") <hr/> ## Fin. <hr/>
github_jupyter