text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` import os import cv2 import math import warnings import numpy as np import pandas as pd import seaborn as sns import tensorflow as tf import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, fbeta_score from keras import optimizers from keras import backend as K from keras.models import Sequential, Model from keras import applications from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import LearningRateScheduler, EarlyStopping from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Activation, BatchNormalization, GlobalAveragePooling2D, Input # Set seeds to make the experiment more reproducible. from tensorflow import set_random_seed from numpy.random import seed set_random_seed(0) seed(0) %matplotlib inline sns.set(style="whitegrid") warnings.filterwarnings("ignore") train = pd.read_csv('../input/imet-2019-fgvc6/train.csv') labels = pd.read_csv('../input/imet-2019-fgvc6/labels.csv') test = pd.read_csv('../input/imet-2019-fgvc6/sample_submission.csv') train["attribute_ids"] = train["attribute_ids"].apply(lambda x:x.split(" ")) train["id"] = train["id"].apply(lambda x: x + ".png") test["id"] = test["id"].apply(lambda x: x + ".png") print('Number of train samples: ', train.shape[0]) print('Number of test samples: ', test.shape[0]) print('Number of labels: ', labels.shape[0]) display(train.head()) display(labels.head()) ``` ### Model parameters ``` # Model parameters BATCH_SIZE = 128 EPOCHS = 30 LEARNING_RATE = 0.0001 HEIGHT = 64 WIDTH = 64 CANAL = 3 N_CLASSES = labels.shape[0] ES_PATIENCE = 5 DECAY_DROP = 0.5 DECAY_EPOCHS = 10 classes = list(map(str, range(N_CLASSES))) def f2_score_thr(threshold=0.5): def f2_score(y_true, y_pred): beta = 2 y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold), K.floatx()) true_positives = K.sum(K.clip(y_true * y_pred, 0, 1), axis=1) predicted_positives = K.sum(K.clip(y_pred, 0, 1), axis=1) possible_positives = 
K.sum(K.clip(y_true, 0, 1), axis=1) precision = true_positives / (predicted_positives + K.epsilon()) recall = true_positives / (possible_positives + K.epsilon()) return K.mean(((1+beta**2)*precision*recall) / ((beta**2)*precision+recall+K.epsilon())) return f2_score def custom_f2(y_true, y_pred): beta = 2 tp = np.sum((y_true == 1) & (y_pred == 1)) tn = np.sum((y_true == 0) & (y_pred == 0)) fp = np.sum((y_true == 0) & (y_pred == 1)) fn = np.sum((y_true == 1) & (y_pred == 0)) p = tp / (tp + fp + K.epsilon()) r = tp / (tp + fn + K.epsilon()) f2 = (1+beta**2)*p*r / (p*beta**2 + r + 1e-15) return f2 def step_decay(epoch): initial_lrate = LEARNING_RATE drop = DECAY_DROP epochs_drop = DECAY_EPOCHS lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop)) return lrate train_datagen=ImageDataGenerator(rescale=1./255, validation_split=0.25) train_generator=train_datagen.flow_from_dataframe( dataframe=train, directory="../input/imet-2019-fgvc6/train", x_col="id", y_col="attribute_ids", batch_size=BATCH_SIZE, shuffle=True, class_mode="categorical", classes=classes, target_size=(HEIGHT, WIDTH), subset='training') valid_generator=train_datagen.flow_from_dataframe( dataframe=train, directory="../input/imet-2019-fgvc6/train", x_col="id", y_col="attribute_ids", batch_size=BATCH_SIZE, shuffle=True, class_mode="categorical", classes=classes, target_size=(HEIGHT, WIDTH), subset='validation') test_datagen = ImageDataGenerator(rescale=1./255) test_generator = test_datagen.flow_from_dataframe( dataframe=test, directory = "../input/imet-2019-fgvc6/test", x_col="id", target_size=(HEIGHT, WIDTH), batch_size=1, shuffle=False, class_mode=None) ``` ### Model ``` def create_model(input_shape, n_out): input_tensor = Input(shape=input_shape) base_model = applications.VGG19(weights=None, include_top=False, input_tensor=input_tensor) base_model.load_weights('../input/vgg19/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5') x = GlobalAveragePooling2D()(base_model.output) x = 
Dropout(0.5)(x) x = Dense(1024, activation='relu')(x) x = Dropout(0.5)(x) final_output = Dense(n_out, activation='sigmoid', name='final_output')(x) model = Model(input_tensor, final_output) return model # warm up model # first: train only the top layers (which were randomly initialized) model = create_model(input_shape=(HEIGHT, WIDTH, CANAL), n_out=N_CLASSES) for layer in model.layers: layer.trainable = False for i in range(-5,0): model.layers[i].trainable = True optimizer = optimizers.Adam(lr=LEARNING_RATE) metrics = ["accuracy", "categorical_accuracy"] es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=ES_PATIENCE) callbacks = [es] model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=metrics) model.summary() ``` #### Train top layers ``` STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size history = model.fit_generator(generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN, validation_data=valid_generator, validation_steps=STEP_SIZE_VALID, epochs=EPOCHS, callbacks=callbacks, verbose=2, max_queue_size=16, workers=3, use_multiprocessing=True) ``` #### Fine-tune the complete model ``` for layer in model.layers: layer.trainable = True metrics = ["accuracy", "categorical_accuracy"] lrate = LearningRateScheduler(step_decay) es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=(ES_PATIENCE)) callbacks = [es, lrate] optimizer = optimizers.Adam(lr=0.0001) model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=metrics) model.summary() STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size history = model.fit_generator(generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN, validation_data=valid_generator, validation_steps=STEP_SIZE_VALID, epochs=EPOCHS, callbacks=callbacks, verbose=2, max_queue_size=16, workers=3, use_multiprocessing=True) ``` ### 
Complete model graph loss ``` sns.set_style("whitegrid") fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex='col', figsize=(20,7)) ax1.plot(history.history['loss'], label='Train loss') ax1.plot(history.history['val_loss'], label='Validation loss') ax1.legend(loc='best') ax1.set_title('Loss') ax2.plot(history.history['acc'], label='Train Accuracy') ax2.plot(history.history['val_acc'], label='Validation accuracy') ax2.legend(loc='best') ax2.set_title('Accuracy') ax3.plot(history.history['categorical_accuracy'], label='Train Cat Accuracy') ax3.plot(history.history['val_categorical_accuracy'], label='Validation Cat Accuracy') ax3.legend(loc='best') ax3.set_title('Cat Accuracy') plt.xlabel('Epochs') sns.despine() plt.show() ``` ### Find best threshold value ``` lastFullValPred = np.empty((0, N_CLASSES)) lastFullValLabels = np.empty((0, N_CLASSES)) for i in range(STEP_SIZE_VALID+1): im, lbl = next(valid_generator) scores = model.predict(im, batch_size=valid_generator.batch_size) lastFullValPred = np.append(lastFullValPred, scores, axis=0) lastFullValLabels = np.append(lastFullValLabels, lbl, axis=0) print(lastFullValPred.shape, lastFullValLabels.shape) def find_best_fixed_threshold(preds, targs, do_plot=True): score = [] thrs = np.arange(0, 0.5, 0.01) for thr in thrs: score.append(custom_f2(targs, (preds > thr).astype(int))) score = np.array(score) pm = score.argmax() best_thr, best_score = thrs[pm], score[pm].item() print(f'thr={best_thr:.3f}', f'F2={best_score:.3f}') if do_plot: plt.plot(thrs, score) plt.vlines(x=best_thr, ymin=score.min(), ymax=score.max()) plt.text(best_thr+0.03, best_score-0.01, f'$F_{2}=${best_score:.3f}', fontsize=14); plt.show() return best_thr, best_score threshold, best_score = find_best_fixed_threshold(lastFullValPred, lastFullValLabels, do_plot=True) ``` ### Apply model to test set and output predictions ``` test_generator.reset() STEP_SIZE_TEST = test_generator.n//test_generator.batch_size preds = model.predict_generator(test_generator, 
steps=STEP_SIZE_TEST) predictions = [] for pred_ar in preds: valid = [] for idx, pred in enumerate(pred_ar): if pred > threshold: valid.append(idx) if len(valid) == 0: valid.append(np.argmax(pred_ar)) predictions.append(valid) filenames = test_generator.filenames label_map = {valid_generator.class_indices[k] : k for k in valid_generator.class_indices} results = pd.DataFrame({'id':filenames, 'attribute_ids':predictions}) results['id'] = results['id'].map(lambda x: str(x)[:-4]) results['attribute_ids'] = results['attribute_ids'].apply(lambda x: list(map(label_map.get, x))) results["attribute_ids"] = results["attribute_ids"].apply(lambda x: ' '.join(x)) results.to_csv('submission.csv',index=False) results.head(10) ```
github_jupyter
# Tutorial 07: Networks from OpenStreetMap In this tutorial, we discuss how networks that have been imported from OpenStreetMap can be integrated and run in Flow. This will all be presented via the Bay Bridge network, seen in the figure below. Networks from OpenStreetMap are commonly used in many traffic simulators for the purposes of replicating traffic in realistic traffic geometries. This is true in both SUMO and Aimsun (which are both supported in Flow), with each supporting several techniques for importing such network files. This process is further simplified and abstracted in Flow, with users simply required to specify the path to the osm file in order to simulate traffic in the network. <img src="img/bay_bridge_osm.png" width=750> <center> **Figure 1**: Snapshot of the Bay Bridge from OpenStreetMap </center> Before we begin, let us import all relevant Flow parameters as we have done for previous tutorials. If you are unfamiliar with these parameters, you are encouraged to review tutorial 1. ``` # the TestEnv environment is used to simply simulate the network from flow.envs import TestEnv # the Experiment class is used for running simulations from flow.core.experiment import Experiment # all other imports are standard from flow.core.params import VehicleParams from flow.core.params import NetParams from flow.core.params import InitialConfig from flow.core.params import EnvParams from flow.core.params import SumoParams ``` ## 1. Running a Default Simulation In order to create a scenario object in Flow with network features depicted from OpenStreetMap, we will use the base `Scenario` class. This class can sufficiently support the generation of any .osm file. ``` from flow.scenarios import Scenario ``` In order to recreate the network features of a specific osm file, the path to the osm file must be specified in `NetParams`. For this example, we will use an osm file extracted from the section of the Bay Bridge as depicted in Figure 1. 
In order to specify the path to the osm file, simply fill in the `osm_path` attribute with the path to the .osm file as follows: ``` net_params = NetParams( osm_path='networks/bay_bridge.osm', no_internal_links=False ) ``` Note that in the above cell the `no_internal_links` attribute is set to False to generate junctions for the network within SUMO. This has no effect in Aimsun, and can be left out. Next, we create all other parameters as we have in tutorials 1 and 2. For this example, we will assume a total of 1000 are uniformly spread across the Bay Bridge. Once again, if the choice of parameters is unclear, you are encouraged to review Tutorial 1. ``` # create the remainding parameters env_params = EnvParams() sim_params = SumoParams(render=True) initial_config = InitialConfig() vehicles = VehicleParams() vehicles.add('human', num_vehicles=100) # create the scenario scenario = Scenario( name='bay_bridge', net_params=net_params, initial_config=initial_config, vehicles=vehicles ) ``` We are finally ready to test our scenario in simulation. In order to do so, we create an `Experiment` object and run the simulation for a number of steps. This is done in the cell below. ``` # create the environment env = TestEnv( env_params=env_params, sim_params=sim_params, scenario=scenario ) # run the simulation for 1000 steps exp = Experiment(env=env) exp.run(1, 1000) ``` ## 2. Customizing the Scenario While the above example does allow you to view the network within Flow, the simulation is limited for two reasons. For one, vehicles are placed on all edges within the network; if we wished to simulate traffic solely on the on the bridge and do not care about the artireols, for instance, this would result in unnecessary computational burdens. 
Next, as you may have noticed if you ran the above example to completion, routes in the base scenario class are defaulted to consist of the vehicles' current edges only, meaning that vehicles exit the network as soon as they reach the end of the edge they are originated on. In the next subsections, we discuss how the scenario can be modified to resolve these issues. ### 2.1 Specifying Traversable Edges In order to limit the edges vehicles are placed on to the road sections edges corresponding to the westbound Bay Bridge, we define an `EDGES_DISTRIBUTION` variable. This variable specifies the names of the edges within the network that vehicles are permitted to originated in, and is assigned to the scenario via the `edges_distribution` component of the `InitialConfig` input parameter, as seen in the code snippet below. Note that the names of the edges can be identified from the .osm file or by right clicking on specific edges from the SUMO gui (see the figure below). <img src="img/osm_edge_name.png" width=600> <center> **Figure 2**: Name of an edge from SUMO </center> ``` # we define an EDGES_DISTRIBUTION variable with the edges within # the westbound Bay Bridge EDGES_DISTRIBUTION = [ "11197898", "123741311", "123741303", "90077193#0", "90077193#1", "340686922", "236348366", "340686911#0", "340686911#1", "340686911#2", "340686911#3", "236348361", "236348360#0", "236348360#1" ] # the above variable is added to initial_config new_initial_config = InitialConfig( edges_distribution=EDGES_DISTRIBUTION ) ``` ### 2.2 Creating Custom Routes Next, we choose to specify the routes of vehicles so that they can traverse the entire Bay Bridge, instead of the only the edge they are currently on. In order to this, we create a new scenario class that inherits all its properties from `Scenario` and simply redefine the routes by modifying the `specify_routes` variable. This method was originally introduced in Tutorial 07: Creating Custom Scenarios. 
The new scenario class looks as follows: ``` # we create a new scenario class to specify the expected routes class BayBridgeOSMScenario(Scenario): def specify_routes(self, net_params): return { "11197898": [ "11197898", "123741311", "123741303", "90077193#0", "90077193#1", "340686922", "236348366", "340686911#0", "340686911#1", "340686911#2", "340686911#3", "236348361", "236348360#0", "236348360#1", ], "123741311": [ "123741311", "123741303", "90077193#0", "90077193#1", "340686922", "236348366", "340686911#0", "340686911#1", "340686911#2", "340686911#3", "236348361", "236348360#0", "236348360#1" ], "123741303": [ "123741303", "90077193#0", "90077193#1", "340686922", "236348366", "340686911#0", "340686911#1", "340686911#2", "340686911#3", "236348361", "236348360#0", "236348360#1" ], "90077193#0": [ "90077193#0", "90077193#1", "340686922", "236348366", "340686911#0", "340686911#1", "340686911#2", "340686911#3", "236348361", "236348360#0", "236348360#1" ], "90077193#1": [ "90077193#1", "340686922", "236348366", "340686911#0", "340686911#1", "340686911#2", "340686911#3", "236348361", "236348360#0", "236348360#1" ], "340686922": [ "340686922", "236348366", "340686911#0", "340686911#1", "340686911#2", "340686911#3", "236348361", "236348360#0", "236348360#1" ], "236348366": [ "236348366", "340686911#0", "340686911#1", "340686911#2", "340686911#3", "236348361", "236348360#0", "236348360#1" ], "340686911#0": [ "340686911#0", "340686911#1", "340686911#2", "340686911#3", "236348361", "236348360#0", "236348360#1" ], "340686911#1": [ "340686911#1", "340686911#2", "340686911#3", "236348361", "236348360#0", "236348360#1" ], "340686911#2": [ "340686911#2", "340686911#3", "236348361", "236348360#0", "236348360#1" ], "340686911#3": [ "340686911#3", "236348361", "236348360#0", "236348360#1" ], "236348361": [ "236348361", "236348360#0", "236348360#1" ], "236348360#0": [ "236348360#0", "236348360#1" ], "236348360#1": [ "236348360#1" ] } ``` ### 2.3 Rerunning the SImulation We are now 
ready to rerun the simulation with fully defined vehicle routes and a limited number of traversable edges. If we run the cell below, we can see the new simulation in action. ``` # create the scenario new_scenario = BayBridgeOSMScenario( name='bay_bridge', net_params=net_params, initial_config=new_initial_config, vehicles=vehicles, ) # create the environment env = TestEnv( env_params=env_params, sim_params=sim_params, scenario=new_scenario ) # run the simulation for 1000 steps exp = Experiment(env=env) exp.run(1, 10000) ``` ## 3. Other Tips This tutorial introduces how to incorporate OpenStreetMap files in Flow. This feature, however, does not negate other features that are introduced in other tutorials and documentation. For example, if you would like to not have vehicles be originated side-by-side within a network, this can still be done by specifying a "random" spacing for vehicles as follows: initial_config = InitialConfig( spacing="random", edges_distribution=EDGES_DISTRIBUTION ) In addition, inflows of vehicles can be added to networks imported from OpenStreetMap as they are for any other network (see the tutorial on adding inflows for more on this).
github_jupyter
CER040 - Install signed Management Proxy certificate ==================================================== This notebook installs into the Big Data Cluster the certificate signed using: - [CER030 - Sign Management Proxy certificate with generated CA](../cert-management/cer030-sign-service-proxy-generated-cert.ipynb) Steps ----- ### Parameters ``` app_name = "mgmtproxy" scaledset_name = "mgmtproxy" container_name = "service-proxy" prefix_keyfile_name = "service-proxy" common_name = "mgmtproxy-svc" user = "nginx" group = "nginx" mode = "550" test_cert_store_root = "/var/opt/secrets/test-certificates" ``` ### Common functions Define helper functions used in this notebook. ``` # Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows import sys import os import re import json import platform import shlex import shutil import datetime from subprocess import Popen, PIPE from IPython.display import Markdown retry_hints = {} # Output in stderr known to be transient, therefore automatically retry error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help install_hint = {} # The SOP to help install the executable if it cannot be found first_run = True rules = None debug_logging = False def run(cmd, return_output=False, no_output=False, retry_count=0): """Run shell command, stream stdout, print stderr and optionally return output NOTES: 1. 
Commands that need this kind of ' quoting on Windows e.g.: kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name} Need to actually pass in as '"': kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name} The ' quote approach, although correct when pasting into Windows cmd, will hang at the line: `iter(p.stdout.readline, b'')` The shlex.split call does the right thing for each platform, just use the '"' pattern for a ' """ MAX_RETRIES = 5 output = "" retry = False global first_run global rules if first_run: first_run = False rules = load_rules() # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see: # # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)') # if platform.system() == "Windows" and cmd.startswith("azdata sql query"): cmd = cmd.replace("\n", " ") # shlex.split is required on bash and for Windows paths with spaces # cmd_actual = shlex.split(cmd) # Store this (i.e. kubectl, python etc.) 
to support binary context aware error_hints and retries # user_provided_exe_name = cmd_actual[0].lower() # When running python, use the python in the ADS sandbox ({sys.executable}) # if cmd.startswith("python "): cmd_actual[0] = cmd_actual[0].replace("python", sys.executable) # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail # with: # # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128) # # Setting it to a default value of "en_US.UTF-8" enables pip install to complete # if platform.system() == "Darwin" and "LC_ALL" not in os.environ: os.environ["LC_ALL"] = "en_US.UTF-8" # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc` # if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ: cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc") # To aid supportabilty, determine which binary file will actually be executed on the machine # which_binary = None # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we # look for the 2nd installation of CURL in the path) if platform.system() == "Windows" and cmd.startswith("curl "): path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p, "curl.exe") if os.path.exists(p) and os.access(p, os.X_OK): if p.lower().find("system32") == -1: cmd_actual[0] = p which_binary = p break # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this # seems to be required for .msi installs of azdata.cmd/az.cmd. 
(otherwise Popen returns FileNotFound) # # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split. # if which_binary == None: which_binary = shutil.which(cmd_actual[0]) if which_binary == None: if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None: display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") else: cmd_actual[0] = which_binary start_time = datetime.datetime.now().replace(microsecond=0) print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)") print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})") print(f" cwd: {os.getcwd()}") # Command-line tools such as CURL and AZDATA HDFS commands output # scrolling progress bars, which causes Jupyter to hang forever, to # workaround this, use no_output=True # # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait # wait = True try: if no_output: p = Popen(cmd_actual) else: p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1) with p.stdout: for line in iter(p.stdout.readline, b''): line = line.decode() if return_output: output = output + line else: if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file regex = re.compile(' "(.*)"\: "(.*)"') match = regex.match(line) if match: if match.group(1).find("HTML") != -1: display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"')) else: display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"')) wait = False break # otherwise infinite hang, have not worked out why yet. 
else: print(line, end='') if rules is not None: apply_expert_rules(line) if wait: p.wait() except FileNotFoundError as e: if install_hint is not None: display(Markdown(f'HINT: Use {install_hint} to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait() if not no_output: for line in iter(p.stderr.readline, b''): try: line_decoded = line.decode() except UnicodeDecodeError: # NOTE: Sometimes we get characters back that cannot be decoded(), e.g. # # \xa0 # # For example see this in the response from `az group create`: # # ERROR: Get Token request returned http error: 400 and server # response: {"error":"invalid_grant",# "error_description":"AADSTS700082: # The refresh token has expired due to inactivity.\xa0The token was # issued on 2018-10-25T23:35:11.9832872Z # # which generates the exception: # # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte # print("WARNING: Unable to decode stderr line, printing raw bytes:") print(line) line_decoded = "" pass else: # azdata emits a single empty line to stderr when doing an hdfs cp, don't # print this empty "ERR:" as it confuses. 
# if line_decoded == "": continue print(f"STDERR: {line_decoded}", end='') if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"): exit_code_workaround = 1 # inject HINTs to next TSG/SOP based on output in stderr # if user_provided_exe_name in error_hints: for error_hint in error_hints[user_provided_exe_name]: if line_decoded.find(error_hint[0]) != -1: display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.')) # apply expert rules (to run follow-on notebooks), based on output # if rules is not None: apply_expert_rules(line_decoded) # Verify if a transient error, if so automatically retry (recursive) # if user_provided_exe_name in retry_hints: for retry_hint in retry_hints[user_provided_exe_name]: if line_decoded.find(retry_hint) != -1: if retry_count < MAX_RETRIES: print(f"RETRY: {retry_count} (due to: {retry_hint})") retry_count = retry_count + 1 output = run(cmd, return_output=return_output, retry_count=retry_count) if return_output: return output else: return elapsed = datetime.datetime.now().replace(microsecond=0) - start_time # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so # don't wait here, if success known above # if wait: if p.returncode != 0: raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n') else: if exit_code_workaround !=0 : raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n') print(f'\nSUCCESS: {elapsed}s elapsed.\n') if return_output: return output def load_json(filename): """Load a json file from disk and return the contents""" with open(filename, encoding="utf8") as json_file: return json.load(json_file) def load_rules(): """Load any 'expert rules' from the metadata of this notebook (.ipynb) that 
should be applied to the stderr of the running executable""" # Load this notebook as json to get access to the expert rules in the notebook metadata. # try: j = load_json("cer040-install-service-proxy-cert.ipynb") except: pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename? else: if "metadata" in j and \ "azdata" in j["metadata"] and \ "expert" in j["metadata"]["azdata"] and \ "expanded_rules" in j["metadata"]["azdata"]["expert"]: rules = j["metadata"]["azdata"]["expert"]["expanded_rules"] rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first. # print (f"EXPERT: There are {len(rules)} rules to evaluate.") return rules def apply_expert_rules(line): """Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so inject a 'HINT' to the follow-on SOP/TSG to run""" global rules for rule in rules: notebook = rule[1] cell_type = rule[2] output_type = rule[3] # i.e. stream or error output_type_name = rule[4] # i.e. ename or name output_type_value = rule[5] # i.e. SystemExit or stdout details_name = rule[6] # i.e. evalue or text expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it! 
if debug_logging: print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.") if re.match(expression, line, re.DOTALL): if debug_logging: print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook)) match_found = True display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.')) print('Common functions defined successfully.') # Hints for binary (transient fault) retry, (known) error and install guide # retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']} error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]} install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']} ``` ### Get the Kubernetes namespace for the big data cluster Get the namespace of the Big Data Cluster use the kubectl command line interface . **NOTE:** If there is more than one Big Data Cluster in the target Kubernetes cluster, then either: - set \[0\] to the correct value for the big data cluster. - set the environment variable AZDATA\_NAMESPACE, before starting Azure Data Studio. 
``` # Place Kubernetes namespace name for BDC into 'namespace' variable if "AZDATA_NAMESPACE" in os.environ: namespace = os.environ["AZDATA_NAMESPACE"] else: try: namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True) except: from IPython.display import Markdown print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.") display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.')) raise print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}') ``` ### Create a temporary directory to stage files ``` # Create a temporary directory to hold configuration files import tempfile temp_dir = tempfile.mkdtemp() print(f"Temporary directory created: {temp_dir}") ``` ### Helper function to save configuration files to disk ``` # Define helper function 'save_file' to save configuration files to the temporary directory created above import os import io def save_file(filename, contents): with io.open(os.path.join(temp_dir, filename), "w", encoding='utf8', newline='\n') as text_file: text_file.write(contents) print("File saved: " + os.path.join(temp_dir, filename)) print("Function `save_file` defined successfully.") ``` ### Get name of the ‘Running’ `controller` `pod` ``` # Place the name of the 'Running' controller pod in variable `controller` controller = run(f'kubectl get pod --selector=app=controller -n {namespace} -o jsonpath={{.items[0].metadata.name}} --field-selector=status.phase=Running', 
return_output=True) print(f"Controller pod name: {controller}") ``` ### Get the name of the `management proxy` `pod` ``` # Place the name of the mgmtproxy pod in variable `pod` pod = run(f'kubectl get pod --selector=app=mgmtproxy -n {namespace} -o jsonpath={{.items[0].metadata.name}}', return_output=True) print(f"Management proxy pod name: {pod}") ``` ### Copy certificate files from `controller` to local machine ``` import os cwd = os.getcwd() os.chdir(temp_dir) # Use chdir to workaround kubectl bug on Windows, which incorrectly processes 'c:\' on kubectl cp cmd line run(f'kubectl cp {controller}:{test_cert_store_root}/{app_name}/{prefix_keyfile_name}-certificate.pem {prefix_keyfile_name}-certificate.pem -c controller -n {namespace}') run(f'kubectl cp {controller}:{test_cert_store_root}/{app_name}/{prefix_keyfile_name}-privatekey.pem {prefix_keyfile_name}-privatekey.pem -c controller -n {namespace}') os.chdir(cwd) ``` ### Copy certificate files from local machine to `controldb` ``` import os cwd = os.getcwd() os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line run(f'kubectl cp {prefix_keyfile_name}-certificate.pem controldb-0:/var/opt/mssql/{prefix_keyfile_name}-certificate.pem -c mssql-server -n {namespace}') run(f'kubectl cp {prefix_keyfile_name}-privatekey.pem controldb-0:/var/opt/mssql/{prefix_keyfile_name}-privatekey.pem -c mssql-server -n {namespace}') os.chdir(cwd) ``` ### Get the `controller-db-rw-secret` secret Get the controller SQL symmetric key password for decryption.
``` import base64 controller_db_rw_secret = run(f'kubectl get secret/controller-db-rw-secret -n {namespace} -o jsonpath={{.data.encryptionPassword}}', return_output=True) controller_db_rw_secret = base64.b64decode(controller_db_rw_secret).decode('utf-8') print("controller_db_rw_secret retrieved") ``` ### Update the files table with the certificates through opened SQL connection ``` import os sql = f""" OPEN SYMMETRIC KEY ControllerDbSymmetricKey DECRYPTION BY PASSWORD = '{controller_db_rw_secret}' DECLARE @FileData VARBINARY(MAX), @Key uniqueidentifier; SELECT @Key = KEY_GUID('ControllerDbSymmetricKey'); SELECT TOP 1 @FileData = doc.BulkColumn FROM OPENROWSET(BULK N'/var/opt/mssql/{prefix_keyfile_name}-certificate.pem', SINGLE_BLOB) AS doc; EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/{scaledset_name}/containers/{container_name}/files/{prefix_keyfile_name}-certificate.pem', @Data = @FileData, @KeyGuid = @Key, @Version = '0', @User = '{user}', @Group = '{group}', @Mode = '{mode}'; SELECT TOP 1 @FileData = doc.BulkColumn FROM OPENROWSET(BULK N'/var/opt/mssql/{prefix_keyfile_name}-privatekey.pem', SINGLE_BLOB) AS doc; EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/{scaledset_name}/containers/{container_name}/files/{prefix_keyfile_name}-privatekey.pem', @Data = @FileData, @KeyGuid = @Key, @Version = '0', @User = '{user}', @Group = '{group}', @Mode = '{mode}'; """ save_file("insert_certificates.sql", sql) cwd = os.getcwd() os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line run(f'kubectl cp insert_certificates.sql controldb-0:/var/opt/mssql/insert_certificates.sql -c mssql-server -n {namespace}') run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "SQLCMDPASSWORD=`cat /var/run/secrets/credentials/mssql-sa-password/password` /opt/mssql-tools/bin/sqlcmd -b -U sa -d controller -i /var/opt/mssql/insert_certificates.sql" """) # Clean up run(f"""kubectl exec 
controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/insert_certificates.sql" """) run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/{prefix_keyfile_name}-certificate.pem" """) run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/{prefix_keyfile_name}-privatekey.pem" """) os.chdir(cwd) ``` ### Clear out the controller\_db\_rw\_secret variable ``` controller_db_rw_secret= "" ``` ### Clean up certificate staging area Remove the certificate files generated on disk (they have now been placed in the controller database). ``` cmd = f"rm -r {test_cert_store_root}/{app_name}" run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "{cmd}"') ``` ### Restart Pod ``` run(f'kubectl delete pod {pod} -n {namespace}') ``` ### Clean up temporary directory for staging configuration files ``` # Delete the temporary directory used to hold configuration files import shutil shutil.rmtree(temp_dir) print(f'Temporary directory deleted: {temp_dir}') print('Notebook execution complete.') ``` Related ------- - [CER041 - Install signed Knox certificate](../cert-management/cer041-install-knox-cert.ipynb) - [CER030 - Sign Management Proxy certificate with generated CA](../cert-management/cer030-sign-service-proxy-generated-cert.ipynb) - [CER020 - Create Management Proxy certificate](../cert-management/cer020-create-management-service-proxy-cert.ipynb)
github_jupyter
``` import os #this module aims to test bam file's format via ValidateSamFile (a java picard tool) #if VSF output ERROR then break and show user the ERROR info for format correction #otherwise it shall continue the qc process location="/home/kechanglin/data/new_test.bam" #input a bam's directory valinfo=os.popen("java -jar /home/kechanglin/picard.jar ValidateSamFile I="+location+" MODE=SUMMARY") import re def validateBAM(valinfo): for i in valinfo: if not re.match('ERROR',str(i))== None: print(i) validateBAM(valinfo) import pysam location1="/data/yangxiaoxia/bqsr.bam" psentity=pysam.AlignmentFile(location1,'rb') # BAMheader=pysam.view("-H",location1) a=str(re.findall(r'PL:\w+',BAMheader))[5:-2] #output platform name print(a) print(BAMheader) BAMheader[67:76] class BAMinput(object): """input bam file and check format""" def __init__(self,directory): self.directory=directory def ValidateBAM(self,crash=False): self.valinfo=os.popen("java -jar /home/kechanglin/picard.jar ValidateSamFile I="+self.directory) for i in self.valinfo: if not re.match('ERROR',str(i))== None: print(i) self.crash=True if crash==True: raise ValueError('fatal error in file format!') def gen_identification(self): self.treatment=False self.BAMheader=pysam.view("-H",self.directory) self.platform=str(re.findall(r'PL:\w+',self.BAMheader))[5:-2] #output platform name try: if plat[str.upper(self.platform)]=='gen3': self.treatment=True #treatment arg is for furture gen3 file handling except KeyError as e: raise KeyError('no equipment info provided, thus unable to tell sequencing technology, it could be a converted file.') finally: return self.treatment # @staticmethod def check_md5(filepath, md5): """Check File md5.""" filemd5 = FileMD5(filepath) return filemd5.md5check(md5) class FileMD5(object): """Generate md5.""" def __init__(self, filepath): """Init class.""" filepath = os.path.abspath(filepath) if not os.path.isfile(filepath): raise ValueError("Can not find file %s!" 
% filepath) self.filepath = filepath @property def md5(self): """Get md5 of file.""" hash_md5 = hashlib.md5() with open(self.filepath, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_md5.update(chunk) return hash_md5.hexdigest() def write_md5(self, outfile): """Write md5 of file to outfile.""" with open(outfile, 'wt') as wt: wt.write('%s %s' % (self.md5, self.filepath)) def md5check(self, md5): """Check md5 file.""" if self.md5 != md5: return False else: return True #maybe useless code def md5sum(md5file): """Md5sum -c file.""" md5, filename = ('',) * 2 with open(md5file) as wt: for line in wt: if line.startswith("MD5"): filename, md5 = line.split('=') filename = filename.replace('MD5(', '').replace(')', '') else: md5, filename = line.split(' ')[:2] md5 = md5.strip() filename = filename.strip() filemd5 = FileMD5(filename) infor = 'succeed' if filemd5.md5check(md5) else 'fail' print('{} md5 check: {}'.format(filename, infor)) import logging import os import re import sys import time import argparse import json import pysam import hashlib import subprocess plat={ 'PACBIO':'gen3', 'SEQUEL':'gen3', 'ILLUMINA':'gen2', 'MGISEQ':'gen2' } qualimap_loc="/home/kechanglin/biosoft/qualimap_v2.2.1/qualimap" qualimap_out="/home/kechanglin/data" """Run system command.""" def run_cmd(cmd): pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE) stdout,stderr = pipe.communicate() pipe.wait() if pipe.returncode != 0: raise ValueError("Failed to run command :%s, error mesages: %s." 
% (cmd, pipe.stderr.read().decode('utf-8'))) else: return stdout,stderr # return pipe.returncode,stdout,stderr testinstance=BAMinput(location) testinstance.ValidateBAM() testinstance.gen_identification() testinstance=BAMinput(location) testinstance.ValidateBAM() #testinstance.gen_identification() check_md5(location1,'093dd0fec383a9d9') qc_info=os.popen(qualimap_loc+' bamqc -bam '+location+' -outdir '+qualimap_out+' -outformat PDF:HTML') print(qc_info) qc_sub_run=run_cmd(qualimap_loc+' bamqc -bam '+location+' -outdir '+qualimap_out+' -outformat PDF:HTML') run_list=str(qc_sub_run[0]).split('\\n') for i in range(len(run_list)): print(run_list[i]) import hashlib #individual check on md5 def md5(filepath): """Get md5 of file.""" hash_md5 = hashlib.md5() with open(filepath, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_md5.update(chunk) return hash_md5.hexdigest() md5(location) ```
github_jupyter
# Exploratory analyses in Macaulay 2016 ## Figure 1 **A.** In this study, the authors focused on "thrombocytes," which are the equivalent of megakaryotes in fish, and are marked by expression of CD41. TThey performed single-cell RNA-seq on FACS-sorted GFP+ cells from a CD41:EGFP transgenic fish, using the C1, aligning to the genome using STAR and used Salmon to quantify gene expression. <br> **B.** They then performed ICA and did t-SNE on the (4) ICA components! They're colored by whether this was a high or low GFP expressing cell.<br> **C.** They then clustered the cells into groups by cutting a hierarchical clustering tree. ![Figure 1](figures/figure1.png) Let's dig into this a little deeper. What if they did PCA instead of ICA? Or MDS instead of t-SNE? Or no previous filtering before t-SNE? How did they define their clusters? Make a directory for saving the figures ``` mkdir figures ``` Load all Python libraries and the data. ``` # Must be the first import for compatibility reasons from __future__ import print_function # Alphabetical order of modules is convention import ipywidgets import matplotlib.pyplot as plt import numpy as np import pandas as pd from scipy import linalg from scipy.cluster.hierarchy import dendrogram import seaborn as sns from sklearn.decomposition import FastICA, PCA from sklearn.manifold import TSNE, MDS # Use plotting settings that I like sns.set(style='white', context='talk') %matplotlib inline genes = pd.read_csv('macaulay2016/gene_expression_s.csv', index_col=0).sort_index(0).sort_index(1) sample_data = pd.read_csv('macaulay2016/sample_info_qc.csv', index_col=0).sort_index(0).sort_index(1) genes = genes.ix[:, sample_data[sample_data["Pass QC"]].index] sample_data = sample_data[sample_data["Pass QC"]] sample_data['condition_color'] = ['#31a354' if c == 'HIGH' else '#e5f5e0' for c in sample_data['condition']] ercc_idx = filter(lambda i: 'ERCC' in i, genes.index) egenes = genes.drop(ercc_idx) egenes = egenes.drop('GFP') egenes = (egenes 
/ egenes.sum()) * 1e6 mask = (egenes > 1).sum(1) > 2 egenes = egenes.ix[mask] original_expression_data = np.log10(egenes.T + 1).copy() gene_annotation = pd.read_csv('macaulay2016/zv9_gene_annotation.txt', sep='\t', index_col=0) gene_annotation = gene_annotation.ix[egenes.index] # --- Read their supplemental data to get the assigned clusters from the paper, for comparison --- # supplemental_data_sample_info = pd.read_csv('supplementary-data-1-sample-info/original_experiment_sample_info.csv', index_col=0) # Convert the string of tuples into a (r,g,b) tuple for matplotlib supplemental_data_sample_info['cluster_color'] = supplemental_data_sample_info['cluster_color'].map(eval) supplemental_data_sample_info = supplemental_data_sample_info.reindex(index=original_expression_data.index) ``` Interactive function for trying dimensionality reduction and different components. They said to pick their cutoff they used the frobenius norm reconstruction error, meaning, the difference between the ICA- or PCA-predicted matrix given the number of components, versus the original matrix, which we will plot as well. 
``` # For comparison, get the norm of the two matrices compared to if you added 1 to all values in the data # (different points in euclidean space) reconstruct_norm_within_one = np.linalg.norm(original_expression_data - (original_expression_data+1), 'fro') def explore_matrix_decomposition(algorithm, n_components, color_by): if algorithm == 'ICA': reducer = FastICA(n_components, random_state=3984) elif algorithm == 'PCA': reducer = PCA(n_components) Y = reducer.fit_transform(original_expression_data.copy()) XX = pd.DataFrame(Y, index=egenes.columns) if n_components == 4 and algorithm == 'ICA': XX.columns = ['difference_component', 'within_small_component', 'outlier_component', 'within_large_component'] if color_by == 'EGFP': color = sample_data['condition_color'] elif color_by == 'Cluster assignment': color = supplemental_data_sample_info['cluster_color'] g = sns.PairGrid(XX) g.map(plt.scatter, color=color, linewidths=0.25, edgecolors='k', s=50) # Reduce the number of ticks for ax in g.axes.flat: ax.locator_params(nbins=4) # --- Get how well this number of components reconstructs the original data --- # reconstruction_norms = [] for i in range(1, n_components+1): reconstructed = pd.DataFrame(XX.values[:, :i].dot(reducer.components_[:i, :]), index=original_expression_data.index, columns=original_expression_data.columns) reconstruction_error = original_expression_data.subtract(reconstructed).abs() reconstruction_norms.append(np.linalg.norm(reconstruction_error)) reconstruction_norms = np.array(reconstruction_norms) print('reconstruction norms:', reconstruction_norms) print('Compared to the difference between the original matrix +1:', reconstruct_norm_within_one) cumulative_difference = reconstruction_norms[:-1] - reconstruction_norms[1:] xticks = np.arange(n_components) fig, axes = plt.subplots(figsize=(8, 3), ncols=2) ax = axes[0] ax.plot(reconstruction_norms, 'o-') ax.set(xlabel='Components', ylabel='Frobenius norm', xticks=xticks, xlim=(-0.5, n_components-0.5)) ax 
= axes[1] plt.sca(ax) ax.plot(cumulative_difference, 'o-', color='#262626') # #262626 = 90% black xticklabels = ['{}-{}'.format(i+1, i) for i in range(n_components-1)] ax.set(xticklabels=xticklabels, xlabel='Difference between components', xticks=xticks, xlim=(-0.5, n_components-1.5)) fig.suptitle("Frobenius Norm of Reconstruction Error") sns.despine() ipywidgets.interact(explore_matrix_decomposition, algorithm=ipywidgets.Dropdown(options=['PCA', 'ICA'], value='ICA'), n_components=ipywidgets.IntSlider(value=4, min=2, max=10, step=1), color_by=ipywidgets.Dropdown(options=['EGFP', 'Cluster assignment'], value='EGFP')); ``` #### [Quiz 4.1.1](https://docs.google.com/forms/d/1KVBaFSlP6eo-UiM8kAuRd4SMBlTccQbxquXEYlUNWZk/viewform) ``` ICA_PCA = ('ICA', "PCA") ICA_COLUMNS = ['difference_component', 'within_small_component', 'outlier_component', 'within_large_component'] # Write a short function to abstract away possibly decomposing the data into parts, or maybe not. def maybe_decompose(matrix_decomposer, n_components): if matrix_decomposer in ICA_PCA: if matrix_decomposer == 'ICA': reducer = FastICA(n_components, random_state=3984) elif matrix_decomposer == 'PCA': reducer = PCA(n_components) decomposed = reducer.fit_transform(original_expression_data.copy()) decomposed = pd.DataFrame(decomposed, index=egenes.columns) if n_components == 4 and matrix_decomposer == 'ICA': decomposed.columns = ICA_COLUMNS else: decomposed = original_expression_data.copy() return decomposed def explore_manifold_learning(matrix_decomposer, n_components, color_by, manifold_learner): decomposed = maybe_decompose(matrix_decomposer, n_components) if manifold_learner == 't-SNE': embedder = TSNE(n_components=2, perplexity=75, random_state=254) elif manifold_learner == 'MDS': embedder = MDS(n_components=2, random_state=254) embedded = embedder.fit_transform(decomposed) embedded = pd.DataFrame(embedded, index=decomposed.index) fig, ax = plt.subplots(figsize=(4, 4)) if color_by == 'EGFP': color = 
sample_data['condition_color'] elif color_by == 'Cluster assignment': color = supplemental_data_sample_info['cluster_color'] plt.scatter(embedded[0], embedded[1], c=color, s=50) # Empty the tick labels ax.set(xticks=[], yticks=[]) sns.despine(bottom=True, left=True) fig.tight_layout() ipywidgets.interact(explore_manifold_learning, matrix_decomposer=ipywidgets.Dropdown(options=['PCA', 'ICA', "None"], value='ICA'), n_components=ipywidgets.IntSlider(value=4, min=2, max=10, step=1), color_by=ipywidgets.Dropdown(options=['EGFP', 'Cluster assignment'], value='EGFP'), manifold_learner=ipywidgets.Dropdown(options=['t-SNE', 'MDS'], value='t-SNE')); ``` #### [Quiz 4.1.2](https://docs.google.com/forms/d/1_2SAkIFB2dxTGjfIoDQMFSEJrw1RetpHrBVFAbUyjoQ/viewform) ### How did they assign cells to clusters? The authors used hierarchical clustering with the ward algorithm, and cut the tree at a couple different thresholds to get broad clusters, and did a smaller cut to get finer clusters. ``` """ Cluster assignment and coloring functions from supplemental notebooks """ from collections import defaultdict from matplotlib.colors import rgb2hex, colorConverter class Clusters(dict): def _repr_html_(self): html = '<table style="border: 0;">' for c in self: hx = rgb2hex(colorConverter.to_rgb(c)) html += '<tr style="border: 0;">' \ '<td style="background-color: {0}; ' \ 'border: 0;">' \ '<code style="background-color: {0};">'.format(hx) html += c + '</code></td>' html += '<td style="border: 0"><code>' html += repr(self[c]) + '</code>' html += '</td></tr>' html += '</table>' return html def get_cluster_classes(den, label='ivl'): cluster_idxs = defaultdict(list) for c, pi in zip(den['color_list'], den['icoord']): for leg in pi[1:3]: i = (leg - 5.0) / 10.0 if abs(i - int(i)) < 1e-5: cluster_idxs[c].append(int(i)) cluster_classes = Clusters() for c, l in cluster_idxs.items(): i_l = list(sorted([den[label][i] for i in l])) cluster_classes[c] = i_l return cluster_classes def 
get_cluster_limits(den): cluster_idxs = defaultdict(list) for c, pi in zip(den['color_list'], den['icoord']): for leg in pi[1:3]: i = (leg - 5.0) / 10.0 if abs(i - int(i)) < 1e-5: cluster_idxs[c].append(int(i)) cluster_limits = Clusters() for c in cluster_idxs: cluster_limits[c] = (min(cluster_idxs[c]), max(cluster_idxs[c])) return cluster_limits ``` #### Transformed gene expression values to ICA ``` n = 4 ica = FastICA(n, random_state=3984) reduced = ica.fit_transform(original_expression_data) reduced = pd.DataFrame(reduced, index=original_expression_data.index, columns=ICA_COLUMNS) clm = sns.clustermap(reduced, method='ward', lw=0, col_cluster=False); fig, ax = plt.subplots(figsize=(10, 3)) thr = 0.8 cden = dendrogram(clm.dendrogram_row.linkage, color_threshold=thr, labels=original_expression_data.index); plt.axhline(thr, color='k'); plt.xticks(rotation=90, fontsize=4); clusters = get_cluster_classes(cden) clusters ``` Get the cells in the clusters ``` cell_color = [] for cell in original_expression_data.index: for color in clusters: if cell in clusters[color]: cell_color.append(color) break ``` They saw that there was a subgroup of cells in the blue/cyan cluster so they made finer clusters: ``` import itertools fig, ax = plt.subplots(figsize=(10, 3)) thr = 0.442 finer_den = dendrogram(clm.dendrogram_row.linkage, color_threshold=thr, labels=original_expression_data.index); plt.axhline(thr, color='k'); plt.xticks(rotation=90, fontsize=4); finer_clusters = get_cluster_classes(finer_den) finer_clusters ``` Get cells in finer clusters and assign clusters ``` finer_cell_color = [] for cell in original_expression_data.index: for color in finer_clusters: if cell in finer_clusters[color]: finer_cell_color.append(color) break named_clusters = {} named_clusters['1a'] = finer_clusters['c'] named_clusters['1b'] = finer_clusters['m'] named_clusters['2'] = clusters['y'] named_clusters['3'] = clusters['m'] named_clusters['4'] = clusters['g'] named_clusters['x'] = clusters['r'] 
palette = sns.color_palette("Set2", 5) named_cluster_colors = {'1a' : palette[0], '1b' : palette[1], '2' : palette[2], '3' : palette[3], '4' : palette[4], 'x' : (0.8, 0.8, 0.8)} cell_cluster = [] for cell in sample_data.index: for cluster in named_clusters: if cell in named_clusters[cluster]: cell_cluster.append(cluster) break # Assign clusters to a column in the metadata sample_data['cluster'] = cell_cluster sample_data['cluster_color'] = sample_data['cluster'].map(named_cluster_colors) # Look at the sizes of the groups sample_data.groupby('cluster').size() ``` Plot the assigned clusters! ``` sns.set_style('white') sns.set_context('talk') plt.scatter(sample_data['tsne_0'], sample_data['tsne_1'], color=sample_data['cluster_color'], s=100, edgecolor='k'); plt.axis('off'); plt.tight_layout(); plt.savefig('figures/tsne_clusters.pdf') from scipy.cluster import hierarchy import matplotlib as mpl sns.set(style='white') # Make the clustering dendrogram colors not suck hierarchy.set_link_color_palette(list(map(mpl.colors.rgb2hex, sns.color_palette('Set3', n_colors=12)))) def explore_clustering(method, metric, dendrogram_thresh, matrix_decomposer, n_components, col_cluster=False): decomposed = maybe_decompose(matrix_decomposer, n_components) # Don't cluster columns when the data isn't decomposed because the raw number of genes is too big if col_cluster and matrix_decomposer not in ICA_PCA: print("Cowardly refusing to cluster the columns when the matrix isn't decomposed because it'll take forever") col_cluster = False clustergrid = sns.clustermap(decomposed, method=method, lw=0, col_cluster=col_cluster, metric=metric, row_colors=sample_data['condition_color']); plt.setp(clustergrid.ax_heatmap.get_yticklabels(), rotation=0, fontsize=4) clustergrid.fig.suptitle('Linkage Method: {}, Distance Metric: {}'.format(method, metric)) # --- Set up dendrogram + t-SNE figure --- # width, height = 12, 3 fig, axes = plt.subplots(figsize=(width, height), ncols=2, 
gridspec_kw=dict(width_ratios=(.75, .25))) # --- Plot dendrogram --- # # sns.set(style='darkgrid') ax = axes[0] # sca = "Set current axes" plt.sca(ax) cden = hierarchy.dendrogram(clustergrid.dendrogram_row.linkage, color_threshold=dendrogram_thresh, labels=decomposed.index, above_threshold_color='DarkSlateGray'); xmin, xmax = ax.get_xlim() if dendrogram_thresh <= xmin: print("The dendrogram threshold is below the axes .. there will be one cluster for all cells") if dendrogram_thresh >= xmax: print("The dendrogram threshold is above the axes .. there will be one cluster for all cells") ax.hlines(dendrogram_thresh, xmin, xmax, color='DarkRed', linestyle='--'); ax.set_axis_bgcolor("#EAEAF2") ax.grid(axis='y', color='white', zorder=1000) ax.set(title='Threshold: {:g}'.format(dendrogram_thresh)) plt.setp(ax.get_xticklabels(), rotation=90, fontsize=4) sns.despine(ax=ax, bottom=True, left=True) # --- Get cluster-defined colors for each cell, for this threshold --- # clusters = get_cluster_classes(cden) cell_color = [] for cell in decomposed.index: for color in clusters: if cell in clusters[color]: cell_color.append(color) break # --- perform t-SNE --- # embedder = TSNE(n_components=2, perplexity=75, random_state=254) embedded = embedder.fit_transform(decomposed) embedded = pd.DataFrame(embedded, index=decomposed.index) # --- Plot the t-SNE result with the cell colors --- # ax = axes[1] ax.scatter(embedded[0], embedded[1], color=cell_color, s=40, linewidths=0.5, edgecolors='k') sns.despine(ax=ax, bottom=True, left=True) ax.set(xticks=[], yticks=[]) ipywidgets.interact(explore_clustering, method=ipywidgets.Dropdown(options=['ward', 'single', 'complete', 'average', 'centroid'], value='ward', description='Linkage Method'), metric=ipywidgets.Dropdown(options=['euclidean', 'cityblock'], value='euclidean', description='Distance Metric'), col_cluster=ipywidgets.Checkbox(value=False, description="Cluster the columns?"), dendrogram_thresh=ipywidgets.FloatText(value=0.8, 
description='Tree cut clustering threshold'), matrix_decomposer=ipywidgets.Dropdown(options=['PCA', 'ICA'], value='ICA', description='Matrix decomposition algorithm'), n_components=ipywidgets.IntSlider(value=4, min=2, max=10, step=1, description='Number of components'),); ``` #### Exploring Clustering - please note below * With "ward" clustering, can only use "euclidean" distance. Try some other distance and you will get an error! * The "Tree cut clustering threshold" thing is tricky - every time any value changes, the entire thing gets recalculated. Best thing to do is to write the threshold you want outside of the box, and then copy-paste it in. * This has a built-in fail-safe to not cluster the columns (components or genes) when you don't decompose the data first, because it'll take too long because there are 20,672 genes. In general, <10k, ideally ~5k is best for clustering because of compute power and how much you'll get out of seeing the clusters * With ICA and 4 components try these two thresholds: (These are what they used in the paper) * 0.8 - Defines broad clusters * 0.442 - Defines the two small clusters in the early population #### [Quiz 4.1.3](https://docs.google.com/forms/d/1yYaDNTTWGT3yr2-K7iEzCqX4WN36h_EY06-WmbcgzSA/viewform) ## Figure 2 ![](figures/figure2.png) ### How did they assign cells to pseudotime? To do this, we'll follow along with their notebook, [3. Progression ordering and plots](macaulay2016/3.%20Progression%20ordering%20and%20plots.ipynb)
github_jupyter
## _*H2 energy plot computed using ExcitationPreserving*_ This notebook demonstrates using Qiskit Chemistry to plot graphs of the ground state energy of the Hydrogen (H2) molecule over a range of inter-atomic distances using VQE and ExcitationPreserving. It is compared to the same energies as computed by the NumPyMinimumEigensolver. `ExcitationPreserving` is a particle preserving variational form and should be used in conjunction with operator `jordan_wigner mapping` and `HarteeFock` initial state. This notebook has been written to use the PYQUANTE chemistry driver. See the PYQUANTE chemistry driver readme if you need to install the external PyQuante2 library that this driver requires. ``` import numpy as np import pylab import copy from qiskit import BasicAer from qiskit.aqua import aqua_globals, QuantumInstance from qiskit.aqua.algorithms import NumPyMinimumEigensolver, VQE from qiskit.aqua.components.optimizers import COBYLA from qiskit.circuit.library import ExcitationPreserving from qiskit.chemistry.drivers import PyQuanteDriver, BasisType from qiskit.chemistry.core import Hamiltonian, QubitMappingType from qiskit.chemistry.components.initial_states import HartreeFock molecule = 'H .0 .0 -{0}; H .0 .0 {0}' algorithms = ['VQE', 'NumPyMinimumEigensolver'] start = 0.5 # Start distance by = 0.5 # How much to increase distance by steps = 20 # Number of steps to increase by energies = np.empty([len(algorithms), steps+1]) hf_energies = np.empty(steps+1) distances = np.empty(steps+1) eval_counts = np.empty(steps+1) aqua_globals.random_seed = 50 print('Processing step __', end='') for i in range(steps+1): print('\b\b{:2d}'.format(i), end='', flush=True) d = start + i*by/steps for j in range(len(algorithms)): driver = PyQuanteDriver(atoms=molecule.format(d/2), basis=BasisType.BSTO3G) qmolecule = driver.run() operator = Hamiltonian(qubit_mapping=QubitMappingType.JORDAN_WIGNER, two_qubit_reduction=False) qubit_op, aux_ops = operator.run(qmolecule) if algorithms[j] == 
'NumPyMinimumEigensolver': result = NumPyMinimumEigensolver(qubit_op).run() else: optimizer = COBYLA(maxiter=10000) initial_state = HartreeFock(operator.molecule_info['num_orbitals'], operator.molecule_info['num_particles'], qubit_mapping=operator._qubit_mapping, two_qubit_reduction=operator._two_qubit_reduction) var_form = ExcitationPreserving(qubit_op.num_qubits, initial_state=initial_state) algo = VQE(qubit_op, var_form, optimizer) result = algo.run(QuantumInstance(BasicAer.get_backend('statevector_simulator'), seed_simulator=aqua_globals.random_seed, seed_transpiler=aqua_globals.random_seed)) eval_counts[i] = result.optimizer_evals result = operator.process_algorithm_result(result) energies[j][i] = result.energy hf_energies[i] = result.hartree_fock_energy distances[i] = d print(' --- complete') print('Distances: ', distances) print('Energies:', energies) print('Hartree-Fock energies:', hf_energies) print('VQE num evaluations:', eval_counts) pylab.plot(distances, hf_energies, label='Hartree-Fock') for j in range(len(algorithms)): pylab.plot(distances, energies[j], label=algorithms[j]) pylab.xlabel('Interatomic distance') pylab.ylabel('Energy') pylab.title('H2 Ground State Energy') pylab.legend(loc='upper right'); pylab.plot(distances, np.subtract(hf_energies, energies[1]), label='Hartree-Fock') pylab.plot(distances, np.subtract(energies[0], energies[1]), label='VQE') pylab.xlabel('Interatomic distance') pylab.ylabel('Energy') pylab.yscale('log') pylab.title('Energy difference from NumPyMinimumEigensolver') pylab.legend(loc='center right'); pylab.plot(distances, eval_counts, '-o', color=[0.8500, 0.3250, 0.0980], label='VQE') pylab.xlabel('Interatomic distance') pylab.ylabel('Evaluations') pylab.title('VQE number of evaluations') pylab.legend(loc='upper left'); ```
github_jupyter
``` import pandas as pd import numpy as np from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin import lightgbm as lgb from sklearn.pipeline import Pipeline from sklearn.model_selection import train_test_split, StratifiedKFold, RandomizedSearchCV from sklearn.metrics import f1_score, make_scorer from scipy.stats import randint as sp_randint from scipy.stats import uniform as sp_uniform train_data = pd.read_csv("../input/train.csv") test_data = pd.read_csv("../input/test.csv") sub_orig = pd.read_csv("../input/sample_submission.csv", index_col = 0) target = train_data['Target'] train_data.shape test_data.shape test_data.loc[test_data['rez_esc'] == 99.0 , 'rez_esc'] = 5 class MissingValuesImputer(BaseEstimator, TransformerMixin): def __init__(self, impute_zero_columns): self.impute_zero_columns = impute_zero_columns def fit(self, X, y = None): print("Missing Values Imputer") return self def transform(self, X, y = None): # Fill missing values for v18q1, v2a1 and rez_esc for column in self.impute_zero_columns: X[column] = X[column].fillna(0) # For meaneduc we use the average schooling of household adults self.X_with_meaneduc_na = X[pd.isnull(X['meaneduc'])] self.mean_escolari_dict = dict(self.X_with_meaneduc_na.groupby('idhogar')['escolari'].apply(np.mean)) for row_index in self.X_with_meaneduc_na.index: row_idhogar = X.at[row_index, 'idhogar'] X.at[row_index, 'meaneduc'] = self.mean_escolari_dict[row_idhogar] X.at[row_index, 'SQBmeaned'] = np.square(self.mean_escolari_dict[row_idhogar]) return X class RemoveObjectTransformer(BaseEstimator, TransformerMixin): def __init__(self): self.target = ['dependency'] self.source = ['SQBdependency'] def fit(self, X, y = None): print("Remove Object Imputer") return self def transform(self, X, y = None): for i in range(0, len(self.target)): X[self.target[i]] = np.sqrt(X[self.source[i]]) X.drop(self.source, axis=1, inplace=True) return X def calculate_edu(row): if (row['edjefe'] == 'yes' and row['edjefa'] == 'no') 
or (row['edjefe'] == 'no' and row['edjefa'] == 'yes'): return 1 if row['edjefe'] == 'no' and row['edjefa'] == 'no': return 0 if row['edjefe'] == 'yes' or row['edjefe'] == 'no': return pd.to_numeric(row['edjefa']) return pd.to_numeric(row['edjefe']) class CategoricalVariableTransformer(BaseEstimator, TransformerMixin): def __init__(self): pass def fit(self, X, y = None): print("Categorical Variables Transformer") return self def transform(self, X, y = None): X['house_holder_edu'] = X.apply(calculate_edu, axis=1).values X.drop(['edjefe', 'edjefa'], axis=1, inplace=True) return X class UnnecessaryColumnsRemoverTransformer(BaseEstimator, TransformerMixin): def __init__(self, axis = 1): print("Unnecessary Columns Remover Transformer") self.axis = axis self.unnecessary_columns = [ 'r4t3', 'tamhog', 'tamviv', 'hogar_total', 'v18q', 'v14a', 'mobilephone', 'energcocinar1', 'sanitario6', 'Id', 'estadocivil7', 'lugar1', 'area1', 'female', 'agesq', ] def fit(self, X, y = None): unnecessary_columns_to_extend = [ [col for col in X.columns.tolist() if 'SQB' in col], ] for col_list in unnecessary_columns_to_extend: self.unnecessary_columns.extend(col_list) return self def transform(self, X, y = None): X = X.drop(self.unnecessary_columns, axis = self.axis) return X class FeatureEngineeringTransformer(BaseEstimator, TransformerMixin): def __init__(self, axis = 1): self.axis = axis # individual level boolean features self.aggr_mean_list = ['rez_esc', 'dis', 'male', 'female', 'estadocivil1', 'estadocivil2', 'estadocivil3', 'estadocivil4', 'estadocivil5', 'estadocivil6', 'estadocivil7', 'parentesco2', 'parentesco3', 'parentesco4', 'parentesco5', 'parentesco6', 'parentesco7', 'parentesco8', 'parentesco9', 'parentesco10', 'parentesco11', 'parentesco12', 'instlevel1', 'instlevel2', 'instlevel3', 'instlevel4', 'instlevel5', 'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9'] # individual level ordered features self.individual_ordered_features = ['escolari', 'age', 'escolari_age'] def 
fit(self, X, y = None): print("Feature Engineering Transformer") self.more_columns_to_drop = [ [col for col in X.columns.tolist() if 'parentesco' in col and 'parentesco1' not in col], ['idhogar'] ] self.aggregate_features = (['mean', 'max', 'min', 'sum']) return self def transform(self, X, y = None): X['adult'] = X['hogar_adul'] - X['hogar_mayor'] X['dependency_count'] = X['hogar_nin'] + X['hogar_mayor'] X['dependency'] = X['dependency_count'] / X['adult'] X['dependency'] = X['dependency'].fillna(0) X['child_percent'] = X['hogar_nin'] / X['hogar_total'] X['elder_percent'] = X['hogar_mayor'] / X['hogar_total'] X['adult_percent'] = X['hogar_adul'] / X['hogar_total'] X['rent_per_adult'] = X['v2a1'] / X['hogar_adul'] X['rent_per_person'] = X['v2a1'] / X['hhsize'] X['overcrowding_room_and_bedroom'] = (X['hacdor'] + X['hacapo']) / 2 X['no_appliances'] = X['refrig'] + X['computer'] + X['television'] X['r4h1_percent_in_male'] = X['r4h1'] / X['r4h3'] X['r4m1_percent_in_female'] = X['r4m1'] / X['r4m3'] X['r4h1_percent_in_total'] = X['r4h1'] / X['hhsize'] X['r4m1_percent_in_total'] = X['r4m1'] / X['hhsize'] X['r4t1_percent_in_total'] = X['r4t1'] / X['hhsize'] X['rent_per_room'] = X['v2a1'] / X['rooms'] X['bedroom_per_room'] = X['bedrooms'] / X['rooms'] X['elder_per_room'] = X['hogar_mayor'] / X['rooms'] X['adults_per_room'] = X['adult'] / X['rooms'] X['child_per_room'] = X['hogar_nin'] / X['rooms'] X['male_per_room'] = X['r4h3'] / X['rooms'] X['female_per_room'] = X['r4m3'] / X['rooms'] X['room_per_person_household'] = X['hhsize'] / X['rooms'] X['rent_per_bedroom'] = X['v2a1'] / X['bedrooms'] X['edler_per_bedroom'] = X['hogar_mayor'] / X['bedrooms'] X['adults_per_bedroom'] = X['adult'] / X['bedrooms'] X['child_per_bedroom'] = X['hogar_nin'] / X['bedrooms'] X['male_per_bedroom'] = X['r4h3'] / X['bedrooms'] X['female_per_bedroom'] = X['r4m3'] / X['bedrooms'] X['bedrooms_per_person_household'] = X['hhsize'] / X['bedrooms'] X['tablet_per_person_household'] = X['v18q1'] / 
X['hhsize'] X['phone_per_person_household'] = X['qmobilephone'] / X['hhsize'] X['age_12_19'] = X['hogar_nin'] - X['r4t1'] X['escolari_age'] = X['escolari'] / X['age'] X['rez_esc_escolari'] = X['rez_esc'] / X['escolari'] X['rez_esc_r4t1'] = X['rez_esc'] / X['r4t1'] X['rez_esc_r4t2'] = X['rez_esc'] / X['r4t2'] X['rez_esc_r4t3'] = X['rez_esc'] / X['r4t3'] X['rez_esc_age'] = X['rez_esc'] / X['age'] # Create individual-level mean features grouped_mean_df = X.groupby('idhogar')[self.aggr_mean_list] grouped_mean_df = grouped_mean_df.agg((['mean'])) # Create individual-level ordered features grouped_ordered_df = X.groupby('idhogar')[self.individual_ordered_features] grouped_ordered_df = grouped_ordered_df.agg(self.aggregate_features) X = X.join(grouped_mean_df, on = 'idhogar') X = X.join(grouped_ordered_df, on = 'idhogar') # Finally remove the other parentesco columns since we are only going to use only heads of # households for our scoring for col in self.more_columns_to_drop: X = X.drop(col, axis = self.axis) return X class CorrelationOutputer(BaseEstimator, TransformerMixin): def __init__(self): pass def fit(self, X, y = None): return self def transform(self, X, y = None): if 'Target' in X.columns.values: correlation = X.corr() correlation = correlation['Target'].sort_values(ascending=False) print(f'The most 20 positive feature: \n{correlation.head(20)}') print(f'The most 20 negative feature: \n{correlation.tail(20)}') X.drop(['Target'], axis=1, inplace=True) print(f'X has {X.shape[0]} rows, and {X.shape[1]} features') return X class LGBClassifierCV(BaseEstimator, RegressorMixin): def __init__(self, axis = 0, lgb_params = None, fit_params = None, cv = 3, perform_random_search = False, use_train_test_split = False, use_kfold_split = True): self.axis = axis self.lgb_params = lgb_params self.fit_params = fit_params self.cv = cv self.perform_random_search = perform_random_search self.use_train_test_split = use_train_test_split self.use_kfold_split = use_kfold_split 
@property def feature_importances_(self): feature_importances = [] for estimator in self.estimators_: feature_importances.append( estimator.feature_importances_ ) return np.mean(feature_importances, axis = 0) @property def evals_result_(self): evals_result = [] for estimator in self.estimators_: evals_result.append( estimator.evals_result_ ) return np.array(evals_result) @property def best_scores_(self): best_scores = [] for estimator in self.estimators_: best_scores.append( estimator.best_score_['validation']['macroF1'] ) return np.array(best_scores) @property def cv_scores_(self): return self.best_scores_ @property def cv_score_(self): return np.mean(self.best_scores_) @property def best_iterations_(self): best_iterations = [] for estimator in self.estimators_: best_iterations.append( estimator.best_iteration_ ) return np.array(best_iterations) @property def best_iteration_(self): return np.round(np.mean(self.best_iterations_)) def find_best_params_(self, X, y): # Define a search space for the parameters lgb_search_params = { 'num_leaves': sp_randint(20, 100), 'min_child_samples': sp_randint(40, 100), 'min_child_weight': [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4], 'subsample': sp_uniform(loc = 0.75, scale = 0.25), 'colsample_bytree': sp_uniform(loc = 0.8, scale = 0.15), 'reg_alpha': [0, 1e-3, 1e-1, 1, 10, 50, 100], 'reg_lambda': [0, 1e-3, 1e-1, 1, 10, 50, 100] } x_train, x_val, y_train, y_val = train_test_split(X, y, test_size = 0.10, random_state = 42, stratify = y) F1_scorer = make_scorer(f1_score, greater_is_better = True, average = 'macro') lgb_model = lgb.LGBMClassifier(**self.lgb_params) self.fit_params["eval_set"] = [(x_train, y_train), (x_val, y_val)] self.fit_params["verbose"] = 200 rs = RandomizedSearchCV(estimator = lgb_model, param_distributions = lgb_search_params, n_iter = 100, scoring = F1_scorer, cv = 5, refit = True, random_state = 314, verbose = False, fit_params = self.fit_params) # Fit the random search _ = rs.fit(x_train, y_train) 
print("Optimal LGB parameters:") print(rs.best_params_) with open("lgb_best_params.pickle", "wb") as lgb_best_params: pickle.dump(rs.best_params_, lgb_best_params) return rs.best_params_ def fit(self, X, y, **fit_params): print("LGBClassifierCV") # Use only heads of households for scoring X.insert(0, 'Target', y) X = X.query('parentesco1 == 1') y = X['Target'] - 1 X = X.drop(['Target', 'parentesco1'], 1) print("Number of columns in train - " + str(X.shape[1])) self.estimators_ = [] # Use the best parameters to fit a model to whole data if self.perform_random_search: self.lgb_optimal_params = self.find_best_params_(X, y) # Use a simple train-test split. I have found that this gives a better local CV score than K folds. if self.use_train_test_split: x_train, x_val, y_train, y_val = train_test_split(X, y, test_size = 0.1, random_state = 0) lgb_model = lgb.LGBMClassifier(**self.lgb_params) if self.perform_random_search: lgb_model.set_params(**self.lgb_optimal_params) lgb_model.fit( x_train, y_train, eval_set = [(x_train, y_train), (x_val, y_val)], **self.fit_params ) print("Train F1 - " + str(lgb_model.best_score_['train']['macroF1']) + " " + "Validation F1 - " + str(lgb_model.best_score_['validation']['macroF1'])) self.estimators_.append(lgb_model) # When not using random search to tune parameters, proceed with a simple Stratified Kfold CV if self.use_kfold_split: kf = StratifiedKFold(n_splits = self.cv, shuffle = True) for fold_index, (train, valid) in enumerate(kf.split(X, y)): print("Train Fold Index - " + str(fold_index)) lgb_model = lgb.LGBMClassifier(**self.lgb_params) if self.perform_random_search: lgb_model.set_params(**self.lgb_optimal_params) lgb_model.fit( X.iloc[train], y.iloc[train], eval_set = [(X.iloc[train], y.iloc[train]), (X.iloc[valid], y.iloc[valid])], **self.fit_params ) print("Train F1 - " + str(lgb_model.best_score_['train']['macroF1']) + " " + "Validation F1 - " + str(lgb_model.best_score_['validation']['macroF1'])) 
self.estimators_.append(lgb_model) return self def predict(self, X): # Remove this column since we are using only heads of households for scoring X = X.drop('parentesco1', 1) # When not using random search, use voting to get predictions from all CV estimators. y_pred = [] for estimator_index, estimator in enumerate(self.estimators_): print("Estimator Index - " + str(estimator_index)) y_pred.append(estimator.predict(X)) return np.mean(y_pred, axis = self.axis).astype(int) def get_lgb_params(): def evaluate_macroF1_lgb(truth, predictions): pred_labels = predictions.reshape(len(np.unique(truth)), -1).argmax(axis = 0) f1 = f1_score(truth, pred_labels, average = 'macro') return ('macroF1', f1, True) def learning_rate_power_0997(current_iter): base_learning_rate = 0.1 min_learning_rate = 0.02 lr = base_learning_rate * np.power(.995, current_iter) return max(lr, min_learning_rate) lgb_params = {'boosting_type': 'dart', 'class_weight': 'balanced', "objective": 'multiclassova', 'colsample_bytree': 0.932999339566722, 'min_child_samples': 49, 'min_child_weight': 0.01, 'num_leaves': 92, 'reg_alpha': 0.001, 'reg_lambda': 0, 'subsample': 0.7588178065029635, 'metric': None, 'silent': True, 'random_state': 0, 'n_jobs': -1} fit_params = {"early_stopping_rounds": 400, "eval_metric" : evaluate_macroF1_lgb, 'eval_names': ['train', 'validation'], 'verbose': False, 'categorical_feature': 'auto'} return lgb_params, fit_params lgb_params, lgb_fit_params = get_lgb_params() pipeline = Pipeline([ ('na_imputer', MissingValuesImputer(impute_zero_columns = ['v18q1', 'v2a1', 'rez_esc'])), ('remove_imputer', RemoveObjectTransformer()), ('cat_transformer', CategoricalVariableTransformer()), ('feature_engineering_transformer', FeatureEngineeringTransformer()), ('unnecessary_columns_remover_transformer', UnnecessaryColumnsRemoverTransformer()), ('correlation', CorrelationOutputer()), ('lgb', LGBClassifierCV(lgb_params = lgb_params, fit_params = lgb_fit_params, cv = 5, perform_random_search = False, 
use_train_test_split = True, use_kfold_split = False) ) ]) pipeline.fit(train_data.copy(), target) pred = pipeline.predict(test_data.copy()) print("Local CV Score - " + str(pipeline.named_steps['lgb'].cv_score_)) sub_orig['Target'] = pred + 1 sub_orig.to_csv('Pipeline_Base_LGB_'+ str(pipeline.named_steps['lgb'].cv_score_) + '.csv') print(sub_orig.head()) ```
github_jupyter
# Introduction to TimeSeries data TimeSeries data has very distinct features - Temporal component. Basic assumptions of independent data points don’t hold true - Trend: Determinitic or Stochastic - Seasonality: pattern that reflects periodicity or fluctuations #### Additional TimeSeries Datasets https://machinelearningmastery.com/time-series-datasets-for-machine-learning/ ``` import pandas as pd import numpy as np from matplotlib import pyplot as plt %matplotlib inline dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m') data = pd.read_csv('data/AirPassengers.csv', parse_dates=['Month'], index_col='Month',date_parser=dateparse) print data.head() ts = data['#Passengers'] plt.plot(ts) plt.show() ts_log = np.log(ts) from statsmodels.tsa.seasonal import seasonal_decompose decomposition = seasonal_decompose(ts) trend = decomposition.trend seasonal = decomposition.seasonal residual = decomposition.resid plt.subplot(411) plt.plot(ts_log, label='Original') plt.legend(loc='best') plt.subplot(412) plt.plot(trend, label='Trend') plt.legend(loc='best') plt.subplot(413) plt.plot(seasonal,label='Seasonality') plt.legend(loc='best') plt.subplot(414) plt.plot(residual, label='Residuals') plt.legend(loc='best') plt.tight_layout() plt.show() ``` ### Spot checking if TS is Stationary with Moving Average ``` # moving average moving_avg = ts_log.rolling(window=12, center=False).mean() rolstd = ts_log.rolling(window=12, center=False).std() plt.plot(ts_log) plt.plot(moving_avg, color='red') #plt.plot(rolstd, color='black', label= Rolling Std') #plt.legend(loc='best') plt.show() ``` ### Lets explore differencing the series to make it stationary ``` # Subtract rolling mean ts_log_moving_avg_diff = ts_log - moving_avg ts_log_moving_avg_diff.dropna(inplace=True) moving_avg = ts_log_moving_avg_diff.rolling(window=12, center=False).mean() rolstd = ts_log_moving_avg_diff.rolling(window=12, center=False).std() plt.plot(ts_log_moving_avg_diff, label="diff orig") plt.plot(moving_avg, 
color='red', label='mean') plt.legend(loc='best') plt.show() ### EXPONENTIAL SMOOTHING expwighted_avg = ts_log.ewm(halflife=12,ignore_na=False,min_periods=0,adjust=True).mean() plt.plot(ts_log) plt.plot(expwighted_avg, color='red') # Stationary Test with Dickey-Fuller from statsmodels.tsa.stattools import adfuller dftest = adfuller(ts_log, autolag='AIC') dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used']) for key,value in dftest[4].items(): dfoutput['Critical Value (%s)'%key] = value print dfoutput ``` ** Test static value > 1%,5%,10%, hence series is not stationary ** ### Switching Gears -- ARIMA Forecasting Lets now work with a simple shampoo sales dataset ``` #https://datamarket.com/data/set/22r0/sales-of-shampoo-over-a-three-year-period import pandas as pd import matplotlib.pyplot as plt from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error series = pd.read_csv('data/shampoo-sales.csv', header=0, index_col=0, squeeze=True) X = series.values ticks = range(len(X)) size = int(len(X) * 0.7) train, test = X[0:size], X[size:len(X)] history = [float(x) for x in train[:size]] predictions = list() for t in range(len(test)): model = ARIMA(history, order=(5,1,0)) model_fit = model.fit(disp=0) output = model_fit.forecast() yhat = output[0] predictions.append(yhat) obs = test[t] history.append(obs) print('predicted=%f, expected=%f' % (yhat, obs)) error = mean_squared_error(test, predictions) print('Test MSE: %.3f' % error) # plot plt.plot(ticks[-len(test):], test) plt.plot(ticks[-len(test):], predictions, color='red') plt.show() # TimeSeries with LSTM <-- for detailed instructions, look at the univariate time series notebook # Tx = Tx-1, Tx-2 ; Window size = 2 import mxnet as mx import numpy as np dataset = series.values ticks = range(len(dataset)) from sklearn.preprocessing import MinMaxScaler dataset = np.reshape(dataset, (len(dataset), 1)) scaler = 
MinMaxScaler(feature_range=(0,1)) scaled_dataset = scaler.fit_transform(dataset) dataset[:5], scaled_dataset[:5] # align the data seq_len = 2 x = scaled_dataset y = scaled_dataset[: ,[-1]] x[:5], y[:5] dataX = []; dataY = [] for i in range(0, len(y)-seq_len): _x = x[i: i+seq_len] _y = y[i+seq_len] dataX.append(_x) dataY.append(_y) dataX[0], dataY[0] # Tx0, Tx1 => Tx2 # Tx1, Tx2 => Tx3 # Define Itertors #split the data train_size = int(len(dataY) * 0.7) test_size = len(dataY) - train_size batch_size = 8 trainX, testX = np.array(dataX[:train_size]), np.array(dataX[train_size:]) trainY, testY = np.array(dataY[:train_size]), np.array(dataY[train_size:]) train_iter = mx.io.NDArrayIter(data=trainX, label=trainY, batch_size=batch_size, shuffle=True) val_iter = mx.io.NDArrayIter(data=testX, label=testY, batch_size=batch_size, shuffle=False) trainX.shape # Lets build the network data = mx.sym.var("data") data = mx.sym.transpose(data, axes=(1,0,2)) # T N C -- Time Steps/ Seq len; N - Batch Size, C - dimensions in the hidden state ''' Long-Short Term Memory (LSTM) network cell. Parameters: num_hidden (int) – Number of units in output symbol. prefix (str, default ‘lstm_‘) – Prefix for name of layers (and name of weight if params is None). params (RNNParams, default None) – Container for weight sharing between cells. Created if None. forget_bias (bias added to forget gate, default 1.0.) – Jozefowicz et al. 
2015 recommends setting this to 1.0 ''' lstm1 = mx.rnn.LSTMCell(num_hidden=5, prefix='lstm1') lstm2 = mx.rnn.LSTMCell(num_hidden=10, prefix='lstm2') L1, states = lstm1.unroll(length=seq_len, inputs=data, merge_outputs=True, layout="TNC") L2, L2_states = lstm2.unroll(length=seq_len, inputs=L1, merge_outputs=True, layout="TNC") L2_reshape = mx.sym.reshape(L2_states[0], shape=(-1, 0), reverse=True) # (T*N, 10 -- num_hidden lstm2) fc = mx.sym.FullyConnected(L2_reshape, num_hidden=1, name='fc') net = mx.sym.LinearRegressionOutput(data=fc, name="softmax") #mx.viz.plot_network(net) #, shape=(1,2,2)) import logging logging.getLogger().setLevel(logging.DEBUG) num_epochs = 250 model = mx.mod.Module(symbol=net, context=mx.cpu(0)) model.fit(train_data=train_iter, eval_data=val_iter, optimizer="adam", optimizer_params={'learning_rate': 1E-3}, eval_metric="mse", num_epoch=num_epochs ) ``` ``` After 250 epochs INFO:root:Epoch[249] Time cost=0.013 INFO:root:Epoch[249] Validation-mse=0.022094 ``` ``` import matplotlib.pyplot as plt %matplotlib inline test_pred = model.predict(val_iter).asnumpy() print np.mean((test_pred - testY)**2) test_plot = scaler.inverse_transform(test_pred) print test_plot[:5], testY[:5] #plt.plot(ticks[train_size+seq_len:], test_plot) t_plot = np.empty_like(dataset) t_plot[:] = np.nan t_plot[len(trainY): -seq_len] = test_plot #plt.plot(ticks, dataset, label="real data") plt.plot(ticks[len(trainX):], dataset[len(trainX):], label="real data") #only test data plt.plot(ticks, t_plot, label= "pred") #plt.legend() ``` ##### References https://www.analyticsvidhya.com/blog/2016/02/time-series-forecasting-codes-python/
github_jupyter
## High dynamic range and environment maps

This example shows how to:
- setup camera for baking 360 degree panoramic environment map
- save image file with 8/16/32 bits per sample color depth
- use OpenCV to save image in HDR format

![plotoptix ray_tracing_output](https://plotoptix.rnd.team/images/360deg_env_map.jpg "This notebook output")

Saving images to jpg format is great when you want to use them quickly, with no additional editing outside your script. Jpg uses 8 bit/sample color depth and lossy compression - such output of your work is ready to go e.g. to the web. If you plan to apply retouching in an image editing software you'll appreciate saving your renders to a lossless format, with 16 bit/sample color depth. PlotOptiX can do that, tiff (Linux/Windows) and png (Windows) formats are supported. However, the full information is preserved only if 32 bit/sample, floating point precision data, is saved to a file. Such a high dynamic range (HDR) representation keeps all the bright lights and details in shadows without clamping and rounding errors. Exposure and any tonal corrections can be re-adjusted in HDR images without quality losses. And, most importantly, HDR images are the best for lighting scenes as environment maps - that is what we are going to show in this example. HDR image are written by PlotOptiX natively in tiff format, or you can use OpenCV to save such images in the Radiance file format (.hdr).

```
import numpy as np
from plotoptix import TkOptiX
```

Generate some data - balls distributed on a sphere:

```
# n balls of radius in [r0, r0 + 0.3), centres projected onto a sphere of radius R.
n = 2000
r0 = 0.1
R = 4
r = r0 + 0.3 * np.random.rand(n)
# Normalised Gaussian samples give a uniform direction distribution on the sphere.
x = np.random.normal(loc=0, scale=1.0, size=n)
y = np.random.normal(loc=0, scale=1.0, size=n)
z = np.random.normal(loc=0, scale=1.0, size=n)
xyz = np.stack((x, y, z)).T
for i in range(n):
    xyz[i] *= R / np.linalg.norm(xyz[i])
```

A simple function to signal that ray tracing has finished:

```
# Callback invoked by PlotOptiX when accumulation completes (see cell below).
def accum_done(rt: TkOptiX) -> None:
    print("rt completed!")
```

Setup the ray tracing parameters. **Note** that AI denoiser is NOT applied. It could result with a visible seam at the line joining vertical edges of the image, when the image is displayed as an environment map. Instead, only a gamma correction is used and you need to accumulate enough data to reduce the noise.

```
rt = TkOptiX(on_rt_accum_done=accum_done)
rt.set_param(
    min_accumulation_step=2,
    max_accumulation_frames=300
)
rt.set_uint("path_seg_range", 4, 8) # a little more than the default (2,6) to improve the ambient occlusion impression

# Tone mapping: gamma-only postprocessing (no denoiser, see note above).
exposure = 1; gamma = 1.7
rt.set_float("tonemap_exposure", exposure)
rt.set_float("tonemap_gamma", gamma)
rt.add_postproc("Gamma")
```

Setup lighting: one bright warm spherical light and some cold light from the ambient.

```
rt.setup_light("l1", pos=[1.5, 0, 1.5], color=[3.5, 3.2, 2.8], radius=0.75, in_geometry=False)
rt.set_ambient([0.1, 0.2, 0.3])
rt.set_background(0)
```

Setup cameras: one for making the panoramic view, one to show balls from inside the sphere, and one looking from a distance.

```
rt.setup_camera("cam1", cam_type="Panoramic", eye=[0, 0, 0], target=[0, 0, -1], up=[0, 1, 0])
rt.setup_camera("cam2", cam_type="DoF", eye=[0, 0, 2], target=[0, 0, 0], up=[0, 1, 0], aperture_radius=0.2, fov=45, focal_scale=2.8)
rt.setup_camera("cam3", cam_type="DoF", eye=[0, 0, 10], target=[0, 0, 0], up=[0, 1, 0], aperture_radius=0.07, fov=35, focal_scale=0.56)
```

Upload data points:

```
rt.set_data("points", pos=xyz, r=r, c=0.7)
```

Open the GUI window:

```
rt.show()
```

Switch camera views. Let the ray tracing to converge with *cam1* active, this is the image to be used in the next example.

```
rt.set_current_camera("cam2")
rt.set_current_camera("cam1")
```

Save images with 8, 16 and 32 bit/sample color depths. 360 degree environment maps can be inspected with the script ``7_panorama_viewer.py`` or used by the notebook ``10_2_read_hdr_360deg_env_map.ipynb``. **Note:** wait until the image appears in the GUI window; before that image buffers are empty. See callback examples (e.g. [this](https://github.com/rnd-team-dev/plotoptix/blob/master/examples/2_animations_and_callbacks/0_wait_for_raytracing_done.py) or [this](https://github.com/rnd-team-dev/plotoptix/blob/master/examples/2_animations_and_callbacks/0_wait_for_raytracing_done.py)) on how to wait for the result in the code. Here, only a simple message is printed when accumulation is done (see cell #3).

```
rt.save_image("rt_output_8bps.jpg")
rt.save_image("rt_output_16bps.tif", bps="Bps16")
rt.save_image("rt_output_32bps.tif", bps="Bps32")
```

Save the image also in Radiance file, if you have OpenCV installed.

```
import cv2

a = rt.get_rt_output("Bps32", "BGR") # ray tracing output in 32bps depth and channels order required for .hdr format
print(a.dtype, a.shape, np.max(a))   # note it is a floating point array, and strong lights are above 1.0 values

cv2.imwrite('rt_output_32bps.hdr', a)

rt.close()
```
github_jupyter
# Thinking About Data Generating Processes

In this notebook, we are going to demonstrate why thinking about the data generating process can be important.

![TigerPutt](https://media.giphy.com/media/TgMz5yQqqB3VrpMWP8/giphy.gif)

**Note**: This notebook borrows heavily from [this case study](https://mc-stan.org/users/documentation/case-studies/golf.html) in the Stan manual. Many thanks to Andrew Gelman, Mark Broadie, and others for inspiring this exercise.

## The Data

We will be using golf data on whether a golfer makes a putt or not. Columns are:

* `pid`: A unique identifier that tells us which player took the shot
* `distance`: How many inches there are to the hole
* `made`: Whether the putt went in

```
import matplotlib.pyplot as plt
import math
import numpy as np
import pandas as pd
import pymc3 as pm
import scipy.stats as st
import seaborn as sns
import theano.tensor as tt

%matplotlib inline

# Plot colors: data points, logistic regression curve, geometric model curve.
data_color = "#3cad13"
lr_color = "#2b63f0"
gm_color = "#e3822d"

putts = pd.read_csv("./putts.csv", index_col=0)
putts.head()
```

### How often are putts made?

```
# Bin the (sorted) putts into 20 equal-size groups and plot the mean
# distance vs the empirical make rate within each bin.
putts = putts.sort_values("distance")

dist_splits = np.array_split(putts["distance"].values, 20)
dist_mean = [d.mean() for d in dist_splits]

made_splits = np.array_split(putts["made"].values, 20)
made_mean = [m.mean() for m in made_splits]

fig, ax = plt.subplots()

ax.scatter(dist_mean, made_mean, color=data_color)
ax.set_title("Percent of putts made")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
```

## The "Standard" Approach

Typically when faced with a binary classification problem, the first thing that one turns to is logistic regression

Our logistic regression model will only be a function of distance.

### Fit the model

```
from sklearn.linear_model import LogisticRegression

# Single feature: distance (inches); binary target: made.
X = putts.loc[:, ["distance"]].values
y = putts.loc[:, "made"].values

logr = LogisticRegression(solver="lbfgs")
logr.fit(X, y)
```

### Compare with data

```
Xlinspace = np.linspace(12.0, 500, 5000)
probs = logr.predict_proba(Xlinspace[:, None])
prob_miss = probs[:, 0]
prob_make = probs[:, 1]

fig, ax = plt.subplots()

ax.plot(Xlinspace, prob_make, color=lr_color, linewidth=2.0)
ax.annotate("P(make) from Logistic Regression", xy=(75, 0.8), color=lr_color)

ax.scatter(dist_mean, made_mean, color=data_color)
ax.annotate("Binned Putt Probabilities", xy=(375, 0.15), color=data_color)

ax.set_title("Logistic Regression vs Data")
ax.set_ylim(0.0, 1.05)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
```

## Modeling the Data Generating Process

Rather than simply choose logistic regression as our model, let's build a model from first principles

### The probability a putt shot goes in?

First, we need to determine the chance of a putt going in. The following excerpt comes directly from the Stan case studies page,

> The graph below shows a simplified sketch of a golf shot. The dotted line represents the angle within which the ball of radius $r$ must be hit so that it falls within the hole of radius $R$. This threshold angle is $\sin^{−1}((R−r)/x)$. The graph, which is not to scale, is intended to illustrate the geometry of the ball needing to go into the hole.
>
> ![putt_image](./putt.png)
>
> The next step is to model human error. We assume that the golfer is attempting to hit the ball completely straight but that many small factors interfere with this goal, so that the actual angle follows a normal distribution centered at 0 with some standard deviation $\sigma$.
>
> ![putt_image](./error.png)
>
> The probability the ball goes in the hole is then the probability that the angle is less than the threshold; that is, $\text{Pr}(|angle| < \sin^{−1}((R−r) / x)) = 2 \Phi \left( \frac{\sin^{−1}((R−r)/x)}{\sigma} \right) - 1$, where $\Phi$ is the cumulative normal distribution function. The only unknown parameter in this model is $\sigma$, the standard deviation of the distribution of shot angles...
>
> Our model then has two parts:
>
> $$y_j \sim \text{Bernoulli}(p_j)$$
> $$p_j = 2 \Phi \left( \frac{\sin^{-1}((R - r)/x)}{\sigma} \right) - 1$$

Let's investigate how $p_j$ changes with $\sigma$

```
# Ball and hole radii in inches (diameters 1.68" and 4.25", halved).
r = 1.68 / 2
R = 4.25 / 2

Xvalues = np.linspace(1.0, 20*12, 500)

fig, ax = plt.subplots(figsize=(12, 8))

# Curve of p_j over distance for several angular-error standard deviations
# (sigma in degrees, converted to radians for the threshold formula).
for sigma in [0.5, 1.0, 2.0, 5.0, 20.0]:
    sigma_radians = np.deg2rad(sigma)
    inner_stuff = np.arcsin((R - r) / Xvalues) / sigma_radians
    p_j = 2*st.norm().cdf(inner_stuff) - 1
    ax.plot(Xvalues/12, p_j, color="k", linewidth=1.)

ax.annotate(r"$\sigma = 0.5$", (10.5, 0.8))
ax.annotate(r"$\sigma = 1.0$", (6.5, 0.7))
ax.annotate(r"$\sigma = 2.0$", (4.25, 0.6))
ax.annotate(r"$\sigma = 5.0$", (3.5, 0.35))
ax.annotate(r"$\sigma = 20.0$", (2.5, 0.1))

ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.set_xlabel("Distance from hole (in ft)")
ax.set_title(r"Probability of Making Putt (varying $\sigma$)")
```

Now, we translate the model that has been built into a Bayesian model

```
Xb = putts.loc[:, "distance"].copy().values
yb = putts.loc[:, "made"].copy().values

# Clamp distances so (R - r)/x stays inside arcsin's domain.
Xb_tilde = np.maximum((R - r) + 1e-2, Xb)

m = pm.Model()
with m:
    # Prior on sigma
    sigma = pm.HalfNormal("sigma", sigma=15.0)

    # Convert sigma to radians
    sigma_radians = sigma * (math.pi/180)

    # Compute probability of making it
    # p_j = 2*Phi(arcsin((R-r)/x) / sigma) - 1, computed via log-CDF for
    # numerical stability.
    _inner = tt.arcsin((R - r) / Xb_tilde) / sigma_radians
    p_j = 2*pm.math.exp(
        pm.distributions.dist_math.normal_lcdf(0.0, 1.0, _inner)
    ) - 1.0

    # Determine whether successful
    putt_make = pm.Bernoulli("putt_make", p=p_j, observed=yb)

with m:
    trace = pm.sample(2500, tune=1000)

fig, ax = plt.subplots()

ax.hist(trace["sigma"])
ax.set_title(r"Distribution of $\sigma$ from MCMC")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)

# Posterior-predictive make-probability curve: mean and 1%/99% bands over
# the posterior samples of sigma.
Xvalues = np.linspace(2.0, 700, 500)

sigma_radians = np.deg2rad(trace["sigma"])
inner_stuff = np.arcsin((R - r) / Xvalues[None, :]) / sigma_radians[:, None]
p_j = 2 * st.norm().cdf(inner_stuff) - 1

p_j_mean = np.mean(p_j, axis=0)
p_j_5 = np.quantile(p_j, 0.01, axis=0)
p_j_95 = np.quantile(p_j, 0.99, axis=0)

fig, ax = plt.subplots()

ax.plot(Xvalues, p_j_mean, color=gm_color, linewidth=1.0)
ax.fill_between(Xvalues, p_j_5, p_j_95, color=gm_color, alpha=0.35)
ax.annotate("Geometric Putt Model", xy=(300, 0.25), color=gm_color)

ax.scatter(dist_mean, made_mean, color=data_color)
ax.annotate("Binned Putt Probabilities", xy=(100, 0.75), color=data_color)

ax.set_title("Geometric Putt Model vs Data")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
```

## Comparing Logistic Regression and Geometric Putt Model

```
fig, ax = plt.subplots(figsize=(12, 8))

# Same comparison as above, with distance converted from inches to feet.
ax.plot(Xvalues / 12, p_j_mean, color=gm_color, linewidth=1.0)
ax.fill_between(Xvalues / 12, p_j_5, p_j_95, color=gm_color, alpha=0.35)
ax.annotate("Geometric Putt Model", xy=(300 / 12, 0.2), color=gm_color)

ax.plot(
    Xvalues / 12, logr.predict_proba(Xvalues[:, None])[:, 1],
    color=lr_color, linewidth=2.0
)
ax.annotate("P(make) from Logistic Regression", xy=(120 / 12, 0.6), color=lr_color)

ax.scatter(np.array(dist_mean) / 12, made_mean, color=data_color)
ax.annotate("Binned Putt Probabilities", xy=(100 / 12, 0.75), color=data_color)

ax.set_title("Geometric Putt Model vs Data")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
```

### Why are we missing the long putts?

```
fig, ax = plt.subplots()

ax.hist(putts["distance"].values / 12)
ax.set_title("Distribution of Putts")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
```
github_jupyter
# Merging, Joining, and Concatenating

There are 3 main ways of combining DataFrames together: Merging, Joining and Concatenating. In this lecture we will discuss these 3 methods with examples.

____

### Example DataFrames

```
import pandas as pd

# Three frames with identical columns and consecutive, non-overlapping
# indexes (0-3, 4-7, 8-11) — convenient for demonstrating concatenation.
df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
                    'B': ['B0', 'B1', 'B2', 'B3'],
                    'C': ['C0', 'C1', 'C2', 'C3'],
                    'D': ['D0', 'D1', 'D2', 'D3']},
                   index=[0, 1, 2, 3])

df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
                    'B': ['B4', 'B5', 'B6', 'B7'],
                    'C': ['C4', 'C5', 'C6', 'C7'],
                    'D': ['D4', 'D5', 'D6', 'D7']},
                   index=[4, 5, 6, 7])

df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
                    'B': ['B8', 'B9', 'B10', 'B11'],
                    'C': ['C8', 'C9', 'C10', 'C11'],
                    'D': ['D8', 'D9', 'D10', 'D11']},
                   index=[8, 9, 10, 11])

df1

df2

df3
```

## Concatenation

Concatenation basically glues together DataFrames. Keep in mind that dimensions should match along the axis you are concatenating on. You can use **pd.concat** and pass in a list of DataFrames to concatenate together:

```
# Default axis=0: stack the frames vertically (rows appended).
pd.concat([df1,df2,df3])

# axis=1: place side-by-side, aligning on the index; rows missing from a
# frame become NaN.
pd.concat([df1,df2,df3],axis=1)
```

_____

## Example DataFrames

```
# Two frames sharing a 'key' column — the join column for merge examples.
left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
                     'A': ['A0', 'A1', 'A2', 'A3'],
                     'B': ['B0', 'B1', 'B2', 'B3']})

right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
                      'C': ['C0', 'C1', 'C2', 'C3'],
                      'D': ['D0', 'D1', 'D2', 'D3']})

left

right
```

___

## Merging

The **merge** function allows you to merge DataFrames together using a similar logic as merging SQL Tables together. For example:

```
pd.merge(left,right,how='inner',on='key')
```

Or to show a more complicated example:

```
left = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],
                     'key2': ['K0', 'K1', 'K0', 'K1'],
                     'A': ['A0', 'A1', 'A2', 'A3'],
                     'B': ['B0', 'B1', 'B2', 'B3']})

right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],
                      'key2': ['K0', 'K0', 'K0', 'K0'],
                      'C': ['C0', 'C1', 'C2', 'C3'],
                      'D': ['D0', 'D1', 'D2', 'D3']})

left

right

# Default how='inner': keep only key pairs present in BOTH frames.
pd.merge(left, right, on=['key1', 'key2'])

# 'outer': union of keys; unmatched side filled with NaN.
pd.merge(left, right, how='outer', on=['key1', 'key2'])

# 'right' / 'left': keep all keys from that side only.
pd.merge(left, right, how='right', on=['key1', 'key2'])

pd.merge(left, right, how='left', on=['key1', 'key2'])
```

## Joining

Joining is a convenient method for combining the columns of two potentially differently-indexed DataFrames into a single result DataFrame. Merge using index and not the column

```
left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
                     'B': ['B0', 'B1', 'B2']},
                    index=['K0', 'K1', 'K2'])

right = pd.DataFrame({'C': ['C0', 'C2', 'C3'],
                      'D': ['D0', 'D2', 'D3']},
                     index=['K0', 'K2', 'K3'])

left

right

# join aligns on the index (left join by default).
left.join(right)

left.join(right, how='outer')
```

# Great Job!
github_jupyter
``` !pip install ffmpeg-python !unzip voice-morphing-master.zip cd voice-morphing-master/ !pip3 install -r requirements.txt !pip3 install ffmpeg-python cd voice-morphing-master from utils.morphing import make_younger, make_older # Code for recording audio from the browser from IPython.display import Javascript from google.colab import output from base64 import b64decode import IPython import uuid from google.colab import output class InvokeButton(object): def __init__(self, title, callback): self._title = title self._callback = callback def _repr_html_(self): from google.colab import output callback_id = 'button-' + str(uuid.uuid4()) output.register_callback(callback_id, self._callback) template = """<button id="{callback_id}" style="cursor:pointer;background-color:#EEEEEE;border-color:#E0E0E0;padding:5px 15px;font-size:14px">{title}</button> <script> document.querySelector("#{callback_id}").onclick = (e) => {{ google.colab.kernel.invokeFunction('{callback_id}', [], {{}}) e.preventDefault(); }}; </script>""" html = template.format(title=self._title, callback_id=callback_id) return html RECORD = """ const sleep = time => new Promise(resolve => setTimeout(resolve, time)) const b2text = blob => new Promise(resolve => { const reader = new FileReader() reader.onloadend = e => resolve(e.srcElement.result) reader.readAsDataURL(blob) }) var record = time => new Promise(async resolve => { stream = await navigator.mediaDevices.getUserMedia({ audio: true }) recorder = new MediaRecorder(stream) chunks = [] recorder.ondataavailable = e => chunks.push(e.data) recorder.start() await sleep(time) recorder.onstop = async ()=>{ blob = new Blob(chunks) text = await b2text(blob) resolve(text) } recorder.stop() }) """ def record(sec=3): display(Javascript(RECORD)) s = output.eval_js('record(%d)' % (sec*1000)) b = b64decode(s.split(',')[1]) with open('recording.wav','wb+') as f: f.write(b) return 'recording.wav' predict_age('recording.wav')*40 record() Audio('recording.wav',rate=16000) 
from gender_detector.run_model import predict from age_regressor.run_model import predict as predict_age import numpy as np import torch as t import librosa Audio('new_recording.wav') print(f"guessed age of speaker:{predict_age('recording.wav')*40}") make_younger('recording.wav', predict('recording.wav')) Audio('new_recording.wav') print(f"guessed age of younger speaker:{predict_age('new_recording.wav')*40}") make_older('recording.wav', predict('recording.wav')) Audio('new_recording.wav') print(f"guessed age of younger speaker:{predict_age('new_recording.wav')*40}") ```
github_jupyter
## Instrumental Variables Example ``` # Copyright (c) [2021] Alessio Russo [alessior@kth.se]. All rights reserved. # This file is part of PythonVRFT. # PythonVRFT is free software: you can redistribute it and/or modify # it under the terms of the MIT License. You should have received a copy of # the MIT License along with PythonVRFT. # If not, see <https://opensource.org/licenses/MIT>. # # Code author: [Alessio Russo - alessior@kth.se] # Last update: 10th January 2020, by alessior@kth.se # # Example 3 # ------------ # In this example we see how to apply VRFT to a simple SISO model # with measurement noise using instrumental variables # Input data is generated using random normal noise # ``` ### Load libraries ``` import numpy as np import matplotlib.pyplot as plt import scipy.signal as scipysig from vrft import * ``` ### System, Reference Model and Control law ``` # System dt = 1e-2 num = [0.5] den = [1, -0.9] sys = ExtendedTF(num, den, dt=dt) sigma = 0.5 # measurement noise # Reference Model refModel = ExtendedTF([0.6], [1, -0.4], dt=dt) # Control law control = [ExtendedTF([1], [1, -1], dt=dt), ExtendedTF([1, 0], [1, -1], dt=dt)] ``` ### Generate signals ``` # Function used to generate the data def generate_data(sys, u, t, sigma): t, y = scipysig.dlsim(sys, u, t) y = y.flatten() + sigma * np.random.normal(size = t.size) # Length of the initial condition depends on the reference model return iddata(y, u, dt, [0]) # Generate input siganl t_start = 0 t_end = 10 t = np.arange(t_start, t_end, dt) u = np.random.normal(size = t.size) # To use IV we must perform 2 experiments on the plant using the same input data1 = generate_data(sys, u, t, sigma) data2 = generate_data(sys, u, t, sigma) data = [data1, data2] ``` ### VRFT ``` # VRFT Pre-filter prefilter = refModel * (1 - refModel) # VRFT method with Instrumental variables theta_iv, r_iv, loss_iv, C_iv = compute_vrft(data, refModel, control, prefilter, iv=True) # VRFT method without Instrumental variables theta_noiv, 
r_noiv, loss_noiv, C_noiv = compute_vrft(data1, refModel, control, prefilter, iv=False) #Obtained controller print('------IV------') print("Loss: {}\nTheta: {}\nController: {}".format(loss_iv, theta_iv, C_iv)) print('------No IV------') print("Loss: {}\nTheta: {}\nController: {}".format(loss_noiv, theta_noiv, C_noiv)) ``` ### Verify performance ``` # Closed loop system closed_loop_iv = (C_iv * sys).feedback() closed_loop_noiv = (C_noiv * sys).feedback() t = t[:len(r_iv)] u = np.ones(len(t)) _, yr = scipysig.dlsim(refModel, u, t) _, yc_iv = scipysig.dlsim(closed_loop_iv, u, t) _, yc_noiv = scipysig.dlsim(closed_loop_noiv, u, t) _, ys = scipysig.dlsim(sys, u, t) yr = yr.flatten() ys = ys.flatten() yc_noiv = yc_noiv.flatten() yc_iv = yc_iv.flatten() fig, ax = plt.subplots(4, sharex=True, figsize=(12,8), dpi= 100, facecolor='w', edgecolor='k') ax[0].plot(t, yr,label='Reference System') ax[0].plot(t, yc_iv, label='CL System - IV') ax[0].plot(t, yc_noiv, label='CL System - No IV') ax[0].set_title('CL Systems response') ax[0].grid(True) ax[1].plot(t, ys, label='OL System') ax[1].set_title('OL Systems response') ax[1].grid(True) ax[2].plot(t, data1.y[:len(r_iv)]) ax[2].grid(True) ax[2].set_title('Experiment data') ax[3].plot(t, r_iv) ax[3].grid(True) ax[3].set_title('Virtual Reference') # Now add the legend with some customizations. legend = ax[0].legend(loc='lower right', shadow=True) # The frame is matplotlib.patches.Rectangle instance surrounding the legend. frame = legend.get_frame() frame.set_facecolor('0.90') plt.show() ```
github_jupyter
<small><small><i> All the IPython Notebooks in **Python Datatypes** lecture series by Dr. Milaan Parmar are available @ **[GitHub](https://github.com/milaan9/02_Python_Datatypes)** </i></small></small> # Python List In this class, we'll learn everything about Python lists, how they are created, slicing of a list, adding or removing elements from them and so on. Python offers a range of compound data types often referred to as sequences. List is one of the most frequently used and very versatile data types used in Python. ## What is List in Python? Python list is a **data structure** which is used to store various types of data.In Python, lists are **mutable** i.e., Python will not create a new list if we modify an element of the list. It works as a container that holds other objects in a given order. We can perform various operations like insertion and deletion on list. A list can be composed by storing a **sequence** of different type of values **separated by commas**. ## Creating Python List In Python programming, a list is created by placing all the items (elements) inside **square brackets `[]`**, separated by **commas** **`,`**. All the elements in list are stored in the index basis with starting index **0**. It can have any number of items and they may be of different types (integer, float, string etc.). <div> <img src="img/l0.png" width="600"/> </div> **Syantax:** ```python <list_name>=[value1,value2,value3,...,valuen] ``` ``` # Example: # empty list, no item in the list my_list = [] print(my_list) print(len(my_list)) # 0 # list of integers my_list1 = [1, 2, 3] print(my_list1) # list with mixed data types my_list2 = [1, "Hello", 3.4] print(my_list2) # nested list my_list3 = ["mouse", [9, 3, 6], ['a']] print(my_list3) list=['foo','bar','baz','quz','quux','corge'] print(list) my_list=[1,2,3,4,4.5,'helloworld','X'] print(my_list) ``` ## Access elements from a list There are various ways in which we can access the elements of a list. 
### List Index We can use the index operator **`[]`** to access an item in a list. In Python, indices start at 0. So, a list having 5 elements will have an index from 0 to 4. Trying to access indexes other than these will raise an **`IndexError`**. The index must be an integer. We can't use float or other types, this will result in **`TypeError`**. Nested lists are accessed using nested indexing. <div> <img src="img/l6_1.png" width="400"/> </div> ``` # Example: List indexing my_list = ['p', 'r', 'o', 'b', 'e'] print(my_list[0]) # Output: p print(my_list[2]) # Output: o print(my_list[4]) # Output: e # Nested List n_list = ["Happy", [2, 0, 1, 5]] # Nested indexing print(n_list[0][1]) print(n_list[1][3]) print(my_list[4.0]) # Error! Only integer can be used for indexing ``` ### Negative indexing Python allows negative indexing for its sequences. The index of -1 refers to the last item, -2 to the second last item and so on. ``` # Example: Negative indexing in lists # Python allows negative indexing for its sequences. #The index of -1 refers to the last item, -2 to the second last item and so on. my_list = ['p','r','o','b','e'] print(my_list[-1]) print(my_list[-5]) # Example: list=['foo','bar','baz','quz','quux','corge'] print(list[2]) print(list[0:]) # if we don't set where to stop it takes all the rest print(list[4:6]) print(list[-4:-1]) # it does not include the end index print(list[1:5:2]) # it does not include the end index print(list[-1: :-1]) # reverse list print(list[-1]) # last element print(list[-2]) # second last element print(len(list)-1) # index of last element does_exist = 'bar' in list print(does_exist) # True ``` >**Note:** If the index provided in the list slice is outside the list, then it raises an IndexError exception. ### How to slice lists in Python? We can access a range of items in a list by using the slicing operator **`:`**(colon). 
**Syntax:** ```python <list_name>[start : stop : step] ``` ``` # Example: List slicing in Python my_list = ['p','r','o','g','r','a','m','i','n','g'] # indes [ 0 1 2 3 4 5 6 7 8 9 ] # index [-10 -9 -8 -7 -6 -5 -4 -3 -2 -1 ] print(my_list[2:5]) # elements 3rd to 4th print(my_list[:-5]) # elements beginning to 4th print(my_list[5:]) # elements 5th to end # elements beginning to end print(my_list[:]) # Output: ('p', 'r', 'o', 'g', 'r', 'a', 'm', 'i', 'n','g') ``` Slicing can be best visualized by considering the index to be between the elements as shown below. So if we want to access a range, we need two indices that will slice that portion from the list. <div> <img src="img/l6_2.png" width="350"/> </div> >**NOTE:** Internal Memory Organization: >List do not store the elements directly at the index. In fact a reference is stored at each index which subsequently refers to the object stored somewhere in the memory. This is due to the fact that some objects may be large enough than other objects and hence they are stored at some other memory location. ## Python List Operations Apart from creating and accessing elements from the list, Python allows us to perform various other operations on the list. Some common operations are given below: ### Add/Change List Elements Lists are mutable, meaning their elements can be changed unlike **[string](https://github.com/milaan9/02_Python_Datatypes/blob/main/002_Python_String.ipynb)** or **[tuple](https://github.com/milaan9/02_Python_Datatypes/blob/main/004_Python_Tuple.ipynb)**. We can use the assignment operator (**`=`**) to change an item or a range of items. 
``` # Example: Correcting mistake values in a list odd = [2, 4, 6, 8] odd[0] = 1 # change the 1st item print(odd) odd[1:4] = [3, 5, 7] # change 2nd to 4th items print(odd) # Example: data1=[5,10,15,20,25] print("Values of list are: ", data1) data1[2]="Multiple of 5" # we are modifying the 3rd element using its index [2] print("Values of list are: ", data1) ``` We can add one item to a list using the **`append()`** method or add several items using **`extend()`** method. ``` # Example: Appending and Extending lists in Python odd = [1, 3, 5] odd.append(7) print(odd) odd.extend([9, 11, 13]) print(odd) # Example: list1=['a','b','c'] list1.append(1.5) list1.append('x') list1.append(['y','z']) # append list into list as single object print(list1) ``` We can also use **`+`** (concatenation) operator to combine two lists. This is also called concatenation. ``` # Example: Concatenating and repeating lists odd = [1, 3, 5] print(odd + [9, 7, 5]) # Example: list1=['a','b','c'] list2=['x','y','z'] list3=list1+list2 print(list3) # Example: list1=['a','b','c'] a='x' print(list1+a) ``` >**NOTE:** **`+`** operator implies that both the operands passed must be list else error will be shown. The **`'*'`** operator repeats a list for the given number of times. ``` # Example: Concatenating and repeating lists odd = [1, 3, 5] print(odd + [9, 7, 5]) print(["re"] * 3) # Example: list1=['a','b','c'] print(list1*3) print(["re"] * 3) ``` Furthermore, we can insert one item at a desired location by using the method **`insert()`** or insert multiple items by squeezing it into an empty slice of a list. ``` # Example: Demonstration of list insert() method odd = [1, 9] odd.insert(1,3) print(odd) odd[2:2] = [5, 7] print(odd) ``` ### Delete/Remove List Elements We can delete one or more items from a list using the keyword **`del`**. It can even delete the list entirely. 
``` # Example: Deleting list items my_list = ['p', 'r', 'o', 'b', 'l', 'e', 'm'] del my_list[2] # delete one item print(my_list) del my_list[1:5] # delete multiple items print(my_list) del my_list # delete entire list print(my_list) # Error: List not defined # Example: list1=['a','b','c'] print("data in list : ",list1) del(list1[2]) print("new data in list : ",list1) ``` We can use **`remove()`** method to remove the given item or **`pop()`** method to remove an item at the given index. The **`pop()`** method removes and returns the last item if the index is not provided. This helps us implement lists as stacks (first in, last out data structure). We can also use the **`clear()`** method to empty a list. ``` # Example: my_list = ['p','r','o','b','l','e','m'] my_list.remove('p') print(my_list) # Output: ['r', 'o', 'b', 'l', 'e', 'm'] print(my_list.pop(1)) # Output: 'o' print(my_list) # Output: ['r', 'b', 'l', 'e', 'm'] print(my_list.pop()) # Output: 'm' print(my_list) # Output: ['r', 'b', 'l', 'e'] my_list.clear() print(my_list) # Output: [] ``` Finally, we can also delete items in a list by assigning an empty list to a slice of elements. ``` # Example: my_list = ['p','r','o','b','l','e','m'] my_list[2:3] = [] my_list # Example: my_list[2:5] = [] my_list ``` ## Python Built-in List Functions Built-in functions like **`all()`**, **`any()`**, **`sorted()`**, **`min()`**, **`max()`**, **`len()`**, **`cmp()`**, **`list()`**, etc. are commonly used with dictionaries to perform different tasks. | Functions | Description | |:----| :--- | | **[all()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/003_Python_all%28%29.ipynb)** | Returns **`True`** if all keys of the list are True. | | **[any()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/002_Python_any%28%29.ipynb)** | Returns **`True`** if any key of the list is true. If the list is empty, return **`False`**. 
| | **[sorted()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/060_Python_sorted%28%29.ipynb)** | Returns a new sorted list of elements in the list. | | **[min()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/042_Python_min%28%29.ipynb)** | Returns the minimum value from the list given. | | **[max()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/041_Python_max%28%29.ipynb)** | Returns the maximum value from the list given. | | **[len()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/040_Python_len%28%29.ipynb)** | Returns number of elements in a list. | | **cmp()** | Compares items of two lists. (Not available in Python 3). | | **[list()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/038_Python_list%28%29_Function.ipynb)** | Takes sequence types and converts them to lists. | ### **`all(list)`** - The method all() method returns True when all elements in the given iterable are true. If not, it returns False. ``` # Example: How all() works for lists? # all values true l = [1, 3, 4, 5] print(all(l)) # all values false l = [0, False] print(all(l)) # one false value l = [1, 3, 4, 0] print(all(l)) # one true value l = [0, False, 5] print(all(l)) # empty iterable l = [] print(all(l)) ``` ### **`any(list)`** - any() function returns True if any element of an iterable is True. If not, any() returns False. ``` # Example: True since 1,3 and 4 (at least one) is true l = [1, 3, 4, 0] print(any(l)) # False since both are False l = [0, False] print(any(l)) # True since 5 is true l = [0, False, 5] print(any(l)) # False since iterable is empty l = [] print(any(l)) ``` ### **`sorted(dict)`** - The sorted() function sorts the elements of a given iterable in a specific order (either **ascending** or **descending**) and returns the sorted iterable as a list. 
``` # Example: vowels list py_list = ['e', 'a', 'u', 'o', 'i'] print(sorted(py_list)) print(sorted(py_list, reverse=True)) ``` ### **`min(list)`** - this method is used to get min value from the list. In Python3 lists element's type should be same otherwise compiler throw type Error. ``` # Example: list1 = ['a','b','c'] list2 = [1,2,3] list3=['a','b','c',1,2,3] print(min(list1)) #a print(min(list2)) #1 print(min(list3)) #typeError ``` ### **`max(list)`** - The max() method returns the elements from the list with maximum value. ``` # Example: list1 = ['a','b','c'] list2 = [1,2,3] list3=['a','b','c',1,2,3] print(max(list1)) #c print(max(list2)) #3 print(max(list3)) #typeEror ``` ### **`len(list)`** - The len() method returns the number of elements in the list. ``` # Example: list1 = ['a','b','c'] list2 = [] list3=['a','b','c',1,2,3] print(len(list1)) print(len(list2)) print(len(list3)) ``` ## Python List Methods Methods that are available with list objects in Python programming are tabulated below. Some of the methods have already been used above. 
| Method | Description | |:----| :--- | | **[append()](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List_Methods/002_Python_List_append%28%29.ipynb)** | Add an element to the end of the list | | **[extend()](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List_Methods/003_Python_List_extend%28%29.ipynb)** | Add all elements of a list to the another list | | **[insert()](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List_Methods/004_Python_List_insert%28%29.ipynb)** | Insert an item at the defined index | | **[remove()](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List_Methods/005_Python_List_remove%28%29.ipynb)** | Removes an item from the list | | **[pop()](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List_Methods/007_Python_List_pop%28%29.ipynb)** | Removes and returns an element at the given index | **[clear()](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List_Methods/011_Python_List_clear%28%29.ipynb)** | Removes all items from the list | | **[index()](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List_Methods/001_Python_List_index%28%29.ipynb)** | Returns the index of the first matched item | | **[count()](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List_Methods/006_Python_List_count%28%29.ipynb)** | Returns the count of the number of items passed as an argument | | **[sort()](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List_Methods/009_Python_List_sort%28%29.ipynb)** | Sort items in a list in ascending order | | **[reverse()](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List_Methods/008_Python_List_reverse%28%29.ipynb)** | Reverse the order of items in the list | | **[copy()](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List_Methods/010_Python_List_copy%28%29.ipynb)** | Returns a shallow copy of the list | ### 
**`append()`** - The `append()` method adds an item to the end of the list. ``` # Example: list1 =[1,2,3] list1.append(4) list1.append('helloworld') list1.append(['a','b','c']) #append as single object print(list1) ``` ### **`extend()`** - The `extend()` method adds all the elements of an iterable (list, tuple, string etc.) to the end of the list. ``` # Example: list1 =[1,2,3] list1.extend([4]) list1.extend('helloworld') list1.extend(['a','b','c']) print(list1) ``` ### **`insert()`** - The `insert()` method inserts an element to the list at the specified index. ``` # Example: list1 =['helloworld','python','java','c++',1,2,3] list1.insert(3,'C#') print(list1) ``` ### **`remove()`** - The `remove()` method removes the first matching element (which is passed as an argument) from the list. ``` # Example: list1 =['helloworld','python','java','c++',1,2,3] list1.remove("java") print(list1) list1.remove(2) print(list1) ``` ### **`pop()`** - The `pop()` method removes the item at the given index from the list and returns the removed item. ``` # Example: list1 =['helloworld','python','java','c++',1,2,3] list1.pop() # 3 is pop print(list1) list1.pop(2) # list[2] = "java" pop print(list1) ``` ### **`clear()`** - The `clear()` method removes all items from the list. ``` list1 =['helloworld','python','java','c++',1,2,3] list1.clear() print('list1:', list1) ``` ### **`index()`** - The `index()` method returns the index of the specified element in the list. ``` # Example: list1 =['helloworld','python','java','c++',1,2,3] print("index of java : ",list1.index('java')) print("index of 2 : ",list1.index(2)) ``` ### **`count()`** - The `count()` method returns the number of times the specified element appears in the list. ``` # Example: list1 =[1,2,3,'a','b','c',1,2,3] print(list1.count(1)) print(list1.count('b')) print(list1.count(4)) ``` ### **`sort() `** - The `sort()` method sorts the elements of a given list in a specific ascending or descending order. 
``` # Example: list1 =[22,30,100,300,399] list2=['z','ab','abc','a','b'] list1.sort() list2.sort() print(list1) list1.sort(reverse=True) print(list1) print(list2) # Example: list3=['a','b',1,2] list3.sort() print(list3) #error ``` ### **`reverse() `** - The `reverse()` method reverses the elements of the list. ``` # Example: list1 =['helloworld','python','java','c++',1,2,3] list1.reverse() print(list1) ``` ### **`copy() `** - The `copy()` method returns a shallow copy of the list. ``` list1 =['helloworld','python','java','c++',1,2,3] list2 = list1.copy() print('list1:', list1) print('list2:', list2) ``` Here is a complete list of all the **[built-in methods to work with List in Python](https://github.com/milaan9/02_Python_Datatypes/tree/main/003_Python_List_Methods)**. ## List Comprehension: Elegant way to create new List List comprehension is an elegant and concise way to create a new list from an existing list in Python. A list comprehension consists of an expression followed by **[for loop](https://github.com/milaan9/03_Python_Flow_Control/blob/main/005_Python_for_Loop.ipynb)** inside square brackets. Here is an example to make a list with each item being increasing power of 2. ``` pow2 = [2 ** x for x in range(10)] print(pow2) ``` This code is equivalent to: ``` pow2 = [] for x in range(10): pow2.append(2 ** x) print(pow2) ``` A list comprehension can optionally contain more **`for`** or **[if statements](https://github.com/milaan9/03_Python_Flow_Control/blob/main/001_Python_if_statement.ipynb)**. An optional **`if`** statement can filter out items for the new list. Here are some examples. ``` pow2 = [2 ** x for x in range(10) if x > 5] pow2 odd = [x for x in range(20) if x % 2 == 1] odd [x+y for x in ['Python ','C '] for y in ['Language','Programming']] ``` ## Other List Operations in Python ### 1. List Membership Test We can test if an item exists in a list or not, using the keyword **`in`**. 
``` my_list = ['p', 'r', 'o', 'b', 'l', 'e', 'm'] # Output: True print('p' in my_list) # Output: False print('a' in my_list) # Output: True print('c' not in my_list) ``` ### 2. Iterating Through a List Using a for loop we can iterate through each item in a list. ``` for fruit in ['apple','banana','mango']: print("I like",fruit) ``` ### 3. Other List Operations in Python Using a **`for`** loop we can iterate through each item in a list. ``` # Example: list1=['a','b','c',1,2,3] for x in list1 : print(x) for fruit in ['apple','banana','mango']: print("I like",fruit) ``` ## 💻 Exercises ➞ <span class='label label-default'>List</span> ### Exercises ➞ <span class='label label-default'>Level 1</span> 1. Declare a list with more than 5 items with different data types 2. Find the length of your list 3. Get the first item, the middle item and the last item of the list 4. Declare a list **`called my_info`**, put your (name, age, height, marital status, country) 5. Declare a list variable named **`mix_fruits`** and assign initial values Guava, Mango, Apple, Pear, Fig, Orange and Banana. 6. Print the list using **`print()`** 7. Print the number of **`mix_fruits`** in the list 8. Print the first, middle and last fruit 9. Print the list after modifying one of the fruit 10. Add an fruit to variable **`mix_fruits`** 11. Insert an fruit in the middle of the **`mix_fruits`** list 12. Change one of the fruit names to uppercase 13. Join the elements in **`mix_fruits`** with a string **`-#-`** 14. Check if a certain fruit exists in the **`mix_fruits`** list. 15. Sort the list using **`sort()`** method 16. Reverse the list in descending order using **`reverse()`** method 17. Slice out the first 3 fruits from the list 18. Slice out the last 3 fruits from the list 19. Slice out the middle fruit or fruits from the list 20. Remove the first fruit from the list 21. Remove the middle fruit or companies from the list 22. Remove the last fruit from the list 23. Remove all fruits from the list 24. 
Delete the fruits list 25. Join the following lists: ```py front_end = ['HTML', 'CSS', 'JS', 'React', 'Redux'] back_end = ['Node','Express', 'MongoDB'] ``` 26. After joining the lists in question 25. Copy the joined list and assign it to a variable full_stack. Then insert Python and SQL after Redux. ### Exercises ➞ <span class='label label-default'>Level 2</span> 1. The following is a list of 10 students ages: ```py ages = [19, 23, 19, 25, 21, 20, 25, 26, 25, 24] ``` - Sort the list and find the min and max age - Add the min age and the max age again to the list - Find the median age (one middle item or two middle items divided by two) - Find the average age (sum of all items divided by their number ) - Find the range of the ages (max minus min) - Compare the value of (min - average) and (max - average), use **`abs()`** method 1. Find the middle country(ies) in the **[countries list](https://github.com/milaan9/02_Python_Datatypes/blob/main/countries_data.py)** 1. Divide the countries list into two equal lists if it is even if not one more country for the first half. 1. ['India', 'Russia', 'USA', 'Finland', 'Sweden', 'Norway', 'Denmark']. Unpack the first three countries and the rest as scandic countries.
github_jupyter
## Working with LIDAR datasets in `adaptivefiltering` This notebook will explain how Lidar datasets are treated in `adaptivefiltering` by showcasing the most common use cases. If you are not yet familiar with Jupyter, check the [Introduction to Python+Jupyter notebook](python.ipynb) first. The first thing to do in a Jupyter notebook that uses `adaptivefiltering` is to import the library: ``` import adaptivefiltering ``` ### Loading datasets `adaptivefiltering` handles Lidar data sets in LAS/LAZ format. To load a data set, we construct a `DataSet` object given its filename and assign it to a variable `ds`: ``` ds = adaptivefiltering.DataSet(filename="500k_NZ20_Westport.laz") ``` In above example, we are loading a small sample data set that is provided by `adaptivefiltering`. You can also load your own data set by providing its filename. `adaptivefiltering` currently only supports datasets in LAS and LAZ format. The dataset filename is assumed to either be an absolute path, be located in the current working directory or that you first specified its location using the `set_data_directory` function: ``` adaptivefiltering.set_data_directory("/some/directory") ``` ### Spatial Reference Systems By default, `adaptivefiltering` will try to determine the dataset's metadata to determine the correct spatial reference system. If it is not specified in the metadata or if you want to force interpretation as a certain spatial reference system, you can pass its [Well-known Text (WKT) representation](https://en.wikipedia.org/wiki/Well-known_text_representation_of_coordinate_reference_systems) to the data set: ``` ds_epsg = adaptivefiltering.DataSet( filename="500k_NZ20_Westport.laz", spatial_reference="EPSG:4326" ) ``` Note that specifying a specific spatial reference system does *not* reproject the dataset, but reinterprets the given data. If you want to reproject your data, have a look at `adaptivefiltering.reproject_dataset` below. 
### Visualizing datasets With the dataset loaded as the object `ds`, we have several ways of visualizing the data set directly in Jupyter. By default, a hillshade model with a configurabel spatial resolution in meters is used: ``` ds.show(resolution=1.0) ``` `adaptivefiltering` supports more visualization methods. These can best be explored using an interactive user interface: ``` ds.show_interactive() ``` If you already know exactly what visualization type and paramters you want, you can pass them directly to `show`. The full list of options is available in the [online documentation](https://adaptivefiltering.readthedocs.io/en/latest/index.html#adaptivefiltering.DataSet.show) or can be accessed directly in Jupyter by using the `?` operator: ``` ?ds.show ``` ### Restricting datasets If your Lidar dataset is very large, handling the entire data set becomes unwieldy, especially if we want to interactively tune ground point filtering pipelines. It is therefore important to crop the dataset to a subset that we can easily work on. We do so by showing an interactive map, adding a polygon with the polygon selector tool and hitting the *Finalize* button: ``` rds = ds.restrict() ``` In the above, the restricted dataset is assigned to a new object `rds`. This follows a design principle of `adaptivefiltering`: All objects (datasets, filter pipelines etc.) are *immutable* - operations that work on datasets *never* implicitly modify an object. Instead the, provided input (`ds` in the above) is left untouched, and a modified copy is returned. This results in an increased memory consumption, but makes the interactive exploration of ground point filtering with `adaptivefiltering` easier to handle. ### Transforming datasets The above principle of *immutability* is also followed by all other functions that transform datasets. The most prominent such transformation is the application of ground point filter pipelines. 
It is of such importance, that it is covered in a separate [notebook on filter pipelines](filtering.ipynb). Other data transformations are e.g. `remove_classification` which removes any existing classification data from a dataset: ``` ds = adaptivefiltering.remove_classification(ds) ``` Here, we have chosen to assign the transformed dataset to the same name as the original dataset. This is not violating the principle of immutability, because we explicitly chose to do so. Another dataset transformation that was already mentioned is the reprojection into a different spatial reference system: ``` reprojected = adaptivefiltering.reproject_dataset(ds, "EPSG:4326") ``` If your dataset's metadata does not specify a spatial reference system, you need specify it additionally using the `in_srs=` parameter to `adaptivefiltering.reproject_dataset`. ### Saving datasets Once we have achieved a result that is worth storing, we can save the dataset to a LAS/LAZ file by calling its `save` method: ``` ds.save("without_classification.las", compress=False, overwrite=False) ``` In the above, the first argument is the filename to save to (relative paths are interpreted w.r.t. the current working directory). Optionally, LAZ compression can be activated by setting `compress=True`. If an existing file would be overwritten, explicit permission needs to do that needs to be granted by setting `overwrite=True`.
github_jupyter
# DSCI 572 Lecture 3 How to survive in a world where everything is wrong. #### Outline: - Binary representations (5 min) - Decimals in binary (5 min) - Fixed point (5 min) - Floating point (20 min) - Break (5 min) - Spacing between numbers (10 min) - Order of operations (10 min) - Log-sum-exp (15 min) - Matrix inversions (5 min) Context: - This used to be the first of a two-lecture sequence. - 2nd lecture has been removed. - So this is a "standalone" topic in the course. - Not super related to our story but you need to see it somewhere. ``` import numpy as np import numpy.linalg as npla import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['font.size'] = 16 ``` Motivating examples: ``` (0.3 - 0.2 - 0.1)*1e20 1e40 + 10000000000000000 == 1e40 ``` (try this in other languages!) <br><br><br><br><br> Funny story: during my undergrad I send a bug report to MathWorks because I observed this behaviour in MATLAB. I got a very polite and patient response... ## Binary numbers and representations of integers (5 min) ``` "{0:b}".format(13) x = 13 type(x) ``` Read this as \begin{align*} &1\times 2^3 \\ +\, &1\times 2^2 \\ +\, &0\times 2^1 \\ +\, &1\times 2^0 \\ \\ =\, &8+4+1 = 13 \end{align*} - In practice there's one bit used as the "sign bit" - Since we don't need both positive 0 and negative 0, we keep one extra number on the negative side - So a 64-bit integer ranges from $-2^{63}$ to $2^{63}-1$, inclusive. - Python is special because it takes care of this for you behind the scenes. ``` type(2**100) # Python is special 2**100 ``` ## Review scientific notation (0 min) ``` 23423423974482344.0 ``` ## Decimal numbers in binary (5 min) Consider the number $101.11$ Interpretation: $$\begin{align*} &1\times 2^2 \\ +\, &0\times 2^1 \\ +\, &1\times 2^0 \\ +\, &1\times 2^{-1} \\ +\, &1\times 2^{-2} \\ \\ =\, &4+1+0.5+0.25 = 5.75 \end{align*}$$ Exercise: convert $110.101$ to base 10. ## Fixed point (5 min) - We have 64 bits per number. 
- Should we use 32 bits on each side of the decimal? - Or 32 and 31, because we need the sign. ``` 2**32 2**-13 ``` ## Floating point (20 min) Everything is represented in "scientific notation". In other words, $A \times 10^B$. Except in this case it's more like $1.M \times 2^E$, where $M$ is called the mantissa and $E$ is called the exponent. Examples: | number in base 10 | scientific notation (base 10) | scientific notation (binary) | mantissa (M) | exponent (E) | |--------------------|-------------------------------|------------------------------|--------|--------| | $2$ | $1.0\times 2^1$ | $1.0 \times 2^1$ | $0$ | $1$ | | $10$ | $1.25\times 2^3$ | $1.01\times 2^{11}$ | $01$ | $11$ | | $0.375$ | $1.5\times 2^{-2} $ | $1.1\times 2^{-10}$ | $1$ | $-10$ | | $0.1$ | $1.6 \times 2^{-4}$ | $1.100110011\ldots \times 2^{-100}$ | $100110011$... | $-100$ | - Some numbers that are short in base 10 are (infinitely) long in base 2, like $0.1$. - We have infinitely long numbers like this in base 10 too, like $1/3$. [**IEEE floating point standard**](https://en.wikipedia.org/wiki/IEEE_floating_point): Key info: in IEEE double precision, we use 1 bit for the overall sign, 52 bits for the mantissa and 11 bits for the exponent (total = 64 bits). ``` 10.0**300.0 10.0**400.0 ``` Below are two pieces of code that more-or-less do the same thing, namely convert a float to its binary representation. 
``` # from http://stackoverflow.com/questions/16444726/binary-representation-of-float-in-python-bits-not-hex import struct def binary(num): packed = struct.pack('!d', float(num)) integers = [c for c in packed] binaries = [bin(i) for i in integers] stripped_binaries = [s.replace('0b', '') for s in binaries] padded = [s.rjust(8, '0') for s in stripped_binaries] final = ''.join(padded) assert len(final) == 64 # alternate approach # x = float(x) # if x == 0: # return "0" * 64 # w, sign = (float.hex(x), 0) if x > 0 else (float.hex(x)[1:], 1) # mantissa, exp = int(w[4:17], 16), int(w[18:]) # final = "{}{:011b}{:052b}".format(sign, exp + 1023, mantissa) sign, exponent_plus_1023, mantissa = final[0], final[1:12], final[12:] sign_str = "" if int(sign) == 0 else "-" mantissa_base10_scale = int(mantissa, 2) mantissa_base10 = mantissa_base10_scale / 2**52 # shift decimal point from end of binary string to beginning of it mantissa_base10 = round(mantissa_base10, 8) # purely for cosmetic reasons, not actually part of it mantissa_base10_str = str(mantissa_base10)[2:] # throw away the leading "0." exponent_base10 = int(exponent_plus_1023, 2) - 1023 print("%s = %s1.%s x 2^%s" % (num, sign_str, mantissa_base10_str, exponent_base10)) print() print("%s %s %s" % (sign, exponent_plus_1023, mantissa)) print("^ ^ ^") print("sign exponent+1023 (%d) mantissa (%s)" % (exponent_base10+1023, mantissa_base10)) binary(2) binary(10) binary(0.375) binary(0.1) ``` Note: Instead of storing the 11-bit exponent as a signed integer from $-1023$ to $1024$, it is actually stored as an unsigned integer from $0$ to $2047$. So you need to read the number from the raw bits and then subtract $1023$. #### some take home messages (!!) 
- numbers are not represented exactly - most calculations are "wrong" - when these errors are introduced, **you might not get an error message or warning** - most numbers cannot be represented - even most _integers_ cannot be represented as floating point numbers - there is a biggest number - there is a smallest number - most environments you'll encounter will use IEEE double precision... but others do exist (especially single precision) ``` type(100) type(100.0) ``` ## Break (5 min) ## Spacing between numbers (10 min) Imagine you were in the decimal system (not binary), and were using scientific notation but you were only allowed 3 digits after the decimal point. In-class exercise: how large is the _spacing_ between the given number and the _next largest number that we can represent_? - $8.982$ - $3.432\times 10^2$ - $0.001\times 10^1$ Conclusion: we only need to look at the exponent. The same goes for binary. The steps happen at every power of 2 instead of 10, and we have way more digits after the decimal (52 instead of 3), but everything else is pretty much the same. So the spacing size, as a function of the number itself, is a staircase function. ``` x = np.linspace(1,1000,100000) spacing = 2**np.floor(np.log2(x)) plt.plot(x, spacing); plt.xlabel("Number itself"); plt.ylabel("Gap to the next number"); 2**-52 # this is the spacing at 1.0 1.0 + 1e-20 == 1.0 0.3 - 0.2 - 0.1 ``` The steps get bigger, but the trend is a straight line. In other words, if we zoom out we see that the spacing size is _proportional to the number itself_. ``` plt.plot(x, spacing) plt.plot(x,x); plt.xlabel("Number itself"); plt.ylabel("Gap to the next number"); ``` We can also look on a log-log scale ``` plt.loglog(x,spacing) plt.loglog(x,x); plt.xlabel("Number itself"); plt.ylabel("Gap to the next number"); ``` Going back to our original example: ``` 0.3-0.2-0.1 ``` - Let's assume some rounding occurred at the end, i.e. 52 bits after the decimal point. 
- Then we'd expect things to be wrong on the order of ``` 2**(-52)*(0.1) 2**-52 ``` #### Take-home message about roundoff errors (!!) The error in representing a number $\approx$ the number itself $\times \, 10^{-16}$ ## Order of operations (10 min) Consider the following: ``` 1e16+1+1 == 1+1+1e16 ``` ???????? Ok... let's break it down. ``` 1 + 1 + 1e16 == 1e16 # makes sense 1e16 + 1 + 1 == 1e16 # ???? 1e16 + 1 + 1 +1+1+1+1+1+1+1+1+1 == 1e16 ``` Ok, so the first one is the weird one. We now understand the above, given our new knowledge. The spacing between $10^{16}$ and the next largest number must be more than 2, so when 1 is added to $10^{16}$ we round back down to $10^{16}$. What's happening here is that we do operations from left to right. So when we first do 1+1 we get 2. And I picked $10^{16}$ on purpose so that the spacing is more than 2 but less than 4. So when we add 2 to $10^{16}$ we do get far enough to round _up_ to the next number. In other words, the order of operations doesn't matter on paper, but it can matter in code due to floating point issues. We can check our reasoning by adding on the right side with parentheses: ``` 1e16 + (1+1) == 1e16 ``` For kicks: ``` x = 1e16 for i in range(100_000): x = x + 1 x == 1e16 ``` We can make it look even more disturbing for big numbers... ``` 1e40 + 1000000000000000000 == 1e40 ``` Or small numbers... ``` 1 + 1e-20 == 1 ``` Very abbreviated version of this lecture: "you have 16 digits of precision to work with". **(begin optional)** By the way, what _is_ the spacing at $10^{16}$? We need to find the largest power of 2 that is less than $10^{16}$, which happens to be ``` 2.0**53 # make it easier to read '%g' % 2.0**53 ``` Ok this looks good. So now we just need to do $2^{53}\times 2^{-52}=2$. So the spacing is exactly 2. I guess we round down when we add 1!
We can also test this: ``` 1e16 + 1 == 1e16 1e16 + 1.0000001 == 1e16 ``` **(end optional)** ## log-sum-exp (15 min) Let's consider logistic regression with one feature and no intercept, so that `w` is just a number. The loss function is: ``` def loss_lr_1D(w, x, y): return np.sum(np.log(1 + np.exp(-y*w*x))) n = 100 x = np.random.randn(n) x[1:10] *= 1e5 w = np.random.randn() y = np.random.choice([-1,+1], size=n) loss_lr_1D(w,x,y) ``` What are we doing here? $$f(w) = \sum_{i=1}^n \log \left( 1+ \exp(-y_iwx_i) \right) $$ The key is that we're computing $\log(1+\exp(z))$ and getting an overflow when $z\gg 1$. (Note: when doing the optimization, we only really need the gradient of the loss, so this isn't a completely realistic concern. But it happens for real in related problems.) But when $z\gg1$ we can say $1+\exp(z)\approx \exp(z)$ and in that case $$\log(1+\exp(z))\approx \log(\exp(z)) = z$$ We can try it... ``` def log_1_plus_exp(z): return np.log(1+np.exp(z)) def log_1_plus_exp_safe(z): if z > 100: return z else: return log_1_plus_exp(z) print(log_1_plus_exp(0)) print(log_1_plus_exp_safe(0)) print(log_1_plus_exp(1000)) print(log_1_plus_exp_safe(1000)) print(log_1_plus_exp(110.)) print(log_1_plus_exp_safe(110.)) ``` This is cool! We combined math + CS + our brains and got better results! #### more take-home messages - By combining an understanding of floating point errors and math, we can write better code. - This is one (of many) reasons why we use libraries like sklearn rather than implementing things ourselves. Other reasons: speed, edge cases, updates over time, less likely to contain a bug. #### (optional) Moving to $d>1$ and multi-class - When $d>1$ not much changes, except that your $z$ above is actually a dot product of a $w$-vector and an $x$-vector, so getting big numbers in there starts to be a much bigger problem. The above is then even more important. - Imagine $d=1000000$. 
Even if these numbers look like random noise (both positive and negative, cancelling each other out) by the central limit theorem their sum will grow like $\sqrt{n}$ and eventually overflow will be a problem. You only need to get to $z=1000$ before problems hit. - When the number of classes change then things change more substantially. In particular with $K$ classes the $\log(1+\exp(z))$ flavour changes to $$\log\left(\sum_{k=1}^K \exp(z_k)\right)$$ In that case we play a different trick which is to pull out the max $z_k$. More on this in lab 2! ## (optional) Inverting matrices (5 min) Sometime we take the inverse of a matrix. I have seen it happen. Key point: _this is almost always a bad idea!_ - If you actually need the inverse, then you must compute it - But in real situations you almost always need to solve $Ax=b$ given some $b$ - In this case, use a solve function to compute this directly, rather than going to $x=A^{-1}b$. ``` A = np.random.rand(3,3) # random 3x3 matrix b = np.random.rand(3) x1 = npla.inv(A) @ b x1 x2 = npla.solve(A, b) x2 np.allclose(x1,x2) # looks good n = 250 A = np.vander(np.random.rand(n)) b = np.random.rand(n) y1 = npla.inv(A) @ b y2 = npla.solve(A,b) np.allclose(y1,y2) np.max(y1-y2) ``` What happened?? Well, we're not really going to go into detail here. But, basically, just don't compute the inverse if you don't have to...
github_jupyter
# Training Notebook for ArcFace model on Refined MS1M dataset ## Overview Use this notebook to train a ArcFace model from scratch. Make sure to have the Refined MS1M dataset prepared before proceeding. ## Prerequisites The training notebooks and scripts are tested on python 2.7. The following additional packages need to be installed before proceeding: * MXNet - `pip install mxnet-cu90mkl` (tested on this version, can use other versions) * OpenCV - `pip install opencv-python` * Scikit-learn - `pip install scikit-learn` * Scikit-image - `pip install scikit-image` * EasyDict - `pip install easydict` * numpy - `pip install numpy` Also the following scripts (included in the repo) must be present in the same folder as this notebook: * `face_image.py` (prepares face images in the dataset for training) * `face_preprocess.py` (performs preprocessing on face images) * `fresnet.py` (contains model definition of ResNet100) * `image_iter.py` (helper script) * `symbol_utils.py` (helper script) * `verification.py` (performs verification on validation sets) In order to train the model with a python script: * Generate the script : In Jupyter Notebook browser, go to File -> Download as -> Python (.py) * Run the script: `python train_arcface.py` ### Import dependencies Verify that all dependencies are installed using the cell below. Continue if no errors encountered, warnings can be ignored. 
``` from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import math import numpy as np import random import logging import pickle import numpy as np from image_iter import FaceImageIter from image_iter import FaceImageIterList import mxnet as mx from mxnet import ndarray as nd import mxnet.optimizer as optimizer import fresnet import verification import sklearn from easydict import EasyDict as edict import multiprocessing ``` ### Specify model, hyperparameters and paths The training was done on a p3.8xlarge ec2 instance on AWS. It has 4 Nvidia Tesla V100 GPUs (16GB each) and Intel(R) Xeon(R) CPU E5-2686 v4 @ 2.30GHz with 32 threads. The batch_size set below is per device. For multiple GPUs there are different batches in each GPU of size batch_size simultaneously. The rest of the parameters can be tuned to fit the needs of a user. The values shown below were used to train the model in the model zoo. ``` # Path to dataset data_dir = '/home/ubuntu/faces_ms1m_112x112' # Path to directory where models will be saved prefix = '/home/ubuntu/resnet100' # Load pretrained model pretrained = '' # Checkpoint saving option. 0: discard saving. 1: save when necessary. 
2: always save ckpt = 1 # do verification testing and model saving every verbose batches verbose = 2000 # max training batches max_steps = 0 # number of training epochs end_epoch = 30 # initial learning rate lr = 0.1 # learning rate decay iterations lr_steps = [100000, 140000, 160000] # weight decay wd = 0.0005 # weight decay multiplier for fc7 fc7_wd_mult = 1.0 # momentum mom = 0.9 # embedding length emb_size = 512 # batch size in each context per_batch_size = 64 # margin for loss margin_m = 0.5 # scale for feature margin_s = 64.0 # verification targets target = 'lfw,cfp_fp,agedb_30' beta = 1000.0 beta_min = 5.0 beta_freeze = 0 gamma = 0.12 power = 1.0 scale = 0.9993 ``` ### Helper code class `AccMetric` : used to define and update accuracy metrics class `LossValueMetric` : used to define and update loss metrics `load_property()` : Function for loading num_classes and image_size from datasets folder ``` # Helper class for accuracy metrics class AccMetric(mx.metric.EvalMetric): def __init__(self): self.axis = 1 super(AccMetric, self).__init__( 'acc', axis=self.axis, output_names=None, label_names=None) self.losses = [] self.count = 0 def update(self, labels, preds): self.count+=1 preds = [preds[1]] #use softmax output for label, pred_label in zip(labels, preds): if pred_label.shape != label.shape: pred_label = mx.ndarray.argmax(pred_label, axis=self.axis) pred_label = pred_label.asnumpy().astype('int32').flatten() label = label.asnumpy() if label.ndim==2: label = label[:,0] label = label.astype('int32').flatten() assert label.shape==pred_label.shape self.sum_metric += (pred_label.flat == label.flat).sum() self.num_inst += len(pred_label.flat) # Helper class for loss metrics class LossValueMetric(mx.metric.EvalMetric): def __init__(self): self.axis = 1 super(LossValueMetric, self).__init__( 'lossvalue', axis=self.axis, output_names=None, label_names=None) self.losses = [] def update(self, labels, preds): loss = preds[-1].asnumpy()[0] self.sum_metric += loss 
self.num_inst += 1.0 gt_label = preds[-2].asnumpy() # Helper function for loading num_classes and input image sizes def load_property(data_dir): prop = edict() for line in open(os.path.join(data_dir, 'property')): vec = line.strip().split(',') assert len(vec)==3 prop.num_classes = int(vec[0]) prop.image_size = [int(vec[1]), int(vec[2])] return prop ``` ### Prepare network and define loss `get_symbol()` : Loads the model from the model definition file, defines ArcFace loss ``` def get_symbol(arg_params, aux_params, image_channel, image_h, image_w, num_layers, num_classes, data_dir,prefix,pretrained,ckpt,verbose,max_steps,end_epoch,lr,lr_steps,wd,fc7_wd_mult, mom,emb_size,per_batch_size,margin_m,margin_s,target,beta,beta_min,beta_freeze,gamma,power,scale): data_shape = (image_channel,image_h,image_w) image_shape = ",".join([str(x) for x in data_shape]) margin_symbols = [] print('init resnet', num_layers) # Load Resnet100 model - model definition is present in fresnet.py embedding = fresnet.get_symbol(emb_size, num_layers, version_se=0, version_input=1, version_output='E', version_unit=3, version_act='prelu') all_label = mx.symbol.Variable('softmax_label') gt_label = all_label extra_loss = None _weight = mx.symbol.Variable("fc7_weight", shape=(num_classes, emb_size), lr_mult=1.0, wd_mult=fc7_wd_mult) # Define ArcFace loss s = margin_s m = margin_m assert s>0.0 assert m>=0.0 assert m<(math.pi/2) _weight = mx.symbol.L2Normalization(_weight, mode='instance') nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')*s fc7 = mx.sym.FullyConnected(data=nembedding, weight = _weight, no_bias = True, num_hidden=num_classes, name='fc7') zy = mx.sym.pick(fc7, gt_label, axis=1) cos_t = zy/s cos_m = math.cos(m) sin_m = math.sin(m) mm = math.sin(math.pi-m)*m threshold = math.cos(math.pi-m) cond_v = cos_t - threshold cond = mx.symbol.Activation(data=cond_v, act_type='relu') body = cos_t*cos_t body = 1.0-body sin_t = mx.sym.sqrt(body) new_zy = cos_t*cos_m b = 
sin_t*sin_m new_zy = new_zy - b new_zy = new_zy*s zy_keep = zy - s*mm new_zy = mx.sym.where(cond, new_zy, zy_keep) diff = new_zy - zy diff = mx.sym.expand_dims(diff, 1) gt_one_hot = mx.sym.one_hot(gt_label, depth = num_classes, on_value = 1.0, off_value = 0.0) body = mx.sym.broadcast_mul(gt_one_hot, diff) fc7 = fc7+body out_list = [mx.symbol.BlockGrad(embedding)] softmax = mx.symbol.SoftmaxOutput(data=fc7, label = gt_label, name='softmax', normalization='valid') out_list.append(softmax) out = mx.symbol.Group(out_list) return (out, arg_params, aux_params) ``` ### Define train function `train_net()` : Train model, log training progress, save periodic checkpoints, compute and display validation accuracies periodically ``` def train_net(data_dir,prefix,pretrained,ckpt,verbose,max_steps,end_epoch,lr,lr_steps,wd,fc7_wd_mult, mom,emb_size,per_batch_size,margin_m,margin_s,target,beta,beta_min,beta_freeze,gamma,power,scale): # define context ctx = [] num_gpus = max(mx.test_utils.list_gpus()) + 1 if num_gpus>0: for i in range(num_gpus): ctx.append(mx.gpu(i)) if len(ctx)==0: ctx = [mx.cpu()] print('use cpu') logger = logging.getLogger() logger.setLevel(logging.INFO) prefix_dir = os.path.dirname(prefix) if not os.path.exists(prefix_dir): os.makedirs(prefix_dir) ctx_num = len(ctx) num_layers = 100 print('num_layers',num_layers) batch_size = per_batch_size*ctx_num rescale_threshold = 0 image_channel = 3 os.environ['BETA'] = str(beta) data_dir_list = data_dir.split(',') assert len(data_dir_list)==1 data_dir = data_dir_list[0] path_imgrec = None path_imglist = None prop = load_property(data_dir) num_classes = prop.num_classes image_size = prop.image_size image_h = image_size[0] image_w = image_size[1] print('image_size', image_size) assert(num_classes>0) print('num_classes', num_classes) path_imgrec = os.path.join(data_dir, "train.rec") data_shape = (image_channel,image_size[0],image_size[1]) mean = None begin_epoch = 0 base_lr = lr base_wd = wd base_mom = mom if 
len(pretrained)==0: arg_params = None aux_params = None sym, arg_params, aux_params = get_symbol(arg_params, aux_params, image_channel, image_h, image_w, num_layers, num_classes, data_dir,prefix,pretrained,ckpt, verbose,max_steps,end_epoch,lr,lr_steps,wd,fc7_wd_mult, mom,emb_size,per_batch_size,margin_m,margin_s,target,beta, beta_min,beta_freeze,gamma,power,scale) else: vec = pretrained.split(',') print('loading', vec) _, arg_params, aux_params = mx.model.load_checkpoint(vec[0], int(vec[1])) sym, arg_params, aux_params = get_symbol(arg_params, aux_params) model = mx.mod.Module( context = ctx, symbol = sym, ) val_dataiter = None train_dataiter = FaceImageIter( batch_size = batch_size, data_shape = data_shape, path_imgrec = path_imgrec, shuffle = True, rand_mirror = 1, mean = mean, cutoff = 0, ) _metric = AccMetric() eval_metrics = [mx.metric.create(_metric)] initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="out", magnitude=2) #resnet style _rescale = 1.0/ctx_num opt = optimizer.SGD(learning_rate=base_lr, momentum=base_mom, wd=base_wd, rescale_grad=_rescale) som = 20 _cb = mx.callback.Speedometer(batch_size, som) ver_list = [] ver_name_list = [] for name in target.split(','): path = os.path.join(data_dir,name+".bin") if os.path.exists(path): data_set = verification.load_bin(path, image_size) ver_list.append(data_set) ver_name_list.append(name) print('ver', name) def ver_test(nbatch): results = [] for i in xrange(len(ver_list)): acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(ver_list[i], model, batch_size, 10, None, None) print('[%s][%d]XNorm: %f' % (ver_name_list[i], nbatch, xnorm)) print('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (ver_name_list[i], nbatch, acc2, std2)) results.append(acc2) return results highest_acc = [0.0, 0.0] #lfw and target global_step = [0] save_step = [0] p = 512.0/batch_size for l in xrange(len(lr_steps)): lr_steps[l] = int(lr_steps[l]*p) print('lr_steps', lr_steps) def _batch_callback(param): global_step[0]+=1 
mbatch = global_step[0] for _lr in lr_steps: if mbatch==beta_freeze+_lr: opt.lr *= 0.1 print('lr change to', opt.lr) break _cb(param) if mbatch%1000==0: print('lr-batch-epoch:',opt.lr,param.nbatch,param.epoch) if mbatch>=0 and mbatch%verbose==0: acc_list = ver_test(mbatch) save_step[0]+=1 msave = save_step[0] do_save = False if len(acc_list)>0: lfw_score = acc_list[0] if lfw_score>highest_acc[0]: highest_acc[0] = lfw_score if lfw_score>=0.998: do_save = True if acc_list[-1]>=highest_acc[-1]: highest_acc[-1] = acc_list[-1] if lfw_score>=0.99: do_save = True if ckpt==0: do_save = False elif ckpt>1: do_save = True if do_save: print('saving', msave) arg, aux = model.get_params() mx.model.save_checkpoint(prefix, msave, model.symbol, arg, aux) print('[%d]Accuracy-Highest: %1.5f'%(mbatch, highest_acc[-1])) if mbatch<=beta_freeze: _beta = beta else: move = max(0, mbatch-beta_freeze) _beta = max(beta_min, beta*math.pow(1+gamma*move, -1.0*power)) os.environ['BETA'] = str(_beta) if max_steps>0 and mbatch>max_steps: sys.exit(0) epoch_cb = None model.fit(train_dataiter, begin_epoch = begin_epoch, num_epoch = end_epoch, eval_data = val_dataiter, eval_metric = eval_metrics, kvstore = 'device', optimizer = opt, initializer = initializer, arg_params = arg_params, aux_params = aux_params, allow_missing = True, batch_end_callback = _batch_callback, epoch_end_callback = epoch_cb ) ``` ### Train model * Run the cell below to start training * Logs are displayed in the cell output * An example run of 2000 batches is shown here * Symbols and params files are saved periodically in the `prefix` folder ``` def main(): train_net(data_dir,prefix,pretrained,ckpt,verbose,max_steps,end_epoch,lr,lr_steps,wd,fc7_wd_mult, mom,emb_size,per_batch_size,margin_m,margin_s,target,beta,beta_min,beta_freeze,gamma,power,scale) if __name__ == '__main__': main() ``` ### Slim model The last layer in the trained model is not required for inference and can be discarded (using cell below) which reduces the model 
size. ``` # Choose model to slim (give path to syms and params) prefix = '/home/ubuntu/resnet100' epoch = 1 # Load model sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch) # Populate list containing nodes to be removed all_layers = sym.get_internals() sym = all_layers['fc1_output'] dellist = [] for k,v in arg_params.iteritems(): if k.startswith('fc7'): dellist.append(k) # Remove nodes for d in dellist: del arg_params[d] # Save slimed model mx.model.save_checkpoint(prefix, 0, sym, arg_params, aux_params) ```
github_jupyter
``` !python --version !wget http://geneontology.org/gene-associations/gene_association.sgd.gz -O ./data/gene_association.sgd.gz !wget http://purl.obolibrary.org/obo/go.obo -O ./data/go.obo ``` # Ontology Tree Generator This is a notebook to generate tree data file from original table and annotations. ``` import pandas as pd from goatools import obo_parser # Latest data sources treeSourceUrl = 'http://chianti.ucsd.edu/~kono/ci/data/collapsed_go.no_IGI.propagated.small_parent_tree' oboUrl = './data/go.obo' yeastAnnotationUrl = './data/gene_association.sgd.gz' kegg2goUrl = 'http://geneontology.org/external2go/kegg2go' reactome2go = 'http://geneontology.org/external2go/reactome2go' phenotypeUrl='http://downloads.yeastgenome.org/curation/literature/phenotype_data.tab' # Load the tree data treeColNames = ['parent', 'child', 'type', 'in_tree'] tree = pd.read_csv(treeSourceUrl, delimiter='\t', names=treeColNames) tree.tail(10) ``` ## Parse yeast annotation file ``` cols = pd.read_csv('./annotation_columns.txt', names=['col_names']) col_names = cols['col_names'].tolist() print(col_names) yeastAnnotation = pd.read_csv(yeastAnnotationUrl, delimiter='\t', comment='!', compression='gzip', names=col_names) yeastAnnotation.head() ``` ## Parse OBO ``` obo = obo_parser.GODag(oboUrl) obo['GO:1902580'].name ph = pd.read_csv(phenotypeUrl, delimiter='\t') ph.head() ``` ## Build Base CyJS Network ``` import json goTree = { 'data': { 'name': 'GO Merged Tree' }, 'elements': { 'nodes': [], 'edges': [] } } print(json.dumps(goTree, indent=4)) node_set= set() row = next(tree.iterrows())[1] print(row) print(row['parent']) def get_node(id, name): node = { 'data': { 'id': id } } if id in obo.keys(): go = obo[id] node['data']['name'] = go.name node['data']['namespace'] = go.namespace return node def get_edge(source, target, itr, is_tree): edge = { 'data': { 'source': source, 'target': target, 'interaction': itr, 'is_tree': is_tree } } return edge edges = [] for data in tree.iterrows(): 
row=data[1] node_set.add(row['parent']) node_set.add(row['child']) edges.append(get_edge(row['parent'], row['child'], row['type'], row['in_tree'])) nodes = [] for id in node_set: node = get_node(id) nodes.append(node) print(len(nodes)) print(nodes[0]) print(len(edges)) edges[0] goTree['elements']['nodes'] = nodes goTree['elements']['edges'] = edges with open('./data/tree.cyjs', 'w') as outfile: json.dump(goTree, outfile) yeastAnnotation['DB_Object_Synonym'] = yeastAnnotation['DB_Object_Synonym'].fillna('') result = yeastAnnotation[yeastAnnotation['DB_Object_Synonym'].str.contains('YHR083W')] result ```
github_jupyter
# Variational Autoencoders ## Introduction The variational autoencoder (VAE) is arguably the simplest setup that realizes deep probabilistic modeling. Note that we're being careful in our choice of language here. The VAE isn't a model as such&mdash;rather the VAE is a particular setup for doing variational inference for a certain class of models. The class of models is quite broad: basically any (unsupervised) density estimator with latent random variables. The basic structure of such a model is simple, almost deceptively so (see Fig. 1). Here we've depicted the structure of the kind of model we're interested in as a graphical model. We have $N$ observed datapoints $\{ \bf x_i \}$. Each datapoint is generated by a (local) latent random variable $\bf z_i$. There is also a parameter $\theta$, which is global in the sense that all the datapoints depend on it (which is why it's drawn outside the rectangle). Note that since $\theta$ is a parameter, it's not something we're being Bayesian about. Finally, what's of particular importance here is that we allow for each $\bf x_i$ to depend on $\bf z_i$ in a complex, non-linear way. In practice this dependency will be parameterized by a (deep) neural network with parameters $\theta$. It's this non-linearity that makes inference for this class of models particularly challenging. Of course this non-linear structure is also one reason why this class of models offers a very flexible approach to modeling complex data. Indeed it's worth emphasizing that each of the components of the model can be 'reconfigured' in a variety of different ways. For example: - the neural network in $p_\theta({\bf x} | {\bf z})$ can be varied in all the usual ways (number of layers, type of non-linearities, number of hidden units, etc.) - we can choose observation likelihoods that suit the dataset at hand: gaussian, bernoulli, categorical, etc. 
- we can choose the number of dimensions in the latent space The graphical model representation is a useful way to think about the structure of the model, but it can also be fruitful to look at an explicit factorization of the joint probability density: $$ p({\bf x}, {\bf z}) = \prod_{i=1}^N p_\theta({\bf x}_i | {\bf z}_i) p({\bf z}_i) $$ The fact that $p({\bf x}, {\bf z})$ breaks up into a product of terms like this makes it clear what we mean when we call $\bf z_i$ a local random variable. For any particular $i$, only the single datapoint $\bf x_i$ depends on $\bf z_i$. As such the $\{\bf z_i\}$ describe local structure, i.e. structure that is private to each data point. This factorized structure also means that we can do subsampling during the course of learning. As such this sort of model is amenable to the large data setting. (For more discussion on this and related topics see [SVI Part II](svi_part_ii.ipynb).) That's all there is to the model. Since the observations depend on the latent random variables in a complicated, non-linear way, we expect the posterior over the latents to have a complex structure. Consequently in order to do inference in this model we need to specify a flexible family of guides (i.e. variational distributions). Since we want to be able to scale to large datasets, our guide is going to make use of amortization to keep the number of variational parameters under control (see [SVI Part II](svi_part_ii.ipynb) for a somewhat more general discussion of amortization). Recall that the job of the guide is to 'guess' good values for the latent random variables&mdash;good in the sense that they're true to the model prior _and_ true to the data. If we weren't making use of amortization, we would introduce variational parameters $\{ \lambda_i \}$ for _each_ datapoint $\bf x_i$. 
These variational parameters would represent our belief about 'good' values of $\bf z_i$; for example, they could encode the mean and variance of a gaussian distribution in ${\bf z}_i$ space. Amortization means that, rather than introducing variational parameters $\{ \lambda_i \}$, we instead learn a _function_ that maps each $\bf x_i$ to an appropriate $\lambda_i$. Since we need this function to be flexible, we parameterize it as a neural network. We thus end up with a parameterized family of distributions over the latent $\bf z$ space that can be instantiated for all $N$ datapoint ${\bf x}_i$ (see Fig. 2). Note that the guide $q_{\phi}({\bf z} | {\bf x})$ is parameterized by a global parameter $\phi$ shared by all the datapoints. The goal of inference will be to find 'good' values for $\theta$ and $\phi$ so that two conditions are satisfied: - the log evidence $\log p_\theta({\bf x})$ is large. this means our model is a good fit to the data - the guide $q_{\phi}({\bf z} | {\bf x})$ provides a good approximation to the posterior (For an introduction to stochastic variational inference see [SVI Part I](svi_part_i.ipynb).) At this point we can zoom out and consider the high level structure of our setup. For concreteness, let's suppose the $\{ \bf x_i \}$ are images so that the model is a generative model of images. Once we've learned a good value of $\theta$ we can generate images from the model as follows: - sample $\bf z$ according to the prior $p({\bf z})$ - sample $\bf x$ according to the likelihood $p_\theta({\bf x}|{\bf z})$ Each image is being represented by a latent code $\bf z$ and that code gets mapped to images using the likelihood, which depends on the $\theta$ we've learned. This is why the likelihood is often called the decoder in this context: its job is to decode $\bf z$ into $\bf x$. Note that since this is a probabilistic model, there is uncertainty about the $\bf z$ that encodes a given datapoint $\bf x$. 
Once we've learned good values for $\theta$ and $\phi$ we can also go through the following exercise. - we start with a given image $\bf x$ - using our guide we encode it as $\bf z$ - using the model likelihood we decode $\bf z$ and get a reconstructed image ${\bf x}_\rm{reco}$ If we've learned good values for $\theta$ and $\phi$, $\bf x$ and ${\bf x}_\rm{reco}$ should be similar. This should clarify how the word autoencoder ended up being used to describe this setup: the model is the decoder and the guide is the encoder. Together, they can be thought of as an autoencoder. ## VAE in Pyro So much for preliminaries. Let's see how we implement a VAE in Pyro. The dataset we're going to model is MNIST, a collection of images of handwritten digits. Since this is a popular benchmark dataset, we can make use of PyTorch's convenient data loader functionalities to reduce the amount of boilerplate code we need to write: ``` import os import numpy as np import torch import torchvision.datasets as dset import torch.nn as nn import torchvision.transforms as transforms import pyro import pyro.distributions as dist from pyro.infer import SVI, Trace_ELBO from pyro.optim import Adam pyro.enable_validation(True) pyro.distributions.enable_validation(False) pyro.set_rng_seed(0) # Enable smoke test - run the notebook cells on CI. 
smoke_test = 'CI' in os.environ # for loading and batching MNIST dataset def setup_data_loaders(batch_size=128, use_cuda=False): root = './data' download = True trans = transforms.ToTensor() train_set = dset.MNIST(root=root, train=True, transform=trans, download=download) test_set = dset.MNIST(root=root, train=False, transform=trans) kwargs = {'num_workers': 1, 'pin_memory': use_cuda} train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False, **kwargs) return train_loader, test_loader ``` The main thing to draw attention to here is that we use `transforms.ToTensor()` to normalize the pixel intensities to the range $[0.0, 1.0]$. Next we define a PyTorch module that encapsulates our decoder network: ``` class Decoder(nn.Module): def __init__(self, z_dim, hidden_dim): super(Decoder, self).__init__() # setup the two linear transformations used self.fc1 = nn.Linear(z_dim, hidden_dim) self.fc21 = nn.Linear(hidden_dim, 784) # setup the non-linearities self.softplus = nn.Softplus() self.sigmoid = nn.Sigmoid() def forward(self, z): # define the forward computation on the latent z # first compute the hidden units hidden = self.softplus(self.fc1(z)) # return the parameter for the output Bernoulli # each is of size batch_size x 784 loc_img = self.sigmoid(self.fc21(hidden)) return loc_img ``` Given a latent code $z$, the forward call of `Decoder` returns the parameters for a Bernoulli distribution in image space. Since each image is of size $28\times28=784$, `loc_img` is of size `batch_size` x 784. 
Next we define a PyTorch module that encapsulates our encoder network: ``` class Encoder(nn.Module): def __init__(self, z_dim, hidden_dim): super(Encoder, self).__init__() # setup the three linear transformations used self.fc1 = nn.Linear(784, hidden_dim) self.fc21 = nn.Linear(hidden_dim, z_dim) self.fc22 = nn.Linear(hidden_dim, z_dim) # setup the non-linearities self.softplus = nn.Softplus() def forward(self, x): # define the forward computation on the image x # first shape the mini-batch to have pixels in the rightmost dimension x = x.reshape(-1, 784) # then compute the hidden units hidden = self.softplus(self.fc1(x)) # then return a mean vector and a (positive) square root covariance # each of size batch_size x z_dim z_loc = self.fc21(hidden) z_scale = torch.exp(self.fc22(hidden)) return z_loc, z_scale ``` Given an image $\bf x$ the forward call of `Encoder` returns a mean and covariance that together parameterize a (diagonal) Gaussian distribution in latent space. With our encoder and decoder networks in hand, we can now write down the stochastic functions that represent our model and guide. First the model: ``` # define the model p(x|z)p(z) def model(self, x): # register PyTorch module `decoder` with Pyro pyro.module("decoder", self.decoder) with pyro.iarange("data", x.shape[0]): # setup hyperparameters for prior p(z) z_loc = x.new_zeros(torch.Size((x.shape[0], self.z_dim))) z_scale = x.new_ones(torch.Size((x.shape[0], self.z_dim))) # sample from prior (value will be sampled by guide when computing the ELBO) z = pyro.sample("latent", dist.Normal(z_loc, z_scale).independent(1)) # decode the latent code z loc_img = self.decoder.forward(z) # score against actual images pyro.sample("obs", dist.Bernoulli(loc_img).independent(1), obs=x.reshape(-1, 784)) ``` Note that `model()` is a callable that takes in a mini-batch of images `x` as input. This is a `torch.Tensor` of size `batch_size` x 784. 
The first thing we do inside of `model()` is register the (previously instantiated) decoder module with Pyro. Note that we give it an appropriate (and unique) name. This call to `pyro.module` lets Pyro know about all the parameters inside of the decoder network. Next we setup the hyperparameters for our prior, which is just a unit normal gaussian distribution. Note that: - we specifically designate independence amongst the data in our mini-batch (i.e. the leftmost dimension) via `pyro.iarange`. Also, note the use of `.independent(1)` when sampling from the latent `z` - this ensures that instead of treating our sample as being generated from a univariate normal with `batch_size = z_dim`, we treat them as being generated from a multivariate normal distribution with diagonal covariance. As such, the log probabilities along each dimension is summed out when we evaluate `.log_prob` for a "latent" sample. Refer to the [Tensor Shapes](tensor_shapes.ipynb) tutorial for more details. - since we're processing an entire mini-batch of images, we need the leftmost dimension of `z_loc` and `z_scale` to equal the mini-batch size - in case we're on GPU, we use `new_zeros` and `new_ones` to ensure that newly created tensors are on the same GPU device. Next we sample the latent `z` from the prior, making sure to give the random variable a unique Pyro name `'latent'`. Then we pass `z` through the decoder network, which returns `loc_img`. We then score the observed images in the mini-batch `x` against the Bernoulli likelihood parametrized by `loc_img`. Note that we flatten `x` so that all the pixels are in the rightmost dimension. That's all there is to it! Note how closely the flow of Pyro primitives in `model` follows the generative story of our model, e.g. as encapsulated by Figure 1. Now we move on to the guide: ``` # define the guide (i.e. 
variational distribution) q(z|x) def guide(self, x): # register PyTorch module `encoder` with Pyro pyro.module("encoder", self.encoder) with pyro.iarange("data", x.shape[0]): # use the encoder to get the parameters used to define q(z|x) z_loc, z_scale = self.encoder.forward(x) # sample the latent code z pyro.sample("latent", dist.Normal(z_loc, z_scale).independent(1)) ``` Just like in the model, we first register the PyTorch module we're using (namely `encoder`) with Pyro. We take the mini-batch of images `x` and pass it through the encoder. Then using the parameters output by the encoder network we use the normal distribution to sample a value of the latent for each image in the mini-batch. Crucially, we use the same name for the latent random variable as we did in the model: `'latent'`. Also, note the use of `pyro.iarange` to designate independence of the mini-batch dimension, and `.independent(1)` to enforce dependence on `z_dims`, exactly as we did in the model. Now that we've defined the full model and guide we can move on to inference. 
But before we do so let's see how we package the model and guide in a PyTorch module: ``` class VAE(nn.Module): # by default our latent space is 50-dimensional # and we use 400 hidden units def __init__(self, z_dim=50, hidden_dim=400, use_cuda=False): super(VAE, self).__init__() # create the encoder and decoder networks self.encoder = Encoder(z_dim, hidden_dim) self.decoder = Decoder(z_dim, hidden_dim) if use_cuda: # calling cuda() here will put all the parameters of # the encoder and decoder networks into gpu memory self.cuda() self.use_cuda = use_cuda self.z_dim = z_dim # define the model p(x|z)p(z) def model(self, x): # register PyTorch module `decoder` with Pyro pyro.module("decoder", self.decoder) with pyro.iarange("data", x.shape[0]): # setup hyperparameters for prior p(z) z_loc = x.new_zeros(torch.Size((x.shape[0], self.z_dim))) z_scale = x.new_ones(torch.Size((x.shape[0], self.z_dim))) # sample from prior (value will be sampled by guide when computing the ELBO) z = pyro.sample("latent", dist.Normal(z_loc, z_scale).independent(1)) # decode the latent code z loc_img = self.decoder.forward(z) # score against actual images pyro.sample("obs", dist.Bernoulli(loc_img).independent(1), obs=x.reshape(-1, 784)) # define the guide (i.e. 
variational distribution) q(z|x) def guide(self, x): # register PyTorch module `encoder` with Pyro pyro.module("encoder", self.encoder) with pyro.iarange("data", x.shape[0]): # use the encoder to get the parameters used to define q(z|x) z_loc, z_scale = self.encoder.forward(x) # sample the latent code z pyro.sample("latent", dist.Normal(z_loc, z_scale).independent(1)) # define a helper function for reconstructing images def reconstruct_img(self, x): # encode image x z_loc, z_scale = self.encoder(x) # sample in latent space z = dist.Normal(z_loc, z_scale).sample() # decode the image (note we don't sample in image space) loc_img = self.decoder(z) return loc_img ``` The point we'd like to make here is that the two `Module`s `encoder` and `decoder` are attributes of `VAE` (which itself inherits from `nn.Module`). This has the consequence they are both automatically registered as belonging to the `VAE` module. So, for example, when we call `parameters()` on an instance of `VAE`, PyTorch will know to return all the relevant parameters. It also means that if we're running on a GPU, the call to `cuda()` will move all the parameters of all the (sub)modules into GPU memory. ## Inference We're now ready for inference. Refer to the full code in the next section. First we instantiate an instance of the `VAE` module. ``` vae = VAE() ``` Then we setup an instance of the Adam optimizer. ``` optimizer = Adam({"lr": 1.0e-3}) ``` Then we setup our inference algorithm, which is going to learn good parameters for the model and guide by maximizing the ELBO: ``` svi = SVI(vae.model, vae.guide, optimizer, loss=Trace_ELBO()) ``` That's all there is to it. Now we just have to define our training loop: ``` def train(svi, train_loader, use_cuda=False): # initialize loss accumulator epoch_loss = 0. 
# do a training epoch over each mini-batch x returned # by the data loader for x, _ in train_loader: # if on GPU put mini-batch into CUDA memory if use_cuda: x = x.cuda() # do ELBO gradient and accumulate loss epoch_loss += svi.step(x) # return epoch loss normalizer_train = len(train_loader.dataset) total_epoch_loss_train = epoch_loss / normalizer_train return total_epoch_loss_train ``` Note that all the mini-batch logic is handled by the data loader. The meat of the training loop is `svi.step(x)`. There are two things we should draw attention to here: - any arguments to `step` are passed to the model and the guide. consequently `model` and `guide` need to have the same call signature - `step` returns a noisy estimate of the loss (i.e. minus the ELBO). this estimate is not normalized in any way, so e.g. it scales with the size of the mini-batch The logic for adding evaluation logic is analogous: ``` def evaluate(svi, test_loader, use_cuda=False): # initialize loss accumulator test_loss = 0. # compute the loss over the entire test set for x, _ in test_loader: # if on GPU put mini-batch into CUDA memory if use_cuda: x = x.cuda() # compute ELBO estimate and accumulate loss test_loss += svi.evaluate_loss(x) normalizer_test = len(test_loader.dataset) total_epoch_loss_test = test_loss / normalizer_test return total_epoch_loss_test ``` Basically the only change we need to make is that we call evaluate_loss instead of step. This function will compute an estimate of the ELBO but won't take any gradient steps. The final piece of code we'd like to highlight is the helper method `reconstruct_img` in the VAE class: This is just the image reconstruction experiment we described in the introduction translated into code. We take an image and pass it through the encoder. Then we sample in latent space using the gaussian distribution provided by the encoder. Finally we decode the latent code into an image: we return the mean vector `loc_img` instead of sampling with it. 
Note that since the `sample()` statement is stochastic, we'll get different draws of z every time we run the reconstruct_img function. If we've learned a good model and guide—in particular if we've learned a good latent representation—this plurality of z samples will correspond to different styles of digit writing, and the reconstructed images should exhibit an interesting variety of different styles. ## Code and Sample results Training corresponds to maximizing the evidence lower bound (ELBO) over the training dataset. We train for 100 iterations and evaluate the ELBO for the test dataset, see Figure 3. ``` # Run options LEARNING_RATE = 1.0e-3 USE_CUDA = False # Run only for a single iteration for testing NUM_EPOCHS = 1 if smoke_test else 100 TEST_FREQUENCY = 5 train_loader, test_loader = setup_data_loaders(batch_size=256, use_cuda=USE_CUDA) # clear param store pyro.clear_param_store() # setup the VAE vae = VAE(use_cuda=USE_CUDA) # setup the optimizer adam_args = {"lr": LEARNING_RATE} optimizer = Adam(adam_args) # setup the inference algorithm svi = SVI(vae.model, vae.guide, optimizer, loss=Trace_ELBO()) train_elbo = [] test_elbo = [] # training loop for epoch in range(NUM_EPOCHS): total_epoch_loss_train = train(svi, train_loader, use_cuda=USE_CUDA) train_elbo.append(-total_epoch_loss_train) print("[epoch %03d] average training loss: %.4f" % (epoch, total_epoch_loss_train)) if epoch % TEST_FREQUENCY == 0: # report test diagnostics total_epoch_loss_test = evaluate(svi, test_loader, use_cuda=USE_CUDA) test_elbo.append(-total_epoch_loss_test) print("[epoch %03d] average test loss: %.4f" % (epoch, total_epoch_loss_test)) ``` Next we show a set of randomly sampled images from the model. These are generated by drawing random samples of `z` and generating an image for each one, see Figure 4. We also study the 50-dimensional latent space of the entire test dataset by encoding all MNIST images and embedding their means into a 2-dimensional T-SNE space. 
We then color each embedded image by its class. The resulting Figure 5 shows separation by class with variance within each class-cluster. See the full code on [Github](https://github.com/uber/pyro/blob/dev/examples/vae/vae.py). ## References [1] `Auto-Encoding Variational Bayes`,<br/>&nbsp;&nbsp;&nbsp;&nbsp; Diederik P Kingma, Max Welling [2] `Stochastic Backpropagation and Approximate Inference in Deep Generative Models`, <br/>&nbsp;&nbsp;&nbsp;&nbsp; Danilo Jimenez Rezende, Shakir Mohamed, Daan Wierstra
github_jupyter
# Colab initialization - install the pipeline in the colab runtime - download files neccessary for this example ``` !pip3 install -U pip > /dev/null !pip3 install -U bio_embeddings[all] > /dev/null !wget http://data.bioembeddings.com/public/embeddings/notebooks/custom_data/disprot_2019_09_labelled_0.2_0.8.csv --output-document disprot_2019_09_labelled_0.2_0.8.csv ``` # Remove some annotations from an annotation file In order to make sure that you are only transfering annotations for embeddings in the reference embedding file, the pipeline checks that all identifiers in the reference annotations are present in the reference embeddings. If there is a mismatch (you have annotations for sequences/embeddings not present in the reference embeddings), the pipeline will ask you to remove those annotations from your annotation file. This is done to make sure that you don't believe a certain sequence/embedding is in your reference set, if it in fact is not! ``` from bio_embeddings.utilities import remove_identifiers_from_annotations_file faulty_identifiers = 
['Q12905','Q98XH7','P61244-2','Q62627','O14936','P19619','Q86UX7','B7T1D9','Q8K4J6','Q10Q08','Q99ML1','O76070','Q00987','P45481','Q9Y6Q9','O95718','P10587','Q9UM11','P02313','P03347','Q9UGL1','P05107','Q9NR61','Q13698','P00533','O60829','P00514','Q71UI9','Q13541','Q9Z0P7','P02511','P17677','P12493','P38919','P22303','P29990','Q9NHC3','O00204-2','O88597','Q548Y4','P04486','P52564','P24928','Q2YHF0','P26554','Q15796','Q9BRG1','Q04207','P63165','Q13573','P14921','Q6P8Z1','P27577','Q9JK11','P62152','P16535','Q868N5','P45561','P14340','P17763','P62326','Q92731','P06935','P03254','P61925','Q09472','P10636-8','P30281','P96884','P13861','Q63450','P17870','O43236-6','Q9Q6P4','P42763','Q9NQA5','P04052','P42759','Q9FG31','P23443','P04370-5','Q03519','P20810','P27958','P02628','C4PB33','Q96270','P41351','P0CE48','P05106','K7J0R2','B9UCQ5','P06239','P27782','O92972','P10923','P84051','P08592','P19599','P11632','P12506','P02686-5','P81558','P84022','Q13469','M5BF30','Q15797','Q9WMX2','A0A068MVV3','C4M0U8','P02619','P27695','Q1K7R9','P42224','Q60795','Q32ZE1','A0A1Z3GD08','O94444','E2IHW6','P02259','D2JX42','P31751','P03129','P12823','P35222','P10114','Q96RI1','P03404','P16220','P0A7L8','P03259','P05318','P68363','P49723','P02687','Q61548','P18212','P60604','P14335','P07746','P27285','Q16143','Q9YGY0','E1UJ20','Q16222-2','P17639','P46108-2','Q5UB51'] filtered_annotations_file = remove_identifiers_from_annotations_file( faulty_identifiers, "disprot_2019_09_labelled_0.2_0.8.csv") filtered_annotations_file[:10] filtered_annotations_file.to_csv("disprot_2019_09_labelled_0.2_0.8_filtered.csv", index=False) ```
github_jupyter
import numpy as np # linear algebra
matches['score_difference'] = matches['home_score'] - matches['away_score'] matches['is_won'] = matches['score_difference'] > 0 # take draw as lost matches['is_stake'] = matches['tournament'] != 'Friendly' max_rest = 30 matches['rest_days'] = matches.groupby('home_team').diff()['date'].dt.days.clip(0,max_rest).fillna(max_rest) matches['wc_participant'] = matches['home_team'] * matches['home_team'].isin(world_cup.index.tolist()) matches['wc_participant'] = matches['wc_participant'].replace({'':'Other'}) # matches = matches.join(pd.get_dummies(matches['wc_participant'])) matches matches.info() ``` ## Catboost ``` ycat = matches['is_won'] Xcat = matches.drop('is_won',axis = 1).drop('date',axis =1).drop('rank_date_home',axis =1).drop('rank_date_away',axis=1).drop('score_difference',axis=1).drop('away_score',axis=1).drop('home_score',axis=1) ``` **T-T-S** ``` X_trainc, X_testc, y_trainc, y_testc = train_test_split(Xcat, ycat, test_size=0.2, random_state=42) from catboost import CatBoostRegressor, CatBoostClassifier, Pool categorical_features_indices = np.where(X_trainc.dtypes != np.float)[0] train_pool = Pool(X_trainc, y_trainc, cat_features=categorical_features_indices) Catset=CatBoostClassifier(loss_function = 'Logloss', class_weights = [1,4]) Cat = Catset.fit(X_trainc, y_trainc, cat_features=categorical_features_indices); y_predcat = Cat.predict(X_testc) metrics.accuracy_score(y_testc, y_predcat) metrics.confusion_matrix(y_testc, y_predcat) metrics.precision_score(y_testc, y_predcat, pos_label=0) metrics.recall_score(y_testc, y_predcat, pos_label=0) feature_importances = Cat.get_feature_importance(train_pool) feature_names = X_trainc.columns for score, name in sorted(zip(feature_importances, feature_names), reverse=True): print('{}: {}'.format(name, score)) ```
github_jupyter
# Método de Welch Neste notebook avaliamos o método de Welch, uma ferramenta para análise estatística do espectro. Basicamente tomamos transformadas de Fourier de pequenas janelas temporais que vão se deslocando com o tempo e, então, o espectro médio. ``` # importar as bibliotecas necessárias import numpy as np # arrays import matplotlib.pyplot as plt # plots plt.rcParams.update({'font.size': 14}) from scipy import signal import IPython.display as ipd # to play signals import sounddevice as sd import soundfile as sf ``` # Intuição Vejamos uma intuição sobre o método de Welch. # Os parâmetros do espectograma Dê uma olhada na documentação do scipy: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.csd.html - Os sinais, $x(t)$ e $y(t)$ no tempo - A frequência de amostragem - window: A janela temporal desejada - Overlap: Número de pontos em que teremos uma sobreposição das janelas. - nfft: Número de pontos da fft (zero padding). Se não for fornecido, é o número de pontos na janela temporal. 
![sfft_overlap_larger.png](attachment:sfft_overlap_larger.png) # Exemplo 1: O ruído de uma bomba d'agua ``` ##### x(t) water pump ################# xg, Fs = sf.read('waterpump.wav') xt = xg[:int(Fs*5),0] time = np.linspace(0, (len(xt)-1)/Fs, len(xt)) #plot plt.figure(figsize=(12,3)) plt.plot(time, xt, '-b',linewidth = 1, alpha = 0.7) plt.grid(linestyle = '--', which='both') plt.xlabel('Tempo [s]') plt.ylabel('Amplitude [-]') plt.xlim((0, time[-1])) ipd.Audio(xt, rate=Fs) # load a NumPy array ``` # Cálculos dos espectros Vamos calcular: - Via uma FFT direta no sinal - Via método de Welch ``` # Método direto Xw = np.fft.fft(xt) Xw2 = (np.abs(Xw)/len(Xw))**2 freq = np.linspace(0, (len(Xw)-1)*Fs/len(Xw), len(Xw)) # Método de Welch win_size = 2*8192 win = signal.windows.hann(win_size) noverlap = win_size/2 nfft = 8*win_size f, Sxx = signal.csd(xt, xt, Fs, window = win, noverlap = noverlap, nfft = nfft, scaling = 'density') # plots plt.figure(figsize=(12,3)) plt.semilogx(freq, 10*np.log10(Xw2), '-b',linewidth = 1, alpha = 0.7, label = 'direto') plt.semilogx(f, 10*np.log10(Sxx), '-r',linewidth = 2, label = 'Welch') plt.legend(loc = 'upper right') plt.grid(linestyle = '--', which='both') plt.xlabel('Tempo [s]') plt.ylabel('Amplitude [-]') plt.xlim((10, Fs/2)) plt.ylim((-100, -20)); ``` # Exemplo 2 - O ruído branco Vimos nas aulas passadas que o ruído branco tem uma auto-correlação do tipo impulsiva ``` fs = 44100 time = np.arange(0, 2, 1/fs) # sinal aleatório completo xt = np.random.normal(loc = 0, scale = 1, size = len(time)) # plot signal plt.figure(figsize = (10, 3)) plt.plot(time, xt, linewidth = 1, color = 'b', alpha = 0.7) plt.grid(linestyle = '--', which='both') plt.ylabel(r'$x(t)$ [Pa]') plt.xlim((0, time[-1])) plt.ylim((-4, 4)) plt.xlabel('Tempo [s]') plt.tight_layout() # Calculemos a auto-correlação Rxx = np.correlate(xt, xt, mode = 'same') tau = np.linspace(-len(Rxx)/fs, len(Rxx)/fs, len(Rxx)) # plot autocorrelação plt.figure(figsize = (10, 3)) plt.plot(tau, 
Rxx/fs, linewidth = 1, color = 'b', alpha = 0.7) plt.grid(linestyle = '--', which='both') plt.ylabel(r'$R_{xx}(\tau)$ [Pa$^2$]') plt.xlim((tau[0], tau[-1])) plt.ylim((-0.5, 1.2*max(np.abs(Rxx/fs)))) plt.xlabel(r'$\tau$ [s]') plt.tight_layout() # Método direto Xw = np.fft.fft(xt) Xw2 = (np.abs(Xw)/len(Xw))**2 freq = np.linspace(0, (len(Xw)-1)*Fs/len(Xw), len(Xw)) # Método de Welch win_size = 4096 win = signal.windows.hann(win_size) noverlap = win_size/2 nfft = win_size f, Sxx = signal.csd(xt, xt, Fs, window = win, noverlap = noverlap, nfft = nfft, scaling = 'density') # plots plt.figure(figsize=(12,3)) plt.semilogx(freq, 10*np.log10(Xw2), '-b',linewidth = 1, alpha = 0.7, label = 'direto') plt.semilogx(f, 10*np.log10(Sxx), '-r',linewidth = 2, label = 'Welch') plt.legend(loc = 'lower left') plt.grid(linestyle = '--', which='both') plt.xlabel('Tempo [s]') plt.ylabel('Amplitude [-]') plt.xlim((10, Fs/2)) plt.ylim((-100, -20)); ```
github_jupyter
``` # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. ``` # Absolute camera orientation given set of relative camera pairs This tutorial showcases the `cameras`, `transforms` and `so3` API. The problem we deal with is defined as follows: Given an optical system of $N$ cameras with extrinsics $\{g_1, ..., g_N | g_i \in SE(3)\}$, and a set of relative camera positions $\{g_{ij} | g_{ij}\in SE(3)\}$ that map between coordinate frames of randomly selected pairs of cameras $(i, j)$, we search for the absolute extrinsic parameters $\{g_1, ..., g_N\}$ that are consistent with the relative camera motions. More formally: $$ g_1, ..., g_N = {\arg \min}_{g_1, ..., g_N} \sum_{g_{ij}} d(g_{ij}, g_i^{-1} g_j), $$, where $d(g_i, g_j)$ is a suitable metric that compares the extrinsics of cameras $g_i$ and $g_j$. Visually, the problem can be described as follows. The picture below depicts the situation at the beginning of our optimization. The ground truth cameras are plotted in green while the randomly initialized estimated cameras are plotted in blue: ![Initialization](data/bundle_adjustment_initialization.png) Our optimization seeks to align the estimated (blue) cameras with the ground truth (green) cameras, by minimizing the discrepancies between pairs of relative cameras. Thus, the solution to the problem should look as follows: ![Solution](data/bundle_adjustment_final.png) In practice, the camera extrinsics $g_{ij}$ and $g_i$ are represented using objects from the `SfMPerspectiveCameras` class initialized with the corresponding rotation and translation matrices `R_absolute` and `T_absolute` that define the extrinsic parameters $g = (R, T); R \in SO(3); T \in \mathbb{R}^3$. In order to ensure that `R_absolute` is a valid rotation matrix, we represent it using an exponential map (implemented with `so3_exponential_map`) of the axis-angle representation of the rotation `log_R_absolute`. 
Note that the solution to this problem could only be recovered up to an unknown global rigid transformation $g_{glob} \in SE(3)$. Thus, for simplicity, we assume knowledge of the absolute extrinsics of the first camera $g_0$. We set $g_0$ as a trivial camera $g_0 = (I, \vec{0})$. ## 0. Import Modules ``` # imports import torch from pytorch3d.transforms.so3 import ( so3_exponential_map, so3_relative_angle, ) from pytorch3d.renderer.cameras import ( SfMPerspectiveCameras, ) # add path for demo utils import sys import os sys.path.append(os.path.abspath('')) from utils import plot_camera_scene # set for reproducibility torch.manual_seed(42) ``` ## 1. Set up Cameras and load ground truth positions ``` # load the SE3 graph of relative/absolute camera positions camera_graph_file = './data/camera_graph.pth' (R_absolute_gt, T_absolute_gt), \ (R_relative, T_relative), \ relative_edges = \ torch.load(camera_graph_file) # create the relative cameras cameras_relative = SfMPerspectiveCameras( R = R_relative.cuda(), T = T_relative.cuda(), device = "cuda", ) # create the absolute ground truth cameras cameras_absolute_gt = SfMPerspectiveCameras( R = R_absolute_gt.cuda(), T = T_absolute_gt.cuda(), device = "cuda", ) # the number of absolute camera positions N = R_absolute_gt.shape[0] ``` ## 2. Define optimization functions ### Relative cameras and camera distance We now define two functions crucial for the optimization. **`calc_camera_distance`** compares a pair of cameras. This function is important as it defines the loss that we are minimizing. The method utilizes the `so3_relative_angle` function from the SO3 API. **`get_relative_camera`** computes the parameters of a relative camera that maps between a pair of absolute cameras. Here we utilize the `compose` and `inverse` class methods from the PyTorch3d Transforms API. ``` def calc_camera_distance(cam_1, cam_2): """ Calculates the divergence of a batch of pairs of cameras cam_1, cam_2. 
The distance is composed of the cosine of the relative angle between the rotation components of the camera extrinsics and the l2 distance between the translation vectors. """ # rotation distance R_distance = (1.-so3_relative_angle(cam_1.R, cam_2.R, cos_angle=True)).mean() # translation distance T_distance = ((cam_1.T - cam_2.T)**2).sum(1).mean() # the final distance is the sum return R_distance + T_distance def get_relative_camera(cams, edges): """ For each pair of indices (i,j) in "edges" generate a camera that maps from the coordinates of the camera cams[i] to the coordinates of the camera cams[j] """ # first generate the world-to-view Transform3d objects of each # camera pair (i, j) according to the edges argument trans_i, trans_j = [ SfMPerspectiveCameras( R = cams.R[edges[:, i]], T = cams.T[edges[:, i]], device = "cuda", ).get_world_to_view_transform() for i in (0, 1) ] # compose the relative transformation as g_i^{-1} g_j trans_rel = trans_i.inverse().compose(trans_j) # generate a camera from the relative transform matrix_rel = trans_rel.get_matrix() cams_relative = SfMPerspectiveCameras( R = matrix_rel[:, :3, :3], T = matrix_rel[:, 3, :3], device = "cuda", ) return cams_relative ``` ## 3. Optimization Finally, we start the optimization of the absolute cameras. We use SGD with momentum and optimize over `log_R_absolute` and `T_absolute`. As mentioned earlier, `log_R_absolute` is the axis angle representation of the rotation part of our absolute cameras. We can obtain the 3x3 rotation matrix `R_absolute` that corresponds to `log_R_absolute` with: `R_absolute = so3_exponential_map(log_R_absolute)` ``` # initialize the absolute log-rotations/translations with random entries log_R_absolute_init = torch.randn(N, 3).float().cuda() T_absolute_init = torch.randn(N, 3).float().cuda() # futhermore, we know that the first camera is a trivial one # (see the description above) log_R_absolute_init[0, :] = 0. T_absolute_init[0, :] = 0. 
# instantiate a copy of the initialization of log_R / T log_R_absolute = log_R_absolute_init.clone().detach() log_R_absolute.requires_grad = True T_absolute = T_absolute_init.clone().detach() T_absolute.requires_grad = True # the mask the specifies which cameras are going to be optimized # (since we know the first camera is already correct, # we only optimize over the 2nd-to-last cameras) camera_mask = torch.ones(N, 1).float().cuda() camera_mask[0] = 0. # init the optimizer optimizer = torch.optim.SGD([log_R_absolute, T_absolute], lr=.1, momentum=0.9) # run the optimization n_iter = 2000 # fix the number of iterations for it in range(n_iter): # re-init the optimizer gradients optimizer.zero_grad() # compute the absolute camera rotations as # an exponential map of the logarithms (=axis-angles) # of the absolute rotations R_absolute = so3_exponential_map(log_R_absolute * camera_mask) # get the current absolute cameras cameras_absolute = SfMPerspectiveCameras( R = R_absolute, T = T_absolute * camera_mask, device = "cuda", ) # compute the relative cameras as a compositon of the absolute cameras cameras_relative_composed = \ get_relative_camera(cameras_absolute, relative_edges) # compare the composed cameras with the ground truth relative cameras # camera_distance corresponds to $d$ from the description camera_distance = \ calc_camera_distance(cameras_relative_composed, cameras_relative) # our loss function is the camera_distance camera_distance.backward() # apply the gradients optimizer.step() # plot and print status message if it % 200==0 or it==n_iter-1: status = 'iteration=%3d; camera_distance=%1.3e' % (it, camera_distance) plot_camera_scene(cameras_absolute, cameras_absolute_gt, status) print('Optimization finished.') ```
github_jupyter
``` %matplotlib inline ``` # Histograms Demonstrates how to plot histograms with matplotlib. ``` import matplotlib.pyplot as plt import numpy as np from matplotlib import colors from matplotlib.ticker import PercentFormatter # Fixing random state for reproducibility np.random.seed(19680801) ``` Generate data and plot a simple histogram ----------------------------------------- To generate a 1D histogram we only need a single vector of numbers. For a 2D histogram we'll need a second vector. We'll generate both below, and show the histogram for each vector. ``` N_points = 100000 n_bins = 20 # Generate a normal distribution, center at x=0 and y=5 x = np.random.randn(N_points) y = .4 * x + np.random.randn(100000) + 5 fig, axs = plt.subplots(1, 2, sharey=True, tight_layout=True) # We can set the number of bins with the `bins` kwarg axs[0].hist(x, bins=n_bins) axs[1].hist(y, bins=n_bins) ``` Updating histogram colors ------------------------- The histogram method returns (among other things) a ``patches`` object. This gives us access to the properties of the objects drawn. Using this, we can edit the histogram to our liking. Let's change the color of each bar based on its y value. 
```
fig, axs = plt.subplots(1, 2, tight_layout=True)

# N is the count in each bin, bins is the lower-limit of the bin
# (patches are the drawn bar artists, one per bin)
N, bins, patches = axs[0].hist(x, bins=n_bins)

# We'll color code by height, but you could use any scalar
fracs = N / N.max()

# we need to normalize the data to 0..1 for the full range of the colormap
norm = colors.Normalize(fracs.min(), fracs.max())

# Now, we'll loop through our objects and set the color of each accordingly
for thisfrac, thispatch in zip(fracs, patches):
    color = plt.cm.viridis(norm(thisfrac))
    thispatch.set_facecolor(color)

# We can also normalize our inputs by the total number of counts
axs[1].hist(x, bins=n_bins, density=True)

# Now we format the y-axis to display percentage
axs[1].yaxis.set_major_formatter(PercentFormatter(xmax=1))
```

Plot a 2D histogram
-------------------

To plot a 2D histogram, one only needs two vectors of the same length,
corresponding to each axis of the histogram.

```
fig, ax = plt.subplots(tight_layout=True)
hist = ax.hist2d(x, y)
```

Customizing your histogram
--------------------------

Customizing a 2D histogram is similar to the 1D case, you can control
visual components such as the bin size or color normalization.

```
fig, axs = plt.subplots(3, 1, figsize=(5, 15), sharex=True, sharey=True,
                        tight_layout=True)

# We can increase the number of bins on each axis
axs[0].hist2d(x, y, bins=40)

# As well as define normalization of the colors
# (log-scaled color normalization helps when bin counts span many magnitudes)
axs[1].hist2d(x, y, bins=40, norm=colors.LogNorm())

# We can also define custom numbers of bins for each axis
axs[2].hist2d(x, y, bins=(80, 10), norm=colors.LogNorm())

plt.show()
```
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/image_overview.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/image_overview.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Image/image_overview.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/image_overview.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. 
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving). ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('geemap package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) # Checks whether this notebook is running on Google Colab try: import google.colab import geemap.eefolium as emap except: import geemap as emap # Authenticates and initializes Earth Engine import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. 
```
# Interactive map centered on the continental US (lat 40, lon -100)
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map  # leaving `Map` as the last expression renders the map widget in the cell
```

## Add Earth Engine Python script

```
# Add Earth Engine dataset
```

## Display Earth Engine data layers

```
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
```
github_jupyter
# Group_Data_Analysis_PCA_10th_adding multiple params
* Version: '0.0.4'
* Date: 2021-05-03
* Author: Jea Kwon
* Description: PCA analysis with multiple params 3D plot

```
from avatarpy import Avatar
import os
import glob
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import cufflinks as cf
from scipy.stats import zscore
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
cf.go_offline(connected=True)

root = r"C:\Users\Jay\Desktop\avatar_young_adult\data\best1_20210503"

# recordings grouped by genotype (wt/ko) and age (young/adult)
avatars = dict(
    wt=dict(
        young=[],
        adult=[],
    ),
    ko=dict(
        young=[],
        adult=[],
    )
)

# directory layout is <root>/<genotype>/<age>/<session>/<file>.csv:
# genotype and age are recovered from the two parent folders of each csv
for path, subdirs, files in os.walk(root):
    for name in files:
        if name.lower().endswith('.csv'):
            csv_path = os.path.join(path, name)
            age = os.path.basename(os.path.dirname(path))
            genotype = os.path.basename(os.path.dirname(os.path.dirname(path)))
            avatars[genotype][age].append(Avatar(csv_path=csv_path, ID=name))
```

## Create walking event data
### Definition of walking
- Moved more than 5 cm in 1 second(20=Frame)
- More details take a look Group_Data_Analysis_PCA_1st_Trial

## Event Search function

```
def get_event_indices(boo, event_length):
    """Returns list of event indices.
    ex) [(start 1, end 1), (start 2, end 2), (start 3, end 3), ..., (start N, end N)]

    boo: 1-D boolean array, True where the event criterion holds.
    event_length: number of frames per event window (20 frames = 1 s here).
    Each pair covers the event_length frames ending at (and including) the
    first True frame of each True-run in `boo`.
    """
    # frame indices, split wherever `boo` changes value -> runs of equal value
    indices = np.arange(len(boo))
    condition = np.nonzero(boo[1:] != boo[:-1])[0] + 1
    split_indices = np.split(indices, condition)
    # keep only the True runs (their position in the split alternates,
    # starting with the first run iff boo[0] is True)
    true_indices = split_indices[0::2] if boo[0] else split_indices[1::2]
    # one window per True-run, anchored at the run's first frame
    event_indice_pair = [(idx[0]-event_length+1, idx[0]+1) for idx in true_indices]
    return event_indice_pair
```

## Features

```
wt_young_event_data = []
for avatar in avatars['wt']['young']:
    # walking frames: more than 5 cm travelled within a 1 s (20-frame) window
    boo = (avatar.distance['anus'].rolling(20).sum()>5).values # boolean array
    event_indices = get_event_indices(boo, 20)
    # axis/plane-aligned copies of the recording (presumably pose-normalized
    # coordinates -- confirm against the avatarpy documentation)
    avatar_aoa = avatar.transform.align_on_axis()
    avatar_aop = avatar.transform.align_on_plane()
    for i, idx in enumerate(event_indices):
        # 20-frame slices of raw/aligned coordinates and kinematic channels
        raw_coords = avatar.data.loc[avatar.index[idx[0]:idx[1]]]
        aoa_coords = avatar_aoa.data.loc[avatar.index[idx[0]:idx[1]]]
        aop_coords = avatar_aop.data.loc[avatar.index[idx[0]:idx[1]]]
        velocity = avatar.velocity.loc[avatar.index[idx[0]:idx[1]]]
        acceleration = avatar.acceleration.loc[avatar.index[idx[0]:idx[1]]]
        angle = avatar.angle.loc[avatar.index[idx[0]:idx[1]]]
        angle_diff = avatar.angle.diff().loc[avatar.index[idx[0]:idx[1]]]
        vector_length = avatar.vector_length.loc[avatar.index[idx[0]:idx[1]]]

        # strict upper triangle (k=1) of the pairwise correlation matrices,
        # flattened into feature vectors
        acc_corr = acceleration.corr()
        mask = np.triu(np.ones_like(acc_corr, dtype=bool), 1)
        acc_corr = acc_corr.values.flatten()[mask.flatten()]

        ang_corr = angle_diff.corr()
        mask = np.triu(np.ones_like(ang_corr, dtype=bool), 1)
        ang_corr = ang_corr.values.flatten()[mask.flatten()]

        if raw_coords.shape[0]!=20:continue # drop windows clipped at the recording edge
        # elif aoa_coords.shape[0]!=20:continue
        # elif aop_coords.shape[0]!=20:continue
        X1 = raw_coords.values.flatten()
        X2 = aoa_coords.values.flatten()
        X3 = aop_coords.values.flatten()
        X4 = velocity.values.flatten()
        X5 = acceleration.values.flatten()
        X6 = angle.values.flatten()
        X7 = angle_diff.values.flatten()
        X8 = vector_length.values.flatten()
        X9 = acc_corr
        X10 = ang_corr
        # one flat feature vector per walking event
        X = np.concatenate([X1,X2,X3,X4,X5,X6,X7,X8,X9,X10])
        wt_young_event_data.append(X)
wt_young_event_data = np.stack(wt_young_event_data)

# NOTE(review): this cell is a verbatim copy of the young-mice extraction
# loop with 'young' -> 'adult'; consider extracting a shared helper function.
wt_adult_event_data = []
for avatar in avatars['wt']['adult']:
    # walking frames: more than 5 cm travelled within a 1 s (20-frame) window
    boo = (avatar.distance['anus'].rolling(20).sum()>5).values # boolean array
    event_indices = get_event_indices(boo, 20)
    # axis/plane-aligned copies of the recording (presumably pose-normalized
    # coordinates -- confirm against the avatarpy documentation)
    avatar_aoa = avatar.transform.align_on_axis()
    avatar_aop = avatar.transform.align_on_plane()
    for i, idx in enumerate(event_indices):
        # 20-frame slices of raw/aligned coordinates and kinematic channels
        raw_coords = avatar.data.loc[avatar.index[idx[0]:idx[1]]]
        aoa_coords = avatar_aoa.data.loc[avatar.index[idx[0]:idx[1]]]
        aop_coords = avatar_aop.data.loc[avatar.index[idx[0]:idx[1]]]
        velocity = avatar.velocity.loc[avatar.index[idx[0]:idx[1]]]
        acceleration = avatar.acceleration.loc[avatar.index[idx[0]:idx[1]]]
        angle = avatar.angle.loc[avatar.index[idx[0]:idx[1]]]
        angle_diff = avatar.angle.diff().loc[avatar.index[idx[0]:idx[1]]]
        vector_length = avatar.vector_length.loc[avatar.index[idx[0]:idx[1]]]

        # strict upper triangle (k=1) of the pairwise correlation matrices,
        # flattened into feature vectors
        acc_corr = acceleration.corr()
        mask = np.triu(np.ones_like(acc_corr, dtype=bool), 1)
        acc_corr = acc_corr.values.flatten()[mask.flatten()]

        ang_corr = angle_diff.corr()
        mask = np.triu(np.ones_like(ang_corr, dtype=bool), 1)
        ang_corr = ang_corr.values.flatten()[mask.flatten()]

        if raw_coords.shape[0]!=20:continue # drop windows clipped at the recording edge
        # elif aoa_coords.shape[0]!=20:continue
        # elif aop_coords.shape[0]!=20:continue
        X1 = raw_coords.values.flatten()
        X2 = aoa_coords.values.flatten()
        X3 = aop_coords.values.flatten()
        X4 = velocity.values.flatten()
        X5 = acceleration.values.flatten()
        X6 = angle.values.flatten()
        X7 = angle_diff.values.flatten()
        X8 = vector_length.values.flatten()
        X9 = acc_corr
        X10 = ang_corr
        # one flat feature vector per walking event
        X = np.concatenate([X1,X2,X3,X4,X5,X6,X7,X8,X9,X10])
        wt_adult_event_data.append(X)

wt_adult_event_data = np.stack(wt_adult_event_data)
```

total 1857 events acquired from 5 wt young mice with 5 session.

total 2248 events acquired from 5 wt adult mice with 5 session.
```
# stack young + adult event features and standardize per-feature
X = np.concatenate([wt_young_event_data, wt_adult_event_data])
X_ = StandardScaler().fit_transform(X)
# zero-fill NaNs (e.g. from zero-variance channels in corr()/scaling) before PCA
X_[np.isnan(X_)] = 0
pca = PCA(n_components=3)
pc = pca.fit_transform(X_)
df = pd.DataFrame(pc,columns=['PC1','PC2', 'PC3'])
# class 0 = young, class 1 = adult
y = np.concatenate([np.zeros(wt_young_event_data.shape[0]), np.ones(wt_adult_event_data.shape[0])])
lbl = ['young']*wt_young_event_data.shape[0] + ['adult']*wt_adult_event_data.shape[0]
df['class'] = y
# NOTE(review): this column is named 'genotype' but actually holds the AGE
# label ('young'/'adult') -- both groups here are wt. Consider renaming to 'age'.
df['genotype'] = lbl

import plotly.express as px
fig = px.scatter_3d(df, x='PC1', y='PC2', z='PC3', color='genotype', opacity=0.5, range_x=[-50, 50], range_y=[-50, 50], range_z=[-50, 50])
fig.update_traces(marker=dict(size=1))
fig.update_layout(scene_aspectmode='cube')
```
github_jupyter
<a href="https://colab.research.google.com/github/SLCFLAB/Data-Science-Python/blob/main/Day%208/answers/8_2.SVM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # colab에서 열기 ``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline ``` Reference https://datascienceschool.net/ Hands on machine learning ## 1. Support Vector Machine ### 1) Basic SVM ``` from sklearn.datasets import make_blobs X, y = make_blobs(n_samples=50, centers=2, cluster_std=0.5, random_state=4) #center의 개수만큼 정규분포를 만든다 (x->data, y->label(0,1)) y = 2 * y - 1 plt.scatter(X[y == -1, 0], X[y == -1, 1], marker='o', label="-1 Class") plt.scatter(X[y == +1, 0], X[y == +1, 1], marker='x', label="+1 Class") plt.xlabel("x1") plt.ylabel("x2") plt.legend() plt.title("Train") plt.show() from sklearn.svm import SVC model = SVC(kernel='linear', C=1e10).fit(X, y) #x라는 데이터를 가지고 y를 학습 xmin = X[:, 0].min() xmax = X[:, 0].max() ymin = X[:, 1].min() ymax = X[:, 1].max() xx = np.linspace(xmin, xmax, 10) yy = np.linspace(ymin, ymax, 10) X1, X2 = np.meshgrid(xx, yy) Z = np.empty(X1.shape) for (i, j), val in np.ndenumerate(X1): x1 = val x2 = X2[i, j] p = model.decision_function([[x1, x2]]) Z[i, j] = p[0] #지금은 데이터가 두개이기 때문에 decision boundary가 한 개만 존재 levels = [-1, 0, 1] linestyles = ['dashed', 'solid', 'dashed'] plt.scatter(X[y == -1, 0], X[y == -1, 1], marker='o', label="-1 Class") plt.scatter(X[y == +1, 0], X[y == +1, 1], marker='x', label="+1 Class") plt.contour(X1, X2, Z, levels, colors='k', linestyles=linestyles) plt.scatter(model.support_vectors_[:, 0], model.support_vectors_[:, 1], s=300, alpha=0.3) x_new = [10, 2] plt.scatter(x_new[0], x_new[1], marker='^', s=100) plt.text(x_new[0] + 0.03, x_new[1] + 0.08, "Test Data") plt.xlabel("x1") plt.ylabel("x2") plt.legend() plt.title("Test") plt.show() model.coef_.dot(x_new) + model.intercept_ #새로운 점은 -1에 가깝다 ``` ### 2) With Slack ``` np.random.seed(0) X = np.r_[np.random.randn(20, 2) - 
[2, 2], np.random.randn(20, 2) + [2, 2]] Y = [-1] * 20 + [1] * 20 plt.figure(figsize=(12, 4)) plotnum = 1 for name, penalty in (('C=5', 5), ('C=0.1', 0.1), ('C=0.01', 0.01)): #얼마나 트레이닝 에러를 줄이는 데 집중하는가 clf = SVC(kernel='linear', C=penalty).fit(X, Y) xx = np.linspace(-5, 5) x_jin = -5 x_jax = 5 y_jin = -9 y_jax = 9 XX, YY = np.mgrid[x_jin:x_jax:200j, y_jin:y_jax:200j] levels = [-1, 0, 1] linestyles = ['dashed', 'solid', 'dashed'] Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()]) Z = Z.reshape(XX.shape) plt.subplot(1, 3, plotnum) plt.contour(XX, YY, Z, levels, colors='k', linestyles=linestyles) plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=120, linewidth=4) plt.scatter(X[:, 0], X[:, 1], c=Y, s=60, linewidth=1, cmap=plt.cm.Paired) plt.xlim(x_jin, x_jax) plt.ylim(y_jin, y_jax) plt.title(name) plotnum += 1 plt.tight_layout() plt.show() ``` ### 3) With Kernel ``` np.random.seed(0) X_xor = np.random.randn(200, 2) y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0) y_xor = np.where(y_xor, 1, 0) plt.scatter(X_xor[y_xor == 1, 0], X_xor[y_xor == 1, 1], c='b', marker='o', label='Class 1', s=50) plt.scatter(X_xor[y_xor == 0, 0], X_xor[y_xor == 0, 1], c='r', marker='s', label='Class 0', s=50) plt.legend() plt.xlabel("x1") plt.ylabel("x2") plt.title("XOR") plt.show() def plot_xor(X, y, model, title, xmin=-3, xmax=3, ymin=-3, ymax=3): XX, YY = np.meshgrid(np.arange(xmin, xmax, (xmax-xmin)/1000), np.arange(ymin, ymax, (ymax-ymin)/1000)) ZZ = np.reshape(model.predict( np.array([XX.ravel(), YY.ravel()]).T), XX.shape) plt.contourf(XX, YY, ZZ, cmap='Paired_r', alpha=0.5) plt.scatter(X[y == 1, 0], X[y == 1, 1], c='b', marker='o', label='Class 1', s=50) plt.scatter(X[y == 0, 0], X[y == 0, 1], c='r', marker='s', label='Class 0', s=50) plt.xlim(xmin, xmax) plt.ylim(ymin, ymax) plt.title(title) plt.xlabel("x1") plt.ylabel("x2") from sklearn.svm import SVC svc = SVC(kernel="linear").fit(X_xor, y_xor) plot_xor(X_xor, y_xor, svc, "Results") plt.show() from 
sklearn.preprocessing import FunctionTransformer def basis(X): return np.vstack([X[:, 0]**2, np.sqrt(2)*X[:, 0]*X[:, 1], X[:, 1]**2]).T # FunctionTransformer(basis).fit_transform(X) X_xor2 = FunctionTransformer(basis).fit_transform(X_xor) plt.scatter(X_xor2[y_xor == 1, 0], X_xor2[y_xor == 1, 1], c="b", marker='o', s=50) plt.scatter(X_xor2[y_xor == 0, 0], X_xor2[y_xor == 0, 1], c="r", marker='s', s=50) plt.ylim(-6, 6) plt.title("Transformed Data") plt.xlabel(r"$\phi_1$") plt.ylabel(r"$\phi_2$") plt.show() from sklearn.pipeline import Pipeline basismodel = Pipeline([("basis", FunctionTransformer(basis)), ("svc", SVC(kernel="linear"))]).fit(X_xor, y_xor) plot_xor(X_xor, y_xor, basismodel, "Results") plt.show() from sklearn.pipeline import Pipeline kernelmodel = SVC(kernel="rbf").fit(X_xor, y_xor) plot_xor(X_xor, y_xor, kernelmodel, "Results") plt.show() ``` # SVM with real dataset ``` from sklearn import datasets cancer_data = datasets.load_breast_cancer() print(cancer_data.data) cancer_data.feature_names print(cancer_data.data.shape) #target set print(cancer_data.target) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(cancer_data.data, cancer_data.target, test_size=0.4,random_state=109) # To-do # SVM classifier 정의, kernel 설정 # X_train, y_train으로 학습시키고 X_test 예측해서 pred에 저장 # 파라미터 바꿔가며 아래 metrics score로 성능 변화 관찰해보기 from sklearn import svm #create a classifier cls = svm.SVC(kernel="linear") # rbf, poly 등 바꿀 수 있음 #train the model cls.fit(X_train,y_train) #predict the response pred = cls.predict(X_test) from sklearn import metrics #accuracy print("acuracy:", metrics.accuracy_score(y_test,y_pred=pred)) #precision score print("precision:", metrics.precision_score(y_test,y_pred=pred)) #recall score print("recall" , metrics.recall_score(y_test,y_pred=pred)) print(metrics.classification_report(y_test, y_pred=pred)) ```
github_jupyter
Margento: SpaCy & Intro to Gensim ``` import sys sys.path import networkx import gensim import nltk from nltk.corpus import brown ``` SPACY ``` import spacy # Load English tokenizer, tagger, parser, NER, and word vectors nlp = spacy.load('en_core_web_sm') ``` spaCy is able to compare two objects, and make a prediction of how similar they are. Predicting similarity is useful for building recommendation systems or flagging duplicates. For example, you can suggest a user content that's similar to what they're currently looking at, or label a support ticket as a duplicate if it's very similar to an already existing one. Each Doc, Span, and Token comes with a .similarity() method that lets you compare it with another object, and determine the similarity. Of course similarity is always subjective – whether "dog" and "cat" are similar really depends on how you're looking at it. spaCy's similarity model usually assumes a pretty general-purpose definition of similarity. ``` tokens = nlp(u'machine computer human') for token1 in tokens: for token2 in tokens: print(token1.similarity(token2)) tokens = nlp(u'cat dog banana') for token1 in tokens: for token2 in tokens: print(token1.similarity(token2)) ``` LET US COMPARE THIS WITH ANOTHER MODEL ``` nlp2 = spacy.load('en_core_web_lg') tokens = nlp2(u'machine computer human') for token1 in tokens: for token2 in tokens: print(token1.similarity(token2)) ``` the previous model outputted: 1.0 0.6076606 0.20924059 0.6076606 1.0 0.40710005 0.20924059 0.40710005 1.0 Interesting differences, don't you think? What is happening here? "lg" actually stands for large... 
(so it is a larger version of the same model actually) ``` tokens = nlp(u'machine computer husband wife') for token1 in tokens: for token2 in tokens: print(token1.similarity(token2)) tokens2 = nlp2(u'machine computer husband wife') for token1 in tokens2: for token2 in tokens2: print(token1.similarity(token2)) tokens = nlp2(u'cat dog banana') ``` LET US REPEAT THE EVALUATION (the comparison between the three tokens above): ``` #Code here for token1 in tokens: for token2 in tokens: print(token1.similarity(token2)) ``` the output of the previous model (in fact the small version of the same model) was: 1.0 0.4759995 0.2811343 0.4759995 1.0 0.51964444 0.2811343 0.51964444 1.0 SIMILARITIES IN CONTEXT Aside from spaCy's built-in word vectors, which were trained on a lot of text with a wide vocabulary, the parsing, tagging and NER models also rely on vector representations of the meanings of words in context. As the processing pipeline is applied spaCy encodes a document's internal meaning representations as an array of floats, also called a tensor. This allows spaCy to make a reasonable guess at a word's meaning, based on its surrounding words. Even if a word hasn't been seen before, spaCy will know something about it. Because spaCy uses a 4-layer convolutional network, the tensors are sensitive to up to four words on either side of a word. For example, here are three sentences containing the out-of-vocabulary word "labrador" in different contexts. ``` doc1 = nlp(u"The labrador barked.") doc2 = nlp(u"The labrador swam.") doc3 = nlp(u"the labrador people live in canada.") ``` Even though the model has never seen the word "labrador", it can make a fairly accurate prediction of its similarity to "dog" in different contexts. 
``` for doc in [doc1, doc2, doc3]: labrador = doc[1] dog = nlp(u"dog") print(labrador.similarity(dog)) nlp2 = spacy.load('en_core_web_lg') doc1 = nlp2(u"The labrador barked.") doc2 = nlp2(u"The labrador swam.") doc3 = nlp2(u"the labrador people live in canada.") for doc in [doc1, doc2, doc3]: labrador = doc[1] dog = nlp2(u"dog") print(labrador.similarity(dog)) ``` Let us consider an even more extraneous word: ``` doc1 = nlp(u"The vaca is a cow.") doc2 = nlp(u"The vaca is a rocket flying in the sky.") doc3 = nlp(u"The vaca teaches digital humanities.") for doc in [doc1, doc2, doc3]: vaca = doc[1] cow = nlp(u"cow") print(vaca.similarity(cow)) doc1 = nlp2(u"The vaca is a Romanian cow.") doc2 = nlp2(u"The vaca is a rocket flying in the sky.") doc3 = nlp2(u"The vaca teaches digital humanities.") for doc in [doc1, doc2, doc3]: vaca = doc[1] cow = nlp2(u"cow") print(vaca.similarity(cow)) ``` EXERCISE Please give an example of your own and let us see what happens ;) :) ``` #Example and code here ``` We can also use SpaCy to compare documents. ``` doc1 = nlp(u"Paris is the largest city in France.") doc2 = nlp(u"Bucharest is known as the Paris of the East.") doc3 = nlp(u"It is raining cats and dogs.") for doc in [doc1, doc2, doc3]: for other_doc in [doc1, doc2, doc3]: print(doc.similarity(other_doc)) doca = nlp2(u"Paris is the largest city in France.") docb = nlp2(u"Bucharest is known as the Paris of the East.") docc = nlp2(u"It is raining cats and dogs.") for doc in [doca, docb, docc]: for other_doc in [doca, docb, docc]: print(doc.similarity(other_doc)) ``` Let us run this on the documents we compared earlier with Sklearn! 
``` doc0 = nlp(u"I am interested in NLP and would like to learn more about vectors and vectorizers for language processing.") doc1 = nlp(u"For those who like to experiment with vectors in dealing with various data, it might be useful to work with NLP vectorizers.") doc2 = nlp(u"A significant computational experiment involving language will have to involve vectors, so you will want to look into vectorizers.") doc3 = nlp(u"Even if you are not interested in NLP, you can use vectors to organize your data.") for doc in [doc0, doc1, doc2, doc3]: for other_doc in [doc0, doc1, doc2, doc3]: print(doc.similarity(other_doc)) doca = nlp2(u"I am interested in NLP and would like to learn more about vectors and vectorizers for language processing.") docb = nlp2(u"For those who like to experiment with vectors in dealing with various data, it might be useful to work with NLP vectorizers.") docc = nlp2(u"A significant computational experiment involving language will have to involve vectors, so you will want to look into vectorizers.") docd = nlp2(u"Even if you are not interested in NLP, you can use vectors to organize your data.") for doc in [doca, docb, docc, docd]: for other_doc in [doca, docb, docc, docd]: print(doc.similarity(other_doc)) ``` Do we have the means in scikitlearn to compare documents that directly? Let us go back and have a look. The similarity matrix in the NLP--Intro notebook: [[(1. ,) (0.23144825,) (0.17254319,) (0.19211725,)] [(0.23144825,) (1. ,) (0.11253336,) (0.298293 ,)] [(0.17254319,) (0.11253336,) (1. ,) (0.02081005,)] [(0.19211725,) (0.298293 ,) (0.02081005,) (1. ,)]] ``` s1 = "I am interested in NLP and would like to learn more about vectors and vectorizers for language processing." s2 = "For those who like to experiment with vectors in dealing with various data, it might be useful to work with NLP vectorizers." s3 = "A significant computational experiment involving language will have to involve vectors, so you will want to look into vectorizers." 
s4 = "Even if you are not interested in NLP, you can use vectors to organize your data." texts = ["I am interested in NLP and would like to learn more about vectors and vectorizers for language processing.", "For those who like to experiment with vectors in dealing with various data, it might be useful to work with NLP vectorizers.", "A significant computational experiment involving language will have to involve vectors, so you will want to look into vectorizers.", "Even if you are not interested in NLP, you can use vectors to organize your data."] from gensim import corpora from gensim.corpora import Dictionary from spacy.lang.en import English parser = English() def tokenize(text): lda_tokens = [] tokens = parser(text) for token in tokens: if token.orth_.isspace(): continue #elif token.like_url: #lda_tokens.append('URL') #elif token.orth_.startswith('@'): #lda_tokens.append('SCREEN_NAME') else: lda_tokens.append(token.lower_) return lda_tokens import nltk nltk.download('wordnet') from nltk.corpus import wordnet as wn def get_lemma(word): lemma = wn.morphy(word) if lemma is None: return word else: return lemma from nltk.stem.wordnet import WordNetLemmatizer def get_lemma2(word): return WordNetLemmatizer().lemmatize(word) nltk.download('stopwords') en_stop = set(nltk.corpus.stopwords.words('english')) def prepare_text_for_lda(text): tokens = tokenize(text) tokens = [token for token in tokens if len(token) > 1] tokens = [token for token in tokens if token not in en_stop] tokens = [get_lemma(token) for token in tokens] return tokens text_data = [] for f in texts: tokens = prepare_text_for_lda(f) text_data.append(tokens) print(text_data) from gensim import corpora from gensim.corpora import Dictionary dictionary = corpora.Dictionary(text_data) corpus = [dictionary.doc2bow(text) for text in text_data] import pickle # Pickle in Python is primarily used in serializing and deserializing a Python object structure. 
In other words, it's the process of converting a Python object into a byte stream to store it in a file/database, maintain program state across sessions, or transport data over the network. pickle.dump(corpus, open('corpus.pkl', 'wb')) # Returns the reconstructed Python object from the pickle source. The pickle source could be a file or a memory buffer. The dump() method of the pickle module in Python, converts a Python object hierarchy into a byte stream. This process is also called as serialization. # The converted byte stream can be written to a buffer or to a disk file. The byte stream of a pickled Python object can converted back to a Python object using the pickle.load() method. dictionary.save('dictionary.gensim') #import gensim ldamodel = gensim.models.ldamodel.LdaModel(corpus, id2word=dictionary) from gensim import similarities index = similarities.MatrixSimilarity(ldamodel[corpus]) print(index) index.save("simIndex.index") s2 = text_data[1] vec_bow = dictionary.doc2bow(s2) vec_lda = ldamodel[vec_bow] sims = index[vec_lda] sims = sorted(enumerate(sims), key=lambda item: -item[1]) print(sims) import os from os import listdir from os.path import isfile, join stopwords = nltk.corpus.stopwords.words('stop_words_poetry.txt') stopwords.append('...') stopwords.append("'d") stopwords.append('...') stopwords.append("&") stopwords.append("upon") stopwords.append("also") stopwords.append("hath") stopwords.append("must") stopwords.append("therefore") stopwords.append("doth") stopwords.append("could") stopwords.append("would") #stopwords.append("another") stopwords.append("much") #stopwords.append("give") stopwords.append("like") stopwords.append("since") #stopwords.append("many") #stopwords.append("without") #stopwords.append("first") stopwords.append("though") #stopwords.append("well") #stopwords.append("often") #stopwords.append("great") stopwords.append("either") #stopwords.append("even") stopwords.append("shall") #stopwords.append("they") stopwords.append("what") 
stopwords.append("their") #stopwords.append("more") #stopwords.append("there") #stopwords.append("your") #stopwords.append("them") stopwords.append("’") stopwords.append("“") stopwords.append("2") stopwords.append("3") stopwords.append("”") stopwords.extend(['a', 'like', 'you', 'they', 'he', 'be', 'it', 'your', 'her', 'of', 'more', 'there', 'no', 'not', '’', 'what', 'my', 'his', 'she', 'to', 'our', 'me', 'we', 'in', 'can', 'us', 'an', 'if', 'do', 'this', '”', 'because', 'who', 'hand', 'but', 'him']) def _pre_clean(list_of_text): ''' preliminary cleaning of the text - remove new line character i.e. \n or \r - remove tabs i.e. \t - remove extra spaces ''' cleaned_list = [] for text in list_of_text: # print("original:", text) text = text.replace('\\n', ' ') text = text.replace('\\r', ' ') text = text.replace('\\t', ' ') pattern = re.compile(r'\s+') text = re.sub(pattern, ' ', text) text = text.strip() # check for empty strings if text != '' and text is not None: cleaned_list.append(text) return cleaned_list from nltk.tokenize import sent_tokenize, word_tokenize def tokenize(text): tokens = word_tokenize(text) tokens = _pre_clean(tokens) tokens = [token for token in tokens if len(token) > 2] tokens = [token for token in tokens if token not in stopwords] #tokens = [get_lemma(token) for token in tokens] return tokens import re HOME = os.getcwd() TEXTS_DIR = HOME + "/US_Poets_Anthology2/" #TEXTS_DIR = HOME filelabels = {} texts_data = [] files = [f for f in os.listdir(TEXTS_DIR) if os.path.isfile(os.path.join(TEXTS_DIR, f))] import string from string import punctuation remove_punct_map = dict.fromkeys(map(ord, string.punctuation)) tokens_total = [] count = -1 os.chdir(TEXTS_DIR) for f in files: #os.chdir(TEXTS_DIR) with open(f, "r", encoding='utf-8', errors = 'ignore') as openf: tokens = [] count = count + 1 filelabels[count] = os.path.basename(openf.name) for line in openf: sent_text = nltk.sent_tokenize(line) for sentence in sent_text: tokens1 = tokenize(sentence) 
tokens1 = [item.translate(remove_punct_map) for item in tokens1] #filter_object = filter(lambda x: x != "", tokens1) tokens1 = [x for x in tokens1 if x!= ""] tokens1 = [x.lower() for x in tokens1] for token in tokens1: tokens.append(token) tokens_total.append(token) #if random.random() > .99: #print(tokens) #print(tokens_total) texts_data.append(tokens) print(filelabels) dictionary = corpora.Dictionary(texts_data) corpus = [dictionary.doc2bow(text) for text in texts_data] import pickle pickle.dump(corpus, open('corpus.pkl', 'wb')) dictionary.save('dictionary.gensim') ldamodel = gensim.models.ldamodel.LdaModel(corpus, id2word=dictionary) from gensim import similarities index = similarities.MatrixSimilarity(ldamodel[corpus]) index.save("simIndex.index") texts_data[0] p0 = texts_data[0] vec_bow = dictionary.doc2bow(p0) vec_lda = ldamodel[vec_bow] sims = index[vec_lda] sims = sorted(enumerate(sims), key=lambda item: -item[1]) print(sims) texts_data[33] p33 = texts_data[33] vec_bow = dictionary.doc2bow(p33) vec_lda = ldamodel[vec_bow] sims = index[vec_lda] sims = sorted(enumerate(sims), key=lambda item: -item[1]) print(sims) p_stranger = text_data[0] vec_bow = dictionary.doc2bow(p_stranger) vec_lda = ldamodel[vec_bow] sims = index[vec_lda] sims = sorted(enumerate(sims), key=lambda item: -item[1]) print(sims) ``` References Mikolov, Tomas, et al. 2013. "Distributed Representations of Words and Phrases and their Compositionality" Le, Quoc and Mikolov, Tomas. 2014. "Distributed Representations of Sentences and Documents SpaCy. https://spacy.io/ TensorFlow, "Vector Representations of Words" Brownlee, Jason. 2017. Machine Learning Mastery
github_jupyter
<a href="https://colab.research.google.com/github/PyTorchLightning/lightning-flash/blob/master/flash_notebooks/tabular_classification.ipynb" target="_parent"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a>

In this notebook, we'll go over the basics of lightning Flash by training a TabularClassifier on [Titanic Dataset](https://www.kaggle.com/c/titanic).

---
- Give us a ⭐ [on Github](https://www.github.com/PytorchLightning/pytorch-lightning/)
- Check out [Flash documentation](https://lightning-flash.readthedocs.io/en/latest/)
- Check out [Lightning documentation](https://pytorch-lightning.readthedocs.io/en/latest/)
- Join us [on Slack](https://join.slack.com/t/pytorch-lightning/shared_invite/zt-f6bl2l0l-JYMK3tbAgAmGRrlNr00f1A)

# Training

```
# %%capture
! pip install git+https://github.com/PyTorchLightning/pytorch-flash.git

from torchmetrics.classification import Accuracy, Precision, Recall

import flash
from flash.data.utils import download_data
from flash.tabular import TabularClassifier, TabularData
```

### 1. Download the data
The data are downloaded from a URL, and saved in a 'data' directory.

```
download_data("https://pl-flash-data.s3.amazonaws.com/titanic.zip", 'data/')
```

### 2. Load the data

Flash Tasks have built-in DataModules that you can use to organize your data. Pass in train, validation and test folders and Flash will take care of the rest.

Creating a TabularData object relies on [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html).

```
datamodule = TabularData.from_csv(
    train_csv="./data/titanic/titanic.csv",
    test_csv="./data/titanic/test.csv",
    categorical_cols=["Sex", "Age", "SibSp", "Parch", "Ticket", "Cabin", "Embarked"],
    numerical_cols=["Fare"],
    target_col="Survived",
    val_size=0.25,
)
```

### 3. Build the model

Note: Categorical columns will be mapped to the embedding space.
The embedding space is a set of trainable tensors associated with each categorical column.

```
model = TabularClassifier.from_data(datamodule, metrics=[Accuracy(), Precision(), Recall()])
```

### 4. Create the trainer. Run 10 epochs over the data

```
trainer = flash.Trainer(max_epochs=10)
```

### 5. Train the model

```
trainer.fit(model, datamodule=datamodule)
```

### 6. Test model

```
trainer.test()
```

### 7. Save it!

```
trainer.save_checkpoint("tabular_classification_model.pt")
```

# Predicting

### 8. Load the model from a checkpoint

`TabularClassifier.load_from_checkpoint` supports both a URL and a local path to a checkpoint. If provided with a URL, the checkpoint will first be downloaded and loaded to re-create the model.

```
model = TabularClassifier.load_from_checkpoint(
    "https://flash-weights.s3.amazonaws.com/tabular_classification_model.pt")
```

### 9. Generate predictions from a sheet file! Who would survive?

`TabularClassifier.predict` supports both a DataFrame and a path to a `.csv` file.

```
predictions = model.predict("data/titanic/titanic.csv")
print(predictions)
```

<code style="color:#792ee5;">
    <h1> <strong> Congratulations - Time to Join the Community! </strong>  </h1>
</code>

Congratulations on completing this notebook tutorial! If you enjoyed it and would like to join the Lightning movement, you can do so in the following ways!

### Help us build Flash by adding support for new data-types and new tasks.
Flash aims at becoming the first task hub, so anyone can get started creating amazing applications using deep learning. If you are interested, please open a PR with your contributions !!!

### Star [Lightning](https://github.com/PyTorchLightning/pytorch-lightning) on GitHub
The easiest way to help our community is just by starring the GitHub repos! This helps raise awareness of the cool tools we're building.
* Please, star [Lightning](https://github.com/PyTorchLightning/pytorch-lightning) ### Join our [Slack](https://join.slack.com/t/pytorch-lightning/shared_invite/zt-f6bl2l0l-JYMK3tbAgAmGRrlNr00f1A)! The best way to keep up to date on the latest advancements is to join our community! Make sure to introduce yourself and share your interests in `#general` channel ### Interested by SOTA AI models ! Check out [Bolt](https://github.com/PyTorchLightning/lightning-bolts) Bolts has a collection of state-of-the-art models, all implemented in [Lightning](https://github.com/PyTorchLightning/pytorch-lightning) and can be easily integrated within your own projects. * Please, star [Bolt](https://github.com/PyTorchLightning/lightning-bolts) ### Contributions ! The best way to contribute to our community is to become a code contributor! At any time you can go to [Lightning](https://github.com/PyTorchLightning/pytorch-lightning) or [Bolt](https://github.com/PyTorchLightning/lightning-bolts) GitHub Issues page and filter for "good first issue". * [Lightning good first issue](https://github.com/PyTorchLightning/pytorch-lightning/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) * [Bolt good first issue](https://github.com/PyTorchLightning/lightning-bolts/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) * You can also contribute your own notebooks with useful examples ! ### Great thanks from the entire Pytorch Lightning Team for your interest ! <img src="https://raw.githubusercontent.com/PyTorchLightning/lightning-flash/18c591747e40a0ad862d4f82943d209b8cc25358/docs/source/_static/images/logo.svg" width="800" height="200" />
github_jupyter
# Example notebook 1 ## Build, predict and evaluate a CP model for one Tox21 endpoint This notebook is part of the supporting information of the manuscript entitled *Assessing the Calibration in Toxicological in Vitro Models with Conformal Prediction*. * A. Morger, F. Svensson, S. Arvidsson McShane, N. Gauraha, U. Norinder, O. Spjuth and A. Volkamer. * Last updated: January 2021 ## Content This notebooks demonstrates the main workflow to obtain the results for the manuscript on *Assessing the Calibration in Toxicological in Vitro Models with Conformal Prediction* for an example endpoint. * It can be used to train aggregated conformal predictors (ACP) on the individual Tox21Train datasets. * The predictions of Tox21Score can be compared in different experiments (as introduced in the manuscript) * With and without updated calibration sets * With updating the complete training set. * The notebook may be adapted to use the code for different datasets if a different endpoint is selected in `input cell 7`. For a general introduction on conformal prediction (CP) and calibration plots, we refer to the manuscript. ### Table of contents 1. [Preparation](#preparation) <br> 1.1. [Import libraries and modules](#import-libraries-and-modules)<br> 1.2. [Define paths and parameters](#define-paths-parameters)<br> 2. [Conformal prediction experiments](#cp-experiments)<br> 2.1. [Load datasets](#load-data)<br> 2.2. [Using different calibration sets](#different-cal-sets)<br> 2.3. [Using updated training set](#updated-train-set)<br> 3. [Evaluate conformal predictions](#evaluate-cp)<br> 3.1. [Calibration and efficiency plots](#cal-plots)<br> 3.2. [Root-mean-square deviations from the diagonal (RMSD)](#rmsd)<br> ## 1. Preparation <a id='preparation'></a> ### 1.1. 
Import libraries and modules <a id='import-libraries-and-modules'></a>

```
import os
import numpy as np
import json
from pathlib import Path
import pandas as pd
from sklearn.svm import SVC
from nonconformist.nc import NcFactory, MarginErrFunc
import matplotlib.pyplot as plt
from cptox21 import (
    define_path,
    load_signatures_files,
    StratifiedRatioSampler,CrossValidationSampler, KnownIndicesSampler,
    InductiveConformalPredictor, AggregatedConformalPredictor, CPTox21AggregatedConformalPredictor,
    CPTox21CrossValidator, CPTox21TrainUpdateCrossValidator,
    calculate_rmsd_from_df
)
import warnings
warnings.simplefilter("ignore")
```

### 1.2. Define paths and parameters <a id='define-paths-parameters'></a>

```
HERE = Path(_dh[-1])
DATA = HERE / "../data"
data_signatures_path = DATA / "data_signatures/"
```

Note: By default, the nuclear receptor estrogen receptor (`NR_ER`) is chosen as the endpoint. The `endpoint` parameter may be changed to any of the other Tox21 endpoints:

* Stress response (SR) pathways: SR_ATAD5, SR_HSE, SR_MMP, SR_p53, SR_ARE
* Nuclear receptors (NR): NR_ER, NR_AR, NR_Aromatase, NR_AR_LBD, NR_AhR, NR_ER_LBD, NR_PPAR_gamma

```
# Example endpoint "estrogen receptor" (a nuclear receptor).
endpoint = "NR_ER"
# Other endpoints
# 'SR_ATAD5', 'SR_HSE', 'SR_MMP', 'SR_p53', 'SR_ARE'
# 'NR_ER', 'NR_AR', 'NR_Aromatase', 'NR_AR_LBD', 'NR_AhR', 'NR_ER_LBD', 'NR_PPAR_gamma'

# Number of loops used in ACP
n_folds_acp = 10

# Set the random seed for deterministic results
set_random_state = 42
```

## 2. Conformal prediction experiments <a id='cp-experiments'></a>

Note that for programmatic reasons, the conformal prediction experiments are calculated in a different order, not following the story in the manuscript. However, in Section [*3. Evaluate conformal predictions*](#evaluate-cp), the order from the manuscript is preserved to explain the results.
To perform the conformal prediction experiments, we need an `AggregatedConformalPredictor` object, which consolidates all the information needed to train an aggregated conformal predictor with a given number of loops (`n_folds_acp`) and a `CrossValidator` object to handle the splits and folds of a cross-validation. The two classes are adapted for the different parts (update calibration set and update training set) as shown below. ### 2.1. Load datasets <a id='load-data'></a> Per endpoint, three datasets are loaded (Tox21Train, Tox21Test, Tox21Score, see manuscript for more explanation) which consist of the precalculated signature descriptors and the activity labels for all compounds from the chosen endpoint. ``` dataset_names = ["train", "test", "score"] # Tox21 dataset_names train_path = os.path.join( data_signatures_path, f"data_signatures_{endpoint}_{dataset_names[0]}.csr" ) test_path = os.path.join( data_signatures_path, f"data_signatures_{endpoint}_{dataset_names[1]}.csr" ) score_path = os.path.join( data_signatures_path, f"data_signatures_{endpoint}_{dataset_names[2]}.csr" ) X_train, y_train, X_test, y_test, X_score, y_score = load_signatures_files(train_path, test_path, score_path) # FOR TEST ROUNDS # Leave below code as comment for final run # Or: Uncomment to shorten calculation time for test runs. # X_train = X_train[:500] # y_train = y_train[:500] ``` #### Compare size of datasets To interpret and assess the results, it might be useful to know, how many data points we actually have per dataset, and how balanced they are. ``` datasets_size_dict = {"name": dataset_names, "actives": [], "inactives": []} for dataset, labels in zip(dataset_names, [y_train, y_test, y_score]): datasets_size_dict["actives"].append(len(labels[labels==1])) datasets_size_dict["inactives"].append(len(labels[labels==0])) pd.DataFrame.from_dict(datasets_size_dict) ``` ### 2.2. 
Using different calibration sets <a id='different-cal-sets'></a> With the `CPTox21AggregatedConformalPredictor` class, an aggregated conformal predictor can be trained and calibrated. Here, we chose three different calibration sets: * The original calibration set split from Tox21train * The more recent update set Tox21test * The random stratified split (implying the same distribution as the prediction set) from Tox21score. #### Initialise Inductive Conformal Prediction classifier (ICP) ``` clf = SVC( kernel ='rbf', C = 50, gamma = 0.002, probability = True, random_state = set_random_state ) # ML classifier error_function = MarginErrFunc() # Error function normaliser_model = None # Normaliser model nc = NcFactory.create_nc(clf, err_func = error_function) # Nonconformity scorer icp = InductiveConformalPredictor( nc_function = nc, condition = (lambda instance: instance[1]) ) # Mondrian as (default) condition) ``` #### Initialise Aggregated Conformal Predictor (ACP) for main framework (using different calibration sets) ``` acp = CPTox21AggregatedConformalPredictor( predictor = icp, sampler = StratifiedRatioSampler(n_folds = n_folds_acp, random_state = set_random_state), aggregation_func = np.median ) ``` #### Define crossvalidator, with which a crossvalidation can be performed for all ACP experiments except for train_update ``` cross_validator = CPTox21CrossValidator( acp, cv_splitter = CrossValidationSampler(random_state = set_random_state), score_splitter=StratifiedRatioSampler(test_ratio = 0.5, random_state = set_random_state) ) ``` #### Crossvalidate ``` cross_validation_dfs = cross_validator.cross_validate( steps = 10, # Number of steps (significance level) for evaluating conformal predictions endpoint = endpoint, X_train = X_train, y_train = y_train, X_update = X_test, y_update = y_test, X_score = X_score, y_score = y_score, ) ``` ## 2.3. 
Using updated training set <a id='updated-train-set'></a> With the updated training set, new machine learning models are trained. This can be done with the `train_update_acp` class. The cross-validation splits from the [2.2. Using different calibration sets](#different-cal-sets) part were saved and can be accessed and used here. #### Get splits from crossvalidator, so that exactly the same splits can be used for train_update ``` train_index, test_index = cross_validator.train_indices, cross_validator.test_indices known_indices_sampler = KnownIndicesSampler(known_train = train_index, known_test = test_index) ``` #### Define ACP for train_update experiment Difference to CPTox21AggregatedConformalPredictor: As we do not further update the calibration set for this experiment, we use the `train_update_acp` class. ``` train_update_acp = AggregatedConformalPredictor( predictor = icp, sampler = StratifiedRatioSampler(n_folds = n_folds_acp, random_state = set_random_state), aggregation_func = np.median ) ``` #### Define train_update crossvalidator using same splits as before and crossvalidate Note: Original train and test set are internally concatenated to the new training set, which is then used for training a new model. ``` train_update_cross_validator = CPTox21TrainUpdateCrossValidator( train_update_acp, cv_splitter = known_indices_sampler ) train_update_cross_validation_dfs = train_update_cross_validator.cross_validate( steps = 10, # Number of steps (significance level) for evaluating conformal predictions endpoint = endpoint, X_train = X_train, y_train = y_train, X_update = X_test, y_update = y_test, X_score = X_score, y_score = y_score, ) ``` ## 3. Evaluate conformal predictions <a id='evaluate-cp'></a> ### 3.1. Calibration and efficiency plots <a id='cal-plots'></a> `Calibration and Efficiency Plots` (CEPs). * Calibration plot: Observed error rate for a batch of predictions is plotted against the expected error rate. 
* Efficiency: For complementary tracing of the informational efficiency, we also plot the efficiency, defined as the ratio of single class predictions. * The firm lines in the plot are the mean values of a five-fold cross-validation, the shaded areas represent the standard deviation. CEPs are useful to assess the calibration of conformal prediction models and to analyse the impact of model update strategies on the calibration (see below). ``` plots = {} ``` #### Cross-validation As proof-of-concept, let's look at the CEP of the cross-validation. Given the random stratified splitting and sufficient data, the error rate should follow the diagonal line. ##### Experiment **1_internal_CV** ``` plots["cv"] = cross_validator.calibration_plot( averaged_evaluation_df=cross_validator.averaged_evaluation_df_cv, endpoint=endpoint, title_name="cross-validation with original calibration set" ) ``` #### Predict score and test set using the ACP with the 'original' calibration set If the training set and the predicted data originate from the same distribution, and if they are available in sufficient amount, the error rates should follow the diagonal line. Any deviations suggest deviations from the CP exchangeability assumption (or a too small data size). ##### Experiment *2_pred_score* ``` plots["pred_score"] = cross_validator.calibration_plot( averaged_evaluation_df = cross_validator.averaged_evaluation_df_pred_score, endpoint = endpoint, title_name = "predict score set (original calibration set)" ) ``` ##### Experiment SI *pred_test* ``` plots["pred_test"] = cross_validator.calibration_plot( averaged_evaluation_df = cross_validator.averaged_evaluation_df_pred_test, endpoint = endpoint, title_name = "predict test set (original calibration set)" ) ``` #### Update (augment) the training set with more recent data Probably the most intuitive way to improve the validity of the predictions will be to update the training set with more recent data. 
If "old" and "new" data are available, they can be combined to train a more up-to-date model. The following plots show the cross-validation of this model as well as the prediction of score data. If you want to see the CV of the train update experiment change the next cell to a code cell ##### Experiment *4_train_update* ``` plots["train_update_pred_score"] = train_update_cross_validator.calibration_plot( averaged_evaluation_df = train_update_cross_validator.averaged_evaluation_df_pred_score, endpoint = endpoint, title_name = "predict score set (updated training set)" ) ``` For the prediction of the score data, we, usually (for most endpoints), do not see a big difference to the above CEP, since the number of recent compounds is almost negligible compared to the number of original training compounds. On the other hand, the more recent data would not be enough for training a model on them alone. #### Update (exchange) the calibration set with a more recent dataset A proposed strategy to improve the calibration of the model, is to update the calibration set with more recent data. Based on the chronological release of the Tox21 datasets, we assume that Tox21test is more similar to Tox21score than Tox21train. Thus, Tox21test is used to update the calibration set. ##### Experiment *5_cal_update* ``` plots["cal_update"] = cross_validator.calibration_plot( averaged_evaluation_df = cross_validator.averaged_evaluation_df_cal_update, endpoint = endpoint, title_name = "predict score set (updated calibration set)" ) ``` #### Update (exchange) the calibration set with data, which are inherently exchangeable With the above updating experiment, we made an assumption about the similarity of the datasets. This assumption may not hold for all datasets. Thus, a more suitable experiment would be to update the calibration set with one part (50%) of the Tox21score set and to predict the other part of the Tox21score set. 
This gives us the certainty that the calibration and the prediction set stem from the same distribution. Note, that this might be more suitable as a proof of concept, on the other hand it does not represent a real-life scenario. Furthermore, we might see a larger standard deviation in the error rate, as we predict fewer data points. ##### Experiment *6_cal_update2* ``` plots["cal_update2"] = cross_validator.calibration_plot( averaged_evaluation_df = cross_validator.averaged_evaluation_df_cal_update2, endpoint = endpoint, title_name = "predict part of score set (updated calibration set \n with (other) part of score set)" ) ``` ### 3.2. Root-mean-square deviations from the diagonal (RMSD) <a id='rmsd'></a> To have a value to compare the calibration plots over all experiments, we calculate the RMSD of the observed error rate to the expected error rate (for 10 significance levels). #### Collect data (evaluation dfs) from the cross-validators To appear in the same order as in the manuscript ``` evaluation_dfs = {} # Experiment 1 evaluation_dfs["1_internal_cv"] = getattr( cross_validator, "averaged_evaluation_df_cv" ) # Experiment 2 evaluation_dfs["2_pred_score"] = getattr( cross_validator, "averaged_evaluation_df_pred_score" ) # Experiment 4 evaluation_dfs["4_train_update"] = getattr( train_update_cross_validator, "averaged_evaluation_df_pred_score" ) # Experiment 5 evaluation_dfs["5_cal_update"] = getattr( cross_validator, "averaged_evaluation_df_cal_update" ) # Experiment 6 evaluation_dfs["6_cal_update2"] = getattr( cross_validator, "averaged_evaluation_df_cal_update2" ) ``` #### Calculate RMSD's Root-mean-square deviations of the error rates from the diagonal. ``` rmsds = {} for k, v in evaluation_dfs.items(): rmsd = calculate_rmsd_from_df(v) rmsds[k] = rmsd rmsds ``` #### Scatter plot To visualise the RMSDs over all strategies, a scatter plot can be used. 
``` def plot_rmsds(rmsds, strategies, endpoint = endpoint): plt.clf() fig = plt.figure(figsize=(10,5)) plt.scatter(strategies, [rmsds[s] for s in strategies], marker = '_', s = 500) plt.xticks(rotation = 'vertical') plt.title(f"RMSDs for different CP set-ups - {endpoint} endpoint") strategies = rmsds.keys() plot_rmsds(rmsds, strategies) ``` Note: The SCP model and evaluation experiment (*3_pred_score_SCP*) is not part of this notebook.
github_jupyter
# Big Earth Net Preprocessing
## Irrigation Capstone Fall 2020
### TP Goter

This notebook is used to preprocess the GeoTiff files that contain the Sentinel-2 MSI data comprising the BigEarthNet dataset into dataframes. We originally were using tfrecords, but after creating balanced datasets, we have little enough data to make dataframe storage a reasonable solution. We will use the same standardization routine as used by the root Big Earth Net data, but we will package the standardized/scaled data into a single dataframe with binary labels. It is based on the preprocessing scripts from the BigEarthNet repo, but has been updated to work in Colaboratory with Python3.7+ and TensorFlow 2.3. This version of the preprocessor is for specifically isolating the irrigated and non-irrigated examples.

```
import pandas as pd
import tensorflow as tf
from glob import glob
import os
#from matplotlib import pyplot as plt
#%matplotlib inline
import numpy as np
from tqdm import tqdm
#from google.colab import drive
#import seaborn as sns
#from matplotlib.cm import get_cmap
#import folium
#import gdal
import rasterio
import csv
import json
from PIL import Image
import cv2

print(pd.__version__)
print(tf.__version__)
```

## Mount Google Drive and Set Paths

```
#from google.colab import drive
#drive.mount('/content/gdrive')
#base_path = '/content/gdrive/My Drive/Capstone Project'
big_earth_path ='./BigEarthNet-v1.0/'
```

## Convert data to dataframes instead of TFRecords
We already have our splits in csv files in the bigearthnet-models/splits folders. So we just need to read in these files, and concatenate them into one list. We can then convert that to a labeled dataframe.
``` FILE = 'balanced_train_4' filenames_tif = list(pd.read_csv(f'./bigearthnet-models/splits/{FILE}.csv')['file']) filenames_tif = [f'{file}/{file.split("/")[-1]}' for file in filenames_tif] filenames_tif[:10] os.listdir('/'.join(filenames_tif[0].split('/')[:-1])) BAND_NAMES = ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09', 'B11', 'B12'] BAND_STATS = { 'mean': { 'B01': 340.76769064, 'B02': 429.9430203, 'B03': 614.21682446, 'B04': 590.23569706, 'B05': 950.68368468, 'B06': 1792.46290469, 'B07': 2075.46795189, 'B08': 2218.94553375, 'B8A': 2266.46036911, 'B09': 2246.0605464, 'B11': 1594.42694882, 'B12': 1009.32729131 }, 'std': { 'B01': 554.81258967, 'B02': 572.41639287, 'B03': 582.87945694, 'B04': 675.88746967, 'B05': 729.89827633, 'B06': 1096.01480586, 'B07': 1273.45393088, 'B08': 1365.45589904, 'B8A': 1356.13789355, 'B09': 1302.3292881, 'B11': 1079.19066363, 'B12': 818.86747235 } } # Use this one-liner to standardize each feature prior to reshaping. def standardize_feature(data, band_name): return ((tf.dtypes.cast(data, tf.float32) - BAND_STATS['mean'][band_name]) / BAND_STATS['std'][band_name]) data = [] std_data = [] for file in tqdm(filenames_tif): bands = {} std_bands = {} for band_name in BAND_NAMES: # First finds related GeoTIFF path and reads values as an array band_path = f'{file}_{band_name}.tif' band_ds = rasterio.open(band_path) bands[band_name] = np.array(band_ds.read(1)) std_bands[band_name] = standardize_feature(np.array(band_ds.read(1)), band_name) # print(f'B01 Shape: {bands["B01"].shape}') # print(f'B02 Shape: {bands["B02"].shape}') # print(f'B03 Shape: {bands["B03"].shape}') # print(f'B04 Shape: {bands["B04"].shape}') # print(f'B05 Shape: {bands["B05"].shape}') # print(f'B06 Shape: {bands["B06"].shape}') # print(f'B07 Shape: {bands["B07"].shape}') # print(f'B08 Shape: {bands["B08"].shape}') # print(f'B8A Shape: {bands["B8A"].shape}') # print(f'B09 Shape: {bands["B09"].shape}') # print(f'B11 Shape: {bands["B11"].shape}') # 
print(f'B12 Shape: {bands["B12"].shape}') # bands_10m = np.stack([bands['B04'], # bands['B03'], # bands['B02'], # bands['B08']], axis=2) # bands_20m = np.stack([bands['B05'], # bands['B06'], # bands['B07'], # bands['B8A'], # bands['B11'], # bands['B12']], axis=2) std_bands_10m = np.stack([std_bands['B04'], std_bands['B03'], std_bands['B02'], std_bands['B08']], axis=2) std_bands_20m = np.stack([std_bands['B05'], std_bands['B06'], std_bands['B07'], std_bands['B8A'], std_bands['B11'], std_bands['B12']], axis=2) # msi_bands = np.concatenate([bands_10m, # cv2.resize(bands_20m, dsize=(120, 120), interpolation=cv2.INTER_CUBIC)],axis=2) msi_std_bands = np.concatenate([std_bands_10m, cv2.resize(std_bands_20m, dsize=(120, 120), interpolation=cv2.INTER_CUBIC)],axis=2) # break file_json_path = f'{file}_labels_metadata.json' with open(file_json_path, 'rb') as f: patch_json = json.load(f) if 'Permanently irrigated land' in patch_json['labels']: label = np.array(1) else: label = np.array(0) # data.append((msi_bands, labels)) std_data.append((msi_std_bands, label)) # df = pd.DataFrame(data, columns=['X', 'y']) # del data std_df = pd.DataFrame(std_data, columns=['X', 'y']) del std_data std_df.to_pickle(f'./bigearthnet-models/splits/{FILE}.pkl') del std_df ```
github_jupyter
# 3D Instance Segmentation with Discriminative Instance Loss --- Implemntation of paper: [Semantic Instance Segmentation with a Discriminative Loss Function](https://arxiv.org/abs/1708.02551) ``` import os import errno import datetime import numpy as np import deepcell ``` ## Load the data ### Download the data from `deepcell.datasets` `deepcell.datasets` provides access to a set of annotated live-cell imaging datasets which can be used for training cell segmentation and tracking models. All dataset objects share the `load_data()` method, which allows the user to specify the name of the file (`path`), the fraction of data reserved for testing (`test_size`) and a `seed` which is used to generate the random train-test split. Metadata associated with the dataset can be accessed through the `metadata` attribute. ``` # Download the data (saves to ~/.keras/datasets) filename = 'mousebrain.npz' test_size = 0.2 # % of data saved as test seed = 0 # seed for random train-test split (X_train, y_train), (X_test, y_test) = deepcell.datasets.mousebrain.load_data( filename, test_size=test_size, seed=seed) print('X.shape: {}\ny.shape: {}'.format(X_train.shape, y_train.shape)) ``` ### Set up filepath constants ``` # the path to the data file is currently required for `train_model_()` functions # change DATA_DIR if you are not using `deepcell.datasets` DATA_DIR = os.path.expanduser(os.path.join('~', '.keras', 'datasets')) # DATA_FILE should be a npz file, preferably from `make_training_data` DATA_FILE = os.path.join(DATA_DIR, filename) # confirm the data file is available assert os.path.isfile(DATA_FILE) # Set up other required filepaths # If the data file is in a subdirectory, mirror it in MODEL_DIR and LOG_DIR PREFIX = os.path.relpath(os.path.dirname(DATA_FILE), DATA_DIR) ROOT_DIR = '/data' # TODO: Change this! 
Usually a mounted volume MODEL_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'models', PREFIX)) LOG_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'logs', PREFIX)) # create directories if they do not exist for d in (MODEL_DIR, LOG_DIR): try: os.makedirs(d) except OSError as exc: # Guard against race condition if exc.errno != errno.EEXIST: raise ``` ## Create the Foreground/Background FeatureNet Model Here we instantiate two `FeatureNet` models from `deepcell.model_zoo` for foreground/background separation as well as the interior/edge segmentation. ``` norm_method = 'whole_image' # data normalization receptive_field = 61 # should be adjusted for the scale of the data n_skips = 1 # number of skip-connections (only for FC training) frames_per_batch = 3 embedding_dim = 3 from deepcell import model_zoo fgbg_model = model_zoo.bn_feature_net_skip_3D( receptive_field=receptive_field, n_features=2, # segmentation mask (is_cell, is_not_cell) n_frames=frames_per_batch, n_skips=n_skips, n_conv_filters=32, n_dense_filters=128, input_shape=tuple([frames_per_batch] + list(X_train.shape[2:])), multires=False, last_only=False, norm_method=norm_method) ``` ## Prepare for training ### Set up training parameters. There are a number of tunable hyper parameters necessary for training deep learning models: **model_name**: Incorporated into any files generated during the training process. **n_epoch**: The number of complete passes through the training dataset. **lr**: The learning rate determines the speed at which the model learns. Specifically it controls the relative size of the updates to model values after each batch. **optimizer**: The TensorFlow module [tf.keras.optimizers](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers) offers optimizers with a variety of algorithm implementations. DeepCell typically uses the Adam or the SGD optimizers. **lr_sched**: A learning rate scheduler allows the learning rate to adapt over the course of model training. 
Typically a larger learning rate is preferred during the start of the training process, while a small learning rate allows for fine-tuning during the end of training. **batch_size**: The batch size determines the number of samples that are processed before the model is updated. The value must be greater than one and less than or equal to the number of samples in the training dataset. ``` from tensorflow.keras.optimizers import SGD from deepcell.utils.train_utils import rate_scheduler fgbg_model_name = 'disc_fgbg' disc_3d_model_name = 'disc_3d' n_epoch = 5 # Number of training epochs lr = 0.01 optimizer = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True) lr_sched = rate_scheduler(lr=lr, decay=0.99) batch_size = 1 # FC training uses 1 image per batch ``` ### Create the DataGenerators The `MovieDataGenerator` outputs a raw image (`X`) with it's labeled annotation mask (`y`). Additionally, it can apply a transform to `y` to change the task the model learns. Below we generate 2 training and validation data sets for both the foreground/background model and the pixelwise model. ``` from deepcell.image_generators import MovieDataGenerator datagen = MovieDataGenerator( rotation_range=180, zoom_range=(.8, 1.2), horizontal_flip=True, vertical_flip=True) datagen_val = MovieDataGenerator() # Create the foreground/background data iterators fgbg_train_data = datagen.flow( {'X': X_train, 'y': y_train}, seed=seed, skip=n_skips, transform='fgbg', batch_size=batch_size) fgbg_val_data = datagen_val.flow( {'X': X_test, 'y': y_test}, seed=seed, skip=n_skips, transform='fgbg', batch_size=batch_size) # Create the pixelwise data iterators disc_train_data = datagen.flow( {'X': X_train, 'y': y_train}, seed=seed, skip=n_skips, transform='disc', batch_size=batch_size) disc_val_data = datagen_val.flow( {'X': X_test, 'y': y_test}, seed=seed, skip=n_skips, transform='disc', batch_size=batch_size) ``` ### Compile the model with a loss function Each model is trained with it's own loss function. 
`weighted_categorical_crossentropy` is often used for classification models, and `disc_loss` is used for the discriminative instance models. ``` from deepcell import losses def loss_function(y_true, y_pred): return losses.weighted_categorical_crossentropy( y_true, y_pred, n_classes=2, from_logits=False) fgbg_model.compile( loss=loss_function, optimizer=SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True), metrics=['accuracy']) ``` ## Train the foreground/background model Call `fit()` on the compiled model, along with a default set of callbacks. ``` from deepcell.utils.train_utils import get_callbacks from deepcell.utils.train_utils import count_gpus model_path = os.path.join(MODEL_DIR, '{}.h5'.format(fgbg_model_name)) loss_path = os.path.join(MODEL_DIR, '{}.npz'.format(fgbg_model_name)) num_gpus = count_gpus() print('Training on', num_gpus, 'GPUs.') train_callbacks = get_callbacks( model_path, lr_sched=lr_sched, save_weights_only=num_gpus >= 2, monitor='val_loss', verbose=1) loss_history = fgbg_model.fit( fgbg_train_data, steps_per_epoch=fgbg_train_data.y.shape[0] // batch_size, epochs=n_epoch, validation_data=fgbg_val_data, validation_steps=fgbg_val_data.y.shape[0] // batch_size, callbacks=train_callbacks) ``` ## Create the 3D vector embedding FeatureNet Model Instatiate a `FeatureNet` for discriminative instance loss, which maps each pixel to a N-dimensional vector. ``` from deepcell import model_zoo disc_3d_model = model_zoo.bn_feature_net_skip_3D( fgbg_model=fgbg_model, receptive_field=receptive_field, n_skips=n_skips, n_features=embedding_dim, n_dense_filters=128, n_conv_filters=32, input_shape=tuple([frames_per_batch] + list(X_train.shape[2:])), norm_method=norm_method) ``` ### Compile the model with a loss function Just like the foreground/background model, the `disc` model is compiled with the discriminative instance loss (`disc_loss`) function. 
``` import tensorflow as tf from deepcell import losses disc_3d_model.compile( loss=losses.discriminative_instance_loss, optimizer=SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True), metrics=[tf.keras.metrics.Accuracy()]) ``` ## Train the 3D vector embedding `disc` model Call `fit()` on the compiled model, along with a default set of callbacks. ``` from deepcell.utils.train_utils import get_callbacks from deepcell.utils.train_utils import count_gpus model_path = os.path.join(MODEL_DIR, '{}.h5'.format(disc_3d_model_name)) loss_path = os.path.join(MODEL_DIR, '{}.npz'.format(disc_3d_model_name)) num_gpus = count_gpus() print('Training on', num_gpus, 'GPUs.') train_callbacks = get_callbacks( model_path, lr_sched=lr_sched, save_weights_only=num_gpus >= 2, monitor='val_loss', verbose=1) loss_history = disc_3d_model.fit( disc_train_data, steps_per_epoch=disc_train_data.y.shape[0] // batch_size, epochs=n_epoch, validation_data=disc_val_data, validation_steps=disc_val_data.y.shape[0] // batch_size, callbacks=train_callbacks) ``` ### Run the model ``` # With the trained model, make predictions on testing data test_images = disc_3d_model.predict(X_test[:1, :frames_per_batch]) test_images_fgbg = fgbg_model.predict(X_test[:1, :frames_per_batch])[-1] print(test_images.shape) print(test_images_fgbg.shape) ``` ## Predict on test data Use the trained model to predict on new data and post-process the results into a label mask. 
#### Threshold the foreground/background ``` # threshold the foreground/background # and remove background from vector embedding fg_thresh = test_images_fgbg[..., 1] > 0.9 fg_thresh = np.expand_dims(fg_thresh, axis=-1) test_images_post_fgbg = test_images * fg_thresh ``` #### Mean Shift ``` from itertools import cycle from sklearn.cluster import MeanShift, estimate_bandwidth def fit_mean_shift(images, index, frame, ndim, n_samples=500): test_plot = images[index, frame].reshape(-1, ndim) bandwidth = estimate_bandwidth(test_plot, n_samples=n_samples) ms = MeanShift(bandwidth=bandwidth, bin_seeding=True) ms.fit(test_plot) return ms ``` #### DB SCAN ``` from sklearn.cluster import DBSCAN from sklearn import metrics def fit_dbscan(images, index, frame, ndim, eps=.15): test_plot = images[index, frame].reshape(-1, ndim) db = DBSCAN(eps=eps, min_samples=5, algorithm='kd_tree').fit(test_plot) return db ``` ### Plot the results #### Scatter plots of the embedded vectors ``` import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D index = 0#np.random.randint(low=0, high=test_images_post_fgbg.shape[0]) frame = 2#np.random.randint(low=0, high=test_images_post_fgbg.shape[1]) fig = plt.figure(figsize=(15, 15)) # 3D Vector Embedding ax = fig.add_subplot(221, projection='3d') test_plot = test_images[index, frame].reshape(-1, embedding_dim) ax.scatter(test_plot[:, 0], test_plot[:, 1], test_plot[:, 2], c='b', marker='o', s=5) ax.set_title('3D Vector Embedding') # 3D Vector Embedding sans Background ax = fig.add_subplot(222, projection='3d') test_plot = test_images_post_fgbg[index, frame].reshape(-1, 3) ax.scatter(test_plot[:, 0], test_plot[:, 1], test_plot[:, 2], c='b', marker='o', s=5) ax.set_title('3D Vector Embedding sans Background') # Scatter plot after MeanShift ms = fit_mean_shift(test_images_post_fgbg, index=index, frame=frame, ndim=embedding_dim, n_samples=2000) n_clusters_ms = np.unique(ms.labels_).size ax = fig.add_subplot(223, projection='3d') for k, col in 
zip(range(n_clusters_ms), cycle('bgrcmyk')): my_members = ms.labels_ == k ax.scatter(test_plot[my_members, 0], test_plot[my_members, 1], test_plot[my_members, 2], c=col, s=5) ax.set_title('MeanShift: %d estimated clusters' % n_clusters_ms) # Scatter plot after DBSCAN db = fit_dbscan(test_images_post_fgbg, index=index, frame=frame, ndim=embedding_dim, eps=0.2) # Number of clusters in labels, ignoring noise if present. n_clusters_db = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0) ax = fig.add_subplot(224, projection='3d') core_samples_mask = np.zeros_like(db.labels_, dtype=bool) core_samples_mask[db.core_sample_indices_] = True # Scatter plot after DBScan unique_labels = set(db.labels_) colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))] for k, col in zip(unique_labels, colors): if k == -1: col = [0, 0, 0, 1] # Black used for noise. class_member_mask = (db.labels_ == k) xy = test_plot[class_member_mask & core_samples_mask] ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], 'o', c=col, s=5) xy = test_plot[class_member_mask & ~core_samples_mask] ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], 'o', c=col, s=5) ax.set_title('DBSCAN: %d estimated clusters' % n_clusters_db) plt.show() ``` #### Plot segmented images ``` import matplotlib.pyplot as plt index = 0#np.random.randint(low=0, high=test_images_post_fgbg.shape[0]) frame = 0#np.random.randint(low=0, high=test_images_post_fgbg.shape[1]) fig, axes = plt.subplots(ncols=2, nrows=4, figsize=(15, 30), sharex=True, sharey=True) ax = axes.ravel() ax[0].imshow(X_test[index, frame, ..., 0]) ax[0].set_title('Source Image') ax[1].imshow(test_images_fgbg[index, frame, ..., 1]) ax[1].set_title('Segmentation Prediction') ax[2].imshow(test_images[index, frame, ..., 0], cmap='jet') ax[2].set_title('Vector Embedding: Component 0') ax[3].imshow(test_images[index, frame, ..., 1], cmap='jet') ax[3].set_title('Vector Embedding: Component 1') ax[4].imshow(test_images_post_fgbg[index, frame, ..., 0], cmap='jet') 
ax[4].set_title('Vector Embedding: Component 0 sans Background') ax[5].imshow(test_images_post_fgbg[index, frame, ..., 0], cmap='jet') ax[5].set_title('Vector Embedding: Component 1 sans Background') ms = fit_mean_shift(test_images_post_fgbg, index=index, frame=frame, ndim=3, n_samples=2000) ax[6].imshow(ms.labels_.reshape(test_images_post_fgbg.shape[2:-1]), cmap='jet') ax[6].set_title('MeanShift: %d Clusters' % np.unique(ms.labels_).size) db = fit_dbscan(test_images_post_fgbg, index=index, frame=frame, ndim=3, eps=0.2) ax[7].imshow(db.labels_.reshape(test_images_post_fgbg.shape[2:-1]), cmap='jet') ax[7].set_title('DBSCAN: %d Clusters' % (len(set(db.labels_)) - int(-1 in db.labels_))) fig.tight_layout() plt.show() ```
github_jupyter
### x lines of Python # Amplitude-vs-offset plot This notebook accompanies [a blog post at Agile*](http://ageo.co/xlines03). In the first **x lines** we made a 2D synthetic seismogram. A major simplification in that model was normal incidence at 0 degrees of offset: the ray of seismic energy was assumed to be perfectly vertical. In this notebook, we'll model non-vertical incidence. When the reflection is not perpendicular to the geological interface, we have to use the Zoeppritz equation, or simplifications of it, to model the angle-dependent reflectivity. Agile's library [`bruges`](http://github.com/agile-geoscience/bruges) (Bag of Really Useful Geophysical Equations and Stuff) has lots of different reflectivity formulations to compare; we'll look at three. The data is from Blangy, JP, 1994, AVO in transversely isotropic media—An overview. *Geophysics* **59** (5), 775–781. Blangy conveniently defined his model rocks very fully and clearly (take note, would-be authors!). Related blog post: [The Blangy equation](http://www.agilegeoscience.com/blog/2014/8/7/the-blangy-equation.html?rq=blangy) Before we start, the usual prelims: ``` import numpy as np import matplotlib.pyplot as plt ``` We'll initiate some variables with some of Blangy's test data: the Type 3 AVO rocks from his Table 1. We only need the acoustic properties at first, but we'll define the elastic and anisotropic parameters as well, just in case we need them later (we will!). ``` # Upper layer: shale. vp0, vs0, ρ0, δ0, ε0 = 2307, 1108, 2150, 0.15, 0.30 # Line 1 # Lower layer: wet sand. vp1, vs1, ρ1, δ1, ε1 = 1951, 930, 2200, 0.00, 0.00 # Line 2 # Lower layer: gas sand. vp1g, vs1g, ρ1g, δ1g, ε1g = 1951, 1301, 1950, 0.00, 0.00 # Line 3 ``` For peace of mind, or just for fun, we can make a plot of these properties. 
``` fig = plt.figure(figsize=(12, 3)) z = np.arange(0, 20) ax0 = fig.add_subplot(1, 3, 1) ax0.plot(np.append(np.repeat(vp0, 10), np.repeat(vp1, 10)), z, 'ob', markeredgecolor='none') ax0.plot(np.append(np.repeat(vp0, 10), np.repeat(vp1g, 10)), z, 'ob', alpha=0.4, markeredgecolor='none') ax0.set_xlim(1900, 2400) ax0.axhline(9.5, c='k') ax0.invert_yaxis() ax0.set_title('Vp') ax0.text(2000, 5, 'SHALE') ax0.text(2200, 15, 'SANDSTONE') ax0.text(1980, 13, 'wet') ax0.text(1980, 17, 'gas', alpha=0.4) ax1 = fig.add_subplot(1, 3, 2) ax1.plot(np.append(np.repeat(vs0, 10), np.repeat(vs1, 10)), z, 'og', markeredgecolor='none') ax1.plot(np.append(np.repeat(vs0, 10), np.repeat(vs1g, 10)), z, 'og', alpha=0.4, markeredgecolor='none') ax1.set_xlim(850, 1350) ax1.axhline(9.5, c='k') ax1.invert_yaxis() ax1.set_title('Vs') ax1.text(950, 5, 'SHALE') ax1.text(1050, 15, 'SANDSTONE') ax1.text(950, 13, 'wet') ax1.text(1220, 17, 'gas', alpha=0.4) ax2 = fig.add_subplot(1, 3, 3) ax2.plot(np.append(np.repeat(ρ0, 10), np.repeat(ρ1, 10)), z, 'or', markeredgecolor='none') ax2.plot(np.append(np.repeat(ρ0, 10), np.repeat(ρ1g, 10)), z, 'or', alpha=0.4, markeredgecolor='none') ax2.set_xlim(1800, 2500) ax2.axhline(9.5, c='k') ax2.invert_yaxis() ax2.set_title('rho') ax2.text(1900, 5, 'SHALE') ax2.text(2250, 15, 'SANDSTONE') ax2.text(2100, 13, 'wet') ax2.text(2000, 17, 'gas', alpha=0.4) plt.show() ``` ## Linear Shuey equation Let's start with a bit of maths — [the 2-term Shuey approximation](http://subsurfwiki.org/wiki/Shuey_equation). I'm using the formulation given by Avesth, P, T Mukerji and G Mavko (2005). *Quantitative seismic interpretation.* Cambridge University Press, Cambridge, UK. 
$$R(\theta) \approx R(0) + G \sin^2 \theta$$ where $$R(0) = \frac{1}{2} \left( \frac{\Delta V_\mathrm{P}}{V_\mathrm{P}} + \frac{\Delta \rho}{\rho} \right)$$ and $$G = \frac{1}{2} \frac{\Delta V_\mathrm{P}}{V_\mathrm{P}} - 2 \frac{V^2_\mathrm{S}}{V^2_\mathrm{P}} \left( \frac{\Delta \rho}{\rho} + 2 \frac{\Delta V_\mathrm{S}}{V_\mathrm{S}} \right)$$ In these equations, $\Delta V_\mathrm{P}$ means the difference in the velocity of the two layers, and $V_\mathrm{P}$ means the mean of the two layers. Let's make a function to help with this 'difference over mean': ``` # I'm on a tight line budget so I'm defining a function on a # single line. Don't do this, it makes your code less readable. def dom(upper, lower): return np.subtract(lower, upper) / np.mean((lower, upper)) ``` First term: ``` R0 = 0.5 * (dom(vp0, vp1) + dom(ρ0, ρ1)) R0 ``` OK, that looks reasonable, but the second term $G$ is going to take some really fiddly math... and I'm on a budget, I don't have enough lines for all that. Besides, I might easily make a mistake. Luckily, our library `bruges` has all these equations. There's a `bruges.reflection.shuey2` function that returns the 2-term Shuey reflectivity for a given interface and angle range. ``` import bruges # I have to use 31 because `arange()` goes up to but not including. θ = range(0, 31) # Line 4 shuey = bruges.reflection.shuey2(vp0, vs0, ρ0, # Line 5 vp1, vs1, ρ1, θ) ``` I now have an array of the reflection coefficients corresponding to the angles I passed in (0 to 30 degrees of offset). ``` shuey shuey_g = bruges.reflection.shuey2(vp0, vs0, ρ0, # Line 6 vp1g, vs1g, ρ1g, θ) plt.plot(shuey, label='Brine case') # Line 7 plt.plot(shuey_g, 'red', label='Gas case') # Line 8 plt.legend(loc='best'); # Line 9 ``` That's the 10 lines of Python used up, but we already got a useful plot. With a few more lines, we can make the plot a bit prettier. 
``` plt.axhline(0, color='k', alpha=0.3) plt.plot(θ, shuey, 'b', lw=2, label='Brine case') plt.plot(θ, shuey_g, 'r', lw=2, label='Gas case') plt.axhline(0, color='k', alpha=0.4) plt.ylim(-0.25, 0.1) plt.xlabel('theta [deg]') plt.ylabel('reflectivity [unitless]') plt.legend(loc='best') plt.show() ``` ## Compare to Zoeppritz and Aki-Richards We could also replace that 2-term Shuey evaluation with another algorithm. For example, let's compute the Aki-Richards approximation and the full Zoeppritz solution, and compare the three results. First, we'll make a wider angle range, so we can compare them outside the reliability 'window' of Shuey's approximation (up to about 25 degrees or so for most interfaces). ``` θ = np.arange(0, 51) shuey = bruges.reflection.shuey2(vp0, vs0, ρ0, vp1, vs1, ρ1, θ) zoeppritz = bruges.reflection.zoeppritz_rpp(vp0, vs0, ρ0, vp1, vs1, ρ1, θ) akirichards = bruges.reflection.akirichards(vp0, vs0, ρ0, vp1, vs1, ρ1, θ) plt.plot(shuey, label='Shuey') plt.plot(zoeppritz, 'r', lw=2, label='Zoeppritz') plt.plot(akirichards, label='Aki-Richards') plt.axhline(0, color='k', alpha=0.4) plt.xlabel('theta [deg]') plt.ylabel('reflectivity [unitless]') plt.legend(loc='best') plt.show() ``` You can see how Shuey breaks down at about 25 degrees, whereas Aki-Richards is quite reliable even to wide offsets. ## Isotropic and anisotropic approximations We can go further still. Blangy's paper gives us an AVO approximation for weakly anisotropic rocks. We can use another `bruges` function to compute that response. ``` # The function returns the isotropic and the anisotropic reponses. # Since we don't need the isotropic response (it's the same as the # Aki-Richards solution), we can assign it to _, a sort of throwaway. 
_, blangy = bruges.rockphysics.blangy(vp0, vs0, ρ0, δ0, ε0, vp1, vs1, ρ1, δ1, ε1, θ) plt.plot(akirichards, label='Aki-Richards') plt.plot(blangy, label='Blangy') plt.axhline(0, color='k', alpha=0.4) plt.legend(loc='best') plt.show() ``` <hr /> <div> <img src="https://avatars1.githubusercontent.com/u/1692321?s=50"><p style="text-align:center">© Agile Geoscience 2016</p> </div>
github_jupyter
## Introduction Text Classification can be used to solve various use-cases like sentiment analysis, spam detection, hashtag prediction etc. This notebook demonstrates the use of SageMaker BlazingText to perform supervised binary/multi class with single or multi label text classification. BlazingText can train the model on more than a billion words in a couple of minutes using a multi-core CPU or a GPU, while achieving performance on par with the state-of-the-art deep learning text classification algorithms. BlazingText extends the fastText text classifier to leverage GPU acceleration using custom CUDA kernels. ## Initialize Your Resources SageMaker needs unique training jobs to run, and we as the users need to be able to see our job! So here we'll provide our name once, and use that to track our resources throughout the lab. ``` YOUR_NAME = 'first-last' import sagemaker from sagemaker import get_execution_role import json import boto3 sess = sagemaker.Session() role = get_execution_role() print(role) # This is the role that SageMaker would use to leverage AWS resources (S3, CloudWatch) on your behalf bucket = sess.default_bucket() # Replace with your own bucket name if needed print(bucket) prefix = '{}/blazingtext/supervised'.format(YOUR_NAME) #Replace with the prefix under which you want to store the data if needed ``` ### Data Preparation Now we'll download a dataset from the web on which we want to train the text classification model. BlazingText expects a single preprocessed text file with space separated tokens and each line of the file should contain a single sentence and the corresponding label(s) prefixed by "\__label\__". In this example, let us train the text classification model on the [DBPedia Ontology Dataset](https://wiki.dbpedia.org/services-resources/dbpedia-data-set-2014#2) as done by [Zhang et al](https://arxiv.org/pdf/1509.01626.pdf). The DBpedia ontology dataset is constructed by picking 14 nonoverlapping classes from DBpedia 2014. 
It has 560,000 training samples and 70,000 testing samples. The fields we used for this dataset contain title and abstract of each Wikipedia article. ``` !wget https://github.com/saurabh3949/Text-Classification-Datasets/raw/master/dbpedia_csv.tar.gz !tar -xzvf dbpedia_csv.tar.gz ``` Let us inspect the dataset and the classes to get some understanding about how the data and the label is provided in the dataset. ``` !head dbpedia_csv/train.csv -n 3 ``` As can be seen from the above output, the CSV has 3 fields - Label index, title and abstract. Let us first create a label index to label name mapping and then proceed to preprocess the dataset for ingestion by BlazingText. Next we will print the labels file (`classes.txt`) to see all possible labels followed by creating an index to label mapping. ``` !cat dbpedia_csv/classes.txt ``` The following code creates the mapping from integer indices to class label which will later be used to retrieve the actual class name during inference. ``` index_to_label = {} with open("dbpedia_csv/classes.txt") as f: for i,label in enumerate(f.readlines()): index_to_label[str(i+1)] = label.strip() print(index_to_label) ``` ## Data Preprocessing We need to preprocess the training data into **space separated tokenized text** format which can be consumed by `BlazingText` algorithm. Also, as mentioned previously, the class label(s) should be prefixed with `__label__` and it should be present in the same line along with the original sentence. We'll use `nltk` library to tokenize the input sentences from DBPedia dataset. 
Download the nltk tokenizer and other libraries ``` from random import shuffle import multiprocessing from multiprocessing import Pool import csv import nltk nltk.download('punkt') def transform_instance(row): cur_row = [] label = "__label__" + index_to_label[row[0]] #Prefix the index-ed label with __label__ cur_row.append(label) cur_row.extend(nltk.word_tokenize(row[1].lower())) cur_row.extend(nltk.word_tokenize(row[2].lower())) return cur_row ``` The `transform_instance` will be applied to each data instance in parallel using python's multiprocessing module ``` def preprocess(input_file, output_file, keep=1): all_rows = [] with open(input_file, 'r') as csvinfile: csv_reader = csv.reader(csvinfile, delimiter=',') for row in csv_reader: all_rows.append(row) shuffle(all_rows) all_rows = all_rows[:int(keep*len(all_rows))] pool = Pool(processes=multiprocessing.cpu_count()) transformed_rows = pool.map(transform_instance, all_rows) pool.close() pool.join() with open(output_file, 'w') as csvoutfile: csv_writer = csv.writer(csvoutfile, delimiter=' ', lineterminator='\n') csv_writer.writerows(transformed_rows) %%time # Preparing the training dataset # Since preprocessing the whole dataset might take a couple of mintutes, # we keep 20% of the training dataset for this demo. # Set keep to 1 if you want to use the complete dataset preprocess('dbpedia_csv/train.csv', 'dbpedia.train', keep=.2) # Preparing the validation dataset preprocess('dbpedia_csv/test.csv', 'dbpedia.validation') ``` The data preprocessing cell might take a minute to run. After the data preprocessing is complete, we need to upload it to S3 so that it can be consumed by SageMaker to execute training jobs. We'll use Python SDK to upload these two files to the bucket and prefix location that we have set above. 
``` %%time train_channel = prefix + '/train' validation_channel = prefix + '/validation' sess.upload_data(path='dbpedia.train', bucket=bucket, key_prefix=train_channel) sess.upload_data(path='dbpedia.validation', bucket=bucket, key_prefix=validation_channel) s3_train_data = 's3://{}/{}'.format(bucket, train_channel) s3_validation_data = 's3://{}/{}'.format(bucket, validation_channel) ``` Next we need to setup an output location at S3, where the model artifact will be dumped. These artifacts are also the output of the algorithm's traning job. ``` s3_output_location = 's3://{}/{}/output'.format(bucket, prefix) ``` ## Training Now that we are done with all the setup that is needed, we are ready to train our object detector. To begin, let us create a ``sageMaker.estimator.Estimator`` object. This estimator will launch the training job. ``` region_name = boto3.Session().region_name container = sagemaker.amazon.amazon_estimator.get_image_uri(region_name, "blazingtext", "latest") print('Using SageMaker BlazingText container: {} ({})'.format(container, region_name)) ``` ## Training the BlazingText model for supervised text classification Similar to the original implementation of [Word2Vec](https://arxiv.org/pdf/1301.3781.pdf), SageMaker BlazingText provides an efficient implementation of the continuous bag-of-words (CBOW) and skip-gram architectures using Negative Sampling, on CPUs and additionally on GPU[s]. The GPU implementation uses highly optimized CUDA kernels. To learn more, please refer to [*BlazingText: Scaling and Accelerating Word2Vec using Multiple GPUs*](https://dl.acm.org/citation.cfm?doid=3146347.3146354). Besides skip-gram and CBOW, SageMaker BlazingText also supports the "Batch Skipgram" mode, which uses efficient mini-batching and matrix-matrix operations ([BLAS Level 3 routines](https://software.intel.com/en-us/mkl-developer-reference-fortran-blas-level-3-routines)). 
This mode enables distributed word2vec training across multiple CPU nodes, allowing almost linear scale up of word2vec computation to process hundreds of millions of words per second. Please refer to [*Parallelizing Word2Vec in Shared and Distributed Memory*](https://arxiv.org/pdf/1604.04661.pdf) to learn more. BlazingText also supports a *supervised* mode for text classification. It extends the FastText text classifier to leverage GPU acceleration using custom CUDA kernels. The model can be trained on more than a billion words in a couple of minutes using a multi-core CPU or a GPU, while achieving performance on par with the state-of-the-art deep learning text classification algorithms. For more information, please refer to the [algorithm documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/blazingtext.html). To summarize, the following modes are supported by BlazingText on different types instances: | Modes | cbow (supports subwords training) | skipgram (supports subwords training) | batch_skipgram | supervised | |:----------------------: |:----: |:--------: |:--------------: | :--------------: | | Single CPU instance | ✔ | ✔ | ✔ | ✔ | | Single GPU instance | ✔ | ✔ | | ✔ (Instance with 1 GPU only) | | Multiple CPU instances | | | ✔ | | | Now, let's define the SageMaker `Estimator` with resource configurations and hyperparameters to train Text Classification on *DBPedia* dataset, using "supervised" mode on a `c4.4xlarge` instance. ``` bt_model = sagemaker.estimator.Estimator(container, role, base_job_name = YOUR_NAME, train_instance_count=1, train_instance_type='ml.c4.4xlarge', train_volume_size = 30, train_max_run = 360000, input_mode= 'File', output_path=s3_output_location, sagemaker_session=sess) ``` Please refer to [algorithm documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/blazingtext_hyperparameters.html) for the complete list of hyperparameters. 
``` bt_model.set_hyperparameters(mode="supervised", epochs=10, min_count=2, learning_rate=0.05, vector_dim=10, early_stopping=True, patience=4, min_epochs=5, word_ngrams=2) ``` Now that the hyper-parameters are setup, let us prepare the handshake between our data channels and the algorithm. To do this, we need to create the `sagemaker.session.s3_input` objects from our data channels. These objects are then put in a simple dictionary, which the algorithm consumes. ``` train_data = sagemaker.session.s3_input(s3_train_data, distribution='FullyReplicated', content_type='text/plain', s3_data_type='S3Prefix') validation_data = sagemaker.session.s3_input(s3_validation_data, distribution='FullyReplicated', content_type='text/plain', s3_data_type='S3Prefix') data_channels = {'train': train_data, 'validation': validation_data} ``` We have our `Estimator` object, we have set the hyper-parameters for this object and we have our data channels linked with the algorithm. The only remaining thing to do is to train the algorithm. The following command will train the algorithm. Training the algorithm involves a few steps. Firstly, the instance that we requested while creating the `Estimator` classes is provisioned and is setup with the appropriate libraries. Then, the data from our channels are downloaded into the instance. Once this is done, the training job begins. The provisioning and data downloading will take some time, depending on the size of the data. Therefore it might be a few minutes before we start getting training logs for our training jobs. The data logs will also print out Accuracy on the validation data for every epoch after training job has executed `min_epochs`. This metric is a proxy for the quality of the algorithm. Once the job has finished a "Job complete" message will be printed. The trained model can be found in the S3 bucket that was setup as `output_path` in the estimator. 
``` bt_model.fit(inputs=data_channels, logs=True) ``` ## Hosting / Inference Once the training is done, we can deploy the trained model as an Amazon SageMaker real-time hosted endpoint. This will allow us to make predictions (or inference) from the model. Note that we don't have to host on the same type of instance that we used to train. Because instance endpoints will be up and running for long, it's advisable to choose a cheaper instance for inference. ``` text_classifier = bt_model.deploy(initial_instance_count = 1,instance_type = 'ml.m4.xlarge') ``` #### Use JSON format for inference BlazingText supports `application/json` as the content-type for inference. The payload should contain a list of sentences with the key as "**instances**" while being passed to the endpoint. ``` sentences = ["Convair was an american aircraft manufacturing company which later expanded into rockets and spacecraft.", "Berwick secondary college is situated in the outer melbourne metropolitan suburb of berwick ."] # using the same nltk tokenizer that we used during data preparation for training tokenized_sentences = [' '.join(nltk.word_tokenize(sent)) for sent in sentences] payload = {"instances" : tokenized_sentences} response = text_classifier.predict(json.dumps(payload)) predictions = json.loads(response) print(json.dumps(predictions, indent=2)) ``` By default, the model will return only one prediction, the one with the highest probability. For retrieving the top k predictions, you can set `k` in the configuration as shown below: ``` payload = {"instances" : tokenized_sentences, "configuration": {"k": 2}} response = text_classifier.predict(json.dumps(payload)) predictions = json.loads(response) print(json.dumps(predictions, indent=2)) ``` ### Stop / Close the Endpoint (Optional) Finally, we should delete the endpoint before we close the notebook if we don't need to keep the endpoint running for serving realtime predictions. ``` # sess.delete_endpoint(text_classifier.endpoint) ```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # 構造化されたデータの分類 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/alpha/tutorials/keras/feature_columns"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ja/r2/tutorials/keras/feature_columns.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ja/r2/tutorials/keras/feature_columns.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> </table> Note: これらのドキュメントは私たちTensorFlowコミュニティが翻訳したものです。コミュニティによる 翻訳は**ベストエフォート**であるため、この翻訳が正確であることや[英語の公式ドキュメント](https://www.tensorflow.org/?hl=en)の 最新の状態を反映したものであることを保証することはできません。 この翻訳の品質を向上させるためのご意見をお持ちの方は、GitHubリポジトリ[tensorflow/docs](https://github.com/tensorflow/docs)にプルリクエストをお送りください。 コミュニティによる翻訳やレビューに参加していただける方は、 [docs-ja@tensorflow.org メーリングリスト](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ja)にご連絡ください。 このチュートリアルでは、(例えばCSVファイルに保存された表形式データのような)構造化されたデータをどうやって分類するかを示します。ここでは、モデルの定義に[Keras](https://www.tensorflow.org/guide/keras)を、[feature 
columns](https://www.tensorflow.org/guide/feature_columns)をCSVファイルの列をモデルを訓練するための特徴量にマッピングするための橋渡し役として使用します。このチュートリアルには、下記のことを行うコードすべてが含まれています。 * [Pandas](https://pandas.pydata.org/)を使用したCSVファイルの読み込み * [tf.data](https://www.tensorflow.org/guide/datasets)を使用して行データをシャッフルし、バッチ化するための入力パイプライン構築 * feature columnsを使ったCSVの列のモデル訓練用の特徴量へのマッピング * Kerasを使ったモデルの構築と、訓練及び評価 ## データセット ここでは、Cleveland Clinic Foundation for Heart Diseaseが提供している小さな[データセット](https://archive.ics.uci.edu/ml/datasets/heart+Disease)を使用します。このCSVファイルには数百行が含まれています。行が患者を、列がその属性を表します。この情報を使用して、患者が心臓疾患を持っているかを予測します。このデータセットの場合には二値分類タスクとなります。 下記はこのデータセットの[說明](https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/heart-disease.names)です。数値列とカテゴリー列があることに注目してください。 >列| 說明| 特徴量の型 | データ型 >------------|--------------------|----------------------|----------------- >Age | 年齢 | 数値型 | 整数 >Sex | (1 = 男性; 0 = 女性) | カテゴリー型 | 整数 >CP | 胸痛のタイプ (0, 1, 2, 3, 4) | カテゴリー型 | 整数 >Trestbpd | 安静時血圧 (単位:mm Hg 入院時) | 数値型 | 整数 >Chol | 血清コレステロール 単位:mg/dl | 数値型 | 整数 >FBS | (空腹時血糖 > 120 mg/dl) (1 = 真; 0 = 偽) | カテゴリー型 | 整数 >RestECG | 安静時心電図の診断結果 (0, 1, 2) | カテゴリー型 | 整数 >Thalach | 最大心拍数 | 数値型 | 整数 >Exang | 運動誘発狭心症 (1 = はい; 0 = いいえ) | カテゴリー型 | 整数 >Oldpeak | 安静時と比較した運動時のST低下 | 数値型 | 整数 >Slope | ピーク運動STセグメントの勾配 | 数値型 | 浮動小数点数 >CA | 蛍光透視法によって着色された主要血管の数(0−3) | 数値型 | 整数 >Thal | 3 = 正常; 6 = 固定欠陥; 7 = 可逆的欠陥 | カテゴリー型 | 文字列 >Target | 心臓疾患の診断 (1 = 真; 0 = 偽) | 分類 | 整数 ## TensorFlow他ライブラリのインポート ``` !pip install sklearn from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import pandas as pd !pip install tensorflow==2.0.0-alpha0 import tensorflow as tf from tensorflow import feature_column from tensorflow.keras import layers from sklearn.model_selection import train_test_split ``` ## Pandasを使ったデータフレーム作成 [Pandas](https://pandas.pydata.org/)は、構造化データの読み込みや操作のための便利なユーティリティを持つPythonのライブラリです。ここでは、Pandasを使ってURLからデータをダウンロードし、データフレームに読み込みます。 ``` URL = 'https://storage.googleapis.com/applied-dl/heart.csv' 
dataframe = pd.read_csv(URL) dataframe.head() ``` ## データフレームを、訓練用、検証用、テスト用に分割 ダウンロードしたデータセットは1つのCSVファイルです。これを、訓練用、検証用、テスト用のデータセットに分割します。 ``` train, test = train_test_split(dataframe, test_size=0.2) train, val = train_test_split(train, test_size=0.2) print(len(train), 'train examples') print(len(val), 'validation examples') print(len(test), 'test examples') ``` ## tf.dataを使った入力パイプラインの構築 次に、[tf.data](https://www.tensorflow.org/guide/datasets)を使ってデータフレームをラップします。こうすることで、feature columns をPandasデータフレームの列をモデル訓練用の特徴量へのマッピングするための橋渡し役として使うことができます。(メモリに収まらないぐらいの)非常に大きなCSVファイルを扱う場合には、tf.dataを使ってディスクから直接CSVファイルを読み込むことになります。この方法は、このチュートリアルでは扱いません。 ``` # Pandasデータフレームからtf.dataデータセットを作るためのユーティリティメソッド def df_to_dataset(dataframe, shuffle=True, batch_size=32): dataframe = dataframe.copy() labels = dataframe.pop('target') ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels)) if shuffle: ds = ds.shuffle(buffer_size=len(dataframe)) ds = ds.batch(batch_size) return ds batch_size = 5 # デモ用として小さなバッチサイズを使用 train_ds = df_to_dataset(train, batch_size=batch_size) val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size) test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size) ``` ## 入力パイプラインを理解する 入力パイプラインを構築したので、それが返すデータのフォーマットを見るために呼び出してみましょう。出力を読みやすくするためにバッチサイズを小さくしてあります。 ``` for feature_batch, label_batch in train_ds.take(1): print('Every feature:', list(feature_batch.keys())) print('A batch of ages:', feature_batch['age']) print('A batch of targets:', label_batch ) ``` データセットが(データフレームにある)列名からなるディクショナリを返すことがわかります。列名から、データフレームの行に含まれる列の値が得られます。 ## feature columnsの様々な型の例 TensorFlowにはたくさんの型のfeature columnがあります。このセクションでは、いくつかの型のfeature columnsを作り、データフレームの列をどのように変換しているかを示します。 ``` # いくつかの型のfeature columnsを例示するためこのバッチを使用する example_batch = next(iter(train_ds))[0] # feature columnsを作りデータのバッチを変換する # ユーティリティメソッド def demo(feature_column): feature_layer = layers.DenseFeatures(feature_column) print(feature_layer(example_batch).numpy()) ``` ### 数値コラム feature 
columnsの出力はモデルへの入力になります(上記で定義したdemo関数を使うと、データフレームの列がどのように変換されるかをつぶさに見ることができます)。[数値コラム](https://www.tensorflow.org/api_docs/python/tf/feature_column/numeric_column)は、最も単純な型のコラムです。数値コラムは実数特徴量を表現するのに使われます。このコラムを使う場合、モデルにはデータフレームの列の値がそのまま渡されます。 ``` age = feature_column.numeric_column("age") demo(age) ``` 心臓疾患データセットでは、データフレームのほとんどの列が数値型です。 ### バケット化コラム 数値をそのままモデルに入力するのではなく、値の範囲に基づいた異なるカテゴリーに分割したいことがあります。例えば、人の年齢を表す生データを考えてみましょう。[バケット化コラム](https://www.tensorflow.org/api_docs/python/tf/feature_column/bucketized_column)を使うと年齢を数値コラムとして表現するのではなく、年齢をいくつかのバケットに分割できます。下記のワンホット値が、各行がどの年齢範囲にあるかを表していることに注目してください。 ``` age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) demo(age_buckets) ``` ### カテゴリー型コラム このデータセットでは、Thalは('fixed'、'normal'、'reversible'のような)文字列として表現されています。文字列を直接モデルに入力することはできません。まず、文字列を数値にマッピングする必要があります。categorical vocabulary コラムを使うと、(上記で示した年齢バケットのように)文字列をワンホットベクトルとして表現することができます。カテゴリーを表す語彙(vocabulary)は[categorical_column_with_vocabulary_list](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list)を使ってリストで渡すか、[categorical_column_with_vocabulary_file](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_file)を使ってファイルから読み込むことができます。 ``` thal = feature_column.categorical_column_with_vocabulary_list( 'thal', ['fixed', 'normal', 'reversible']) thal_one_hot = feature_column.indicator_column(thal) demo(thal_one_hot) ``` より複雑なデータセットでは、たくさんの列がカテゴリー型(例えば文字列)であることでしょう。feature columns はカテゴリー型データを扱う際に最も役に立ちます。このデータセットでは、カテゴリー型コラムは1つだけですが、他のデータセットを扱う際に使用できるいくつかの重要な型のfeature columnsを紹介するために、この列を使用することにします。 ### 埋め込み型コラム 
数種類の候補となる文字列ではなく、カテゴリー毎に数千(あるいはそれ以上)の値があるとしましょう。カテゴリーの数が多くなってくると、様々な理由から、ワンホットエンコーディングを使ってニューラルネットワークを訓練することが難しくなります。埋込み型コラムを使うと、こうした制約を克服することが可能です。[埋込み型コラム](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column)は、データを多次元のワンホットベクトルとして表すのではなく、セルの値が0か1かだけではなく、どんな数値でもとれるような密な低次元ベクトルとして表現します。埋め込みのサイズ(下記の例では8)は、チューニングが必要なパラメータです。 キーポイント:カテゴリー型コラムがたくさんの選択肢を持つ場合、埋め込み型コラムを使用することが最善の方法です。ここでは例を一つ示しますので、今後様々なデータセットを扱う際には、この例を参考にしてください。 ``` # この埋込み型コラムの入力は、先程作成したカテゴリ型コラムであることに注意 thal_embedding = feature_column.embedding_column(thal, dimension=8) demo(thal_embedding) ``` ### ハッシュ化特徴コラム 値の種類が多いカテゴリー型コラムを表現するもう一つの方法が、[categorical_column_with_hash_bucket](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_hash_bucket)を使う方法です。このfeature columnは文字列をエンコードするために入力のハッシュ値を計算し、`hash_bucket_size`個のバケットの中から1つを選択します。このコラムを使用する場合には、語彙を用意する必要はありません。また、スペースの節約のために、実際のカテゴリー数に比べて極めて少ないバケット数を選択することも可能です。 キーポイント:この手法の重要な欠点の一つは、異なる文字列が同じバケットにマッピングされるというハッシュ値の衝突が起きることです。実務上は、データセットによっては、この問題を無視できることがあります。 ``` thal_hashed = feature_column.categorical_column_with_hash_bucket( 'thal', hash_bucket_size=1000) demo(feature_column.indicator_column(thal_hashed)) ``` ### クロスフィーチャーコラム 複数の特徴量をまとめて1つの特徴量にする、[フィーチャークロス](https://developers.google.com/machine-learning/glossary/#feature_cross)として知られている手法は、モデルが特徴量の組み合わせの一つ一つに別々の重みを学習することを可能にします。ここでは年齢とThalをクロスさせて新しい特徴量を作ってみます。交差列(`crossed_column`)が、起こりうるすべての組み合わせ全体のテーブル(これは非常に大きくなる可能性があります)を作るものではないことに注意してください。クロスフィーチャーコラムは、代わりにバックエンドとしてハッシュ化コラムを使用しているため、テーブルの大きさを選択することができます。 ``` crossed_feature = feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000) demo(feature_column.indicator_column(crossed_feature)) ``` ## 使用するコラムを選択する これまで、いくつかのfeature columnの使い方を見てきました。いよいよモデルの訓練にそれらを使用することにします。このチュートリアルの目的は、feature columnsを使うのに必要な完全なコード(いわば力学)を示すことです。以下ではモデルを訓練するための列を適当に選びました。 キーポイント:正確なモデルを構築するのが目的である場合には、できるだけ大きなデータセットを使用して、どの特徴量を含めるのがもっとも意味があるのかや、それらをどう表現したらよいかを、慎重に検討してください。 ``` feature_columns = 
[] # 数値コラム for header in ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'slope', 'ca']: feature_columns.append(feature_column.numeric_column(header)) # バケット化コラム age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) feature_columns.append(age_buckets) # インジケーター(カテゴリー型)コラム thal = feature_column.categorical_column_with_vocabulary_list( 'thal', ['fixed', 'normal', 'reversible']) thal_one_hot = feature_column.indicator_column(thal) feature_columns.append(thal_one_hot) # 埋め込み型コラム thal_embedding = feature_column.embedding_column(thal, dimension=8) feature_columns.append(thal_embedding) # クロスフィーチャーコラム crossed_feature = feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000) crossed_feature = feature_column.indicator_column(crossed_feature) feature_columns.append(crossed_feature) ``` ### 特徴量層の構築 feature columnsを定義し終わったので、次に[DenseFeatures](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/DenseFeatures)層を使ってKerasモデルへの入力とします。 ``` feature_layer = tf.keras.layers.DenseFeatures(feature_columns) ``` これまでは、feature columnsの働きを見るため、小さなバッチサイズを使ってきました。ここではもう少し大きなバッチサイズの新しい入力パイプラインを作ります。 ``` batch_size = 32 train_ds = df_to_dataset(train, batch_size=batch_size) val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size) test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size) ``` ## モデルの構築、コンパイルと訓練 ``` model = tf.keras.Sequential([ feature_layer, layers.Dense(128, activation='relu'), layers.Dense(128, activation='relu'), layers.Dense(1, activation='sigmoid') ]) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) model.fit(train_ds, validation_data=val_ds, epochs=5) loss, accuracy = model.evaluate(test_ds) print("Accuracy", accuracy) ``` 
キーポイント:一般的に、ディープラーニングが最良の結果となるのは、もっと大きくて、もっと複雑なデータセットです。この例のように小さなデータセットを使用する際には、強固なベースラインとして、決定木やランダムフォレストを使うことをおすすめします。このチュートリアルの目的は、訓練により正確なモデルを得ることではなく、構造化データの使い方をデモすることです。今後ご自分のデータセットに取り組まれる際の出発点として、これらのコードをお使いください。 ## 次のステップ 構造化データの分類について更に多くのことを学ぶためには、自分自身で試してみることです。別のデータセットを見つけ、上記と同様のコードを使って、それを分類するモデルを訓練してみてください。正解率を上げるためには、モデルにどの特徴量を含めたらよいかや、その特徴量をどのように表現すべきかをじっくり考えてください。
github_jupyter
Copyright 2020 Andrew M. Olney and made available under [CC BY-SA](https://creativecommons.org/licenses/by-sa/4.0) for text and [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0) for code. # Data Science and the Nature of Data This notebook introduces some foundational concepts in data science. As a result, this notebook will have more reading and less practical exercises than normal. But don't worry, we'll have some practical exercises at the end. <!-- for getting data from files. --> We have organized this notebook around **big ideas** in data science. You may wish to refer to this notebook throughout the course when these ideas come up. It's OK if you don't completely understand them today. Some of these ideas are quite subtle and take time to master. Let's get started! ## Not all data is the same When you hear people talk about "data," you may get the impression that all data is the same. However, there are *many* different kinds of data. Just like we can compare animals by how many legs they have, whether they have fur, or whether they have tails, we can compare data along various *dimensions* that affect how we work with the data and what kinds of conclusions we can draw from it. ### Is the data structured or unstructured? One of the most basic properties of data is whether it is **structured.** It might surprise you to hear that data can be unstructured; after all, why would someone collect data that wasn't structured? And you'd be right: normally when people plan to collect data, they structure it. Structure means that the data is organized and ready for analysis. The most common kind of structure is **tabular data**, like you'd see in a spreadsheet. We'll talk about this more in a little bit. Databases are another common source of structured data. Unstructured data usually comes about when the data collection wasn't planned or if people didn't know how to structure it in the first place. 
For example, imagine that you have a million photographs - how would you structure them as data? Textual data is another common example of unstructured data. If textual data were structured, in the way we're talking about, we wouldn't need search engines like Google to find things for us! When we work with unstructured data, we must take an extra step of structuring it somehow for our analysis, i.e. we have to turn unstructured data into structured data to do something with it. Again, images, audio, and text are common examples that need this extra step. ### Is the data clean or dirty? Another basic property of data is **cleanliness**. If data is clean, then we don't need to correct it or process it to remove garbage values or correct noisy values. Just like with structured data, you might expect all data to be clean. However, even carefully collected data can have problems that require correction before it can be used properly. Dirty data is the norm for unplanned data collection. So unplanned data collection is more likely to result in *both* unstructured data and dirty data. There are many ways that data can be dirty, but to make the idea more concrete, let's consider a few examples. Imagine that you are interested in the weather in your backyard, so you put out a battery operated thermometer that records the temperature every hour. You then leave it there for a month. Now imagine that it worked fine for the first two weeks, but since you didn't change the batteries, the measurements for the last two weeks become increasingly unreliable, e.g. reporting up to 10 degrees above or below the actual temperature, until it finally shuts off leaving you with no data for the remaining days. This kind of problem, an **instrument failure** leading to **unreliable measurement**, is actually quite common and can take a lot of planning to avoid. <!-- Another example of dirty data is at the recording stage. 
Imagine a computer is recording audio data by writing it to the hard disk. If the computer suddenly becomes active with another task (say streaming a video or installing an operating system update) the audio data may "glitch". --> While *very* dirty data is usually obvious, it can sometimes be hard to recognize. For this reason, it is important to check your data for problems (e.g. crazy values, like a person being 1 ft tall or 200 years old) and think very seriously about how problems should be corrected. Data cleaning is such a tricky topic that we will delay it until much later in the course. ### Is the data experimental or nonexperimental? The last major dimension of data we'll talk about is whether the data came from an **experiment** or not. Why is this important? Knowing whether the data came from an experiment is important because it tells you if you can draw causal conclusions from it. When we talk about experiments here, what we mean are randomized controlled trials (RCTs) or an equivalent method of constructing a counterfactual. The basic idea with an RCT is that your **randomly** assign what you are studying (i.e. people, animals, etc) into **two or more groups**. In one of those groups, you do nothing - this is the control group. In one of the other groups, you **do something** to what you are studying - this is the treatment group. After the experiment, you can see what happened when you **did something** by comparing the treatment group to the control group. Since the two groups are the same in every other respect, you know that any differences are a result of what you did. This is why experiments allow you to draw causal conclusions - **because you only changed one thing, you know that change caused the difference you see.** Let's take a common example, vaccines. To discover if a vaccine against coronavirus is effective, I would randomly assign people to two groups. The treatment group would receive the vaccine, and the control group wouldn't. 
I would then follow up with both groups 1-2 months later and see which of them had gotten sick and which hadn't. If there was no difference in illness between the two groups, I'd say that the vaccine had no effect. Otherwise, I'd say the vaccine had some effectiveness. There's some subtlety we're skipping over here about *reliable differences*, but this is the basic idea. Let's take another example of a non-experimental study. Some researchers sent a survey to a million people in Europe and asked them how much coffee they drank and how old they were. After analyzing the results, the researchers found that older people drank more coffee. Can we infer that coffee makes people live longer? No, we can't, because we have no control group to compare to. Without that control group, there are many other reasons that older people could be drinking more coffee. It could be that coffee is less popular now than 10 years ago, it could be that older people have more money to spend on coffee, or it could be some other reason we haven't thought of yet. When we have a non-experimental result like this, we have to be **very** careful about interpretation. The best we can say is that there seems to be an **association** between drinking coffee and being older, but we can't say what the cause is. We'll talk more about associations like this later on in the course. ## Different questions need different types of data As you might expect, there are many different things data can tell us. However, what data can tell us is limited by the type of data we have. We can think of three kinds of questions we can ask of data in terms of *levels*, as shown in the image below. <!-- Andrew Olney made this picture --> <!-- Too big, resizing with html --> <!-- ![Screenshot_2020-05-30_11-47-06.png](attachment:Screenshot_2020-05-30_11-47-06.png) --> <div> <img src="attachment:Screenshot_2020-05-30_11-47-06.png" width="300"/> </div> The most basic questions are descriptive questions. 
For example, we could have a dataset containing the heights of everyone in the U.S., and we could ask descriptive questions like: - How tall is the tallest person? - How short is the shortest person? - What is the average height? Descriptive questions highlight a particular data point (like the tallest height) or summarize multiple data points (like the average height). The next level above description is prediction. In prediction, we have at least two kinds of data, or variables, where we can predict one using the other. For example, we can look at age and height, and predict height based on age. Clearly there is a strong relationship between age and height, at least up to the teenage years, when people tend to stop growing. We can ask descriptive questions about both age and height, and once we bring them together, we can further asks predictive questions like: - How much taller does someone grow in a year? - If someone is 10 years old, how tall do we expect them to be? The final level is explaining. In explaining, we want to understand causality, which we just talked about with respect to experimental data. Going back to our example, we want to know *why* people get taller as they age, not just that they do. To know why, we need to perform an experiment. Explaining is really important and is the main focus of science. However, it's important to appreciate that describing and predicting are also really important, and that they may be the only questions you are interested in. For example, if I want to increase my coffee sales, I don't need to understand why older people drink more coffee, I just need to know that they do. Then I can target my marketing to older age groups and expect to increase my profits. ## Limits of analysis As you progress through this course, you will feel empowered by the models you can build and the questions you can answer. 
Therefore, it's important for you to understand that none of your models will be 100% correct, for reasons we'll discuss next. This idea has been famously expressed as: > Essentially, all models are wrong, but some are useful. > > &mdash; <cite>Box & Draper (1987), p. 424</cite> There are many ways for models to be wrong, but we'll focus on three common ways to illustrate this concept. ### Missing variables and misspecified models Let's return to the example of growing taller with age. If we collected a lot of data, we'd see this is a pretty strong relationship. However, is it the case that there are no other variables that determine height? Thinking about it more, we realize that nutrition is also an important factor. Are there other important factors? It turns out that air pollution is associated with stunted growth. We could go on and on here, but the basic idea is this: you may have identified some of the important variables in your model, and you may have identified the most important ones, but it is unlikely that you've identified *all* of them. ### Measurement error Even if your model is perfectly specified, your data might be subject to measurement error. For example, let's say I'm interested in how many squirrels get run over in December vs. June. I might send out teams of students to walk up and down streets looking for dead squirrels. Some people on those teams might be very diligent and accurately count the squirrels, but others may not pay as much attention and only count about half of them. As a result, my model will be based on inaccurate data, which may lead me to draw the wrong conclusion. Almost all data has *some* measurement error, so this can be a real issue. ### Generalization Finally, my model may be specified well, and my data may be free of measurement error, but I may not be **sampling** my data in a way that allows for **generalization**. 
Suppose I'm trying to predict the outcome of the next election with survey data, and I only send surveys to farmers in Iowa. Will that help me predict how people in Chicago will vote? Or the U.S. as a whole? Probably not, because I have not captured the diversity of the U.S. in my sample -- I've only captured one occupation in one area of the country. If you want your model to generalize to new situations, which we almost always want, it's important to think about whether your data captures the complexity and diversity of the real world or only a small slice of it. ## Types of variables We've talked about structured vs. unstructured data already, but we haven't gone into detail about how structured data is created. Structured data begins with **measurements** of some type of thing in the real world, which we call a **variable**. Let's return to the example of height. I may measure 10 people and find that their heights in centimeters are: | Height | |--------| | 165 | | 188 | | 153 | | 164 | | 150 | | 190 | | 169 | | 163 | | 165 | | 190 | Each of these values (e.g. 165) is a measurement of the variable *height*. We call *height* a variable because its value isn't constant. If everyone in the world were the same height, we wouldn't call height a variable, and we also wouldn't bother measuring it, because we'd know everyone is the same. Variables have different **types** that can affect your analysis. ### Nominal A nominal variable consists of unordered categories, like *male* or *female* for biological sex. Notice that these categories are not numbers, and there is no order to the categories. We do not say that male comes before female or is smaller than female. ### Ordinal Ordinal variables consist of ordered categories. You can think of it as nominal data but with an ordering from first to last or smallest to largest. 
A common example of ordinal data is Likert questions like:
Similarly for height, we would say that 180 cm is a measurement. Notice that in these two examples, the measurement depends closely on type of variable (e.g. categorical or ratio). How we measure is tightly connected to how we've defined the variable. This makes sense, because our measurements serve as a way of defining the variable. For some variables, this is more obvious than for other variables. For example, we all know what *length* is. It is a measure of distance that we can see with our eyes, and we can measure it in different units like centimeters or inches. However, some variables are not as obvious, like *justice*. How do we measure *justice*? One way would be to ask people, e.g. to ask them how just or unjust they thought a situation was. There are two problems with this approach. First, different people will tell you different things. Second, you may not really be measuring *justice* when you ask this question; you could end up measuring something else by accident, like people's religious beliefs. When we talk about measurement, especially of things we can't directly observe, there are two important properties of measurement that we want, **validity** and **reliability**. The picture below presents a conceptual illustration of these ideas using a target. <!-- Attribution: © Nevit Dilmen --> <!-- https://commons.wikimedia.org/wiki/File:Reliability_and_validity.svg --> ![image.png](attachment:image.png) Simply stated, **validity means we are measuring what we intend to measure**. In the images, validity is being "on target," so that our measurements are *centered* on what we are trying to measure. In contrast, **reliability means our measurements are consistent**. Our measurements could be consistently wrong, which would make them reliable but not valid (lower left). Ideally, our measurements will be both valid and reliable (lower right). 
When it comes to validity and reliability, the most important thing to understand is that **validity is not optional.** If you don't have validity, your variable is wrong - you're not measuring what you think you're measuring. Reliability is optional to a certain extent, but if the reliability is very low, we won't be able to get much information out of the variable. ## Tabular data The most common type of structured data is **tabular data** which is what you find in spreadsheets. If you've ever used a spreadsheet, you know something about tabular data! Here's an example of tabular data, with *height* in centimeters, *age* in years, and *weight* in kilograms: | Height | Age | Weight | |--------|-----|--------| | 161 | 50 | 53 | | 161 | 17 | 53 | | 155 | 33 | 84 | | 180 | 51 | 84 | | 186 | 18 | 88 | In tabular data like this, each **row** is a person. More generically, we would say each row is an **observation** or **datapoint** (in statistics terminology) or an **item** (in machine learning terminology). In each row, we have measurements for each of our variables for that particular person. Since we have five rows of measurements, we know that there are five people in this dataset. We can also think about tabular data in terms of **columns**. Each column represents a variable, with the name of that variable in the **column header**. For example, *height* is at the top of the first column and is the name of the variable for that column. Importantly, the header is not an observation but rather a description of our data. This is why we don't count the header when we are counting the rows in our data. ### Delimited tabular data - CSV and TSV You are probably familiar with spreadsheet files, e.g. Microsoft Excel has files that end in `.xls` or `.xlsx`. However, in data science, it is more common to have tabular data files that are **delimited**. 
A delimited file is just a plain text file where column boundaries are represented by a specific character, usually a comma or a tab. Here's what the data above looks like in **comma separated value (CSV)** form: ``` Height,Age,Weight 161,50,53 161,17,53 155,33,84 180,51,84 186,18,88 ``` and here's what the data looks like in **tab separated value (TSV)** form: ``` Height Age Weight 161 50 53 161 17 53 155 33 84 180 51 84 186 18 88 ``` The choice of the delimiter (comma, tab, or something else) is really arbitrary, but it's always better to use a delimiter that doesn't appear in your data. ### Dataframes Data scientists often load tabular data into a **dataframe** that they can manipulate in a program. In other words, tabular data from a file is brought into the computational notebook in a variable that represents rows, columns, header, etc just like they are stored in the tabular data file. Because dataframes match tabular data in files, they are very intuitive to work with, which may explain their popularity. We're now at the practical portion of this notebook, so let's work with dataframes! **If you haven't seen a demonstration of Blockly, [see this short video tutorial](https://youtu.be/ovCJln08mG8?vq=hd720) or [this long video tutorial](https://youtu.be/-luPzplPDI0?vq=hd720).** #### Read CSV into dataframe First, let's read a CSV file into a dataframe. To do that, we need to import a dataframe library called `pandas`. **If it isn't already open**, open up the Blockly extension by clicking on the painter's palette icon, then clicking on `Blockly Jupyterlab Extension`. ![screenshot_7.png](attachment:screenshot_7.png) Using the IMPORT menu in the Blockly palette, click on an import block `import some library as variable name`: ![screenshot_8.png](attachment:screenshot_8.png) When you click on the block, it drops onto the Blockly workspace. Change `some library` to `pandas` by typing into that box. 
Click on the `variable name` dropdown, choose `Rename variable...`, and type `pd` into the box that pops up. This imports the `pandas` dataframe library and gives it the variable name, or alias, `pd`. In the future, we will abbreviate these steps as: - `import pandas as pd` Make sure the code cell below is selected (has a blue bar next to it) and press the `Blocks to Code` button below the Blockly workspace. This will insert the code corresponding to the blocks into the **active cell** in Jupyter, which is the cell that has a blue bar next to it. Once the code appears in that Jupyter cell, you must **execute** or **run** it by either pressing the &#9658; button at the top of the window or by pressing Shift + Enter on your keyboard. ``` import pandas as pd #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="i!#]:2XI=^qLb$e.|iwo">pd</variable></variables><block type="importAs" id="*]+[97*lfc0xBExPyl{#" x="73" y="63"><field name="libraryName">pandas</field><field name="libraryAlias" id="i!#]:2XI=^qLb$e.|iwo">pd</field></block></xml> ``` We can now do things with `pd`, like load datasets! Our file is called `height-age-weight.csv` and it is in the `datasets` folder. That means the **path** from this notebook (the one you're reading) to the data is `datasets/height-age-weight.csv`. To read this file into a dataframe, we will use `pd`. Go to the VARIABLES menu in the Blockly palette and click on the `with pd do ...` block. ![screenshot_9.png](attachment:screenshot_9.png) After it drops into the Blockly workspace, wait a second until the dropdown stops loading, and then click on it and select `read_csv`. Then get a `" "` block from TEXT, drop it on the workspace, drag it to the `using` part of the first block, and type the file path `datasets/height-age-weight.csv` into it. 
Your blocks should look like this: ![image.png](attachment:image.png) Make sure the cell below is selected, then press `Blocks to Codes`, and execute the cell to run the code by pressing the &#9658; button. In the future, we will abbreviate these steps as: - `with pd do read_csv using "datasets/height-age-weight.csv"` ``` pd.read_csv('datasets/height-age-weight.csv') #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="i!#]:2XI=^qLb$e.|iwo">pd</variable></variables><block type="varDoMethod" id="h[BIiU^0[[vbD`zoBn6+" x="8" y="188"><field name="VAR" id="i!#]:2XI=^qLb$e.|iwo">pd</field><field name="MEMBER">read_csv</field><data>pd:read_csv</data><value name="INPUT"><block type="text" id="HyH?(x3/MuPXE`T5;)[@"><field name="TEXT">datasets/height-age-weight.csv</field></block></value></block></xml> ``` When you run the cell, it will display the dataframe directly below it. This is one of the nice things about Jupyter - **it will display the output of the last line of code in a cell**, even if the output is text, a table, or a plot. Right now, we haven't actually stored the dataframe anywhere. We used `pd` to read the csv file, and then Jupyter output that so we could see it. But if we wanted to do anything with the dataframe, we'd have to read the file again. Instead of reading the file every time we want to access the data, we can **store it in a variable**. In other words, we will create a variable and set it to be the dataframe we created from the file. Using VARIABLES menu in the Blockly palette, click on `Create variable...` and type `dataframe` into the pop up window. Then click on the `set dataframe to` block so that your blocks below look like this: ![image.png](attachment:image.png) Then go get the same blocks you used before to read the file and connect them to the `set dataframe to` block. 
You can do this from scratch or you can use the following procedure: - Press `Blocks to Code` to save your intermediate work (the `set dataframe to` block) - Go back to the previous cell, click on the block you want, and copy it using Ctrl+c - Click on the cell below to select it, click the Blockly workspace, and paste the block using Ctrl+v *Tip: If you don't save your intermediate work, you'll lose it because `Notebook Sync` will clear the Blockly workspace when it loads the blocks in the previous cell.* After you've added the blocks to read the dataframe, drop a variable block for `dataframe` underneath it to display the dataframe. In the future, we will abbreviate these steps as: - Create `dataframe` and set it to `with pd do read_csv using "datasets/height-age-weight.csv"` - `dataframe` As always, you need to hit the &#9658; button or press Shift + Enter to run the code. ``` dataframe = pd.read_csv('datasets/height-age-weight.csv') dataframe #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="t[n^Fcp7,s93E17ZZ9J6">dataframe</variable><variable id="i!#]:2XI=^qLb$e.|iwo">pd</variable></variables><block type="variables_set" id="r],|2vIV^N=z3?l0hFT?" x="12" y="214"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field><value name="VALUE"><block type="varDoMethod" id="0bf2,0hgIs8bO;HK_m*O"><field name="VAR" id="i!#]:2XI=^qLb$e.|iwo">pd</field><field name="MEMBER">read_csv</field><data>pd:read_csv</data><value name="INPUT"><block type="text" id="`Rzjw8PmU[oj-92Vpe7V"><field name="TEXT">datasets/height-age-weight.csv</field></block></value></block></value></block><block type="variables_get" id="67Xno.B#_5#5Ekhvod.[" x="8" y="282"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field></block></xml> ``` You should see the same output as before - the only difference is that we've read the csv and stored the data into the `dataframe` block, so we will use the `dataframe` block whenever we want to work with the data. 
#### Dataframes as a list of rows There are many things we can do with dataframes. One thing we can do is get specific rows, which are our datapoints. Using the LIST menu in the Blockly palette, click on the `in list listVariable get sub-list from to`. Next change `listVariable` to `dataframe`, the first `#` to `first`, and drop a number block `123` from MATH in the second `#`, then change the value of the number block to `1`. Your blocks should look like this: ![image.png](attachment:image.png) In the future, we'll abbreviate this as: - `in list dataframe get sub-list from first to 1` *Then &#9658; or Shift + Enter* ``` dataframe[ : 1] #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="t[n^Fcp7,s93E17ZZ9J6">dataframe</variable></variables><block type="lists_getSublist" id="5hFiqAq|QPxp%Xc:h/@A" x="8" y="518"><mutation at1="false" at2="true"></mutation><field name="WHERE1">FIRST</field><field name="WHERE2">FROM_START</field><value name="LIST"><block type="variables_get" id="Y`nWHnCXvg4!GpZ![Z8S"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field></block></value><value name="AT2"><block type="math_number" id="D1[i#!$i}_h;`?.h;x*w"><field name="NUM">1</field></block></value></block></xml> ``` As you can see, the output is only the first row of the dataframe. Try it again (i.e. 
copy the blocks, select the cell below, and paste the blocks in the Blockly workspace), but this time, change the `1` to a `2`: - `in list dataframe get sub-list from first to 2` *Then &#9658; or Shift + Enter* ``` dataframe[ : 2] #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="t[n^Fcp7,s93E17ZZ9J6">dataframe</variable></variables><block type="lists_getSublist" id="J=Y2q,G9vz$#fGJl6Et=" x="8" y="518"><mutation at1="false" at2="true"></mutation><field name="WHERE1">FIRST</field><field name="WHERE2">FROM_START</field><value name="LIST"><block type="variables_get" id="`pG,vo{P(1~U+ZtKB-Zr"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field></block></value><value name="AT2"><block type="math_number" id="n+8Lq=-557|wZ@WBd}%l"><field name="NUM">2</field></block></value></block></xml> ``` Now the output is the first two rows of the dataframe. We could get arbitrary rows of the dataframe by starting at a different number and ending at a different number. Sometimes people call this a **slice**. You might be wondering at this point what the numbers are on the left side of our output. They aren't data in our dataframe - they are actually row identifiers `pandas` has automatically assigned to our datapoints. In pandas, these are called an `index`. When the index is numeric, it's easy to see if you got the rows you wanted. Just remember that the index starts at 0 by default rather than 1 (computers count from zero!). #### Dataframes as a list of columns Similarly, we can get a column of the dataframe by using the name of the variable for that column. Before we go any further, let's step back for a second to talk about **lists**. We can think of a dataframe in two ways: - A list of rows - A list of columns We just saw the first way using a `sub-list`. So why are columns any different? The difference is that our columns have variable names, and we often want to refer to columns using those names. 
For example, we want to say something like "give me the Age column" instead of "give me column 2." Let's make a list from scratch to illustrate this. Using the LISTS menu in the Blockly palette, click on `create empty list`. Then click on the blue gear icon so a mini-window pops up like this: ![mutator1.png](attachment:mutator1.png) To add an element to the list, drag the `item` block into the `list` block. Notice that this creates a place for a new block to go (red arrow). ![image.png](attachment:image.png) When you are done, click the blue gear icon again to close it. Now you can add something to the list. Add a `" "` block, type `Height` into it, and connect to the list. Your blocks should look like this: ![image.png](attachment:image.png) Abbreviated as: - Create a list with `"Height"` Now execute the cell (scroll up if you need a reminder how). ``` ['Height'] #<xml xmlns="https://developers.google.com/blockly/xml"><block type="lists_create_with" id="}?PFK8{][wK}l+_/Dyr{" x="213" y="343"><mutation items="1"></mutation><value name="ADD0"><block type="text" id="C=wFe;_vf~B$H?gZl~!V"><field name="TEXT">Height</field></block></value></block></xml> ``` What makes it a list are the square brackets around it. `"Height"` is just a string, but `["Height"]` is a list with one string inside it. Lists can have multiple things inside them, making lists a container for other variables. Now let's get back to columns. Using the LISTS menu in the Blockly palette, click on the `{dictVariable}[ ]` block, change `{dictVariable}` to `dataframe`, and drop a `["Height"]` inside it. In other words: - `dataframe[` using a list containing `"Height"` `]` Which looks like this: ![image.png](attachment:image.png) And run it. 
``` dataframe[['Height']] #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="t[n^Fcp7,s93E17ZZ9J6">dataframe</variable></variables><block type="indexer" id="ZdwM_nb6yN19P)R7!QRn" x="8" y="300"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field><value name="INDEX"><block type="lists_create_with" id="(ZoPs6H[04~zI%%oztG{"><mutation items="1"></mutation><value name="ADD0"><block type="text" id="#HR[cin?z#YSEvCnxdqv"><field name="TEXT">Height</field></block></value></block></value></block></xml> ``` Just like before when we got more than one row, we can get more than one column. All you need to do is add another element to the list. Copy the blocks above, select the cell below, paste the blocks, and then use the gear icon to add `"Age"` to the list: - `dataframe[` using a list containing `"Height"` and `"Age"` `]` Which looks like this: ![image.png](attachment:image.png) And run the cell (try Shift + Enter if you haven't tried it yet). ``` dataframe[['Height', 'Age']] #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="t[n^Fcp7,s93E17ZZ9J6">dataframe</variable></variables><block type="indexer" id="oVNd/g7vyxV[^cAbT$JX" x="8" y="300"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field><value name="INDEX"><block type="lists_create_with" id="^oGt7#=i@OMTRwaf:kdd"><mutation items="2"></mutation><value name="ADD0"><block type="text" id="=k__Y{.gxL5z.AJpll.A"><field name="TEXT">Height</field></block></value><value name="ADD1"><block type="text" id="uQyKHkAg(%zJh!AAX=[#"><field name="TEXT">Age</field></block></value></block></value></block></xml> ``` To recap, data dataframes are both lists of rows and lists of columns, and lists are themselves containers for other variables. Whether we treat a dataframe as a list of rows or list of columns depends on what we want to do. If we want to select datapoints (observations), then we treat it as a list of rows, because each row is a datapoint. 
In our dataset above, this would be like selecting the people in the dataset we want to analyze, since each row is a person. In contrast, if we want to select variables, then we treat the dataframe like a list of columns. #### Dataframes and types of variables Earlier we talked about four different kinds of variables: nominal, ordinal, interval, and ratio. These are really important to know, because many kinds of analysis are only valid on particular types of variable. Does `pandas` take care of this for us? Let's find out! Using the VARIABLES menu, click on the `from dataframe get` block and select `dtypes` from the dropdown menu. In other words: - `from dataframe get dtypes` And run it. ``` dataframe.dtypes #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="t[n^Fcp7,s93E17ZZ9J6">dataframe</variable></variables><block type="varGetProperty" id="BH7oEB_9wN^Cx==6^_[a" x="8" y="142"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field><field name="MEMBER">dtypes</field><data>dataframe:dtypes</data></block></xml> ``` The `from ... get` block is different from the `with ... do` block. The `with ... do` block performs some kind of **action** (i.e. a verb) whereas `from ... get` gets a **property** (i.e. a noun). The `dtypes` property tells us the data type of each variable, in this case `int64`. This is just one way the computer can store information. Some of the common ways are: - int: an integer (no decimal) - float: a floating point value (has decimal) - string: a text string - bool: a true/false value - object: something else! When these are followed by a number, e.g. `int64`, that's just the amount of storage space the computer has put aside for each value. As you can see, data types don't line up that well with nominal, ordinal, interval, and ratio types of variables. Often nominal and ordinal are represented by `string`, and interval and ratio by `int` or `float`, so the type of variable is ambiguous between these. 
What this means in practice is that we have to keep track of the type of variable ourselves, because `pandas` won't do it for us. That means by default `pandas` will let us do thinks with our data that don't make sense, so watch out!
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_parent" href="https://github.com/giswqs/geemap/tree/master/tutorials/Image/05_conditional_operations.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_parent" href="https://nbviewer.jupyter.org/github/giswqs/geemap/blob/master/tutorials/Image/05_conditional_operations.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_parent" href="https://colab.research.google.com/github/giswqs/geemap/blob/master/tutorials/Image/05_conditional_operations.ipynb"><img width=26px src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> # Relational, conditional and Boolean operations To perform per-pixel comparisons between images, use relational operators. To extract urbanized areas in an image, this example uses relational operators to threshold spectral indices, combining the thresholds with `And()`: ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. 
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.foliumap`](https://github.com/giswqs/geemap/blob/master/geemap/foliumap.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving). ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('geemap package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) # Checks whether this notebook is running on Google Colab try: import google.colab import geemap.foliumap as emap except: import geemap as emap # Authenticates and initializes Earth Engine import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. ``` Map = emap.Map(center=[40,-100], zoom=4) Map.add_basemap('ROADMAP') # Add Google Map Map ``` ## Add Earth Engine Python script ``` # Load a Landsat 8 image. image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318') # Create NDVI and NDWI spectral indices. 
ndvi = image.normalizedDifference(['B5', 'B4']) ndwi = image.normalizedDifference(['B3', 'B5']) # Create a binary layer using logical operations. bare = ndvi.lt(0.2).And(ndwi.lt(0)) # Mask and display the binary layer. Map.setCenter(-122.3578, 37.7726, 12) Map.addLayer(bare.updateMask(bare), {}, 'bare') Map.addLayerControl() Map ``` As illustrated by this example, the output of relational and boolean operators is either True (1) or False (0). To mask the 0's, you can mask the resultant binary image with itself. The binary images that are returned by relational and boolean operators can be used with mathematical operators. This example creates zones of urbanization in a nighttime lights image using relational operators and `image.add()`: ``` Map = emap.Map() # Load a 2012 nightlights image. nl2012 = ee.Image('NOAA/DMSP-OLS/NIGHTTIME_LIGHTS/F182012') lights = nl2012.select('stable_lights') Map.addLayer(lights, {}, 'Nighttime lights') # Define arbitrary thresholds on the 6-bit stable lights band. zones = lights.gt(30).add(lights.gt(55)).add(lights.gt(62)) # Display the thresholded image as three distinct zones near Paris. palette = ['000000', '0000FF', '00FF00', 'FF0000'] Map.setCenter(2.373, 48.8683, 8) Map.addLayer(zones, {'min': 0, 'max': 3, 'palette': palette}, 'development zones') Map.addLayerControl() Map ``` Note that the code in the previous example is equivalent to using a [ternary operator](http://en.wikipedia.org/wiki/%3F:) implemented by `expression()`: ``` Map = emap.Map() # Create zones using an expression, display. zonesExp = nl2012.expression( "(b('stable_lights') > 62) ? 3" + ": (b('stable_lights') > 55) ? 2" + ": (b('stable_lights') > 30) ? 
1" + ": 0" ) Map.addLayer(zonesExp, {'min': 0, 'max': 3, 'palette': palette}, 'development zones (ternary)') Map.setCenter(2.373, 48.8683, 8) Map.addLayerControl() Map ``` Observe that in the previous expression example, the band of interest is referenced using the`b()` function, rather than a dictionary of variable names. (Learn more about image expressions on [this page](https://developers.google.com/earth-engine/image_math#expressions). Using either mathematical operators or an expression, the output is the same and should look something like Figure 2. Another way to implement conditional operations on images is with the `image.where()` operator. Consider the need to replace masked pixels with some other data. In the following example, cloudy pixels are replaced by pixels from a cloud-free image using `where()`: ``` Map = emap.Map() # Load a cloudy Landsat 8 image. image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130603') Map.addLayer(image, {'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 0.5}, 'original image') # Load another image to replace the cloudy pixels. replacement = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130416') # Compute a cloud score band. cloud = ee.Algorithms.Landsat.simpleCloudScore(image).select('cloud') # Set cloudy pixels to the other image. replaced = image.where(cloud.gt(10), replacement) # Display the result. Map.centerObject(image, 9) Map.addLayer(replaced, {'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 0.5}, 'clouds replaced') Map.addLayerControl() Map ``` In this example, observe the use of the `simpleCloudScore()` algorithm. This algorithm ranks pixels by cloudiness on a scale of 0-100, with 100 most cloudy. Learn more about `simpleCloudScore()` on the [Landsat Algorithms page](https://developers.google.com/earth-engine/landsat#simple-cloud-score).
github_jupyter
# Build Data for the Map Check and merge datasets from: cordis creative esif fts erasmus nhs nweurope life ``` %matplotlib inline from functools import reduce import glob import json import os import pandas as pd import numpy as np pd.set_option('display.max_columns', 50) ``` ## Define Validity Checks ``` ukpostcodes = pd.read_csv('../postcodes/input/ukpostcodes.csv.gz') ukpostcodes.shape def validate_postcodes(df): assert 'postcode' in df.columns assert (~df['postcode'].isin(ukpostcodes.postcode)).sum() == 0 def validate_date_range(df): assert 'start_date' in df.columns assert 'end_date' in df.columns assert df['start_date'].dtype == 'datetime64[ns]' assert df['end_date'].dtype == 'datetime64[ns]' assert (df['start_date'] > df['end_date']).sum() == 0 ``` ## Load Cleaned Data ### CORDIS ``` fp7_organizations = pd.read_pickle('../cordis/output/fp7_organizations.pkl.gz') validate_postcodes(fp7_organizations) fp7_organizations.head() fp7_projects = pd.read_pickle('../cordis/output/fp7_projects.pkl.gz') validate_date_range(fp7_projects) fp7_projects.head() fp7 = pd.merge( fp7_projects, fp7_organizations, left_on='rcn', right_on='project_rcn', validate='1:m' ) fp7['my_eu_id'] = 'fp7_' + fp7.project_rcn.astype('str') + '_' + fp7.organization_id.astype('str') fp7.head() h2020_organizations = pd.read_pickle('../cordis/output/h2020_organizations.pkl.gz') validate_postcodes(h2020_organizations) h2020_organizations.head() h2020_projects = pd.read_pickle('../cordis/output/h2020_projects.pkl.gz') validate_date_range(h2020_projects) h2020_projects.head() h2020 = pd.merge( h2020_projects, h2020_organizations, left_on='rcn', right_on='project_rcn', validate='1:m' ) h2020['my_eu_id'] = 'h2020_' + h2020.project_rcn.astype('str') + '_' + h2020.organization_id.astype('str') # no briefs available for H2020 h2020['related_report_title'] = float('nan') h2020['brief_title'] = float('nan') h2020['teaser'] = float('nan') h2020['article'] = float('nan') h2020['image_path'] = float('nan') 
h2020.head() assert set(fp7.columns) == set(h2020.columns) cordis = pd.concat([fp7, h2020[fp7.columns]]) cordis.shape cordis['total_cost_gbp'] = (cordis.total_cost_eur * cordis.eur_gbp).round() cordis['max_contribution_gbp'] = (cordis.max_contribution_eur * cordis.eur_gbp).round() cordis['contribution_gbp'] = (cordis.contribution_eur * cordis.eur_gbp).round() cordis.head() cordis.describe() (cordis.contribution_eur > cordis.total_cost_eur + 0.1).sum() [ cordis.start_date.isna().sum(), cordis.end_date.isna().sum() ] ``` ### Creative Europe ``` creative_organisations = pd.read_pickle('../creative/output/creative_europe_organisations.pkl.gz') creative_organisations.shape creative_projects = pd.read_pickle('../creative/output/creative_europe_projects.pkl.gz') creative_projects.shape creative = pd.merge(creative_projects, creative_organisations, on='project_number', validate='1:m') creative.shape validate_postcodes(creative) validate_date_range(creative) creative['max_contribution_gbp'] = (creative.max_contribution_eur * creative.eur_gbp).round() creative['my_eu_id'] = \ 'creative_' + creative.project_number + '_' + \ creative.partner_number.apply('{:.0f}'.format).\ str.replace('nan', 'coordinator', regex=False) assert creative.shape[0] == creative.my_eu_id.unique().shape[0] creative.head() creative.results_available.value_counts() creative.results_url[0] [creative.start_date.isna().sum(), creative.end_date.isna().sum()] ``` ### ESIF (ESF/ERDF) ``` esif_england = pd.read_pickle('../esif/output/esif_england_2014_2020.pkl.gz') validate_postcodes(esif_england) validate_date_range(esif_england) esif_england.head() esif_ni = pd.read_pickle('../esif/output/esif_ni_2014_2020.pkl.gz') validate_postcodes(esif_ni) validate_date_range(esif_ni) esif_ni.head() esif_scotland = pd.read_pickle('../esif/output/esif_scotland.pkl.gz') validate_postcodes(esif_scotland) validate_date_range(esif_scotland) esif_scotland.head() esif_wales = pd.read_pickle('../esif/output/esif_wales.pkl.gz') 
validate_postcodes(esif_wales) validate_date_range(esif_wales) esif_wales.head() assert set(esif_england.columns) == set(esif_ni.columns) assert set(esif_england.columns) == set(esif_scotland.columns) assert set(esif_england.columns) == set(esif_wales.columns) esif_columns = esif_england.columns esif = pd.concat([ esif_england, esif_ni[esif_columns], esif_scotland[esif_columns], esif_wales[esif_columns] ]) esif.shape [esif.start_date.isna().sum(), esif.end_date.isna().sum()] ``` ### FTS ``` fts_2016 = pd.read_pickle('../fts/output/fts_2016.pkl.gz') validate_postcodes(fts_2016) fts_2016['year'] = 2016 fts_2016.head() fts_2017 = pd.read_pickle('../fts/output/fts_2017.pkl.gz') validate_postcodes(fts_2017) fts_2017['year'] = 2017 fts_2017.head() fts = pd.concat([fts_2016, fts_2017]) fts['amount_gbp'] = (fts.amount * fts.eur_gbp).round() fts['total_amount_gbp'] = (fts.total_amount_eur * fts.eur_gbp).round() fts.shape ``` ### Erasmus ``` erasmus_organisations = pd.read_pickle('../erasmus/output/erasmus_mobility_organisations.pkl.gz') erasmus_organisations.shape erasmus_projects = pd.read_pickle('../erasmus/output/erasmus_mobility_projects.pkl.gz') erasmus_projects.shape erasmus = pd.merge(erasmus_projects, erasmus_organisations, on='project_identifier', validate='1:m') erasmus.shape validate_postcodes(erasmus) erasmus['max_contribution_gbp'] = (erasmus.max_contribution_eur * erasmus.eur_gbp).round() erasmus['my_eu_id'] = \ 'erasmus_' + erasmus.project_identifier + '_' + \ erasmus.partner_number.apply('{:.0f}'.format).\ str.replace('nan', 'coordinator', regex=False) assert erasmus.shape[0] == erasmus.my_eu_id.unique().shape[0] erasmus.head() ``` ### NHS ``` nhs_staff = pd.read_pickle('../nhs/output/staff.pkl.gz') nhs_hospital_postcodes = pd.read_pickle('../nhs/output/hospital_postcodes.pkl.gz') validate_postcodes(nhs_hospital_postcodes) [nhs_staff.shape, nhs_hospital_postcodes.shape, nhs_hospital_postcodes.hospital_organisation.nunique()] # dummy amount so we can put it 
in nhs_hospital_postcodes['zero'] = 0 nhs_hospital_postcodes['my_eu_id'] = \ 'nhs_' + nhs_hospital_postcodes.hospital_organisation nhs_hospital_postcodes.head() ``` ### Interreg NW Europe ``` nweurope_projects = pd.read_pickle('../nweurope/output/projects.pkl.gz') nweurope_partnerships = pd.read_pickle('../nweurope/output/partnerships.pkl.gz') nweurope = pd.merge(nweurope_projects, nweurope_partnerships) validate_postcodes(nweurope) nweurope['my_eu_id'] = \ 'nweurope_' + nweurope.project_id.astype('str') + '_' + nweurope.partner_number.astype('str') nweurope['contribution_gbp'] = (nweurope.contribution_eur * nweurope.eur_gbp).round() nweurope.head(2) ``` ### LIFE ``` life = pd.read_pickle('../life/output/life.pkl.gz') validate_postcodes(life) life['total_budget_gbp'] = (life.total_budget_eur * life.eur_gbp).round() life['eu_contribution_gbp'] = (life.eu_contribution_eur * life.eur_gbp).round() life = life[life.eu_contribution_gbp > 1000] # filter out a few token £1 contributions life.shape ``` ## Idea 1: All Points on Map, Data by District This should make the map look fairly similar to how it looks now, so it seems like a good starting point. ``` ALL_PLACES = [ (cordis, 'contribution_gbp', 'money'), (creative, 'max_contribution_gbp', 'money'), (esif, 'eu_investment', 'money'), (fts.drop('amount', axis=1), 'amount_gbp', 'money'), (erasmus, 'max_contribution_gbp', 'money'), (nhs_hospital_postcodes, 'zero', 'hospital'), (nweurope, 'contribution_gbp', 'money'), (life, 'eu_contribution_gbp', 'money') ] ``` GeoJSON is very inefficient for representing a bunch of points, so let's use a relatively simple packed format. ``` min_longitude min_latitude outcode incode delta_longitude delta_latitude incode delta_longitude delta_latitude ``` We need [about 4 decimal places](https://gis.stackexchange.com/questions/8650/measuring-accuracy-of-latitude-and-longitude). 
``` def add_outward_and_inward_codes(df): df['outward_code'] = df.postcode.str.split(' ').str[0] df['inward_code'] = df.postcode.str.split(' ').str[1] return df def add_packed_icon_mask(df): def pack_icon_mask(icons): # So far we just have one bit in our mask; we may have more. if 'hospital' in icons: return 1 else: return 0 MASK_BITS = 1 assert df.amount.max() < 2**(32 - MASK_BITS) df['icon_mask'] = df.icons.apply(pack_icon_mask).astype('uint32') df['packed_amount'] = np.bitwise_or( np.left_shift(df.amount, MASK_BITS), df.icon_mask ) return df def pack_geocoded_postcodes(dfs): all_postcode_amounts = pd.concat([ df.rename(columns={amount_column: 'amount'}).assign(icons=icon)\ [['postcode', 'amount', 'icons']] for df, amount_column, icon in dfs ]) postcode_amounts = all_postcode_amounts.groupby('postcode').\ aggregate({'amount': sum, 'icons': lambda icons: set(icons)}) postcode_amounts.reset_index(inplace=True) postcode_amounts.amount = postcode_amounts.amount.astype('uint32') add_outward_and_inward_codes(postcode_amounts) add_packed_icon_mask(postcode_amounts) geocoded_postcodes = pd.merge(postcode_amounts, ukpostcodes, validate='1:1') min_longitude = geocoded_postcodes.longitude.min() min_latitude = geocoded_postcodes.latitude.min() geocoded_postcodes['delta_longitude'] = geocoded_postcodes.longitude - min_longitude geocoded_postcodes['delta_latitude'] = geocoded_postcodes.latitude - min_latitude return { 'min_longitude': min_longitude, 'min_latitude': min_latitude, 'geocoded_postcodes': geocoded_postcodes } packed_postcodes = pack_geocoded_postcodes(ALL_PLACES) [ packed_postcodes['min_longitude'], packed_postcodes['min_latitude'], packed_postcodes['geocoded_postcodes'].shape[0] ] packed_postcodes['geocoded_postcodes'].head() def make_packed_postcode_json(packed_postcodes): packed_postcodes = packed_postcodes.copy() grouped_postcodes = packed_postcodes['geocoded_postcodes'].\ sort_values('outward_code').groupby('outward_code') def make_code_tuples(row): coordinate 
= '{0:.4f}' return [ row['inward_code'], float(coordinate.format(row['delta_longitude'])), float(coordinate.format(row['delta_latitude'])), row['packed_amount'] ] postcodes = {} for outward_code, group in grouped_postcodes: postcodes[outward_code] = [ x for code in group.sort_values('inward_code').apply(make_code_tuples, axis=1) for x in code ] min_coordinate = '{0:.6f}' return { 'min_longitude': float(min_coordinate.format(packed_postcodes['min_longitude'])), 'min_latitude': float(min_coordinate.format(packed_postcodes['min_latitude'])), 'postcodes': postcodes } with open('output/packed_postcodes.data.json', 'w') as file: json.dump(make_packed_postcode_json(packed_postcodes), file, sort_keys=True) ``` ### Some Aggregates ``` [place[0].shape[0] for place in ALL_PLACES] sum(place[0].shape[0] for place in ALL_PLACES) ``` ### Data by District #### CORDIS ``` MAX_PROJECTS = 50 # Dump to JSON using pandas, because it puts in nulls instead of NaNs for # missing values. Then load the JSON into dicts for def make_district_data_json(df, name): key = ['outwardCode', 'inwardCode'] def to_json(group): group.drop(key, axis=1, inplace=True) return json.loads(group.to_json(orient='split', index=False)) grouped = df.groupby(key).apply(to_json) grouped = grouped.reset_index() grouped.columns = key + [name] for _key, row in grouped.iterrows(): count = len(row[name]['data']) if count > MAX_PROJECTS: row[name]['data'] = row[name]['data'][0:MAX_PROJECTS] row[name]['extra'] = count - MAX_PROJECTS return grouped def make_cordis_district_data(cordis): cordis = add_outward_and_inward_codes(cordis.copy()) cordis = cordis[[ 'outward_code', 'inward_code', 'start_date', 'end_date', 'title', 'name', # of organization 'objective', 'contribution_gbp', 'total_cost_gbp', 'num_countries', 'num_organizations', 'acronym', 'project_url', 'organization_url', 'image_path', 'my_eu_id' ]] cordis.rename({ 'outward_code': 'outwardCode', 'inward_code': 'inwardCode', 'start_date': 'startDate', 'end_date': 
'endDate', 'title': 'projectTitle', 'name': 'organisationName', 'contribution_gbp': 'contribution', 'total_cost_gbp': 'totalCost', 'num_countries': 'numCountries', 'num_organizations': 'numOrganisations', 'project_url': 'projectUrl', 'organization_url': 'organisationUrl', 'image_path': 'imagePath', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) cordis.sort_values( ['outwardCode', 'inwardCode', 'contribution'], ascending=[True, True, False], inplace=True ) return make_district_data_json(cordis, 'cordis') cordis_district_data = make_cordis_district_data(cordis) cordis_district_data.head() ``` #### Creative Europe ``` def make_creative_district_data(creative): creative = add_outward_and_inward_codes(creative.copy()) coordinators = creative[creative.organisation_coordinator] coordinators = coordinators[['project_number', 'organisation_name']] creative = pd.merge( creative, coordinators, how='left', on='project_number', suffixes=('', '_coordinator')) creative = creative[[ 'outward_code', 'inward_code', 'start_date', 'end_date', 'project', 'organisation_name', 'max_contribution_gbp', 'num_countries', 'num_organisations', 'summary', 'organisation_website', 'organisation_name_coordinator', 'my_eu_id' ]] creative.rename({ 'outward_code': 'outwardCode', 'inward_code': 'inwardCode', 'start_date': 'startDate', 'end_date': 'endDate', 'organisation_name': 'organisationName', 'max_contribution_gbp': 'maxContribution', 'num_countries': 'numCountries', 'num_organisations': 'numOrganisations', 'organisation_website': 'organisationWebsite', 'organisation_name_coordinator': 'coordinatorName', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) creative.sort_values( ['outwardCode', 'inwardCode', 'maxContribution'], ascending=[True, True, False], inplace=True ) return make_district_data_json(creative, 'creative') creative_district_data = make_creative_district_data(creative) creative_district_data.head() ``` #### ESIF ``` def make_esif_district_data(esif): esif = 
add_outward_and_inward_codes(esif.copy()) esif = esif[[ 'outward_code', 'inward_code', 'start_date', 'end_date', 'project', 'beneficiary', 'summary', 'funds', 'eu_investment', 'project_cost', 'my_eu_id' ]] esif.rename({ 'outward_code': 'outwardCode', 'inward_code': 'inwardCode', 'start_date': 'startDate', 'end_date': 'endDate', 'project': 'projectTitle', 'beneficiary': 'organisationName', 'eu_investment': 'euInvestment', 'project_cost': 'projectCost', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) esif.sort_values( ['outwardCode', 'inwardCode', 'euInvestment'], ascending=[True, True, False], inplace=True ) return make_district_data_json(esif, 'esif') esif_district_data = make_esif_district_data(esif) esif_district_data.head() ``` #### FTS ``` fts.columns def make_fts_district_data(fts): fts = add_outward_and_inward_codes(fts.copy()) fts = fts[[ 'outward_code', 'inward_code', 'year', 'beneficiary', 'amount_gbp', 'budget_line_name_and_number', 'my_eu_id' ]] fts.rename({ 'outward_code': 'outwardCode', 'inward_code': 'inwardCode', 'amount_gbp': 'amount', 'budget_line_name_and_number': 'budgetLineNameAndNumber', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) fts.sort_values( ['outwardCode', 'inwardCode', 'amount'], ascending=[True, True, False], inplace=True ) return make_district_data_json(fts, 'fts') fts_district_data = make_fts_district_data(fts) fts_district_data.head() ``` #### Erasmus ``` def make_erasmus_district_data(erasmus): erasmus = add_outward_and_inward_codes(erasmus.copy()) coordinators = erasmus[erasmus.organisation_coordinator] coordinators = coordinators[['project_identifier', 'organisation_name']] erasmus = pd.merge( erasmus, coordinators, how='left', on='project_identifier', suffixes=('', '_coordinator')) erasmus = erasmus[[ 'outward_code', 'inward_code', 'call_year', 'project', 'organisation_name', 'max_contribution_gbp', 'num_countries', 'num_organisations', 'summary', 'organisation_website', 'organisation_name_coordinator', 'my_eu_id' ]] 
erasmus.rename({ 'outward_code': 'outwardCode', 'inward_code': 'inwardCode', 'call_year': 'callYear', 'organisation_name': 'organisationName', 'max_contribution_gbp': 'maxContribution', 'num_countries': 'numCountries', 'num_organisations': 'numOrganisations', 'organisation_website': 'organisationWebsite', 'organisation_name_coordinator': 'coordinatorName', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) erasmus.sort_values( ['outwardCode', 'inwardCode', 'maxContribution'], ascending=[True, True, False], inplace=True ) return make_district_data_json(erasmus, 'erasmus') erasmus_district_data = make_erasmus_district_data(erasmus) erasmus_district_data.head() ``` #### NHS Just store the organisation key for now. The data doesn't really fit with the district model. ``` def make_nhs_district_data(nhs_hospital_postcodes, nhs_staff): nhs = add_outward_and_inward_codes(pd.merge( nhs_hospital_postcodes, nhs_staff, on='organisation', validate='m:1' )) nhs.sort_values( ['outward_code', 'inward_code', 'eu_nurses'], ascending=[True, True, False], inplace=True ) nhs = nhs[[ 'outward_code', 'inward_code', 'organisation', 'hospital_name', 'my_eu_id' ]] nhs.rename({ 'outward_code': 'outwardCode', 'inward_code': 'inwardCode', 'hospital_name': 'hospitalName', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) return make_district_data_json(nhs, 'nhs') nhs_district_data = make_nhs_district_data(nhs_hospital_postcodes, nhs_staff) nhs_district_data.head() ``` #### Interreg NW Europe ``` nweurope.columns nweurope.head(2) def make_nweurope_district_data(nweurope): nweurope = add_outward_and_inward_codes(nweurope.copy()) nweurope = nweurope[[ 'outward_code', 'inward_code', 'beneficiary', 'project', 'project_summary', 'start_date', 'end_date', 'contribution_gbp', 'my_eu_id' ]] nweurope.rename({ 'outward_code': 'outwardCode', 'inward_code': 'inwardCode', 'project_summary': 'projectSummary', 'start_date': 'startDate', 'end_date': 'endDate', 'contribution_gbp': 'contribution', 'my_eu_id': 'myEuId' }, 
axis=1, inplace=True) nweurope.sort_values( ['outwardCode', 'inwardCode', 'contribution'], ascending=[True, True, False], inplace=True ) return make_district_data_json(nweurope, 'nweurope') nweurope_district_data = make_nweurope_district_data(nweurope) nweurope_district_data.head() ``` #### LIFE ``` life.columns def make_life_district_data(life): life = add_outward_and_inward_codes(life.copy()) life = life[[ 'outward_code', 'inward_code', 'year', 'project_title', 'coordinator', 'total_budget_gbp', 'eu_contribution_gbp', 'background', 'project_url', 'website', 'my_eu_id' ]] life.rename({ 'outward_code': 'outwardCode', 'inward_code': 'inwardCode', 'total_budget_gbp': 'amount', 'eu_contribution_gbp': 'euContribution', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) life.sort_values( ['outwardCode', 'inwardCode', 'amount'], ascending=[True, True, False], inplace=True ) return make_district_data_json(life, 'life') life_district_data = make_life_district_data(life) life_district_data.head() ``` # Assemble full dataset in dataframe Looking at each programme in turn, create a full_data_ dataframe with the key attributes: postcode, projectTitle, euContribution, amount, website, myEuId Check union_cofinancing same as euContribution ## Cordis ``` cordis.head(2) full_data_cordis = cordis[['postcode','title','contribution_gbp','total_cost_gbp','project_url','my_eu_id']].copy() full_data_cordis.rename({ 'title': 'projectTitle', 'contribution_gbp': 'euContribution', 'total_cost_gbp': 'amount', 'project_url': 'website', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) full_data_cordis.head(2) ``` ## Creative Europe ``` creative.head(2) full_data_creative = creative[['postcode','max_contribution_gbp','organisation_website','my_eu_id']].copy() full_data_creative.rename({ 'project': 'projectTitle', 'max_contribution_gbp': 'euContribution', 'organisation_website': 'website', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) full_data_creative.head(2) ``` ## ESIF ``` esif.head(2) full_data_esif 
= esif[['postcode','project','eu_investment','project_cost','my_eu_id']].copy() full_data_esif.rename({ 'project': 'projectTitle', 'eu_investment': 'euContribution', 'project_cost': 'amount', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) full_data_esif.head(2) ``` ## FTS ``` fts.head(2) full_data_fts = fts[['postcode','budget_line_name_and_number','total_amount_gbp','amount_gbp','my_eu_id']].copy() full_data_fts.rename({ 'budget_line_name_and_number': 'projectTitle', 'total_amount_gbp': 'amount', 'amount_gbp': 'euContribution', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) full_data_fts.head(2) ``` ## Erasmus ``` full_data_cordis.head(2) erasmus.head(2) full_data_erasmus = erasmus[['postcode','project','max_contribution_gbp','organisation_website','my_eu_id']].copy() full_data_erasmus.rename({ 'project': 'projectTitle', 'max_contribution_gbp': 'euContribution', 'organisation_website' : 'website', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) full_data_erasmus.head(2) ``` ## NWEurope ``` nweurope.head(2) full_data_nweurope = nweurope[['postcode','project','contribution_gbp','my_eu_id']].copy() full_data_nweurope.rename({ 'project': 'projectTitle', 'contribution_gbp': 'euContribution', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) full_data_nweurope.head(2) ``` ## LIFE ``` life.head(2) full_data_life = life[['postcode','project_title','total_budget_gbp','eu_contribution_gbp','project_url','my_eu_id']].copy() full_data_life.rename({ 'project_title': 'projectTitle', 'total_budget_gbp': 'amount', 'eu_contribution_gbp': 'euContribution', 'project_url': 'website', 'my_eu_id': 'myEuId' }, axis=1, inplace=True) full_data_life.head(2) full_data = pd.concat([full_data_cordis, full_data_creative, full_data_esif, full_data_fts, full_data_erasmus, full_data_nweurope, full_data_life], sort=False) full_data.shape ``` Location on myeu.uk website ``` full_data['myeuURL'] = full_data.apply( lambda row: 'https://www.myeu.uk/postcode/' + "/".join( row['postcode'].split() ), axis=1) 
full_data.head(2) full_data.to_csv('../constituencyprojects/output/full_data.csv', index=False) ``` ### Save Data ``` districtData.postcodes['A11'].cordis.data districtData.postcodes['A11'].cordis.extra ``` ``` ALL_DISTRICT_DATA = reduce( lambda left, right: pd.merge(left, right, on=('outwardCode', 'inwardCode'), how='outer'), [ cordis_district_data, creative_district_data, esif_district_data, fts_district_data, erasmus_district_data, nhs_district_data, nweurope_district_data, life_district_data ]) ALL_DATASET_NAMES = list(set(ALL_DISTRICT_DATA.columns) - set(['outwardCode', 'inwardCode'])) def save_columns(): def find_columns(data): return data[~data.isna()].iloc[0]['columns'] columns = { dataset_name: find_columns(ALL_DISTRICT_DATA[dataset_name]) for dataset_name in ALL_DATASET_NAMES } with open(os.path.join('output', 'district_columns.json'), 'w') as file: json.dump(columns, file, sort_keys=True) save_columns() life.head() ALL_DATASET_NAMES ALL_DISTRICT_DATA[(~ALL_DISTRICT_DATA[ALL_DATASET_NAMES].isna()).sum(axis=1) > 4][['outwardCode', 'inwardCode']].head() def merge_district_data(all_data): def find_dataset_data(row): dataset_data = row.drop(['outwardCode', 'inwardCode']).dropna().to_dict() return { dataset: { k: v for k, v in json.items() if k != 'columns' } for dataset, json in dataset_data.items() } def make_merged_district_data(outward_code, group): return { 'outwardCode': outward_code, 'postcodes': { row.inwardCode: find_dataset_data(row) for _index, row in group.iterrows() } } return { outward_code: make_merged_district_data(outward_code, group) for outward_code, group in all_data.groupby('outwardCode') } district_data = merge_district_data(ALL_DISTRICT_DATA) district_data['CA4'] OUTPUT_DISTRICT_PATH = 'output/district' def list_district_data(path): return glob.glob(os.path.join(path, '*.data.json')) def clear_district_data(path): for f in list_district_data(path): os.remove(f) def write_district_data(district_data, path): os.makedirs(path, 
exist_ok=True) clear_district_data(path) for outward_code, data in district_data.items(): output_pathname = os.path.join(path, outward_code + '.data.json') with open(output_pathname, 'w') as file: json.dump(data, file, sort_keys=True) write_district_data(district_data, OUTPUT_DISTRICT_PATH) def find_district_data_stats(): files = list_district_data(OUTPUT_DISTRICT_PATH) return pd.DataFrame({ 'file': [file for file in files], 'byte_size': [os.stat(file).st_size for file in files] }) district_data_stats = find_district_data_stats() district_data_stats.describe() district_data_stats.byte_size.sum() / 1024 / 1024 district_data_stats[district_data_stats.byte_size > 1024*1024] district_data_stats.describe().hist() ``` #### Data Index Generate a JS file that webpack can use to make paths for all of the data files. ``` def write_district_data_js(): data_files = list_district_data(OUTPUT_DISTRICT_PATH) def make_require(data_file): basename = os.path.basename(data_file) pathname = os.path.join('.', 'district', basename) outward_code = basename.split('.')[0] return " {}: require('{}')".format(outward_code, pathname) with open('output/district.js', 'w') as file: file.write('// NB: This file is generated automatically. Do not edit.\n') file.write('export default {\n') requires = [ make_require(data_file) for data_file in sorted(data_files) ] file.write(',\n'.join(requires)) file.write('\n}\n') write_district_data_js() ``` #### NHS Data ``` [nhs_staff.shape[0], nhs_hospital_postcodes.shape[0]] nhs_hospital_postcodes.groupby('hospital_organisation').organisation.count().max() def write_nhs_staff_data(): with open('output/nhs_staff.json', 'w') as file: file.write( nhs_staff.sort_values('organisation', ascending=True).\ to_json(orient='split', index=False)) write_nhs_staff_data() ``` ## Put all project stories into one dataframe Looking at each programme in turn, create an project_stories dataframe with the key attributes. 
### Cordis ``` cordis.head(2) project_stories_cordis = cordis[[ 'my_eu_id', 'postcode', 'title', 'name', 'contribution_gbp', 'max_contribution_gbp', 'total_cost_gbp', 'start_date', 'end_date', 'project_url', 'organization_url' ]].copy().rename({ 'title': 'project_title', 'name': 'organisation_name', 'contribution_gbp': 'eu_contribution_gbp', 'max_contribution_gbp': 'project_eu_contribution_gbp', 'total_cost_gbp': 'project_total_cost_gbp', 'organization_url': 'organisation_url' }, axis=1) project_stories_cordis.head(2) ``` ### Creative Europe ``` creative.head(2) project_stories_creative = creative.copy() project_stories_creative['eu_contribution_gbp'] = np.nan # not available project_stories_creative['project_total_cost_gbp'] = np.nan # not available project_stories_creative = project_stories_creative[[ 'my_eu_id', 'postcode', 'project', 'organisation_name', 'eu_contribution_gbp', 'max_contribution_gbp', 'project_total_cost_gbp', 'start_date', 'end_date', 'project_url', 'organisation_website', ]].rename({ 'project': 'project_title', 'max_contribution_gbp': 'project_eu_contribution_gbp', 'organisation_website': 'organisation_url' }, axis=1) assert np.all(project_stories_cordis.columns == project_stories_creative.columns) project_stories_creative.head(2) ``` ### ESIF ``` esif.head(2) project_stories_esif = esif.copy() project_stories_esif['project_eu_contribution_gbp'] = esif.eu_investment # story and project are the same project_stories_esif['project_url'] = np.nan # not available project_stories_esif['organisation_url'] = np.nan # not available project_stories_esif = project_stories_esif[[ 'my_eu_id', 'postcode', 'project', 'beneficiary', 'eu_investment', 'project_eu_contribution_gbp', 'project_cost', 'start_date', 'end_date', 'project_url', 'organisation_url' ]].copy().rename({ 'project': 'project_title', 'beneficiary': 'organisation_name', 'eu_investment': 'eu_contribution_gbp', 'project_cost': 'project_total_cost_gbp' }, axis=1) assert 
np.all(project_stories_cordis.columns == project_stories_esif.columns) project_stories_esif.head(2) ``` ### FTS I am not sure whether to include these, so for now I have excluded them from the build later. ``` fts.head(2) project_stories_fts = fts.copy() project_stories_fts['project_eu_contribution_gbp'] = project_stories_fts.amount_gbp # story and project are the same project_stories_fts['start_date'] = pd.to_datetime({'year': fts.year, 'month': 1, 'day': 1}) project_stories_fts['end_date'] = np.nan # not available project_stories_fts['project_url'] = np.nan # not available project_stories_fts['organisation_url'] = np.nan # not available project_stories_fts = project_stories_fts[[ 'my_eu_id', 'postcode', 'budget_line_name_and_number', 'beneficiary', 'amount_gbp', 'project_eu_contribution_gbp', 'total_amount_gbp', 'start_date', 'end_date', 'project_url', 'organisation_url' ]].rename({ 'budget_line_name_and_number': 'project_title', 'beneficiary': 'organisation_name', 'amount_gbp': 'eu_contribution_gbp', 'total_amount_gbp': 'project_total_cost_gbp' }, axis=1) assert np.all(project_stories_cordis.columns == project_stories_fts.columns) project_stories_fts.head(2) ``` ### Erasmus ``` erasmus.head(2) project_stories_erasmus = erasmus.copy() project_stories_erasmus['eu_contribution_gbp'] = np.nan # not available project_stories_erasmus['project_total_cost_gbp'] = np.nan # not available project_stories_erasmus['start_date'] = pd.to_datetime({'year': erasmus.call_year, 'month': 1, 'day': 1}) project_stories_erasmus['end_date'] = np.nan # not available project_stories_erasmus = project_stories_erasmus[[ 'my_eu_id', 'postcode', 'project', 'organisation_name', 'eu_contribution_gbp', 'max_contribution_gbp', 'project_total_cost_gbp', 'start_date', 'end_date', 'project_url', 'organisation_website', ]].rename({ 'project': 'project_title', 'max_contribution_gbp': 'project_eu_contribution_gbp', 'organisation_website': 'organisation_url' }, axis=1) assert 
np.all(project_stories_cordis.columns == project_stories_erasmus.columns) project_stories_erasmus.head(2) ``` ### NWEurope ``` nweurope.head(2) project_stories_nweurope = nweurope.copy() project_stories_nweurope['project_eu_contribution_gbp'] = \ nweurope.project_contribution_eur * nweurope.eur_gbp project_stories_nweurope['project_total_cost_gbp'] = \ nweurope.project_total_cost_eur * nweurope.eur_gbp project_stories_nweurope['project_url'] = 'http://www.nweurope.eu' + nweurope.page_path project_stories_nweurope['organisation_url'] = np.nan # not available project_stories_nweurope = project_stories_nweurope[[ 'my_eu_id', 'postcode', 'project', 'beneficiary', 'contribution_gbp', 'project_eu_contribution_gbp', 'project_total_cost_gbp', 'start_date', 'end_date', 'project_url', 'organisation_url' ]].rename({ 'project': 'project_title', 'beneficiary': 'organisation_name', 'contribution_gbp': 'eu_contribution_gbp' }, axis=1) assert np.all(project_stories_cordis.columns == project_stories_nweurope.columns) project_stories_nweurope.head(2) ``` ### LIFE ``` life.head(2) project_stories_life = life.copy() project_stories_life['organisation_name'] = life.coordinator + ' (' + life.area_name + ')' project_stories_life['project_eu_contribution_gbp'] = life.eu_contribution_gbp # story and project are the same project_stories_life['start_date'] = pd.to_datetime({'year': life.year, 'month': 1, 'day': 1}) project_stories_life['end_date'] = np.nan # not available project_stories_life = project_stories_life[[ 'my_eu_id', 'postcode', 'project_title', 'organisation_name', 'eu_contribution_gbp', 'project_eu_contribution_gbp', 'total_budget_gbp', 'start_date', 'end_date', 'project_url', 'website' ]].rename({ 'total_budget_gbp': 'project_total_cost_gbp', 'website': 'organisation_url', }, axis=1) assert np.all(project_stories_cordis.columns == project_stories_life.columns) project_stories_life.head(2) ``` ### Save All Project Stories ``` project_stories = pd.concat([ 
project_stories_cordis, project_stories_creative, project_stories_esif, project_stories_erasmus, project_stories_nweurope, project_stories_life ], sort=False) project_stories.shape ``` Add location on myeu.uk website: ``` project_stories['my_eu_url'] = project_stories.apply( lambda row: 'https://www.myeu.uk/postcode/' + "/".join( row['postcode'].split() ), axis=1) project_stories.sort_values(['postcode', 'my_eu_id'], inplace=True) project_stories.head(2) project_stories.to_pickle('output/project_stories.pkl.gz') ``` ### Save All NHS Stories ``` nhs_stories = pd.merge( nhs_hospital_postcodes, nhs_staff, on='organisation', validate='m:1' ) nhs_stories = nhs_stories[[ 'my_eu_id', 'postcode', 'organisation', 'organisation_name', 'hospital_organisation', 'hospital_name', 'eu_doctors', 'eu_nurses', 'eu_other', 'non_eu_doctors', 'non_eu_nurses', 'non_eu_other', 'unknown_doctors', 'unknown_nurses', 'unknown_other' ]] nhs_stories.sort_values('postcode', inplace=True) nhs_stories['my_eu_url'] = nhs_stories.apply( lambda row: 'https://www.myeu.uk/postcode/' + "/".join( row['postcode'].split() ), axis=1) nhs_stories.head() nhs_stories.to_pickle('output/nhs_stories.pkl.gz') ```
github_jupyter
# Summarize Data The `features` module packs a set of data summarization tools to calculate total counts, lengths, areas, and basic descriptive statistics of features and their attributes within areas or near other features. You can access these tools using the `summarize_data` sub module. ## Aggregate points In this example, let us observe how to use `aggregate_points` tool to summarize data from spot measurements by area. To learn more about this tool and the formula it uses, refer to the documentation [here](http://doc.arcgis.com/en/arcgis-online/use-maps/aggregate-points.htm) ``` # connect to GIS from arcgis.gis import GIS gis = GIS("portal url", "username", "password") #search for earthquakes data - point data eq_search = gis.content.search("world earthquakes", "feature layer", max_items=1) eq_item = eq_search[0] eq_item # search for USA states - area / polygon data states_search = gis.content.search("title:USA_states and owner:arcgis_python_api", "feature layer", max_items=1) states_item = states_search[0] states_item ``` Lets draw the layers on a map and observe how they are distributed ``` map1 = gis.map("USA") map1 ``` ![earthquakes and states](http://esri.github.io/arcgis-python-api/notebooks/nbimages/guide_features_summarize_aggregate_points_01.PNG) ``` map1.add_layer(states_item) map1.add_layer(eq_item) ``` ### Aggregate earthquakes by state As you can see, a number of earthquakes fall on the boundary of tectonic plates ([ring of fire](https://en.wikipedia.org/wiki/Ring_of_Fire)). However, there are a few more dispersed into other states. It would be interesting to aggregate all the earthquakes by state and plot that as a chart. The `aggregate_points` tool in the `summarize_data` sub module is a valid candidate for such analyses. The example below shows how to run this tool using ArcGIS API for Python. 
To start with, let us access the layers in the states and earthquakes items and view their attribute information to understand how the data can be summarized ``` eq_fl = eq_item.layers[0] states_fl = states_item.layers[0] ``` We have accessed the layers in the items as `FeatureLayer` objects. We can query the `fields` property to understand what kind of attribute data is stored in the layers ``` #query the fields in eq_fl layer for field in eq_fl.properties.fields: print(field['name']) # similarly for states data for field in states_fl.properties.fields: print(field['name'], end="\t") ``` Let us aggreate the points by state and summarize the `magnitude` field and use `mean` as the summary type. ``` from arcgis.features import summarize_data sum_fields = ['magnitude Mean', 'depth Min'] eq_summary = summarize_data.aggregate_points(point_layer = eq_fl, polygon_layer = states_fl, keep_boundaries_with_no_points=False, summary_fields=sum_fields) ``` When running the tool above, we did not specify a name for the `output_name` parameter. Hence the analyses results were not stored on the portal, instead stored in the variable `eq_summary`. ``` eq_summary # access the aggregation feature colleciton eq_aggregate_fc = eq_summary['aggregated_layer'] #query this feature collection to get a data as a feature set eq_aggregate_fset = eq_aggregate_fc.query() ``` `FeatureSet` objects support visualizing attribute information as a pandas dataframe. This is a neat feature since you do not have to iterate through each feature to view their attribute information. Let us view the summary results as a pandas dataframe table. Note, the `aggregate_points` tool appends the polygon layer's original set of fields to the analysis result in order to provide it context ``` aggregation_df = eq_aggregate_fset.df aggregation_df ``` Thus, from our data, of the 50 states, only 23 have had earthquakes. 
Let us plot a bar chart to view which states had the most earthquakes ``` aggregation_df.plot('state_name','Point_Count', kind='bar') ``` Clearly, California tops the list with the most number of earthquakes. Let us view what the average intensity and minimum depth is in the plots below: ``` aggregation_df.plot('state_name',['MEAN_magnitude', 'MIN_depth'],kind='bar', subplots=True) ```
github_jupyter
``` import os import pandas as pd # Some of the columns we will look at can be quite wide, but it's good to get an idea of what they contain print(pd.get_option('max_colwidth')) pd.set_option('max_colwidth',500) ``` # Page types Edit the query to look at the dates you care about We want to know what page types different pages are because we care about navigation elements clicked **from a content page** and the total number of journeys that contain **only content pages and related links**. Initially we wanted to use [User journey supertype](https://docs.publishing.service.gov.uk/document-types/user_journey_document_supertype.html) from [custom dimensions](https://gov-uk.atlassian.net/wiki/spaces/GOVUK/pages/23855552/Analytics+on+GOV.UK#AnalyticsonGOV.UK-customDimensionsCustomdimensions), where each page is either classified as 'finding' or 'thing'. Unfortunately this dimension is poorly populated in BigQuery due to someone disabling it, and occasionally the same page path and page title can be reported as both a 'finding' and a 'thing' at different times (e.g. the GOV.UK homepage). The next option is using 'Format' (document type) from [custom dimensions](https://gov-uk.atlassian.net/wiki/spaces/GOVUK/pages/23855552/Analytics+on+GOV.UK#AnalyticsonGOV.UK-customDimensionsCustomdimensions), which is a lot better populated but a lot more granular, you can find a sample of a day's data with pageviews from GA [here](https://docs.google.com/spreadsheets/d/1-jen8DbRgvmvF9aYapmwsFAe0ncHeq5y-ks75iyCvVU/edit#gid=177174931). 
*NB - a data dump from the content API is probably the most reliable source for a mapping of page paths to document types and then to finding/thing, however there's a bit of outlay in understanding how that data can be accessed and how it is structured so a BigQuery query is most appropriate for a first stab at this.* **TODO** one query to get pagePath, format (or map this to finding/thing), content ID, where content_id != '00000000-0000-0000-0000-000000000000' ``` ProjectID = 'govuk-bigquery-analytics' KEY_DIR = os.getenv("BQ_KEY_DIR") key_file_path = os.path.join(KEY_DIR, os.listdir(KEY_DIR)[0]) QUERY = """ SELECT pagePath, MAX(IF(document_type IN ('document_collection', 'finder', 'homepage', 'license_finder', 'mainstream_browse_page', 'organisation', 'search', 'service_manual_homepage', 'service_manual_topic', 'services_and_information', 'taxon', 'topic', 'topical_event'),1,0)) AS is_finding FROM (SELECT ( SELECT value FROM hits.customDimensions WHERE index=4) AS content_id, hits.page.pagePath, ( SELECT value FROM hits.customDimensions WHERE index=2) AS document_type FROM `govuk-bigquery-analytics.87773428.ga_sessions_*` AS sessions CROSS JOIN UNNEST(sessions.hits) AS hits WHERE _TABLE_SUFFIX BETWEEN '20190214' AND '20190218' ) WHERE content_id != '00000000-0000-0000-0000-000000000000' AND content_id != '[object Object]' AND content_id IS NOT NULL GROUP BY 1 """ df_finding_thing = pd.io.gbq.read_gbq(QUERY, project_id=ProjectID, reauth=False, # verbose=True, private_key=key_file_path, dialect='standard') df_finding_thing.to_csv('../../data/raw_bq_extract/document_types_20190214_20190218.csv.gz', sep="\t", index=False, compression='gzip') # df_finding_thing = pd.read_csv( # '../../data/raw_bq_extract/document_types.csv.gz', # sep="\t", compression='gzip') ```
github_jupyter
This notebook walks through the creation of multitask models on MUV. The goal is to demonstrate that multitask methods outperform singletask methods on MUV. ``` %reload_ext autoreload %autoreload 2 %pdb off reload = True import deepchem as dc dataset_file= "../../datasets/muv.csv.gz" dataset = dc.utils.save.load_from_disk(dataset_file) print("Columns of dataset: %s" % str(dataset.columns.values)) print("Number of examples in dataset: %s" % str(dataset.shape[0])) ``` Now, let's visualize some compounds from our dataset ``` from rdkit import Chem from rdkit.Chem import Draw from itertools import islice from IPython.display import Image, display, HTML def display_images(filenames): """Helper to pretty-print images.""" imagesList=''.join( ["<img style='width: 140px; margin: 0px; float: left; border: 1px solid black;' src='%s' />" % str(s) for s in sorted(filenames)]) display(HTML(imagesList)) def mols_to_pngs(mols, basename="test"): """Helper to write RDKit mols to png files.""" filenames = [] for i, mol in enumerate(mols): filename = "%s%d.png" % (basename, i) Draw.MolToFile(mol, filename) filenames.append(filename) return filenames num_to_display = 12 molecules = [] for _, data in islice(dataset.iterrows(), num_to_display): molecules.append(Chem.MolFromSmiles(data["smiles"])) display_images(mols_to_pngs(molecules)) MUV_tasks = ['MUV-692', 'MUV-689', 'MUV-846', 'MUV-859', 'MUV-644', 'MUV-548', 'MUV-852', 'MUV-600', 'MUV-810', 'MUV-712', 'MUV-737', 'MUV-858', 'MUV-713', 'MUV-733', 'MUV-652', 'MUV-466', 'MUV-832'] featurizer = dc.feat.CircularFingerprint(size=1024) loader = dc.data.CSVLoader( tasks=MUV_tasks, smiles_field="smiles", featurizer=featurizer) dataset = loader.featurize(dataset_file) splitter = dc.splits.RandomSplitter(dataset_file) train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split( dataset) #NOTE THE RENAMING: valid_dataset, test_dataset = test_dataset, valid_dataset import numpy as np import numpy.random params_dict = 
{"activation": ["relu"], "momentum": [.9], "batch_size": [50], "init": ["glorot_uniform"], "data_shape": [train_dataset.get_data_shape()], "learning_rate": [1e-3], "decay": [1e-6], "nb_epoch": [1], "nesterov": [False], "dropouts": [(.5,)], "nb_layers": [1], "batchnorm": [False], "layer_sizes": [(1000,)], "weight_init_stddevs": [(.1,)], "bias_init_consts": [(1.,)], "penalty": [0.], } n_features = train_dataset.get_data_shape()[0] def model_builder(model_params, model_dir): model = dc.models.TensorflowMultiTaskClassifier( len(MUV_tasks), n_features, **model_params) return model metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean) optimizer = dc.hyper.HyperparamOpt(model_builder) best_dnn, best_hyperparams, all_results = optimizer.hyperparam_search( params_dict, train_dataset, valid_dataset, [], metric) ```
github_jupyter
# Regular expressions: A Gentle Introduction By [Allison Parrish](http://www.decontextualize.com/) A [regular expression](https://en.wikipedia.org/wiki/Regular_expression) is more than just a phrase that sounds like a euphemism for what happens when your diet includes enough fiber. It's a way of writing what amount to small programs for matching patterns in text that would otherwise be difficult to match with the regular toolbox of string filtering and searching tools. This tutorial will take you through the basics of using regular expressions in Python. But many (if not most) other programming languages also support regular expressions in some form or other ([like JavaScript](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions)), so the skills you'll learn here will apply to other languages as well. ## "Escape" sequences in strings Before we go into too much detail about regular expressions, I want to review with you how escape sequences work in Python strings. Inside of strings that you type into your Python code, there are certain sequences of characters that have a special meaning. These sequences start with a backslash character (`\`) and allow you to insert into your string characters that would otherwise be difficult to type, or that would go against Python syntax. Here's some code illustrating a few common sequences: ``` print("1. include \"double quotes\" (inside of a double-quoted string)") print('2. include \'single quotes\' (inside of a single-quoted string)') print("3. one\ttab, two\ttabs") print("4. new\nline") print("5. include an actual backslash \\ (two backslashes in the string)") ``` ## Regular expressions [So far, we've discussed how to write Python expressions that are able to check whether strings meet very simple criteria](expressions-and-strings.ipynb), such as “does this string begin with a particular character” or “does this string contain another string”? 
But imagine writing a program that performs the following task: find and print all ZIP codes in a string (i.e., a five-character sequence of digits). Give up? Here’s my attempt, using only the tools we’ve discussed so far: ``` input_str = "here's a zip code: 12345. 567 isn't a zip code, but 45678 is. 23456? yet another zip code." current = "" zips = [] for ch in input_str: if ch in '0123456789': current += ch else: current = "" if len(current) == 5: zips.append(current) current = "" zips ``` Basically, we have to iterate over each character in the string, check to see if that character is a digit, append to a string variable if so, continue reading characters until we reach a non-digit character, check to see if we found exactly five digit characters, and add it to a list if so. At the end, we print out the list that has all of our results. Problems with this code: it’s messy; it doesn’t overtly communicate what it’s doing; it’s not easily generalized to other, similar tasks (e.g., if we wanted to write a program that printed out phone numbers from a string, the code would likely look completely different). Our ancient UNIX pioneers had this problem, and in pursuit of a solution, thought to themselves, "Let’s make a tiny language that allows us to write specifications for textual patterns, and match those patterns against strings. No one will ever have to write fiddly code that checks strings character-by-character ever again." And thus regular expressions were born. Here's the code for accomplishing the same task with regular expressions, by the way: ``` import re zips = re.findall(r"\d{5}", input_str) zips ``` I’ll allow that the `r"\d{5}"` in there is mighty cryptic (though hopefully it won’t be when you’re done reading this page and/or participating in the associated lecture). But the overall structure of the program is much simpler. 
### Fetching our corpus For this section of class, we'll be using the subject lines of all e-mails in the [EnronSent corpus](http://verbs.colorado.edu/enronsent/), kindly put into the public domain by the United States Federal Energy Regulatory Commission. Download a copy of [this file](https://raw.githubusercontent.com/ledeprogram/courses/master/databases/data/enronsubjects.txt) and place it in the same directory as this notebook. ### Matching strings with regular expressions The most basic operation that regular expressions perform is matching strings: you’re asking the computer whether a particular string matches some description. We're going to be using regular expressions to print only those lines from our `enronsubjects.txt` corpus that match particular sequences. Let's load our corpus into a list of lines first: ``` subjects = [x.strip() for x in open("enronsubjects.txt").readlines()] ``` We can check whether or not a pattern matches a given string in Python with the `re.search()` function. The first parameter to search is the regular expression you're trying to match; the second parameter is the string you're matching against. Here's an example, using a very simple regular expression. The following code prints out only those lines in our Enron corpus that match the (very simple) regular expression `shipping`: ``` import re [line for line in subjects if re.search("shipping", line)] ``` At its simplest, a regular expression matches a string if that string contains exactly the characters you've specified in the regular expression. So the expression `shipping` matches strings that contain exactly the sequences of `s`, `h`, `i`, `p`, `p`, `i`, `n`, and `g` in a row. If the regular expression matches, `re.search()` evaluates to `True` and the matching line is included in the evaluation of the list comprehension. 
> BONUS TECH TIP: `re.search()` doesn't actually evaluate to `True` or `False`---it evaluates to either a `Match` object if a match is found, or `None` if no match was found. Those two count as `True` and `False` for the purposes of an `if` statement, though. ### Metacharacters: character classes The "shipping" example is pretty boring. (There was hardly any fan fiction in there at all.) Let's go a bit deeper into detail with what you can do with regular expressions. There are certain characters or strings of characters that we can insert into a regular expressions that have special meaning. For example: ``` [line for line in subjects if re.search("sh.pping", line)] ``` In a regular expression, the character `.` means "match any character here." So, using the regular expression `sh.pping`, we get lines that match `shipping` but also `shopping`. The `.` is an example of a regular expression *metacharacter*---a character (or string of characters) that has a special meaning. Here are a few more metacharacters. These metacharacters allow you to say that a character belonging to a particular *class* of characters should be matched in a particular position: | metacharacter | meaning | |---------------|---------| | `.` | match any character | | `\w` | match any alphanumeric ("*w*ord") character (lowercase and capital letters, 0 through 9, underscore) | | `\s` | match any whitespace character (i.e., space and tab) | | `\S` | match any non-whitespace character (the inverse of \s) | | `\d` | match any digit (0 through 9) | | `\.` | match a literal `.` | Here, for example, is a (clearly imperfect) regular expression to search for all subject lines containing a time of day: ``` [line for line in subjects if re.search(r"\d:\d\d\wm", line)] ``` Here's that regular expression again: `r"\d:\d\d\wm"`. I'm going to show you how to read this, one unit at a time. "Hey, regular expression engine. Tell me if you can find this pattern in the current string. 
First of all, look for any number (`\d`). If you find that, look for a colon right after it (`:`). If you find that, look for another number right after it (`\d`). If you find *that*, look for any alphanumeric character---you know, a letter, a number, an underscore. If you find that, then look for a `m`. Good? If you found all of those things in a row, then the pattern matched." #### But what about that weirdo `r""`? Python provides another way to include string literals in your program, in addition to the single- and double-quoted strings we've already discussed. The r"" string literal, or "raw" string, includes all characters inside the quotes literally, without interpolating special escape characters. Here's an example: ``` print("1. this is\na test") print(r"2. this is\na test") print("3. I love \\ backslashes!") print(r"4. I love \ backslashes!") ``` As you can see, whereas a double- or single-quoted string literal interprets `\n` as a new line character, the raw quoted string includes those characters as they were literally written. More importantly, for our purposes at least, is the fact that, in the raw quoted string, we only need to write one backslash in order to get a literal backslash in our string. Why is this important? Because regular expressions use backslashes all the time, and we don't want Python to try to interpret those backslashes as special characters. (Inside a regular string, we'd have to write a simple regular expression like `\b\w+\b` as `\\b\\w+\\b`---yecch.) So the basic rule of thumb is this: use r"" to quote any regular expressions in your program. All of the examples you'll see below will use this convention. 
### Character classes in-depth You can define your own character classes by enclosing a list of characters, or range of characters, inside square brackets: | regex | explanation | |-------|-------------| | `[aeiou]` | matches any vowel | | `[02468]` | matches any even digit | | `[a-z]` | matches any lower-case letter | | `[A-Z]` | matches any upper-case character | | `[^0-9]` | matches any non-digit (the ^ inverts the class, matches anything not in the list) | | `[Ee]` | matches either `E` or `e` | Let's find every subject line where we have four or more vowels in a row: ``` [line for line in subjects if re.search(r"[aeiou][aeiou][aeiou][aeiou]", line)] ``` ### Metacharacters: anchors The next important kind of metacharacter is the *anchor*. An anchor doesn't match a character, but matches a particular place in a string. | anchor | meaning | |--------|---------| | `^` | match at beginning of string | | `$` | match at end of string | | `\b` | match at word boundary | > Note: `^` in a character class has a different meaning from `^` outside a character class! > Note #2: If you want to search for a literal dollar sign (`$`), you need to put a backslash in front of it, like so: `\$` Now we have enough regular expression knowledge to do some fairly sophisticated matching. As an example, all the subject lines that begin with the string `New York`, regardless of whether or not the initial letters were capitalized: ``` [line for line in subjects if re.search(r"^[Nn]ew [Yy]ork", line)] ``` Every subject line that ends with an ellipsis (there are a lot of these, so I'm only displaying the first 30): ``` [line for line in subjects if re.search(r"\.\.\.$", line)][:30] ``` The first thirty subject lines containing the word "oil": ``` [line for line in subjects if re.search(r"\b[Oo]il\b", line)][:30] ``` ### Metacharacters: quantifiers Above we had a regular expression that looked like this: [aeiou][aeiou][aeiou][aeiou] Typing out all of those things is kind of a pain. 
Fortunately, there’s a way to specify how many times to match a particular character, using quantifiers. These affect the character that immediately precede them: | quantifier | meaning | |------------|---------| | `{n}` | match exactly n times | | `{n,m}` | match at least n times, but no more than m times | | `{n,}` | match at least n times | | `+` | match at least once (same as {1,}) | | `*` | match zero or more times | | `?` | match one time or zero times | For example, here's an example of a regular expression that finds subjects that contain at least fifteen capital letters in a row: ``` [line for line in subjects if re.search(r"[A-Z]{15,}", line)] ``` Lines that contain five consecutive vowels: ``` [line for line in subjects if re.search(r"[aeiou]{5}", line)] ``` Count the number of lines that are e-mail forwards, regardless of whether the subject line begins with `Fw:`, `FW:`, `Fwd:` or `FWD:` ``` len([line for line in subjects if re.search(r"^F[Ww]d?:", line)]) ``` Lines that have the word `news` in them and end in an exclamation point: ``` [line for line in subjects if re.search(r"\b[Nn]ews\b.*!$", line)] ``` ### Metacharacters: alternation One final bit of regular expression syntax: alternation. * `(?:x|y)`: match either x or y * `(?:x|y|z)`: match x, y or z * etc. So for example, if you wanted to count every subject line that begins with either `Re:` or `Fwd:`: ``` len([line for line in subjects if re.search(r"^(?:Re|Fwd):", line)]) ``` Every subject line that mentions kinds of cats: ``` [line for line in subjects if re.search(r"\b(?:[Cc]at|[Kk]itten|[Kk]itty)\b", line)] ``` ## Capturing what matches The `re.search()` function allows us to check to see *whether or not* a string matches a regular expression. Sometimes we want to find out not just if the string matches, but also to what, exactly, in the string matched. In other words, we want to *capture* whatever it was that matched. 
The easiest way to do this is with the `re.findall()` function, which takes a regular expression and a string to match it against, and returns a list of all parts of the string that the regular expression matched. Here's an example: ``` import re re.findall(r"\b\w{5}\b", "alpha beta gamma delta epsilon zeta eta theta") ``` The regular expression above, `\b\w{5}\b`, is a regular expression that means "find me strings of five word characters (letters, digits, or underscores) between word boundaries"---in other words, find me five-letter words. The `re.findall()` method returns a list of strings---not just telling us whether or not the string matched, but which parts of the string matched. For the following `re.findall()` examples, we'll be operating on the entire file of subject lines as a single string, instead of using a list comprehension for individual subject lines. Here's how to read in the entire file as one string, instead of as a list of strings: ``` all_subjects = open("enronsubjects.txt").read() ``` Having done that, let's write a regular expression that finds all domain names in the subject lines (displaying just the first thirty because the list is long): ``` re.findall(r"\b\w+\.(?:com|net|org)", all_subjects)[:30] ``` Every time the string `New York` is found, along with the word that comes directly afterward: ``` re.findall(r"New York \b\w+\b", all_subjects) ``` And just to bring things full-circle, everything that looks like a zip code, sorted: ``` sorted(re.findall(r"\b\d{5}\b", all_subjects))[:30] ``` ## Full example: finding the dollar value of the Enron e-mail subject corpus Here's an example that combines our regular expression prowess with our ability to do smaller manipulations on strings. We want to find all dollar amounts in the subject lines, and then figure out what their sum is. 
To understand what we're working with, let's start by writing a list comprehension that finds strings that just have the dollar sign (`$`) in them: ``` [line for line in subjects if re.search(r"\$", line)] ``` Based on this data, we can guess at the steps we'd need to do in order to figure out these values. We're going to ignore anything that doesn't have "k", "million" or "billion" after it as chump change. So what we need to find is: a dollar sign, followed by any series of numbers (or a period), followed potentially by a space (but sometimes not), followed by a "k", "m" or "b" (which will sometimes start the word "million" or "billion" but sometimes not... so we won't bother looking). Here's how I would translate that into a regular expression: \$[0-9.]+ ?(?:[Kk]|[Mm]|[Bb]) We can use `re.findall()` to capture all instances where we found this regular expression in the text. Here's what that would look like: ``` re.findall(r"\$[0-9.]+ ?(?:[Kk]|[Mm]|[Bb])", all_subjects) ``` If we want to actually make a sum, though, we're going to need to do a little massaging. ``` total_value = 0 dollar_amounts = re.findall(r"\$\d+ ?(?:[Kk]|[Mm]|[Bb])", all_subjects) for amount in dollar_amounts: # the last character will be 'k', 'm', or 'b'; "normalize" by making lowercase. multiplier = amount[-1].lower() # trim off the beginning $ and ending multiplier value amount = amount[1:-1] # remove any remaining whitespace amount = amount.strip() # convert to a floating-point number float_amount = float(amount) # multiply by an amount, based on what the last character was if multiplier == 'k': float_amount = float_amount * 1000 elif multiplier == 'm': float_amount = float_amount * 1000000 elif multiplier == 'b': float_amount = float_amount * 1000000000 # add to total value total_value = total_value + float_amount total_value ``` That's over one trillion dollars! Nice work, guys. 
## Finer-grained matches with grouping We used `re.search()` above to check whether or not a string matches a particular regular expression, in a context like this: ``` import re dickens = [ "it was the best of times", "it was the worst of times"] [line for line in dickens if re.search(r"best", line)] ``` But the match object doesn't actually return `True` or `False`. If the search succeeds, the function returns something called a "match object." Let's assign the result of `re.search()` to a variable and see what we can do with it. ``` source_string = "this example has been used 423 times" match = re.search(r"\d\d\d", source_string) type(match) ``` It's a value of type `_sre.SRE_Match`. This value has several methods that we can use to access helpful and interesting information about the way the regular expression matched the string. [Read more about the methods of the match object here](https://docs.python.org/2/library/re.html#match-objects). For example, we can see both where the match *started* in the string and where it *ended*, using the `.start()` and `.end()` methods. These methods return the indexes in the string where the regular expression matched. ``` match.start() match.end() ``` Together, we can use these methods to grab exactly the part of the string that matched the regular expression, by using the start/end values to get a slice: ``` source_string[match.start():match.end()] ``` Because it's so common, there's a shortcut for this operation, which is the match object's `.group()` method: ``` match.group() ``` The `.group()` method of a match object, in other words, returns exactly the part of the string that matched the regular expression. As an example of how to use the match object and its `.group()` method in context, let's revisit the example from above which found every subject line in the Enron corpus that had fifteen or more consecutive capital letters. In that example, we could only display the *entire subject line*. 
If we wanted to show just the part of the string that matched (i.e., the sequence of fifteen or more capital letters), we could use `.group()`: ``` for line in subjects: match = re.search(r"[A-Z]{15,}", line) if match: print(match.group()) ``` An important thing to remember about `re.search()` is that it returns `None` if there is no match. For this reason, you always need to check to make sure the object is *not* `None` before you attempt to call the value's `.group()` method. This is the reason that it's difficult to write the above example as a list comprehension---you need to check the result of `re.search()` before you can use it. An attempt to do something like this, for example, will fail: ``` [re.search(r"[A-Z]{15,}", line).group() for line in subjects] ``` Python complains that `NoneType` has no `group()` method. This happens because sometimes the result of `re.search()` is none. We could, of course, write a little function to get around this limitation: ``` # make a function def filter_and_group(source, regex): return [re.search(regex, item).group() for item in source if re.search(regex, item)] # now call it filter_and_group(subjects, r"[A-Z]{15,}") ``` ### Multiple groups in one regular expression So `re.search()` lets us get the parts of a string that match a regular expression, using the `.group()` method of the match object it returns. You can get even finer-grained matches using a feature of regular expressions called *grouping*. Let's start with a toy example. Say you have a list of University courses in the following format: ``` courses = [ "CSCI 105: Introductory Programming for Cat-Lovers", "LING 214: Pronouncing Things Backwards", "ANTHRO 342: Theory and Practice of Cheesemongery (Graduate Seminar)", "CSCI 205: Advanced Programming for Cat-Lovers", "ENGL 112: Speculative Travel Writing" ] ``` Let's say you want to extract the following items from this data: * A unique list of all departments (e.g., CSCI, LING, ANTHRO, etc.) 
* A list of all course names * A dictionary with all of the 100-level classes, 200-level classes, and 300-level classes Somehow we need to get *three* items from each line of data: the department, the number, and the course name. You can do this easily with regular expressions using *grouping*. To use grouping, put parentheses (`()`) around the portions of the regular expression that are of interest to you. You can then use the `.groups()` (note the `s`!) function to get the portion of the string that matched the portion of the regular expression inside the parentheses individually. Here's what it looks like, just operating on the first item of the list: ``` first_course = courses[0] match = re.search(r"(\w+) (\d+): (.+)$", first_course) match.groups() ``` The regular expression in `re.search()` above roughly translates as the following: * Find me a sequence of one or more alphanumeric characters. Save this sequence as the first group. * Find a space. * Find me a sequence of one or more digits. Save this as the second group. * Find a colon followed by a space. * Find me one or more characters---I don't care which characters---and save the sequence as the third group. * Match the end of the line. Calling the `.groups()` method returns a tuple containing each of the saved items from the grouping. You can use it like so: ``` groups = match.groups() print("Department:", groups[0]) # department print("Course number:", groups[1]) # course number print("Course name:", groups[2]) # course name ``` Now let's iterate over the entire list of courses and put them in the data structure as appropriate: ``` departments = set() course_names = [] course_levels = {} for item in courses: # search and create match object match = re.search(r"(\w+) (\d+): (.+)$", item) if match: # if there's a match... 
groups = match.groups() # get the groups: 0 is department, 1 is course number, 2 is name departments.add(groups[0]) # add to department set (we wanted a list of *unique* departments) course_names.append(groups[2]) # add to list of courses level = int(groups[1]) / 100 # get the course "level" by dividing by 100 # add the level/course key-value pair to course_levels if level not in course_levels: course_levels[level*100] = [] course_levels[level*100].append(groups[2]) ``` After you run this cell, you can check out the unique list of departments: ``` departments ``` ... the list of course names: ``` course_names ``` ... and the dictionary that maps course "levels" to a list of courses at that level: ``` course_levels ``` ## Grouping with multiple matches in the same string A problem with `re.search()` is that it only returns the *first* match in a string. What if we want to find *all* of the matches? It turns out that `re.findall()` *also* supports the regular expression grouping syntax. If the regular expression you pass to `re.findall()` includes any grouping parentheses, then the function returns not a list of strings, but a list of tuples, where each tuple has elements corresponding in order to the groups in the regular expression. 
As a quick example, here's a test string with number names and digits, and a regular expression to extract all instances of a series of alphanumeric characters, followed by a space, followed by a single digit: ``` test = "one 1 two 2 three 3 four 4 five 5" re.findall(r"(\w+) (\d)", test) ``` We can use this to extract every phone number from the Enron subjects corpus, separating out the components of the numbers by group: ``` re.findall(r"(\d\d\d)-(\d\d\d)-(\d\d\d\d)", all_subjects) ``` And then we can do a quick little data analysis on the frequency of area codes in these numbers, using the [Counter](https://docs.python.org/2/library/collections.html#counter-objects) object from the `collections` module: ``` from collections import Counter area_codes = [item[0] for item in re.findall(r"(\d\d\d)-(\d\d\d)-(\d\d\d\d)", all_subjects)] count = Counter(area_codes) count.most_common(1) ``` ### Multiple match objects with `re.finditer()` The `re` library also has a `re.finditer()` function, which returns not a list of matching strings in tuples (like `re.findall()`), but an iterator of *match objects*. This is useful if you need to know not just which text matched, but *where* in the text the match occurs. So, for example, to find the positions in the `all_subjects` corpus where the word "Oregon" occurs, regardless of capitalization: ``` [(match.start(), match.end(), match.group()) for match in re.finditer(r"[Oo]regon", all_subjects)] ``` ### Conclusion Regular expressions are a great way to take some raw text and find the parts that are of interest to you. Python's string methods and string slicing syntax are a great way to massage and clean up data. You know them both now, which makes you powerful. But as powerful as you are, you have only scratched the surface of your potential! We only scratched the surface of what's possible with regular expressions. 
Here's some further reading: * [egrep for Linguists](http://stts.se/egrep_for_linguists/egrep_for_linguists.html) explains how to use regular expressions using the command-line tool `egrep` (which I recommend becoming familiar with!) * Once you've mastered the basics, check the official [Python regular expressions HOWTO](https://docs.python.org/3/howto/regex.html). The official [Python documentation on regular expressions](https://docs.python.org/3/library/re.html) is a deep dive on the subject.
github_jupyter
``` test_index = 0 from load_data import * # load_data() from load_data import * X_train,X_test,y_train,y_test = load_data() len(X_train),len(y_train) len(X_test),len(y_test) import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F class Test_Model(nn.Module): def __init__(self) -> None: super().__init__() self.c1 = nn.Conv2d(1,64,5) self.c2 = nn.Conv2d(64,128,5) self.c3 = nn.Conv2d(128,256,5) self.fc4 = nn.Linear(256*10*10,256) self.fc6 = nn.Linear(256,128) self.fc5 = nn.Linear(128,4) def forward(self,X): preds = F.max_pool2d(F.relu(self.c1(X)),(2,2)) preds = F.max_pool2d(F.relu(self.c2(preds)),(2,2)) preds = F.max_pool2d(F.relu(self.c3(preds)),(2,2)) # print(preds.shape) preds = preds.view(-1,256*10*10) preds = F.relu(self.fc4(preds)) preds = F.relu(self.fc6(preds)) preds = self.fc5(preds) return preds device = torch.device('cuda') BATCH_SIZE = 32 IMG_SIZE = 112 model = Test_Model().to(device) optimizer = optim.SGD(model.parameters(),lr=0.1) criterion = nn.CrossEntropyLoss() EPOCHS = 125 from tqdm import tqdm PROJECT_NAME = 'Weather-Clf' import wandb # test_index += 1 # wandb.init(project=PROJECT_NAME,name=f'test-{test_index}') # for _ in tqdm(range(EPOCHS)): # for i in range(0,len(X_train),BATCH_SIZE): # X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device) # y_batch = y_train[i:i+BATCH_SIZE].to(device) # model.to(device) # preds = model(X_batch.float()) # preds.to(device) # loss = criterion(preds,torch.tensor(y_batch,dtype=torch.long)) # optimizer.zero_grad() # loss.backward() # optimizer.step() # wandb.log({'loss':loss.item()}) # wandb.finish() # for index in range(10): # print(torch.argmax(preds[index])) # print(y_batch[index]) # print('\n') class Test_Model(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1,16,5) self.conv2 = nn.Conv2d(16,32,5) self.conv3 = nn.Conv2d(32,64,5) self.fc1 = nn.Linear(64*10*10,16) self.fc2 = nn.Linear(16,32) self.fc3 = nn.Linear(32,64) self.fc4 = 
nn.Linear(64,32) self.fc5 = nn.Linear(32,6) def forward(self,X): preds = F.max_pool2d(F.relu(self.conv1(X)),(2,2)) preds = F.max_pool2d(F.relu(self.conv2(preds)),(2,2)) preds = F.max_pool2d(F.relu(self.conv3(preds)),(2,2)) # print(preds.shape) preds = preds.view(-1,64*10*10) preds = F.relu(self.fc1(preds)) preds = F.relu(self.fc2(preds)) preds = F.relu(self.fc3(preds)) preds = F.relu(self.fc4(preds)) preds = F.relu(self.fc5(preds)) return preds model = Test_Model().to(device) optimizer = optim.SGD(model.parameters(),lr=0.1) criterion = nn.CrossEntropyLoss() # test_index += 1 # wandb.init(project=PROJECT_NAME,name=f'test-{test_index}') # for _ in tqdm(range(EPOCHS)): # for i in range(0,len(X_train),BATCH_SIZE): # X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device) # y_batch = y_train[i:i+BATCH_SIZE].to(device) # model.to(device) # preds = model(X_batch.float()) # preds.to(device) # loss = criterion(preds,torch.tensor(y_batch,dtype=torch.long)) # optimizer.zero_grad() # loss.backward() # optimizer.step() # wandb.log({'loss':loss.item()}) # wandb.finish() class Test_Model(nn.Module): def __init__(self,conv1_output=16,conv2_output=32,conv3_output=64,fc1_output=16,fc2_output=32,fc3_output=64,activation=F.relu): super().__init__() self.conv3_output = conv3_output self.conv1 = nn.Conv2d(1,conv1_output,5) self.conv2 = nn.Conv2d(conv1_output,conv2_output,5) self.conv3 = nn.Conv2d(conv2_output,conv3_output,5) self.fc1 = nn.Linear(conv3_output*10*10,fc1_output) self.fc2 = nn.Linear(fc1_output,fc2_output) self.fc3 = nn.Linear(fc2_output,fc3_output) self.fc4 = nn.Linear(fc3_output,fc2_output) self.fc5 = nn.Linear(fc2_output,6) self.activation = activation def forward(self,X): preds = F.max_pool2d(self.activation(self.conv1(X)),(2,2)) preds = F.max_pool2d(self.activation(self.conv2(preds)),(2,2)) preds = F.max_pool2d(self.activation(self.conv3(preds)),(2,2)) # print(preds.shape) preds = preds.view(-1,self.conv3_output*10*10) preds = self.activation(self.fc1(preds)) 
preds = self.activation(self.fc2(preds)) preds = self.activation(self.fc3(preds)) preds = self.activation(self.fc4(preds)) preds = self.activation(self.fc5(preds)) return preds # conv1_output # conv2_output # conv3_output # fc1_output # fc2_output # fc3_output # activation # optimizer # loss # lr # num of epochs def get_loss(criterion,y,model,X): model.to('cpu') preds = model(X.view(-1,1,112,112).to('cpu').float()) preds.to('cpu') loss = criterion(preds,torch.tensor(y,dtype=torch.long).to('cpu')) loss.backward() return loss.item() def test(net,X,y): device = 'cpu' net.to(device) correct = 0 total = 0 net.eval() with torch.no_grad(): for i in range(len(X)): real_class = torch.argmax(y[i]).to(device) net_out = net(X[i].view(-1,1,112,112).to(device).float()) net_out = net_out[0] predictied_class = torch.argmax(net_out) if predictied_class == real_class: correct += 1 total += 1 net.train() net.to('cuda') return round(correct/total,3) EPOCHS = 3 conv1_outputs = [8,16,32,64,128] for conv1_output in conv1_outputs: wandb.init(project=PROJECT_NAME,name=f'conv1_output-{conv1_output}') model = Test_Model(conv1_output=conv1_output).to(device) optimizer = optim.SGD(model.parameters(),lr=0.1) criterion = nn.CrossEntropyLoss() for _ in tqdm(range(EPOCHS)): for i in range(0,len(X_train),BATCH_SIZE): X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device) y_batch = y_train[i:i+BATCH_SIZE].to(device) model.to(device) preds = model(X_batch.float()) preds.to(device) loss = criterion(preds,torch.tensor(y_batch,dtype=torch.long)) optimizer.zero_grad() loss.backward() optimizer.step() wandb.log({'loss':get_loss(criterion,y_train,model,X_train),'accuracy':test(model,X_train,y_train),'val_accuracy':test(model,X_test,y_test),'val_loss':get_loss(criterion,y_test,model,X_test)}) for index in range(10): print(torch.argmax(preds[index])) print(y_batch[index]) print('\n') wandb.finish() ```
github_jupyter
<img src="logo.png" align="left"> ## Stackarator Tutorial Welcome to the Stackarator tutorial! Stackarator is a tool for stacking (sub-)millimetre datacubes of extended sources to extract weak lines. For instance, perhaps you have $^{12}$CO ALMA observations of a nice extended galaxy/protoplanetary disc/molecular cloud and $^{13}$CO was in the lower sideband. You may not detect the weaker line purely in the datacubes, but by using the $^{12}$CO velocity information you can stack the data and pull the signal out. Below I will show you how to use this tool to do just that! ### Stackarator install To start we need to install Stackarator. This can be done using PyPI, with the command `pip install stackarator`. You can add `--user` if you need to install on a system where you don't have admin rights. Stackarator requires the following modules, which pip should install automatically for you if you don't have them already: * numpy * matplotlib * scipy * tqdm If you install Stackarator from the github source files then you will want to ensure you have these packages installed yourself. ### Example usage Here we will show a basic example of Stackarator usage, using a noiseless mock observation created using the `KinMS` python package (see https://github.com/TimothyADavis/KinMSpy). This allows us to modify the noise level etc on the fly. I am also using the plotting tool that comes with that package to visualise the data for you here, but that's not required in any way by Stackarator. ``` #imports from stackarator import stackarator import numpy as np %matplotlib inline import matplotlib.pyplot as plt from kinms.utils.KinMS_figures import KinMS_plotter # just for making nice plots ``` Now we can initialise the stackarator object, and read in our fits datacube. This datacube has RA-DEC-velocity axes. RA-DEC-Frequency cubes are also supported, but it should be noted that internally the code will convert to velocities using the _radio_ redshift definition. 
``` stack=stackarator() stack.read_fits_cube("TestGalaxy_simcube.fits",rms=0) ``` If you don't supply the RMS then the code will try and estimate it from channels 2->5 in your cube - if that won't work for you then remember to supply it directly. Lets look and see what our galaxy looks like. You can access various properties of your loaded datacube to help with this. ``` KinMS_plotter(stack.datacube, stack.xcoord.size, stack.ycoord.size, stack.vcoord.size*stack.dv,\ 1, stack.dv, [stack.bmaj,stack.bmin,stack.bpa],posang=90).makeplots() ``` Now we can read in the velocity field we want to use to stack the data. This doesnt have to be evaluated on the same pixel grid, or even observed at the same resolution, as 2D interpolation is used to match it to the datacube. In this case its just the moment one of the data shown above. ``` stack.read_fits_mom1('TestGalaxy_moment1.fits') ``` Note that if your moment one has been centred around zero, then the code will check and see if the original systemic velocity is encoded in the SYSVEL fits header keyword. If not then you will need to pass the vsys (in km/s) to this command. E.g. `stack.read_fits_mom1('TestGalaxy_moment1.fits',vsys=1194)` Now we can stack this data! Obviously its noiseless right now, so its a very easy case! ``` vout,outspec,outrms,outn = stack.stack() plt.plot(vout,outspec,drawstyle='steps-mid') plt.xlabel("Velocity (km/s)") plt.ylabel("Intensity (Jy/beam)") plt.xlim(-200,200) ``` As you can see, we get a nice single peak, with an _approximately_ gaussian shape. NOTE: For galaxies at least, the stacked line is unlikely to be exactly gaussian. As seen in the example above, objects where the rotation curve rises quickly at the centre will show broad wings around the central gaussian peak. This is _not_ a signature of non-circular motions (our model galaxy only has a perfectly rotating gas disc), but simply reflects the presence of beam smearing. 
If you want to fit a function to estimate total fluxes then a nested double gaussian usually provides a good fit. Now, as we have a perfect noiseless example, we can check for consistency! Do we retrieve all the flux in the stacked spectrum? ``` print("Total flux in this stacked line is",outspec.sum()*stack.dv,"Jy/beam km/s") print("Total flux in the original cube is",stack.datacube.sum()*stack.dv,"Jy/beam km/s") ``` Yes we do! Now we can try something more realistic. Lets add significant noise, and try again. ``` from numpy.random import default_rng rng = default_rng() rms=3e-2 noise=(rng.standard_normal((stack.xcoord.size,stack.ycoord.size,stack.vcoord.size))*rms) import copy stack2=copy.deepcopy(stack) stack2.datacube=stack.datacube+noise stack2.rms=rms ``` Lets plot the integrated spectra from both cubes, and see how much of a mess we made: ``` plt.plot(stack2.vcoord,stack2.datacube.sum(axis=0).sum(axis=0),drawstyle='steps-mid',label="noisy") plt.plot(stack.vcoord,stack.datacube.sum(axis=0).sum(axis=0),drawstyle='steps-mid',label="noiseless") plt.xlim(-500,500) plt.xlabel("Velocity (km/s)") plt.ylabel("Intensity (Jy/beam)") plt.legend() ``` Lets see what stackarator makes of this! ``` vout2,outspec2,outrms2,outn2 = stack2.stack() plt.errorbar(vout2,outspec2,yerr=outrms2,drawstyle='steps-mid',label="noisy") plt.plot(vout,outspec,drawstyle='steps-mid',label="noiseless") plt.xlim(-200,200) plt.xlabel("Velocity (km/s)") plt.ylabel("Intensity (Jy/beam)") plt.legend() ``` As you can see we are able to retrive the signal buried in the noisy data. Stackarator provides us with uncertanty estimates for each channel, and indicates the number of spaxels summed (the `outn` variable above). ## More advanced usage Stackarator has other (hopefully) useful features built in! We will cover two of these here. ### Elliptical apertures Stackarator can define elliptical regions to conduct our stacking within. 
In the code below we will take advantage of this to stack the spectra in two different apertures.

```
gal_centre=[12.,10.] # RA and Dec of our mock galaxy centre
inc=45 #inclination of our mock galaxy
pa=90 #position angle of our mock galaxy

## stack the inner 10 arcseconds
stack.define_region_ellipse(gal_centre,inc,pa,rad_inner=0,rad_outer=10)
vout_inner,outspec_inner,outrms_inner,outn_inner = stack.stack()

## Now stack the 10-20 arcsecond region
stack.define_region_ellipse(gal_centre,inc,pa,rad_inner=10,rad_outer=20)
vout_outer,outspec_outer,outrms_outer,outn_outer = stack.stack()

plt.plot(vout_inner,outspec_inner,drawstyle='steps-mid',label="inner")
plt.plot(vout_outer,outspec_outer,drawstyle='steps-mid',label="outer")
plt.xlim(-200,200)
plt.legend()
```

You can really see the difference in the shape of the stacked line caused by beam smearing. This elliptical aperture stacking can be useful both for creating radial profiles of faint lines, and to define masks to just sum part of your datacube.

### Reading in arrays rather than fits files

If you are working with models, for instance, you might not want to read your data in from FITS files. Stackarator provides two convenience functions to enable this:

Calls to `stack.read_fits_cube(filename,rms=rms)` can be replaced with calls to `stack.input_cube(cube,xcoord,ycoord,vcoord,rms=rms)`, where cube is a 3D-array (X,Y,Vel), and xcoord, ycoord and vcoord are 1D arrays defining the X,Y and Velocity axes.

In a similar way, calls to `stack.read_fits_mom1(filename,vsys=vsys)` can be replaced with `stack.input_mom1(x1,y1,mom1,vsys=vsys)` where mom1 is a 2D array of intensity-weighted mean velocities, and x1 and y1 are 1D arrays defining the image axes.

<b>Note- if you use these options be careful about the array axis ordering! Python loves to switch this around - you may need to pass in the transpose of e.g. the mom1 if python is up to those tricks!</b> e.g. `stack.input_mom1(x1,y1,mom1.T,vsys=vsys)`
github_jupyter
# Lattice light sheet raw data deskew / rotate in Python -- step by step. Volker.Hilsenstein@monash.edu ### Aim: This Jupyter notebook attempts to exlpain the steps involved in deskewing a lattice light sheet raw image volume and rotating it back to coverslip coordinates. ### Credits: * David Potter (Monash) and Keith Schulze for helpful discussions and the sample dataset. * Talley Lambert for helpful discussions. * Martin Weigert for assistance and bugfixes to gputools. * The general workflow outlined in this notebook is modelled on the original Janelia Matlab code * Felix Kraus from the Ryan Lab at Monash University for the sample image ### Further Resources * [gputools](https://github.com/maweigert/gputools) * [Keith Schulzes Scala code](https://monash-merc.github.io/llsm/) * Harvard (Talley Lambert) have some useful information and visualization here (https://cbmf.hms.harvard.edu/lattice-faqs/) ### Reference Chen, B.-C., Legant, W. R., Wang, K., Shao, L., Milkie, D. E., Davidson, M. W., et al. (2014). Lattice light-sheet microscopy: imaging molecules to embryos at high spatiotemporal resolution. *Science* (New York, N.Y.), 346(6208), 1257998–1257998. [http://doi.org/10.1126/science.1257998](http://doi.org/10.1126/science.1257998) # Acquisition with a lattice light sheet microscope A lattice light sheet microscope based on the Janelia design (see the reference above) has a light sheet that is incident at an oblique angle to the sample coverslip. Due to the oblique light sheet angle * the x,y coordinate directiond of individual slices are not perpendicular to the stage z axis, and * the acquired image stack appears skewed (sheared is the better term). I found it helpful to use a deck of cards as a tactile model for the captured image stack. 
During acquisition, the deck of cards is oriented like this (the sample projection is represented by a circle drawn with pencil):

![CARDS1](./notebook_illustrations/cards1.jpg)

However the representation as a `.tif` stack is like this:

![CARDS2](./notebook_illustrations/cards2.jpg)

We can shift each card such that the sample is no longer distorted:

![CARDS3](./notebook_illustrations/cards3.jpg)

Such a shift can be represented as an affine transform (specifically: a shearing transform).

## Important parameters

The voxel spacing in x,y is determined by the pixel pitch of the camera and the magnification of the detection objective. The voxel spacing in z is determined by the stage step size in z direction (coverslip z) and the angle of the light sheet. Typically (apart from the stage z step size) these values are fixed for a given lattice light sheet microscope. For our particular microscope the values are:

* light sheet angle: 31.8 degrees
* x,y voxel spacing = 0.1040 $\mu$m

If the stage z step size is $z_{stage}$, the z-voxel spacing is given as

* $z_{voxel} = \sin(31.8^\circ) \cdot z_{stage}$

# Workflow outline:

The workflow for deskewing and rotating the raw volume back to coverslip coordinates is as follows:

1. Deskew the volume using a shearing transform.
2. Make the pixel spacing isotropic by scaling and resampling along the Z axis.
3. Rotate the volume such that the z axis of the volume is aligned with the coverslip.

Steps 2 and 3 are optional but may help for downstream visualization and processing. Each of these steps can be expressed as an [affine transform](https://en.wikipedia.org/wiki/Affine_transformation), that maps a location in the source volume to a location in the output volume. Such an affine transformation can be represented by a matrix multiplication of the source coordinate (augmented with a 1) by an augmented 4x4 matrix. In the following we will demonstrate each step individually using three successive affine transforms.
This is for educational purposes only. Because affine transformations are linear, one can combine all three steps into a single affine transformation. The combined transform can be calculated by a simple matrix multiplication of the individual matrices. # Implementation * We use `tifffile` to read the source volume (you could also use `skimage.io` or `imread` as a numpy array. * Affine transforms on volumes are implemented in `scipy.ndimage`, which uses the **CPU** to process them. * Affine transforms can be computed much faster on a graphics card (**GPU**). One implementation of such a GPU accelerated affine transform is in Martin Weigert's `gputools` package (https://github.com/maweigert/gputools. You need a version of gputools `>= 0.2.8` and you need to install `pyopencl`, which can be a bit tricky. I wrote a wrapper around `gputools`' `affine` function. The wrapper has the same interface as `scipy.ndimage.affine_transform` and handles padding and cropping of the array (something that is provided in `ndimage` by the `output_shape` parameter. If you have `gputools` simply uncomment the `import` statement below. * We import some helper funtions to display projections in the notebook and to calculate the output size of arrays after an affine transformation. ``` import numpy as np import tifffile import matplotlib.pyplot as plt from numpy.linalg import inv %matplotlib inline # lls_dd imports from lls_dd.transform_helpers import * from lls_dd.transforms import * ## Depending on whether you want to use ndimage or gputools, uncomment one of the following ## two lines #from scipy.ndimage import affine_transform from lls_dd.gputools_wrapper import affine_transform_gputools as affine_transform ``` # Parameters Here we define the parameters as variables. In pratice you will want to extract most of these from the metadata/settings file. 
``` xypixelsize = 0.1040 angle = 31.8 dz_stage = 0.299401 dz = np.sin(angle*np.pi/180.0)*dz_stage dx = xypixelsize deskewfactor = np.cos(angle*np.pi/180.0)*dz_stage/dx dzdx_aspect = dz/dx print("Parameter summary:") print("==================") print("dx, dy:", dx) print("dz:", dz) print("deskewfactor:", deskewfactor) print("voxel aspect ratio z-voxel/xy-voxel:", dzdx_aspect) ``` # Reading the source volume Note: the sample file is available here: https://www.dropbox.com/s/34ei5jj0qgylf8q/drp1_dendra2_test_1_CamA_ch0_stack0000_488nm_0000000msec_0018218290msecAbs.tif?dl=0 You will have to adjust the input path in the cell below! ``` # Suppress annoying warnings due to malformed tiffs from Labview import logging logging.getLogger("tifffile").setLevel(logging.ERROR) vol = tifffile.imread("../testset/drp1_dendra2_test_1_CamA_ch0_stack0000_488nm_0000000msec_0018218290msecAbs.tif") vol.shape ``` ### axis order note that the numpy array is in axis order (z,y,x) ### plot maximum intensity projections along each axis to inspect the source volume ``` plt.rcParams['figure.figsize'] = (16,8) plot_all([get_projection_montage(vol)]) ``` # Step 1: Shearing transform Referring to the deck of card model in the introcution, the amount to shift each slice (card) is given in `deskewfactor`. The required shearing matrix looks like this: ``` # deskew skew = np.eye(4) skew[2,0] = deskewfactor skew ``` Note that this transformation will change the output shape of the array. We determine the output shape using a helper function that transforms all the corner points of the input volume and determines their maximum extent after the transformation: ``` np.ndarray? np.array(vol).shape output_shape = get_output_dimensions(skew, vol) output_shape ``` ### Apply the transform. Note that `scipy.ndimage.affine_transform` and `gputools.affine` (for `gputools >= 0.2.8`) expect the inverse of the transform that we want to do. 
We choose linear interpolation (order = 0) as this is supported by both libraries. ``` deskewed = affine_transform(vol, np.linalg.inv(skew), output_shape=output_shape,order=1) plot_all([get_projection_montage(deskewed)]) ``` The deskewed data is already much easier to work with. You could stop here. However, the pixel spacing is not isotropic. This can be easily confirmed by measuring the angle. We can see it does not match the 31.8 degrees of the light sheet angle. (TODO: update illustration for new data set) ![ANGLE AFTER DESKEW](./notebook_illustrations/angle_pre_scaling.JPG") ``` # Save intermediate result for deconvolution notebook tifffile.imsave("deskewed.tif", deskewed) ``` # Step 2: Scale Z to create isotropic pixels This is very simple. We simply scale each Z value by the dz/dx aspect ratio. The affine transform implementation will deal with the upsampling for us. ``` scale = np.eye(4) scale[0,0] = dzdx_aspect scale # Calculate the output shape output_shape = get_output_dimensions(scale, deskewed) output_shape scaled_vol = affine_transform(deskewed, np.linalg.inv(scale), output_shape=output_shape, order=1) plot_all([get_projection_montage(scaled_vol)]) ``` We can confirm that the angle in the image volume now corresponds to the light sheet angle. We now have istotropic voxels. TODO: update illustration with current data set ![](./notebook_illustrations/angle_post_scaling.JPG) # Step 3. Rotate To align the volume to the coverslip coordinate system we now simply rotate to the left by the lightsheet angle. A suitable rotation matrix looks like this: ``` rot = rot_around_y(-angle) rot ``` However, we can't use this straight away as rotation would be around the origin. We want to rotate around the centre of the volume and therefore shift the centre of the volume to the origin using a translation matrix ``` shift = shift_centre(scaled_vol.shape) shift ``` After this, the volume will be larger. 
The size of the output volume will be: ``` output_shape = get_output_dimensions(rot @ shift, scaled_vol) output_shape ``` Note that the `@` sign performs matrix multiplication. We also have to shift the volume back. We do this by applying a translation that moves the origin to the centre of the output volume shape. ``` unshift = unshift_centre(output_shape) unshift ``` Now we apply the combined transform: ``` rotated = affine_transform(scaled_vol, np.linalg.inv(unshift @ rot @ shift), output_shape=output_shape, order=1) plot_all([get_projection_montage(rotated)]) ``` Now the volume is in coverslip coordinates. Doing everything step by step introduces fill pixels at intermediate steps so this would now need to be cropped. Instead we show how to perform the operation with a single transform: # All in one ... We can simply combine multiple affine transformation into a single affine transformation. This is easiest to express using matrix multiplications. In the following we already shift the source volume to the centre, as the translation only affects the rotation but not the shear and the scaling. ## However ... beware: Although it may appear elegant to combine the individual steps described above into a single transformation, I realized that this creates artefacts. The artefacts arise from the interpolation between axes that have different physical length scale. (see [this issue, comments towards the end](https://github.com/VolkerH/Lattice_Lightsheet_Deskew_Deconv/issues/22)). 
``` shift_original = shift_centre(vol.shape) shift_original combined = rot @ scale @ skew @ shift_original combined # determine final output size and required unshift output_shape = get_output_dimensions(combined, vol) unshift_final = unshift_centre(output_shape) output_shape, unshift_final # All in one all_in_one = unshift_final @ rot @ scale @ skew @ shift_original ``` # Perform all in one go and show final result ``` # Apply to source volume: processed_volume = affine_transform(vol, np.linalg.inv(all_in_one), output_shape=output_shape) plot_all([get_projection_montage(processed_volume)]) ```
github_jupyter
# Predicting the crew size from cruise ship attributes A large ship manufacturer would like us to give them accurate estimates of how many crew members a ship will require. Our model must predict how many crew members the ships will need so that this information can be passed on to the customers of these cruise ships, in order to help with the purchase decision making process. We will be creating a regression model that will help predict how many crew members will be needed for future ships. ### Importing Libraries ``` import os from pyspark.ml import Pipeline from pyspark.sql import SparkSession from pyspark.ml.linalg import Vectors from pyspark.ml.feature import VectorAssembler from pyspark.ml.feature import StringIndexer from pyspark.ml.regression import LinearRegression ``` ### Creating the `SparkSession` and importing the dataset ``` spark = SparkSession.builder.appName('ship_crews').getOrCreate() os.chdir('..') DATA_FILE = os.getcwd() + '/data/cruise_ship_info.csv' df = spark.read.csv(DATA_FILE, inferSchema=True, header=True) df.printSchema() ``` ### EDA and summaries ``` print("Rows ->", df.count()) stringCols = [item[0] for item in df.dtypes if item[1].startswith('string')] numCols = [item[0] for item in df.dtypes if item[0] not in stringCols] print(stringCols) print(numCols) df.select(numCols).describe().show(truncate=False, vertical=True) ``` We have data for **158 ships**. The **independent variables** are - 1. `Ship_name` (**string**) : The name of the ship. 2. `Cruise_line`(**string**) : The cruise line that owns that ship (recall that these are ships that have already been sold). 3. `Age` (**numeric**) : The Age of the ship. 4. `Tonnage` (**numeric**) : The weight the ship can carry. 5. `passengers` (**numeric**) : The number of passengers the ship can carry. 6. `length` (**numeric**) : The length of the ship. 7. `cabins` (**numeric**) : The number of cabins on the ship. 8. 
`passenger_density` (**numeric**) : How many passengers the ship can sustain in a pre-determined area. The **dependent variable**, that we are trying to predict - - `crew` (**numeric**) : The number of crew needed to service the ship. ### Data Transformations To get the data into `pyspark.ml`, we need to transform it into a `DenseVector` format. To do so we must first encode the string columns to their "numerical equivalents". To do so, we use `StringIndexer` in conjunction with PySpark `Pipelines`. ``` indexers = [StringIndexer(inputCol=column, outputCol=column+"_index").fit(df) for column in stringCols] pipeline = Pipeline(stages=indexers) new_df = pipeline.fit(df).transform(df) new_df.head(1)[0].asDict() ``` Using `Pipelines` and `StringIndexer` we have encoded the string columns to numeric ones, with the appendix `index` in the column names. We can now use `VectorAssember` to transform the numerical features to a `DenseVector` to build our model. ``` new_df.printSchema() stringCols = [item[0] for item in new_df.dtypes if item[1].startswith('string')] numCols = [item[0] for item in new_df.dtypes if item[0] not in stringCols] new_df = new_df.select(numCols) new_df.printSchema() ``` `VectorAssember` requires as input our **independent variables**, i.e **features**. ``` indep = list(set(numCols) - set(['crew'])) indep assembler = VectorAssembler(inputCols = indep, outputCol = 'features') output = assembler.transform(new_df) output.printSchema() output.head(1)[0].asDict() ``` The `features` column contains all the numeric **independent variables** in `DenseVector` representation. The next step is to build our final dataset containing only `features` and **dependent variable**. 
``` final_df = output.select(['features', 'crew']) final_df.show(n = 5, truncate=False, vertical=True) ``` ### Train-Test Split ``` train, test = final_df.randomSplit([0.75, 0.25]) train.describe().show() test.describe().show() ``` ### Fit the model on the training data ``` lr = LinearRegression(labelCol='crew') lr_model = lr.fit(train) ``` ### Evaluate the model on the testing data ``` eval_results = lr_model.evaluate(test) eval_results.residuals.show() ``` ### Evaluation Metrics ``` print("RMSE ::", round(eval_results.rootMeanSquaredError, 4)) print("R2 ::", round(eval_results.r2, 4)) final_df.describe().show() ``` We have fit a linear regression model to our training data and upon testing have seen an `r-squared` of $0.8914$ and a `RMSE` of $1.2873$, considering that the `mean(crew)` is $7.8$ our RMSE, indicates that the model is fairly accurate.
github_jupyter
class imgDataset(Dataset):
    """Training dataset for the 18-class mask-classification task.

    Reads ``new_train.csv`` from *csv_path*.  The CSV is assumed to carry a
    'Full Path' column (absolute path to the image file) and a 'NewClass'
    column (integer class label) -- assumed from the usage below; confirm
    against whatever produced new_train.csv.
    """

    def __init__(self, csv_path):
        self.main_path = csv_path
        # One row per training image; drives both __len__ and __getitem__.
        self.total_data = pd.read_csv(os.path.join(self.main_path, 'new_train.csv'))
        # Center-crop to 384x384, then shrink to 256x256, then normalize
        # each channel to roughly [-1, 1].
        self.transform = transforms.Compose([
            transforms.CenterCrop(384),
            transforms.Resize(256),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])

    def __getitem__(self, index):
        """Return ``(label, image_tensor)`` for the *index*-th row.

        Note the (label, image) order: the training loop in this notebook
        unpacks batches as ``for labels, img in loader``.
        """
        row = self.total_data.iloc[index]
        image = self.transform(Image.open(row['Full Path']))
        return row['NewClass'], image

    def __len__(self):
        return self.total_data.shape[0]
def getModel(model_name):
    """Build one of the candidate pretrained backbones with its classifier
    head adapted to 18 output classes, and move it to ``device``.

    Relies on notebook-level globals: ``device`` (torch.device), ``models``
    (torchvision.models), ``EfficientNet`` and ``timm``.

    Parameters
    ----------
    model_name : str
        One of 'resnet152', 'resnet34', 'EfficientNet',
        'vit_base_patch16_224'.

    Returns
    -------
    torch.nn.Module on ``device``.

    Raises
    ------
    ValueError
        For an unsupported *model_name*.  (The original silently fell
        through and crashed later with UnboundLocalError.)
    """
    if model_name == 'resnet152':
        model = models.resnet152(pretrained=True)
        model.fc = nn.Linear(model.fc.in_features, 18)
    elif model_name == 'resnet34':
        model = models.resnet34(pretrained=True)
        model.fc = nn.Linear(model.fc.in_features, 18)
    elif model_name == 'EfficientNet':
        # The EfficientNet helper replaces the head itself via num_classes.
        model = EfficientNet.from_pretrained('efficientnet-b0', num_classes=18)
    elif model_name == 'vit_base_patch16_224':
        model = timm.create_model('vit_base_patch16_224', pretrained=True, num_classes=18)
    else:
        raise ValueError(f"unknown model_name: {model_name!r}")
    return model.to(device)


def Train_and_Valid_check(model, model_name):
    """Train *model* for 10 epochs, printing per-epoch train/validation
    loss and accuracy, and checkpoint the full model after every epoch.

    Relies on notebook-level globals set up in earlier cells:
    ``train_loader``, ``val_loader``, ``train_dataset``, ``val_dataset``,
    ``loss_func``, ``optimizer`` and ``device``.

    Checkpoints go to
    ``models_saved/{model_name}_is_model_and_{epoch}th_epoch.pt``.
    """
    from tqdm.notebook import tqdm
    MODEL_PATH = "models_saved"
    num_epochs = 10
    for epoch in range(num_epochs):
        # ---- training phase ----
        model.train()
        running_loss = 0.
        running_corrects = 0
        for labels, img in tqdm(train_loader):
            labels = labels.to(device)
            img = img.to(device)
            optimizer.zero_grad()
            outputs = model(img)
            _, preds = torch.max(outputs, 1)
            loss = loss_func(outputs, labels)
            loss.backward()
            optimizer.step()
            # loss.item() is the batch mean; scale by batch size so the
            # epoch total averages per-sample below.
            running_loss += loss.item() * img.size(0)
            running_corrects += torch.sum(preds == labels.data)
        epoch_loss = running_loss / len(train_dataset)
        epoch_acc = running_corrects / len(train_dataset) * 100.
        print('[Train #{}] Loss: {:.4f} Acc: {:.4f}% '.format(epoch, epoch_loss, epoch_acc))

        # ---- validation phase (no gradients) ----
        model.eval()
        with torch.no_grad():
            running_loss = 0.
            running_corrects = 0
            for labels, inputs in tqdm(val_loader):
                inputs = inputs.to(device)
                labels = labels.to(device)
                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
                loss = loss_func(outputs, labels)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / len(val_dataset)
            epoch_acc = running_corrects / len(val_dataset) * 100.
            print('[Validation #{}] Loss: {:.4f} Acc: {:.4f}% '.format(epoch, epoch_loss, epoch_acc))

        # Checkpoint every epoch.  NOTE(review): consider keeping only the
        # best-validation-accuracy checkpoint to save disk space.
        os.makedirs(MODEL_PATH, exist_ok=True)
        torch.save(model, os.path.join(MODEL_PATH, f"{model_name}_is_model_and_{epoch}th_epoch.pt"))


class TestDataset(Dataset):
    """Inference-time dataset: yields transformed images only (no labels).

    Parameters
    ----------
    img_paths : sequence of str
        Paths to the evaluation images.
    transform : callable or None
        Optional preprocessing applied to each PIL image.
    """

    def __init__(self, img_paths, transform):
        self.img_paths = img_paths
        self.transform = transform

    def __getitem__(self, index):
        image = Image.open(self.img_paths[index])
        if self.transform:
            image = self.transform(image)
        return image

    def __len__(self):
        return len(self.img_paths)
# ---- inference on the evaluation set ----
# Load the eval metadata and build absolute image paths.
submission = pd.read_csv(os.path.join(test_dir, 'info.csv'))
image_dir = os.path.join(test_dir, 'images')
image_paths = [os.path.join(image_dir, img_id) for img_id in submission.ImageID]

# Same preprocessing as training: 384x384 center crop -> 256x256 resize,
# then normalize each channel to roughly [-1, 1].
transform = transforms.Compose([
    transforms.CenterCrop(384),
    transforms.Resize(256),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

dataset = TestDataset(image_paths, transform)
loader = DataLoader(dataset, shuffle=False)

# Fall back to CPU when CUDA is unavailable (the original hard-coded
# torch.device('cuda') and crashed on CPU-only machines); move the loaded
# model onto whichever device we got.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()

# Predict the class for every evaluation image.  no_grad() is hoisted out
# of the loop: no gradients are needed anywhere during inference.
all_predictions = []
with torch.no_grad():
    for images in loader:
        images = images.to(device)
        pred = model(images).argmax(dim=-1)
        all_predictions.extend(pred.cpu().numpy())
submission['ans'] = all_predictions

# Write the submission file next to the eval data.
submission.to_csv(os.path.join(test_dir, 'submission.csv'), index=False)
print('test inference is done!')
github_jupyter
# Communication in Crisis ## Executive Summary ## Background ## Acquire Data: [Los Angeles Parking Citations](https://www.kaggle.com/cityofLA/los-angeles-parking-citations)<br> Let's acquire the parking citations data from our file, `parking-citations.csv`. __Initial findings__ - `Issue time` is quasi-normally distributed. - It's interesting to see the distribution of our activity on earth follows a normal distribution. - Agencies 50+ write the most parking citations. - Most parking citations are less than $100.00 ## Prepare - Remove spaces and lowercase all column names. - Cast `Plate Expiry Date` to datetime data type. - Cast `Issue Date` and `Issue Time` to datetime data types. - Drop columns missing >=74.42% of their values. - Drop duplicate values. - Transform Latitude and Longitude columns from NAD1983StatePlaneCaliforniaVFIPS0405 feet projection to EPSG:4326 World Geodetic System 1984: used in GPS [Standard] - Filter the data on these conditions: - Citations issued from 2017-01-01 to 2021-04-12. - Street Sweeping violations where `Violation Description` == __"NO PARK/STREET CLEAN"__ ``` # Import libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from scipy import stats import folium.plugins as plugins from IPython.display import HTML import datetime import calplot import folium import math sns.set() import src # Prepare the data using a function stored in prepare.py df = src.prep_sweep_data() # Display the first two rows df.head(2) # Check the column data types and non-null counts. df.info() ``` # Exploration --- ## Parking Enforcement is Enforced Again: Where it all started ### City Council Demands a Plan The **Los Angeles City Council** tasked the **Los Angeles Department of Transportation** (LADOT) with creating a phased plan to resume parking enforcement on October 1st. 
Delayed parking enforcement added to the city's financial strain during the pandemic, with citation revenue 62% below budget.[1] ### A Plan is Formed: How to Collect Revenue, Detailed. Outreach, Vague On September 30th city council voted to resume parking enforcement on October 15th. Between October 1st and October 14th, 2020 LADOT was responsible for informing the public [2] using social media and the press.[3] 1. `public-records\city-council-documents\LADOT-transition-plan.pdf` 2. `public-records\city-council-documents\public-outreach-period.pdf` 3. `public-records\LADOT-press-releases\enforcement.pdf` --- ## Informing the Public The Los Angeles Department of Transportation informed the public of steet sweeping violations using flyers on wind shields, the press, and social media. ### Communication Channels #### Social Media #### Flyers #### Newspapers #### TV News Let's take a look at social engagement ## Twitter: Tweets from City Officials ---- ## Street Sweeping Citations ## ### How much revenue is generated from street sweeper citations daily? ``` # Daily street sweeping citation revenue daily_revenue = df.groupby('issue_date').fine_amount.sum() daily_revenue.index = pd.to_datetime(daily_revenue.index) sns.set_context('talk') # Plot daily revenue from street sweeping citations daily_revenue.plot(figsize=(14, 7), label='Revenue', color='DodgerBlue') plt.axhline(daily_revenue.mean(), color='black', label='Average Revenue') plt.title("Daily Revenue from Street Sweeping Citations") plt.xlabel('') plt.ylabel("Revenue (in thousand's)") plt.xticks(rotation=0, horizontalalignment='center', fontsize=13) plt.yticks(range(0, 1_000_000, 200_000), ['$0', '$200', '$400', '$600', '$800']) plt.ylim(0, 1_000_000) plt.legend(loc=2, framealpha=.8); ``` > __Anomaly 1__: What happened between July/August of 2019 toh January of 2020?<br> > > __Anomaly 2__: Between March 2020 and October 2020 a Local Emergency was Declared by the Mayor of Los Angeles in response to COVID-19. 
Street Sweeping was halted to help Angelenos shelter in place. _Street Sweeping resumed on 10/15/2020_. ### Anomaly 2: Declaration of Local Emergency ``` sns.set_context('talk') # Plot daily revenue from street sweeping citations daily_revenue.plot(figsize=(14, 7), label='Revenue', color='DodgerBlue') plt.axvspan('2020-03-16', '2020-10-14', color='grey', alpha=.25) plt.text('2020-03-29', 890_000, 'Declaration of\nLocal Emergency', fontsize=11) plt.title("Daily Revenue from Street Sweeping Citations") plt.xlabel('') plt.ylabel("Revenue (in thousand's)") plt.xticks(rotation=0, horizontalalignment='center', fontsize=13) plt.yticks(range(0, 1_000_000, 200_000), ['$0', '$200', '$400', '$600', '$800']) plt.ylim(0, 1_000_000) plt.legend(loc=2, framealpha=.8); sns.set_context('talk') # Plot daily revenue from street sweeping citations daily_revenue.plot(figsize=(14, 7), label='Revenue', color='DodgerBlue') plt.axhline(daily_revenue.mean(), color='black', label='Average Revenue') plt.axvline(datetime.datetime(2020, 10, 15), color='red', linestyle="--", label='October 15, 2020', alpha=.2) plt.title("Daily Revenue from Street Sweeping Citations") plt.xlabel('') plt.ylabel("Revenue (in thousand's)") plt.xticks(rotation=0, horizontalalignment='center', fontsize=13) plt.yticks(range(0, 1_000_000, 200_000), ['$0', '$200', '$400', '$600', '$800']) plt.ylim(0, 1_000_000) plt.legend(loc=2, framealpha=.8); ``` ## Twitter ## Hypothesis Test ### General Inquiry Is the daily citation revenue after 10/15/2020 significantly greater than average? ### Z-Score $H_0$: The daily citation revenue after 10/15/2020 is less than or equal to the average daily revenue. $H_a$: The daily citation revenue after 10/15/2020 is significantly greater than average. 
``` confidence_interval = .997 # Directional Test alpha = (1 - confidence_interval)/2 # Data to calculate z-scores using precovid values to calculate the mean and std daily_revenue_precovid = df.loc[df.issue_date < '2020-06-01'] daily_revenue_precovid = daily_revenue_precovid.groupby('issue_date').fine_amount.sum() mean_precovid, std_precovid = daily_revenue_precovid.agg(['mean', 'std']).values mean, std = daily_revenue.agg(['mean', 'std']).values # Calculating Z-Scores using precovid mean and std z_scores_precovid = (daily_revenue - mean_precovid)/std_precovid z_scores_precovid.index = pd.to_datetime(z_scores_precovid.index) sig_zscores_pre_covid = z_scores_precovid[z_scores_precovid>3] # Calculating Z-Scores using entire data z_scores = (daily_revenue - mean)/std z_scores.index = pd.to_datetime(z_scores.index) sig_zscores = z_scores[z_scores>3] sns.set_context('talk') plt.figure(figsize=(12, 6)) sns.histplot(data=z_scores_precovid, bins=50, label='preCOVID z-scores') sns.histplot(data=z_scores, bins=50, color='orange', label='z-scores') plt.title('Daily citation revenue after 10/15/2020 is significantly greater than average', fontsize=16) plt.xlabel('Standard Deviations') plt.ylabel('# of Days') plt.axvline(3, color='Black', linestyle="--", label='3 Standard Deviations') plt.xticks(np.linspace(-1, 9, 11)) plt.legend(fontsize=13); a = stats.zscore(daily_revenue) fig, ax = plt.subplots(figsize=(8, 8)) stats.probplot(a, plot=ax) plt.xlabel("Quantile of Normal Distribution") plt.ylabel("z-score"); ``` ### p-values ``` p_values_precovid = z_scores_precovid.apply(stats.norm.cdf) p_values = z_scores_precovid.apply(stats.norm.cdf) significant_dates_precovid = p_values_precovid[(1-p_values_precovid) < alpha] significant_dates = p_values[(1-p_values) < alpha] # The chance of an outcome occuring by random chance print(f'{alpha:0.3%}') ``` ### Cohen's D ``` fractions = [.1, .2, .5, .7, .9] cohen_d = [] for percentage in fractions: cohen_d_trial = [] for i in range(10000): 
sim = daily_revenue.sample(frac=percentage) sim_mean = sim.mean() d = (sim_mean - mean) / (std/math.sqrt(int(len(daily_revenue)*percentage))) cohen_d_trial.append(d) cohen_d.append(np.mean(cohen_d_trial)) cohen_d fractions = [.1, .2, .5, .7, .9] cohen_d_precovid = [] for percentage in fractions: cohen_d_trial = [] for i in range(10000): sim = daily_revenue_precovid.sample(frac=percentage) sim_mean = sim.mean() d = (sim_mean - mean_precovid) / (std_precovid/math.sqrt(int(len(daily_revenue_precovid)*percentage))) cohen_d_trial.append(d) cohen_d_precovid.append(np.mean(cohen_d_trial)) cohen_d_precovid ``` ### Significant Dates with less than a 0.15% chance of occuring - All dates that are considered significant occur after 10/15/2020 - In the two weeks following 10/15/2020 significant events occured on __Tuesday's and Wednesday's__. ``` dates_precovid = set(list(sig_zscores_pre_covid.index)) dates = set(list(sig_zscores.index)) common_dates = list(dates.intersection(dates_precovid)) common_dates = pd.to_datetime(common_dates).sort_values() sig_zscores pd.Series(common_dates.day_name(), common_dates) np.random.seed(sum(map(ord, 'calplot'))) all_days = pd.date_range('1/1/2020', '12/22/2020', freq='D') significant_events = pd.Series(np.ones_like(len(common_dates)), index=common_dates) for i in significant_events.index: print(i) calplot.calplot(significant_events, figsize=(18, 12), cmap='coolwarm_r'); ``` Reject the null hypothesis that daily citation revenue after 10/15/2020 is less than or equal to the average daily revenue. - 2020-10-15 - 2020-10-16 - 2020-10-19 - 2020-10-20 - 2020-10-21 - 2020-10-22 - 2020-10-27 - 2020-10-28 - 2020-10-29 ## Which parts of the city were impacted the most? 
``` df_outliers = df.loc[df.issue_date.isin(list(common_dates.astype('str')))] df_outliers.reset_index(drop=True, inplace=True) print(df_outliers.shape) df_outliers.head() # m = folium.Map(location=[34.0522, -118.2437], # min_zoom=8, # max_bounds=True) # mc = plugins.MarkerCluster() # for index, row in df_outliers.iterrows(): # mc.add_child( # folium.Marker(location=[str(row['latitude']), str(row['longitude'])], # popup='Cited {} {} at {}'.format(row['day_of_week'], # row['issue_date'], # row['issue_time'][:-3]), # control_scale=True, # clustered_marker=True # ) # ) # m.add_child(mc) ``` Transferring the map to Tableau # Conclusions
github_jupyter
# Programming Assignment 3 ## Problem Statement Last week, the vectors that we tried to analyze had length, or dimension, of 365, corresponding to the number of days in a year. We outsourced the computation of the math into the `lib/computeStatistics.py` file. In this programming assignment, your task is to fill in the function that is required to efficiently calculate the covariance matrix. All of the necessary helper code is included in this notebook. However, we advise you to go over the necessary material, the EdX videos and the corresponding notebooks before you attempt this Programming Assignment. ### Computing Covariance Efficiently First, we refresh some of the strategy we went over during last week's videos and notebooks to efficiently compute the covariance matrix while also calculating the mean of the set of vectors. You are required to use these strategies effectively in this assignment. To perform [Principal component analysis (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis) on vectors, we separated out two steps to this process: 1) Computing the covariance matrix: this is a simple computation. 2) Computing the eigenvector decomposition. <font size=4>**In this homework, you will fill in a function that is necessary in order to correctly calculate the covariance matrix in an efficient manner.**</font> ## Reviewing the Theory ### Computing the covariance matrix Suppose that the data vectors are the column vectors denoted $x$ then the covariance matrix is defined to be $$ E(x x^T)-E(x)E(x)^T $$ Where $x x^T$ is the **outer product** of $x$ with itself. 
If the data that we have is $x_1,x_2,x_n$ then we estimate the covariance matrix: $$ \hat{E}(x x^T)-\hat{E}(x)\hat{E}(x)^T $$ the estimates we use are: $$ \hat{E}(x x^T) = \frac{1}{n} \sum_{i=1}^n x_i x_i^T,\;\;\;\;\; \hat{E}(x) = \frac{1}{n} \sum_{i=1}^n x_i $$ ### Covariance matrix while taking care of `nan`s <a id='compCovariance'></a> #### The effect of `nan`s in arithmetic operations * We use an RDD of numpy arrays, instead of Dataframes. * Why? Because unlike dataframes, `numpy.nanmean` treats `nan` entries correctly. #### Calculating the mean of a vector with nan's * We often get vectors $x$ in which some, but not all, of the entries are `nan`. * We want to compute the mean of the elements of $x$. * If we use `np.mean` we will get the result `nan`. * A useful alternative is to use `np.nanmean` which removes the `nan` elements and takes the mean of the rest. #### Computing the covariance when there are `nan`s The covariance is a mean of outer products. We calculate two matrices: * $S$ - the sum of the matrices, where `nan`->0 * $N$ - the number of not-`nan` element for each matrix location. We then calculate the mean as $S/N$ (division is done element-wise) ## Notebook Setup ``` import numpy as np from numpy import linalg as LA from pyspark import SparkContext,SparkConf sc = SparkContext() ``` # Computing Statistics ## Computing the mean together with the covariance <a id='compCovariances'></a> To compute the covariance matrix we need to compute both $\hat{E}(x x^T)$ and $\hat{E}(x)$. Using a simple trick, we can compute both at the same time. Here is the trick: lets denote a $d$ dimensional **column vector** by $\vec{x} = (x_1,x_2,\ldots,x_d)$ (note that the subscript here is the index of the coordinate, not the index of the example in the training set as used above). The augmented vector $\vec{x}'$ is defined to be the $d+1$ dimensional vector $\vec{x}' = (1,x_1,x_2,\ldots,x_d)$. 
The outer product of $\vec{x}'$ with itself is equal to $$ \vec{x}' {\vec{x}'}^T = \left[\begin{array}{c|ccc} 1 & &{\vec{x}}^T &\\ \hline \\ \vec{x} & &\vec{x} {\vec{x}}^T \\ \\ \end{array} \right] $$ Where the lower left matrix is the original outer product $\vec{x} {\vec{x}}^T$ and the first row and the first column are $\vec{x}^T$ and $\vec{x}$ respectively. Now suppose that we take the average of the outer product of the augmented vector and convince yourself that: $$ \hat{E}(\vec{x}' {\vec{x}'}^T) = \frac{1}{n} \sum_{i=1}^n {\vec{x}'}_i {\vec{x}'}_i^T = \left[\begin{array}{c|ccc} 1 & &\hat{E}(\vec{x})^T &\\ \hline \\ \hat{E}(\vec{x}) & &\hat{E}(\vec{x} {\vec{x}}^T) \\ \\ \end{array} \right] $$ So indeed, we have produced the outer product average together with (two copies of) the average $\hat{E}(\vec{x})$ ## Helper Functions ### OuterProduct #### Description The function <font color="blue">outerProduct</font> computes outer product and indicates which locations in matrix are undefined. **Input**: X is a 1 x n matrix **Output**: The output is a tuple of: 1. O is a n x n matrix which is the outer product of the two matrices. 2. N is a n x n matrix which represents whether each position in the matrix has a "valid" non-NaN element. ``` def outerProduct(X): O=np.outer(X,X) N=1-np.isnan(O) return (O,N) testOuter = [np.nan, 12, np.nan, 0, 7, np.nan] outer = outerProduct(testOuter) print(outer[0]) print(outer[1]) np.cumsum(outer[1]) ``` ### sumWithNan #### Description The function <font color="blue">sumWithNan</font> adds two pairs of (**matrix**, **count**) where **matrix** and **count** are the O and N returned from the outerProduct function. **Input** : M1 and M2 are tuples of n x n matrices. The first matrix in each tuple is derived from the outer product and the second matrix in each tuple represents the count of non-NaN elements in that position **Output** : Two matrices. 
The first (X) contains the Nansum of elements in the outer-product matrix in M1 and M2 and the second (N) contains the count of non-Nan elements in M1 and M2. This output has the same shape as the input i.e a tuple of n x n matrices. ``` def sumWithNan(M1,M2): (X1,N1)=M1 (X2,N2)=M2 N=N1+N2 X=np.nansum(np.dstack((X1,X2)),axis=2) return (X,N) ``` # Exercise ## Description The function <font color="blue">HW_func</font> takes in two $n$ x $n$ matrices, S and N. The first $n$ x $n$ matrix, `S`, is the output from reducing the outer product of vectors by taking the sum at each position in the outer product. Remember from the theory that the vectors have been augmented with a leading 1 to facilitate the computation of the mean and the co-variance in the same computation. The second $n$ x $n$ matrix, `N`, is derived from reducing boolean matrices that denote the presence of a valid value in the outer product of a vector. The reduction is done by summing up the boolean matrices. This means that the $n$ x $n$ matrix would contain the count of valid not-nan entries at each position in the outer product. For example, if the vectors that we want to do PCA on are: `[array([-0.09993104, nan]), array([-0.17819987, -0.70368551])]` Then the matrix `S` would be: `[[ 2. -0.2781309 -0.70368551] [-0.2781309 0.0417414 0.12539666] [-0.70368551 0.12539666 0.4951733 ]]` And the matrix `N` would be: `[[2 2 1] [2 2 1] [1 1 1]]` Note how `S` and `N` are generated: ``` x = np.array([1, -0.09993104, np.nan]) y = np.array([1, -0.17819987, -0.70368551]) S,N = sumWithNan(outerProduct(x),outerProduct(y)) ``` The matrices `S` and `N` are both `numpy.ndarrays` You have to calculate and return the following statistics: 1. E : The nan-sum of the vectors, as described in [Computing Covariance With NaNs](#compCovariance) 2. NE : The number of not-nan entries for each coordinate of the vectors 3. Mean : The Mean vector (ignoring nans) 4. O : The sum of the outer products 5. 
NO : The number of non-nans in the outer product. Be careful with the data types of variables returned from ```HW_func()```. The assertion tests should help you in figuring this out. **<font color="magenta" size=2>Example Code</font>** ``` python my_S = np.array([[1, 2, 3],[2,4,5][3,5,6]]) my_N = np.array([[2, 2, 1],[2,2,1],[1,1,1]]) HW_func(my_S, my_N) ``` **<font color="blue" size=2>Example Output</font>** ``` python E = np.array([2, 3]) NE = np.array([ 2., 1.]) Mean = np.array([ 1., 3.]) O = np.array([[4, 5], [5, 6]]) NO = array([[ 2., 1.], [ 1., 1.]]) ``` ## Definition ``` def HW_func(S,N): # E is the sum of the vectors # NE is the number of not-nan entries for each coordinate of the vectors # Mean is the Mean vector (ignoring nans) # O is the sum of the outer products # NO is the number of non-nans in the outer product. # For the following cases you need to use Numpy indexing, # you will get different results if you use A[x][y] instead # of A[x,y] # The sum is in the first line because of the outer # product executed previously on S. E = S[0, 1:].astype(np.float64) # Same idea to N. NE = N[0, 1:].astype(np.float64) # Basic mean Mean = E / NE.astype(np.float64) # Since we appended [1] to the start of every vector before calculating S, # the outer sum is the matrix excluding first row and column. O = S[1:, 1:].astype(np.float64) # Same idea applied on N. 
NO = N[1:, 1:].astype(np.float64) return E, NE, Mean, O, NO my_S = np.array([[1, 2, 3],[2,4,5],[3,5,6]]) my_N = np.array([[2, 2, 1],[2,2,1],[1,1,1]]) ret = HW_func(my_S, my_N) S = np.array([[12,7,0],[7,14,6],[0,6,8]]) N = np.array([[2,2,2],[2,2,2],[2,2,2]]) E, NE, Mean, O, NO = HW_func(S, N) print(E) ``` ## Tests ### Test 1 ``` S = np.array([[ 2.0, 0.24553034, -0.03128947], [0.24553034, 0.06099381, -0.38770712], [-0.03128947, -0.38770712, 4.77673193]]) N = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]) E, NE, Mean, O, NO = HW_func(S, N) expected_result = {'E': np.array([ 0.24553034, -0.03128947]), 'NE': np.array([ 2., 2.]), 'O': np.array([[ 0.06099381, -0.38770712], [-0.38770712, 4.77673193]]), 'NO': np.array([[ 2., 2.],[ 2., 2.]]), 'Mean': np.array([ 0.12276517, -0.01564473]) } ``` #### Type Check ``` assert type(E) == np.ndarray, "Incorrect return type. Should return np.array" assert type(NE) == np.ndarray, 'Invalid return type. We expected numpy.ndarray' assert type(NE[0]) == np.float64, 'Invalid return type. Each element in numpy.ndarray should \ be numpy.float64' assert type(Mean) == np.ndarray, "Incorrect return type. Should return np.array" assert type(O) == np.ndarray, "Incorrect return type. Should return np.array" assert type(NO) == np.ndarray, 'Invalid return type. We expected numpy.ndarray' assert type(NO[0,0]) == np.float64, 'Invalid return type. Each element in numpy.ndarray should \ be numpy.float64. 
Your elements have datatype ' + str(type(NO[0,0])) ``` #### Shape Check ``` assert E.shape == (2,), "Returned np.array should be a vector of size n-1 = 2" assert NE.shape == (2,), "Returned np.array should be a vector of size n-1 = 2" assert Mean.shape == (2,), "Returned np.array should be a vector of size n-1 = 2" assert O.shape == (2,2), "We expected a numpy ndarray of shape (2,2), You returned: " + str(O.shape) assert NO.shape == (2,2), "We expected a numpy ndarray of shape (2,2), You returned: " + str(NO.shape) ``` #### Value Check ``` assert (np.around(E, decimals=6) == np.around(expected_result['E'], decimals=6)).all(), "Output value of E does not match expected output of function. \ You returned " + str(E) assert (np.around(NE, decimals=6) == np.around(expected_result['NE'], decimals=6)).all(), "Output value of NE does not match expected output of function. \ You returned " + str(NE) assert (np.around(Mean, decimals=6) == np.around(expected_result['Mean'], decimals=6)).all(), "Output value of Mean does not match expected output of function. \ You returned " + str(Mean) assert (np.around(O, decimals=6) == np.around(expected_result['O'], decimals=6)).all(), "Output value of O does not match expected output of function. \ You returned " + str(O) assert (np.around(NO, decimals=6) == np.around(expected_result['NO'], decimals=6)).all(), "Output value of NO does not match expected output of function. 
\ You returned " + str(NO) ``` ### Test 2 ``` S = np.array([[ 2., -0.92050828, -0.90843676], [-0.92050828, 0.51012277, 0.60698693], [-0.90843676, 0.60698693, 0.82525735]]) N = np.array([[2, 2, 1], [2, 2, 1], [1, 1, 1]]) E, NE, Mean, O, NO = HW_func(S, N) expected_result = {'E': np.array([-0.92050828, -0.90843676]), 'NE': np.array([ 2., 1.]), 'O': np.array([[0.51012277, 0.60698693], [0.60698693, 0.82525735]]), 'NO': np.array([[ 2., 1.], [ 1., 1.]]), 'Mean': np.array([-0.46025414, -0.90843676]) } assert (np.around(E, decimals=6) == np.around(expected_result['E'], decimals=6)).all(), "Output value of E does not match expected output of function. \ You returned " + str(E) assert (np.around(NE, decimals=6) == np.around(expected_result['NE'], decimals=6)).all(), "Output value of NE does not match expected output of function. \ You returned " + str(NE) assert (np.around(Mean, decimals=6) == np.around(expected_result['Mean'], decimals=6)).all(), "Output value of Mean does not match expected output of function. \ You returned " + str(Mean) assert (np.around(O, decimals=6) == np.around(expected_result['O'], decimals=6)).all(), "Output value of O does not match expected output of function. \ You returned " + str(O) assert (np.around(NO, decimals=6) == np.around(expected_result['NO'], decimals=6)).all(), "Output value of NO does not match expected output of function. 
\ You returned " + str(NO) ``` ### Hidden test 1 ``` # Hidden Tests here # # AUTOGRADER TEST - DO NOT REMOVE # # Hidden Tests here # # AUTOGRADER TEST - DO NOT REMOVE # # Hidden Tests here # # AUTOGRADER TEST - DO NOT REMOVE # # Hidden Tests here # # AUTOGRADER TEST - DO NOT REMOVE # # Hidden Tests here # # AUTOGRADER TEST - DO NOT REMOVE # ``` ### Hidden test 2 ``` # Hidden Tests here # # AUTOGRADER TEST - DO NOT REMOVE # # Hidden Tests here # # AUTOGRADER TEST - DO NOT REMOVE # # Hidden Tests here # # AUTOGRADER TEST - DO NOT REMOVE # # Hidden Tests here # # AUTOGRADER TEST - DO NOT REMOVE # # Hidden Tests here # # AUTOGRADER TEST - DO NOT REMOVE # ``` # Covariance, Eigen values and Eigen Vectors ## computeCov ### Description The function <font color="blue">computeCov</font> calls the <font color="blue">HW_func</font> and uses the values returned to compute covariance. **Input**: RDD containing a set of numpy arrays (vectors), all of the same length. **Output**: This returns a dictionary containing the E, NE, O, NO and Mean computed by the <font color="blue">HW_func</font> along with the variance(Var) and covariance(Cov) matrix computed for that set of vectors. You are not expected to change this function. This is only for understanding how the values computed in the <font color="blue">HW_func</font> contribute to the computation of the covariance matrix. 
``` def computeCov(RDDin): """ computeCov receives as input an RDD of np arrays, all of the same length, and computes the covariance matrix for that set of vectors """ RDD=RDDin.map(lambda v:np.array(np.insert(v,0,1),dtype=np.float64)) # insert a 1 at the beginning of each vector so that the same # calculation also yields the mean vector OuterRDD=RDD.map(outerProduct) # separating map and reduce does not matter because of Spark's lazy execution (S,N)=OuterRDD.reduce(sumWithNan) E,NE,Mean,O,NO=HW_func(S,N) Cov=O/NO - np.outer(Mean,Mean) # Output also the diagnal which is the variance for each day Var=np.array([Cov[i,i] for i in range(Cov.shape[0])]) return {'E':E,'NE':NE,'O':O,'NO':NO,'Cov':Cov,'Mean':Mean,'Var':Var} ``` ## The process function ### Description The function <font color="blue">process</font> calls the <font color="blue">computeCov</font> and uses the covariance matrix returned to compute the Eigen Values and Eigen Vectors. **Input**: A list of numpy arrays (vectors), all of the same length. **Output**: This returns the Eigen value and Eigen Vector matrix for the given set of vectors. You are not expected to change this function. This is only for understanding how the values computed in the <font color="blue">HW_func</font> contribute to the computation of the covariance matrix and consequently the Eigen Values and Eigen Vectors. 
``` def process(data_list): # compute covariance matrix RDD=sc.parallelize(data_list) OUT=computeCov(RDD) #find PCA decomposition eigval,eigvec=LA.eig(OUT['Cov']) return eigval, eigvec ``` ### Tests ``` data_list = ([np.array([ -1.43475066e-03, 1.52970999e+00]), np.array([ 0.24696509, -1.56099945])]) eigval, eigvec = process(data_list) expected_result = {'eigval': np.array([0., 2.40354683]), 'eigvec': np.array([[-0.99678591, 0.08011153], [-0.08011153, -0.99678591]]) } assert (np.around(eigval, decimals=6) == np.around(expected_result['eigval'], decimals=6)).all(), "Output value \ of eigval does not match expected output of function. You returned " + str(eigval) assert (np.around(eigvec, decimals=6) == np.around(expected_result['eigvec'], decimals=6)).all(), "Output value \ of eigvec does not match expected output of function. You returned " + str(eigvec) data_list = ([np.array([-0.25234187, np.nan]), np.array([-0.66816641, -0.90843676])]) eigval, eigvec = process(data_list) expected_result = {'eigval': np.array([ 0.21172155, -0.16849404]), 'eigvec': np.array([[ 0.74622118, -0.66569809], [ 0.66569809, 0.74622118]]) } assert (np.around(eigval, decimals=6) == np.around(expected_result['eigval'], decimals=6)).all(), "Output value \ of eigval does not match expected output of function. You returned " + str(eigval) assert (np.around(eigvec, decimals=6) == np.around(expected_result['eigvec'], decimals=6)).all(), "Output value \ of eigvec does not match expected output of function. You returned " + str(eigvec) #Hidden Tests here # # AUTOGRADER TEST - DO NOT REMOVE # #Hidden Tests here # # AUTOGRADER TEST - DO NOT REMOVE # ``` ## If you have passed all the visible asserts, you may choose to submit at this point. You will only be able to submit ONCE.
github_jupyter
``` #from google.colab import drive #drive.mount('/content/drive') ``` # Twitter Developer Account In order to use Twitter’s API, we have to create a developer account on the Twitter apps site. * Log in or make a Twitter account at https://apps.twitter.com/. * Create a new app (button on the top right) <img src=https://miro.medium.com/max/1400/0*Dq78m3JKoSqZY5SS.png style="width: 200px;"> Fill in the app creation page with a unique name, a website name (use a placeholder website if you don’t have one), and a project description. Accept the terms and conditions and proceed to the next page. Once your project has been created, click on the “Keys and Access Tokens” tab. You should now be able to see your consumer secret and consumer key. <img src=https://miro.medium.com/max/1400/0*YU1pFqTw6Dn-ZmOd.png style="width: 200px;"> You’ll also need a pair of access tokens. Scroll down and request those tokens. The page should refresh, and you should now have an access token and access token secret. <img src=https://miro.medium.com/max/1400/0*_gnOgA0aaAqPgDJG.png style="width: 200px;"> ## Import necessary modules ``` import sys import os import json import pandas as pd import matplotlib.pyplot as plt #Import the necessary methods from tweepy library #install tweepy if you don't have it #!pip install tweepy import tweepy from tweepy.streaming import StreamListener from tweepy import OAuthHandler from tweepy import Stream #sentiment analysis package #!pip install textblob from textblob import TextBlob #general text pre-processor #!pip install nltk import nltk from nltk.corpus import stopwords #tweet pre-processor #!pip install tweet-preprocessor import preprocessor as ppr ``` ### Starting code Below we define some starting codes (python classes and function) to illustrate and assist on how to fetch data from twitter and analyse them. ### **Your task is** 1. Go through the code and understand it. Know what each function does 2. If you find error, fix it. 
Ask for help in the slack channel if you find serious mistake 3. Extend the code such that it will be useful for topics you choose to analyse 4. Make nice plots and share your finding (e.g. insight on the main covid19 twitter converstions about your country) 5. Submit what ever you managed to do by Wednesday morning. But you should keep using what you build to write blogs, share on facebook, etc. ``` class tweetsearch(): ''' This is a basic class to search and download twitter data. You can build up on it to extend the functionalities for more sophisticated analysis ''' def __init__(cols=None,auth=None): # if not cols is None: self.cols = cols else: self.cols = ['id', 'created_at', 'source', 'original_text','clean_text', 'sentiment','polarity','subjectivity', 'lang', 'favorite_count', 'retweet_count', 'original_author', 'possibly_sensitive', 'hashtags', 'user_mentions', 'place', 'place_coord_boundaries'] if auth is None: #Variables that contains the user credentials to access Twitter API consumer_key = os.environ.get('gaGNsJKDKlofnPH94ppdrCeTd') consumer_secret = os.environ.get('zLiCEI1R5ucDMlXd2QtYtAAG1DKfaKKElFFVxLacwvghYuqtfC') access_token = os.environ.get('1013077926634500096-3snJJ1lR0NXnTgrRq82sFbaRYxHVjm') access_token_secret = os.environ.get('rr27t3uyWqvv5pbfpPSigoKIvwyGHzEpxYn3hHygZKVNk') #This handles Twitter authetification and the connection to Twitter Streaming API auth = OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) # self.auth = auth self.api = tweepy.API(auth) def clean_tweets(twitter_text): #use pre processor tweet = p.clean(twitter_text) #HappyEmoticons emoticons_happy = set([ ':-)', ':)', ';)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}', ':^)', ':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D', '=-3', '=3', ':-))', ":'-)", ":')", ':*', ':^*', '>:P', ':-P', ':P', 'X-P', 'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b', '>:)', '>;)', '>:-)', '<3' ]) # Sad Emoticons 
emoticons_sad = set([ ':L', ':-/', '>:/', ':S', '>:[', ':@', ':-(', ':[', ':-||', '=L', ':<', ':-[', ':-<', '=\\', '=/', '>:(', ':(', '>.<', ":'-(", ":'(", ':\\', ':-c', ':c', ':{', '>:\\', ';(' ]) #Emoji patterns emoji_pattern = re.compile("[" u"\U0001F600-\U0001F64F" # emoticons u"\U0001F300-\U0001F5FF" # symbols & pictographs u"\U0001F680-\U0001F6FF" # transport & map symbols u"\U0001F1E0-\U0001F1FF" # flags (iOS) u"\U00002702-\U000027B0" u"\U000024C2-\U0001F251" "]+", flags=re.UNICODE) #combine sad and happy emoticons emoticons = emoticons_happy.union(emoticons_sad) stop_words = set(stopwords.words('english')) word_tokens = word_tokenize(tweet) #after tweepy preprocessing the colon symbol left remain after #removing mentions tweet = re.sub(r':', '', tweet) tweet = re.sub(r'‚Ķ', '', tweet) #replace consecutive non-ASCII characters with a space tweet = re.sub(r'[^\x00-\x7F]+',' ', tweet) #remove emojis from tweet tweet = emoji_pattern.sub(r'', tweet) #filter using NLTK library append it to a string filtered_tweet = [w for w in word_tokens if not w in stop_words] #looping through conditions filtered_tweet = [] for w in word_tokens: #check tokens against stop words , emoticons and punctuations if w not in stop_words and w not in emoticons and w not in string.punctuation: filtered_tweet.append(w) return ' '.join(filtered_tweet) def get_tweets(self, keyword, csvfile=None): df = pd.DataFrame(columns=self.cols) if not csvfile is None: #If the file exists, then read the existing data from the CSV file. 
if os.path.exists(csvfile): df = pd.read_csv(csvfile, header=0) #page attribute in tweepy.cursor and iteration for page in tweepy.Cursor(api.search, q=keyword,count=200, include_rts=False): for status in page: new_entry = [] status = status._json #filter by language if status['lang'] != 'en': continue #if this tweet is a retweet update retweet count if status['created_at'] in df['created_at'].values: i = df.loc[df['created_at'] == status['created_at']].index[0] # cond1 = status['favorite_count'] != df.at[i, 'favorite_count'] cond2 = status['retweet_count'] != df.at[i, 'retweet_count'] if cond1 or cond2: df.at[i, 'favorite_count'] = status['favorite_count'] df.at[i, 'retweet_count'] = status['retweet_count'] continue #calculate sentiment blob = TextBlob(filtered_tweet) Sentiment = blob.sentiment polarity = Sentiment.polarity subjectivity = Sentiment.subjectivity new_entry += [status['id'], status['created_at'], status['source'], status['text'],filtered_tweet, Sentiment,polarity,subjectivity, status['lang'], status['favorite_count'], status['retweet_count']] new_entry.append(status['user']['screen_name']) try: is_sensitive = status['possibly_sensitive'] except KeyError: is_sensitive = None new_entry.append(is_sensitive) hashtags = ", ".join([hashtag_item['text'] for hashtag_item in status['entities']['hashtags']]) new_entry.append(hashtags) #append the hashtags # mentions = ", ".join([mention['screen_name'] for mention in status['entities']['user_mentions']]) new_entry.append(mentions) #append the user mentions try: xyz = status['place']['bounding_box']['coordinates'] coordinates = [coord for loc in xyz for coord in loc] except TypeError: coordinates = None # new_entry.append(coordinates) try: location = status['user']['location'] except TypeError: location = '' # new_entry.append(location) #now append a row to the dataframe single_tweet_df = pd.DataFrame([new_entry], columns=self.cols) df = df.append(single_tweet_df, ignore_index=True) if not csvfile is None: #save 
it to file df.to_csv(csvfile, columns=self.cols, index=False, encoding="utf-8") return df ``` ### Search twitter and fetch data example ``` covid_keywords = '#COVID19Ethiopia OR #COVID19Africa' #hashtag based search tweets_file = 'data/covid19_23june2020.json' #get data on keywords ts = tweetsearch() df = ts.get_data(covid_keywords, csvfile=tweets_file) #you saved the ``` ## Stream data and save it to file In the above we saw how to search and fetch data, below we will see how we will stream data from twitter. Make sure you understand the difference between search and stream features of twitter api. ### **SAME TASK AS ABOVE** ``` #This is a basic listener that writes received tweets to file. class StdOutListener(StreamListener): def __init__(self,fhandle, stop_at = 1000): self.tweet_counter = 0 self.stop_at = stop_at self.fhandle = fhandle def on_data(self, data): self.fhandle.write(data) #stop if enough tweets are obtained self.tweet_counter += 1 if self.tweet_counter < self.stop_at: return True else: print('Max number of tweets reached: #tweets = ' + str(self.tweet_counter)) return False def on_error(self, status): print (status) def stream_tweet_data(filename='data/tweets.json', keywords=['COVID19Africa','COVID19Ethiopia'], is_async=False): # tweet topics to use as a filter. 
The tweets downloaded # will have one of the topics in their text or hashtag print('saving data to file: ',filename) #print the tweet topics print('TweetKeywords are: ',keywords) print('For testing case, please interupt the downloading process using ctrl+x after about 5 mins ') print('To keep streaming in the background, pass is_async=True') #Variables that contains the user credentials to access Twitter API consumer_key = 'gaGNsJKDKlofnPH94ppdrCeTd' consumer_secret = 'zLiCEI1R5ucDMlXd2QtYtAAG1DKfaKKElFFVxLacwvghYuqtfC' access_token = '1013077926634500096-3snJJ1lR0NXnTgrRq82sFbaRYxHVjm' access_token_secret = 'rr27t3uyWqvv5pbfpPSigoKIvwyGHzEpxYn3hHygZKVNk' #open file fhandle=open(filename,'w') #This handles Twitter authetification and the connection to Twitter Streaming API l = StdOutListener(fhandle) auth = OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) stream = Stream(auth, l) #This line filter Twitter Streams to capture data by the keywords: first argument to this code stream.filter(track=keywords,is_async=is_async) return None ``` ### Use case of the above code ``` tweets_file = 'data/covid19_23june2020.json' stream_tweet_data(filename=tweets_file,keywords=['covid19']) # ``` ### Filter twitter data and do basic analysis **Extend it to gain more insight** ``` tweets_data = [] for line in open(tweets_file, "r"): try: tweet = json.loads(line) x=tweet['text'] tweets_data.append(tweet) except: continue print('saved numbers of tweets: ', len(tweets_data)) ``` ${\textbf{Most Used Reference Username}}$ ``` a = list(map(lambda x: x['entities']['user_mentions'], tweets_data)) pprint(a[1][0]['screen_name']) screen_name = [] for name in a: if name == []: screen_name.append(None) continue screen_name.append(name[0]['screen_name']) ref_username = pd.Series(screen_name) ref_name_count = ref_username.value_counts() ref_name_count[:5] ``` ${\textbf{Most Hashtags}}$ ``` a = list(map(lambda x: x['entities']['hashtags'], 
tweets_data)) hashtags_text = [] for v in a: if v == []: hashtags_text.append('None') continue h_text = v[0]['text'] hashtags_text.append(h_text) hashtags = pd.Series(hashtags_text) hashtags = hashtags.str.lower() hashtags.value_counts()[:5] ``` <b>Please check to the last cell </b> ${\textbf{Most Retweeted Tweets}}$ ``` retweet_count = list(map(lambda tweet: tweet['retweet_count'], tweets_data)) tweets = pd.DataFrame(columns=['text','lang','country', 'retweet_count', 'hashtags']) tweets['text'] = list(map(lambda tweet: tweet['text'], tweets_data)) tweets['lang'] = list(map(lambda tweet: tweet['lang'], tweets_data)) tweets['country'] = list(map(lambda tweet: tweet['place']['country'] if tweet['place'] != None else None, tweets_data)) tweets['retweet_count'] = list(map(lambda tweet: tweet['retweet_count'], tweets_data)) tweets['hashtags'] = hashtags tweets_by_lang = tweets['lang'].value_counts() fig, ax = plt.subplots() ax.tick_params(axis='x', labelsize=15) ax.tick_params(axis='y', labelsize=10) ax.set_xlabel('Languages', fontsize=15) ax.set_ylabel('Number of tweets' , fontsize=15) ax.set_title('Top 5 languages', fontsize=15, fontweight='bold') tweets_by_lang[:5].plot(ax=ax, kind='bar', color='red') tweets_by_country = tweets['country'].value_counts() fig, ax = plt.subplots() ax.tick_params(axis='x', labelsize=15) ax.tick_params(axis='y', labelsize=10) ax.set_xlabel('Countries', fontsize=15) ax.set_ylabel('Number of tweets' , fontsize=15) ax.set_title('Top 5 countries', fontsize=15, fontweight='bold') tweets_by_country[:5].plot(ax=ax, kind='bar', color='blue') ``` # Hashtag histogram. ## Please write code that will help you answer the following questions 1) What is the most used hashtag? 2) What is the most used referenced username? 3) What is the most retweeted tweet? ``` # 1) What is the most used hashtag? 
no_none_hash = tweets[tweets['hashtags'] != 'none'] hashtags_counts = no_none_hash['hashtags'].value_counts() fig, ax = plt.subplots() ax.tick_params(labelsize=15) ax.set_xlabel('Hashtags', fontsize=15, ) ax.set_ylabel('Number of hashtags' , fontsize=15) ax.set_title('Top 5 Hashtags', fontsize=15, fontweight='bold') hashtags_counts[:5].plot(ax=ax, kind='bar', color='blue') fig, ax = plt.subplots() ax.tick_params(labelsize=15) ax.set_xlabel('Usernames', fontsize=15, ) ax.set_ylabel('Count of Username' , fontsize=15) ax.set_title('Top 5 Reference Name', fontsize=15, fontweight='bold') ref_name_count[:5].plot(ax=ax, kind='bar', color='#324567') ```
github_jupyter
## Dependencies
```
from tweet_utility_scripts import *
from transformers import TFDistilBertModel, DistilBertConfig
from tokenizers import BertWordPieceTokenizer
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Dropout, GlobalAveragePooling1D, GlobalMaxPooling1D, Concatenate
```
# Load data
```
test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
print('Test samples: %s' % len(test))
display(test.head())
```
# Model parameters
```
MAX_LEN = 128  # fixed token length every tweet is padded/truncated to
base_path = '/kaggle/input/qa-transformers/distilbert/'
base_model_path = base_path + 'distilbert-base-uncased-distilled-squad-tf_model.h5'
config_path = base_path + 'distilbert-base-uncased-distilled-squad-config.json'
tokenizer_path = base_path + 'bert-large-uncased-vocab.txt'
input_base_path = '/kaggle/input/14-tweet-train-distilbert-base-uncased-lbl-lower/'
# One fine-tuned weight file per fold; predictions from all of them are averaged below.
model_path_list = glob.glob(input_base_path + '*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep = "\n")
```
# Tokenizer
```
tokenizer = BertWordPieceTokenizer(tokenizer_path , lowercase=True)
```
# Pre process
```
# Lowercase the tweet text to match the lowercased training setup.
test['text'].fillna('', inplace=True)
test["text"] = test["text"].apply(lambda x: x.lower())
x_test = get_data_test(test, tokenizer, MAX_LEN)
```
# Model
```
module_config = DistilBertConfig.from_pretrained(config_path, output_hidden_states=False)

def model_fn():
    """Build the span-extraction head on top of a pre-trained DistilBERT encoder.

    Returns a Keras model mapping (input_ids, attention_mask, token_type_ids)
    to two softmax distributions over the MAX_LEN token positions: the start
    and the end of the selected text span.
    """
    input_ids = Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    attention_mask = Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
    # NOTE(review): DistilBERT has no token-type (segment) embeddings; this input is
    # presumably kept only to match a shared input pipeline — confirm it is ignored.
    token_type_ids = Input(shape=(MAX_LEN,), dtype=tf.int32, name='token_type_ids')
    base_model = TFDistilBertModel.from_pretrained(base_model_path, config=module_config, name="base_model")
    sequence_output = base_model({'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids})
    # sequence_output[0] is the last hidden state; pool it over the sequence axis.
    last_state = sequence_output[0]
    x = GlobalAveragePooling1D()(last_state)
    y_start = Dense(MAX_LEN, activation='softmax', name='y_start')(x)
    y_end = Dense(MAX_LEN, activation='softmax', name='y_end')(x)
    model = Model(inputs=[input_ids, attention_mask, token_type_ids], outputs=[y_start, y_end])
    return model
```
# Make predictions
```
# Ensemble: accumulate each fold model's start/end probabilities, pre-divided
# by the number of models so the result is the mean.
NUM_TEST_IMAGES = len(test)
test_start_preds = np.zeros((NUM_TEST_IMAGES, MAX_LEN))
test_end_preds = np.zeros((NUM_TEST_IMAGES, MAX_LEN))
for model_path in model_path_list:
    print(model_path)
    model = model_fn()
    model.load_weights(model_path)
    test_preds = model.predict(x_test)
    test_start_preds += test_preds[0] / len(model_path_list)
    test_end_preds += test_preds[1] / len(model_path_list)
```
# Post process
```
# argmax over token positions gives the predicted span boundaries, which
# `decode` maps back to a substring of the original tweet text.
test['start'] = test_start_preds.argmax(axis=-1)
test['end'] = test_end_preds.argmax(axis=-1)
test['selected_text'] = test.apply(lambda x: decode(x['start'], x['end'], x['text'], tokenizer), axis=1)
```
# Visualize predictions
```
display(test.head(10))
```
# Test set predictions
```
submission = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/sample_submission.csv')
submission['selected_text'] = test["selected_text"]
submission.to_csv('submission.csv', index=False)
submission.head(10)
```
github_jupyter
# Data description:
This dataset describes the number of daily female births in California in 1959. The units are a count and there are 365 observations. The source of the dataset is credited to Newton (1988).
# Workflow:
- Load the Time Series (TS) by Pandas Library
- Prepare the data, i.e. convert the problem to a supervised ML problem
- Build and evaluate the RNN model:
  - Fit the best RNN model
  - Evaluate model by in-sample prediction: Calculate RMSE
  - Forecast the future trend: Out-of-sample prediction

Note: For data exploration of this TS, please refer to the notebook of my alternative solution with "Seasonal ARIMA model"
```
import keras
import sklearn
import tensorflow as tf
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
from sklearn import preprocessing
import random as rn
import math
%matplotlib inline
from keras import backend as K

# Cap TF thread pools (TF1-style session setup) for bounded, reproducible CPU use.
session_conf = tf.ConfigProto(intra_op_parallelism_threads=5, inter_op_parallelism_threads=5)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)

import warnings
warnings.filterwarnings("ignore")

# Load data using Series.from_csv
from pandas import Series
#TS = Series.from_csv('C:/Users/rhash/Documents/Datasets/Time Series analysis/daily-minimum-temperatures.csv', header=0)

# Load data using pandas.read_csv
# in case, specify your own date parsing function and use the date_parser argument
from pandas import read_csv
from pandas import datetime
#def parser(x):
#    return datetime.strptime('190'+x, '%Y-%m')

TS = read_csv('C:/Users/rhash/Documents/Datasets/Time Series analysis/daily-total-female-births-in-cal.csv', header=0, parse_dates=[0], index_col=0, squeeze=True)
print(TS.head())

#TS=pd.to_numeric(TS, errors='coerce')
TS.dropna(inplace=True)
data=pd.DataFrame(TS.values)

# prepare the data (i.e. convert problem to a supervised ML problem)
def prepare_data(data, lags=1):
    """
    Create lagged data from an input time series.

    data: 2-D array of shape (n_samples, 1) (only column 0 is used).
    lags: number of past observations used as features for each target.
    Returns (X, y) where X[i] holds `lags` consecutive values and y[i]
    is the value immediately following them.
    """
    X, y = [], []
    for row in range(len(data) - lags - 1):
        a = data[row:(row + lags), 0]
        X.append(a)
        y.append(data[row + lags, 0])
    return np.array(X), np.array(y)

# normalize the dataset to [0, 1] (scaler is reused below to invert predictions)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(data)

# split into train and test sets (first 270 days train, remainder test)
train = dataset[0:270, :]
test = dataset[270:, :]

# LSTM RNN model: _________________________________________________________________
from keras.models import Sequential, Model
from keras.layers import Dense, LSTM, Dropout, average, Input, merge, concatenate
from keras.layers.merge import concatenate
from keras.regularizers import l2, l1
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.utils.class_weight import compute_sample_weight
from keras.layers.normalization import BatchNormalization

# fix all RNG seeds for reproducibility
np.random.seed(42)
rn.seed(42)
tf.set_random_seed(42)

# reshape into X=t and Y=t+1
lags = 1
X_train, y_train = prepare_data(train, lags)
X_test, y_test = prepare_data(test, lags)

# reshape input to be [samples, time steps, features]
X_train = np.reshape(X_train, (X_train.shape[0], lags, 1))
X_test = np.reshape(X_test, (X_test.shape[0], lags, 1))

# create and fit the LSTM network
mdl = Sequential()
mdl.add(Dense(150, input_shape=(lags, 1), activation='relu'))
mdl.add(LSTM(300, activation='relu'))
#mdl.add(Dense(50, activation='relu'))
mdl.add(Dense(1))
mdl.compile(loss='mean_squared_error', optimizer='adam')

monitor=EarlyStopping(monitor='loss', min_delta=0.001, patience=100, verbose=1, mode='auto')
checkpointer = ModelCheckpoint(filepath="DFB_weights.hdf5", verbose=0, save_best_only=True) # save best model
history=mdl.fit(X_train, y_train, epochs=150, batch_size=1, validation_data=(X_test, y_test), callbacks=[monitor, checkpointer], verbose=0)
mdl.load_weights('DFB_weights.hdf5') # load weights from best model

# To measure RMSE and evaluate the RNN model:
from sklearn.metrics import mean_squared_error

# make predictions
train_predict = mdl.predict(X_train)
test_predict = mdl.predict(X_test)

# invert transformation back to the original birth-count scale
train_predict = scaler.inverse_transform(pd.DataFrame(train_predict))
y_train = scaler.inverse_transform(pd.DataFrame(y_train))
test_predict = scaler.inverse_transform(pd.DataFrame(test_predict))
y_test = scaler.inverse_transform(pd.DataFrame(y_test))

# calculate root mean squared error
train_score = math.sqrt(mean_squared_error(y_train, train_predict[:,0]))
print('Train Score: {:.2f} RMSE'.format(train_score))
test_score = math.sqrt(mean_squared_error(y_test, test_predict[:,0]))
print('Test Score: {:.2f} RMSE'.format(test_score))

# list all data in history
#print(history.history.keys())

# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

# shift train predictions for plotting (align with original time axis)
train_predict_plot =np.full(data.shape, np.nan)
train_predict_plot[lags:len(train_predict)+lags, :] = train_predict

# shift test predictions for plotting
test_predict_plot =np.full(data.shape, np.nan)
test_predict_plot[len(train_predict) + (lags * 2)+1:len(data)-1, :] = test_predict

# plot observation and predictions
plt.figure(figsize=(12,7))
plt.plot(data, label='Observed', color='#006699');
plt.plot(train_predict_plot, label='Prediction for Train Set', color='#006699', alpha=0.5);
plt.plot(test_predict_plot, label='Prediction for Test Set', color='#ff0066');
plt.legend(loc='upper left')
plt.title('LSTM Recurrent Neural Net')
plt.show()

plt.figure(figsize=(8,6))
mse = mean_squared_error(y_test, test_predict[:,0])
plt.title('Prediction quality: {:.2f} MSE ({:.2f} RMSE)'.format(mse, math.sqrt(mse)))
plt.plot(y_test.reshape(-1, 1), label='Observed', color='#006699')
plt.plot(test_predict.reshape(-1, 1), label='Prediction', color='#ff0066')
plt.legend(loc='upper left');
plt.show()
```
github_jupyter
``` import os import sys os.environ['CUDA_VISIBLE_DEVICES'] = '0' sys.path.append('../../S_EqT_codes/') sys.path.append('../../S_EqT_codes/src/EqT_libs/') import warnings warnings.filterwarnings("ignore") from tensorflow.python.util import deprecation deprecation._PRINT_DEPRECATION_WARNINGS = False from keras.models import Model from keras.layers import Input import keras.backend as K import numpy as np from src.S_EqT_concate_fix_corr import S_EqT_Concate_RSRN_Model, feature_map_corr_func, feature_map_corr_layer, build_corr_model from src.data_preprocessing import get_siamese_input_list, get_response_list_for_vis import yaml from src.data_preprocessing import normalize_by_std import matplotlib.pyplot as plt import matplotlib.image as mpimg ``` # Explore hidden responses inside the P branch ### Load EqT and S-EqT models ``` cfgs = yaml.load(open('../../S_EqT_codes/configs/P_branch.yaml','r'), Loader=yaml.BaseLoader) encode_model, siamese_model, EqT_model = S_EqT_Concate_RSRN_Model(cfgs) siamese_model.load_weights('../../models/S_EqT/S_EqT_P_branch.hdf5') SEqT_penultimate = Model(siamese_model.input,siamese_model.get_layer('concatenate_26').output) model_corr = build_corr_model(cfgs = cfgs, EqT_model = EqT_model) ``` ### Load example data ``` four_examples = np.load('./resources/Four_examples.npy',allow_pickle=True)[()] template = four_examples['CI.DEC'] search = four_examples['CI.RIN'] ``` ### Calculate hidden responses inside the EqT and S-EqT models ``` # get hidden responses and predictions from the pre-trained EqT model # template data data_t = template['data'] data_t = normalize_by_std(data_t) data_t_in = np.zeros([1,6000,3]) data_t_in[0,:,:] = data_t[:,:] encoded_t = encode_model.predict(data_t_in) template_eqt_pred = EqT_model.predict(data_t_in) # get EqT prediction on the templates template_eqt_output = EqT_model.predict(data_t_in) spt_t_eqt = np.argmax(template_eqt_output[1][0,:,0]) sst_t_eqt = np.argmax(template_eqt_output[2][0,:,0]) # search data data_s = 
search['data'] data_s = normalize_by_std(data_s) data_s_in = np.zeros([1,6000,3]) data_s_in[0,:,:] = data_s[:,:] encoded_s = encode_model.predict(data_s_in) search_eqt_pred = EqT_model.predict(data_s_in) # S-EqT predicition on search seismogram siamese_input_list = get_siamese_input_list(cfgs, spt_t_eqt, sst_t_eqt, encoded_t.copy(), encoded_s.copy()) search_seqt_pred = siamese_model.predict(siamese_input_list) search_SEqT_penultimate_response = SEqT_penultimate.predict(siamese_input_list) # get responses for visualization vis_cfgs = yaml.load(open('../../S_EqT_codes/configs/Vis_config.yaml','r'), Loader=yaml.BaseLoader) output_dict = dict() output_list = [] RSRN_lengths = vis_cfgs['Model']['RSRN_Encoded_lengths'] RSRN_channels = vis_cfgs['Model']['RSRN_Encoded_channels'] encoded_list = vis_cfgs['Model']['RSRN_Encoded_list'] for encoded_name in encoded_list: output_dict[encoded_name] = EqT_model.get_layer(encoded_name).output output_list.append(output_dict[encoded_name]) model_encoded_for_vis = Model(inputs = EqT_model.input, outputs = output_list) encoded_t_plot = model_encoded_for_vis.predict(data_t_in) encoded_s_plot = model_encoded_for_vis.predict(data_s_in) encoded_t_slice = encoded_t_plot.copy() # define inputs S_EqT_Input_dict = dict() S_EqT_Input_list = [] encoded_channels = RSRN_channels encoded_lengths = RSRN_lengths for idx, encoded_name in enumerate(encoded_list): S_EqT_Input_dict[encoded_name+'_Template'] = Input(shape=[None,1,int(encoded_channels[idx])],name=encoded_name+'_Template') S_EqT_Input_dict[encoded_name+'_Search'] = Input(shape=[int(encoded_lengths[idx]),1,int(encoded_channels[idx])],name=encoded_name+'_Search') S_EqT_Input_list.append(S_EqT_Input_dict[encoded_name+'_Template']) S_EqT_Input_list.append(S_EqT_Input_dict[encoded_name+'_Search']) # define correlation results concate_with_ori = int(cfgs['Model']['Concate_with_ori']) feature_corr_dict = dict() S_EqT_Output_list = [] for idx, encoded_name in enumerate(encoded_list): 
feature_corr_dict[encoded_name+'_corr'] = feature_map_corr_layer(encoded_name+'_corr',(int(encoded_lengths[idx]),1,int(encoded_channels[idx])))([S_EqT_Input_dict[encoded_name+'_Template'],S_EqT_Input_dict[encoded_name+'_Search'] ]) S_EqT_Output_list.append(feature_corr_dict[encoded_name+'_corr']) model_corr_for_vis = Model(inputs = S_EqT_Input_list, outputs = S_EqT_Output_list) # origion responses and their cross-corelations ori_response_list_for_vis, enhanced_response_list_for_vis = get_response_list_for_vis(vis_cfgs, spt_t_eqt, sst_t_eqt, encoded_t_slice, encoded_s_plot) ori_corr = model_corr_for_vis.predict(ori_response_list_for_vis) enhanced_corr = model_corr_for_vis.predict(enhanced_response_list_for_vis) """ Codes to be cleaned data_t data_s plot_t start_sample = 0 end_sample = 3001 spt_t_eqt sst_t_eqt """ #encoded_t_plot = encoded_t encoded_s = encoded_s_plot corr_res = enhanced_corr res_search = search_eqt_pred pred_res = search_seqt_pred SEqT_P = np.argmax(search_seqt_pred[-1][0,:,0,0]) plot_t = np.arange(0,30.01,0.01) plt.figure(figsize=(16,12)) ax1 = plt.subplot2grid((6,6),(0,4),colspan=2,rowspan=2) plt.title('Simplified EqT diagram',fontsize=14) img = mpimg.imread('./resources/EqT_diagram.jpg') plt.imshow(img,aspect='auto') plt.xticks([]) plt.yticks([]) ax1.axis('off') ax_t1 = plt.subplot2grid((6,6),(0,0),rowspan=2,colspan=2) for idx in range(3): plt.plot(plot_t,data_t[0:3001,idx]/np.max(np.abs(data_t[0:3001,idx]))+idx*2 + 2,color='k') if idx == '0': plt.text(28, idx*2 + 2 + 0.1, 'E') if idx == '1': plt.text(28, idx*2 + 2 + 0.1, 'N') if idx == '2': plt.text(28, idx*2 + 2 + 0.1, 'Z') plt.plot([spt_t_eqt/100.0,spt_t_eqt/100.0],[1,8],color='b',label='EqT P') plt.plot([sst_t_eqt/100.0,sst_t_eqt/100.0],[1,8],color='r',label='EqT S') plt.title('Template seismogram from station CI.DEC\nStart Time: 2020-08-09T16:46:00',fontsize=14) plt.ylim([1,7.5]) plt.legend(loc='upper right',prop= {'size':14}) plt.yticks([]) plt.xticks(fontsize=12) plt.yticks(fontsize=12) 
plt.xlim([0,30]) plt.xlabel('time (s)',fontsize=14) ax_t2 = plt.subplot2grid((6,6),(2,0),colspan=2) rdx = 29 t_len = int(len(encoded_t_plot[rdx][0,:,0])/2.0) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(encoded_t_plot[rdx][0,:t_len,channel_dx] + 0 * 2,color='k') plt.xlim([0,t_len]) plt.title('OTR conv1d_35',fontsize=14) plt.plot([spt_t_eqt/4.0,spt_t_eqt/4.0],[-50,50],color='b',label='EqT P') plt.plot([sst_t_eqt/4.0,sst_t_eqt/4.0],[-50,50],color='r',label='EqT S') t_min_plot = np.min(encoded_t_plot[rdx]) t_max_plot = np.max(encoded_t_plot[rdx]) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot + t_gain]) t_ax=plt.gca();t_ax.spines['right'].set_color('b');t_ax.spines['top'].set_color('b');t_ax.spines['bottom'].set_color('b');t_ax.spines['left'].set_color('b');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) plt.text(-100,t_min_plot+t_gain*0.15,'Amplitude',fontsize=14,rotation=90) plt.xticks(fontsize=12) plt.yticks(fontsize=12) ax_t3 = plt.subplot2grid((6,6),(3,0),colspan=2) rdx = 29 t_len = int(len(encoded_t_plot[rdx][0,:,0])/2.0) start_dx = int(spt_t_eqt*int(RSRN_lengths[rdx])/6000.0) end_dx = start_dx + len(encoded_t_slice[rdx][0,:,0,0]) temp_x_plot = np.arange(start_dx,end_dx,1) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(temp_x_plot, encoded_t_slice[rdx][0,:,0,channel_dx] + 0 * 2,color='k') plt.xlim([0,t_len]) plt.title('ETR conv1d_35',fontsize=14) plt.plot([spt_t_eqt/4.0,spt_t_eqt/4.0],[-50,50],color='b',label='EqT P') plt.plot([sst_t_eqt/4.0,sst_t_eqt/4.0],[-50,50],color='r',label='EqT S') t_min_plot = np.min(encoded_t_slice[rdx]) t_max_plot = np.max(encoded_t_slice[rdx]) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot + t_gain]) 
t_ax=plt.gca();t_ax.spines['right'].set_color('b');t_ax.spines['top'].set_color('b');t_ax.spines['bottom'].set_color('b');t_ax.spines['left'].set_color('b');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) plt.text(-140,t_min_plot+t_gain*0.15,'Amplitude',fontsize=14,rotation=90) plt.xlabel('Array Index (N)',fontsize=14) plt.xticks(fontsize=12) plt.yticks(fontsize=12) ax_t4 = plt.subplot2grid((6,6),(2,4),colspan=2) rdx = 29 t_len = int(len(encoded_t_plot[rdx][0,:,0])/2.0) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(ori_corr[rdx][0,:t_len,0,channel_dx]/57.0 + 0 * 2,color='k') plt.xlim([0,t_len]) plt.title('OCC conv1d_35',fontsize=14) t_min_plot = np.min(ori_corr[rdx])/57.0 t_max_plot = np.max(ori_corr[rdx])/57.0 t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot + t_gain]) plt.plot([SEqT_P/4.0,SEqT_P/4.0],[-50,50],color='b',linestyle='--',label='S-EqT P') #plt.plot([SEqT_S/4.0,SEqT_S/4.0],[-50,50],color='r',linestyle='--',label='S-EqT S') t_ax=plt.gca();t_ax.spines['right'].set_color('b');t_ax.spines['top'].set_color('b');t_ax.spines['bottom'].set_color('b');t_ax.spines['left'].set_color('b');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) plt.xticks(fontsize=12) plt.yticks(fontsize=12) ax_s1 = plt.subplot2grid((6,6),(0,2),rowspan=2,colspan=2) for idx in range(3): plt.plot(plot_t,data_s[0:3001,idx]/np.max(np.abs(data_s[0:3001,idx]))+idx*2 + 2,color='k') if idx == '0': plt.text(28, idx*2 + 2 + 0.1, 'E') if idx == '1': plt.text(28, idx*2 + 2 + 0.1, 'N') if idx == '2': plt.text(28, idx*2 + 2 + 0.1, 'Z') plt.yticks([]) plt.plot([SEqT_P/100.0,SEqT_P/100.0],[1,8],color='b',linestyle='--',label='S-EqT P') plt.xlim([0,30]) plt.ylim([1,7.5]) plt.legend(loc='upper right',prop= {'size':14}) #plt.text(0,7.5,'(c)',fontsize=20) 
plt.title('Searching seismogram from station CI.RIN\nStart Time: 2020-08-09T16:46:00',fontsize=14) plt.xlabel('time (s)',fontsize=14) plt.xticks(fontsize=12) plt.yticks(fontsize=12) ax_s2 = plt.subplot2grid((6,6),(2,2),colspan=2) rdx = 29 t_len = int(len(ori_response_list_for_vis[rdx*2+1][0,:,0,0])/2.0) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(ori_response_list_for_vis[rdx*2+1][0,:t_len,0,channel_dx] + 0 * 2,color='k') plt.xlim([0,t_len]) plt.title('OSR conv1d_35',fontsize=14) t_min_plot = np.min(ori_response_list_for_vis[rdx*2+1]) t_max_plot = np.max(ori_response_list_for_vis[rdx*2+1]) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot + t_gain]) plt.plot([SEqT_P/4.0,SEqT_P/4.0],[-50,50],color='b',linestyle='--',label='S-EqT P') #plt.plot([SEqT_S/4.0,SEqT_S/4.0],[-50,50],color='r',linestyle='--',label='S-EqT S') t_ax=plt.gca();t_ax.spines['right'].set_color('b');t_ax.spines['top'].set_color('b');t_ax.spines['bottom'].set_color('b');t_ax.spines['left'].set_color('b');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) plt.xticks(fontsize=12) plt.yticks(fontsize=12) ax_s3 = plt.subplot2grid((6,6),(3,2),colspan=2) rdx = 29 t_len = int(len(encoded_s[rdx][0,:,0,0])/2.0) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(encoded_s[rdx][0,:t_len,0,channel_dx] + 0 * 2,color='k') plt.xlim([0,t_len]) plt.title('ESR conv1d_35',fontsize=14) t_min_plot = np.min(encoded_s[rdx]) t_max_plot = np.max(encoded_s[rdx]) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot + t_gain]) plt.plot([SEqT_P/4.0,SEqT_P/4.0],[-50,50],color='b',linestyle='--',label='S-EqT P') #plt.plot([SEqT_S/4.0,SEqT_S/4.0],[-50,50],color='r',linestyle='--',label='S-EqT S') 
t_ax=plt.gca();t_ax.spines['right'].set_color('b');t_ax.spines['top'].set_color('b');t_ax.spines['bottom'].set_color('b');t_ax.spines['left'].set_color('b');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) plt.xlabel('Array Index (N)',fontsize=14) plt.xticks(fontsize=12) plt.yticks(fontsize=12) ax_s4 = plt.subplot2grid((6,6),(3,4),colspan=2) rdx = 29 t_len = int(len(encoded_s[rdx][0,:,0,0])/2.0) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(corr_res[rdx][0,:,0,channel_dx] + 0 * 2,color='k') plt.xlim([0,t_len]) plt.title('ECC conv1d_35',fontsize=14) t_min_plot = np.min(corr_res[rdx]) t_max_plot = np.max(corr_res[rdx]) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot + t_gain]) plt.plot([SEqT_P/4.0,SEqT_P/4.0],[-50,50],color='b',linestyle='--',label='S-EqT P') t_ax=plt.gca();t_ax.spines['right'].set_color('b');t_ax.spines['top'].set_color('b');t_ax.spines['bottom'].set_color('b');t_ax.spines['left'].set_color('b');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) plt.xlabel('Array Index (N)',fontsize=14) plt.xticks(fontsize=12) plt.yticks(fontsize=12) ax_eqt_final_resp = plt.subplot2grid((6,6),(4,0),colspan=3) rdx = 31 t_len = int(len(ori_response_list_for_vis[rdx*2+1][0,:,0,0])/2.0) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(ori_response_list_for_vis[rdx*2+1][0,:t_len,0,channel_dx] + 0 * 2,color='k') plt.xlim([0,t_len]) plt.title('OSR conv1d_35',fontsize=14) t_min_plot = np.min(ori_response_list_for_vis[rdx*2+1]) t_max_plot = np.max(ori_response_list_for_vis[rdx*2+1]) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot + t_gain]) plt.plot([SEqT_P,SEqT_P],[-50,50],color='b',linestyle='--',label='S-EqT P') 
#t_ax=plt.gca();t_ax.spines['right'].set_color('b');t_ax.spines['top'].set_color('b');t_ax.spines['bottom'].set_color('b');t_ax.spines['left'].set_color('b');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) plt.xlabel('Array Index (N)',fontsize=14) plt.title('Response of the penultimate layer in P branch of EqT model',fontsize=14) plt.ylabel('Amplitude',fontsize=14) plt.xticks(fontsize=12) plt.yticks(fontsize=12) ax_seqt_final_resp = plt.subplot2grid((6,6),(4,3),colspan=3) for idx in range(7): plt.plot(search_SEqT_penultimate_response[0,:,0,idx],color='k') t_min_plot = np.min(search_SEqT_penultimate_response) t_max_plot = np.max(search_SEqT_penultimate_response) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([-20,t_max_plot + t_gain]) plt.plot([SEqT_P,SEqT_P],[-40,40],color='b',linestyle='--',label='S-EqT P') #plt.plot([SEqT_S,SEqT_S],[-40,40],color='r',linestyle='--',label='S-EqT S') plt.xlim([0,t_len]) #plt.ylabel('Amplitude',fontsize=14) plt.xlabel('Array Index (N)',fontsize=14) plt.title('Response of the penultimate layer in P branch of S-EqT model',fontsize=14) plt.xticks(fontsize=12) plt.yticks(fontsize=12) ax_eqt_final = plt.subplot2grid((6,6),(5,0),colspan=3) plt.plot(plot_t,res_search[2][0,0:3001,0],color='k') plt.ylim([-0.05,0.3]) plt.xlim([0,30]) plt.plot([SEqT_P/100.0,SEqT_P/100.0],[-1,1],color='b',linestyle='--',label='S-EqT P') #plt.plot([SEqT_S/100.0,SEqT_S/100.0],[-1,1],color='r',linestyle='--',label='S-EqT S') plt.xlabel('time (s)',fontsize=14) plt.ylabel('Probability',fontsize=14) plt.title('P phase probabilities by EqT model',fontsize=14) plt.xticks(fontsize=12) plt.yticks(fontsize=12) ax_seqt_final = plt.subplot2grid((6,6),(5,3),colspan=3) plt.plot(plot_t,pred_res[-1][0,0:3001,0,0],color='k') plt.xlim([0,30]) plt.ylim([-0.05,0.3]) plt.plot([SEqT_P/100.0,SEqT_P/100.0],[-1,1],color='b',linestyle='--',label='S-EqT P') 
#plt.plot([SEqT_S/100.0,SEqT_S/100.0],[-1,1],color='r',linestyle='--',label='S-EqT S') #plt.xlabel('time (s)',fontsize=14) plt.title('P phase probabilities by S-EqT model',fontsize=14) plt.xlabel('time (s)',fontsize=14) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.tight_layout() plt.show() plt.close() ``` # Explore hidden responses inside the P branch ``` K.clear_session() cfgs = yaml.load(open('../../S_EqT_codes/configs/S_branch.yaml','r'), Loader=yaml.BaseLoader) encode_model, siamese_model, EqT_model = S_EqT_Concate_RSRN_Model(cfgs) siamese_model.load_weights('../../models/S_EqT/S_EqT_S_branch.hdf5') SEqT_penultimate = Model(siamese_model.input,siamese_model.get_layer('concatenate_26').output) model_corr = build_corr_model(cfgs = cfgs, EqT_model = EqT_model) four_examples = np.load('./resources/Four_examples.npy',allow_pickle=True)[()] template = four_examples['CI.DEC'] search = four_examples['CI.RIN'] # get hidden responses and predictions from the pre-trained EqT model # template data data_t = template['data'] data_t = normalize_by_std(data_t) data_t_in = np.zeros([1,6000,3]) data_t_in[0,:,:] = data_t[:,:] encoded_t = encode_model.predict(data_t_in) template_eqt_pred = EqT_model.predict(data_t_in) # get EqT prediction on the templates template_eqt_output = EqT_model.predict(data_t_in) spt_t_eqt = np.argmax(template_eqt_output[1][0,:,0]) sst_t_eqt = np.argmax(template_eqt_output[2][0,:,0]) # search data data_s = search['data'] data_s = normalize_by_std(data_s) data_s_in = np.zeros([1,6000,3]) data_s_in[0,:,:] = data_s[:,:] encoded_s = encode_model.predict(data_s_in) search_eqt_pred = EqT_model.predict(data_s_in) # S-EqT predicition on search seismogram siamese_input_list = get_siamese_input_list(cfgs, spt_t_eqt, sst_t_eqt, encoded_t.copy(), encoded_s.copy()) search_seqt_pred = siamese_model.predict(siamese_input_list) search_SEqT_penultimate_response = SEqT_penultimate.predict(siamese_input_list) # get responses for visualization vis_cfgs = 
yaml.load(open('../../S_EqT_codes/configs/Vis_config.yaml','r'), Loader=yaml.BaseLoader) output_dict = dict() output_list = [] RSRN_lengths = vis_cfgs['Model']['RSRN_Encoded_lengths'] RSRN_channels = vis_cfgs['Model']['RSRN_Encoded_channels'] encoded_list = vis_cfgs['Model']['RSRN_Encoded_list'] for encoded_name in encoded_list: output_dict[encoded_name] = EqT_model.get_layer(encoded_name).output output_list.append(output_dict[encoded_name]) model_encoded_for_vis = Model(inputs = EqT_model.input, outputs = output_list) encoded_t_plot = model_encoded_for_vis.predict(data_t_in) encoded_s_plot = model_encoded_for_vis.predict(data_s_in) encoded_t_slice = encoded_t_plot.copy() # define inputs S_EqT_Input_dict = dict() S_EqT_Input_list = [] encoded_channels = RSRN_channels encoded_lengths = RSRN_lengths for idx, encoded_name in enumerate(encoded_list): S_EqT_Input_dict[encoded_name+'_Template'] = Input(shape=[None,1,int(encoded_channels[idx])],name=encoded_name+'_Template') S_EqT_Input_dict[encoded_name+'_Search'] = Input(shape=[int(encoded_lengths[idx]),1,int(encoded_channels[idx])],name=encoded_name+'_Search') S_EqT_Input_list.append(S_EqT_Input_dict[encoded_name+'_Template']) S_EqT_Input_list.append(S_EqT_Input_dict[encoded_name+'_Search']) # define correlation results concate_with_ori = int(cfgs['Model']['Concate_with_ori']) feature_corr_dict = dict() S_EqT_Output_list = [] for idx, encoded_name in enumerate(encoded_list): feature_corr_dict[encoded_name+'_corr'] = feature_map_corr_layer(encoded_name+'_corr',(int(encoded_lengths[idx]),1,int(encoded_channels[idx])))([S_EqT_Input_dict[encoded_name+'_Template'],S_EqT_Input_dict[encoded_name+'_Search'] ]) S_EqT_Output_list.append(feature_corr_dict[encoded_name+'_corr']) model_corr_for_vis = Model(inputs = S_EqT_Input_list, outputs = S_EqT_Output_list) # origion responses and their cross-corelations ori_response_list_for_vis, enhanced_response_list_for_vis = get_response_list_for_vis(vis_cfgs, spt_t_eqt, sst_t_eqt, 
encoded_t_slice, encoded_s_plot) ori_corr = model_corr_for_vis.predict(ori_response_list_for_vis) enhanced_corr = model_corr_for_vis.predict(enhanced_response_list_for_vis) encoded_s = encoded_s_plot corr_res = enhanced_corr res_search = search_eqt_pred pred_res = search_seqt_pred SEqT_S = np.argmax(search_seqt_pred[-1][0,:,0,0]) plot_t = np.arange(0,30.01,0.01) plt.figure(figsize=(16,12)) ax1 = plt.subplot2grid((6,6),(0,4),colspan=2,rowspan=2) plt.title('Simplified EqT diagram',fontsize=14) img = mpimg.imread('./resources/EqT_diagram.jpg') plt.imshow(img,aspect='auto') plt.xticks([]) plt.yticks([]) ax1.axis('off') ax_t1 = plt.subplot2grid((6,6),(0,0),rowspan=2,colspan=2) for idx in range(3): plt.plot(plot_t,data_t[0:3001,idx]/np.max(np.abs(data_t[0:3001,idx]))+idx*2 + 2,color='k') if idx == '0': plt.text(28, idx*2 + 2 + 0.1, 'E') if idx == '1': plt.text(28, idx*2 + 2 + 0.1, 'N') if idx == '2': plt.text(28, idx*2 + 2 + 0.1, 'Z') plt.plot([spt_t_eqt/100.0,spt_t_eqt/100.0],[1,8],color='b',label='EqT P') plt.plot([sst_t_eqt/100.0,sst_t_eqt/100.0],[1,8],color='r',label='EqT S') plt.title('Template seismogram from station CI.DEC\nStart Time: 2020-08-08T16:46:00',fontsize=14) plt.ylim([1,7.5]) plt.legend(loc='upper right',prop= {'size':14}) plt.yticks([]) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.xlim([0,30]) plt.xlabel('time (s)',fontsize=14) ax_t2 = plt.subplot2grid((6,6),(2,0),colspan=2) rdx = 22 t_len = int(len(encoded_t_plot[rdx][0,:,0])/2.0) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(encoded_t_plot[rdx][0,:t_len,channel_dx] + 0 * 2,color='k') plt.xlim([0,t_len]) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.title('OTR conv1d_42',fontsize=14) plt.plot([spt_t_eqt/4.0,spt_t_eqt/4.0],[-50,50],color='b',label='EqT P') plt.plot([sst_t_eqt/4.0,sst_t_eqt/4.0],[-50,50],color='r',label='EqT S') t_min_plot = np.min(encoded_t_plot[rdx]) t_max_plot = np.max(encoded_t_plot[rdx]) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - 
t_gain,t_max_plot + t_gain]) t_ax=plt.gca();t_ax.spines['right'].set_color('r');t_ax.spines['top'].set_color('r');t_ax.spines['bottom'].set_color('r');t_ax.spines['left'].set_color('r');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) plt.text(-100,t_min_plot+t_gain*0.2,'Amplitude',fontsize=14,rotation=90) ax_t3 = plt.subplot2grid((6,6),(3,0),colspan=2) rdx = 22 t_len = int(len(encoded_t_plot[rdx][0,:,0])/2.0) start_dx = int(spt_t_eqt*int(RSRN_lengths[rdx])/6000.0) end_dx = start_dx + len(encoded_t_slice[rdx][0,:,0,0]) temp_x_plot = np.arange(start_dx,end_dx,1) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(temp_x_plot, encoded_t_slice[rdx][0,:,0,channel_dx] + 0 * 2,color='k') plt.xlim([0,t_len]) plt.title('ETR conv1d_42',fontsize=14) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.plot([spt_t_eqt/4.0,spt_t_eqt/4.0],[-50,50],color='b',label='EqT P') plt.plot([sst_t_eqt/4.0,sst_t_eqt/4.0],[-50,50],color='r',label='EqT S') t_min_plot = np.min(encoded_t_slice[rdx]) t_max_plot = np.max(encoded_t_slice[rdx]) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot + t_gain]) t_ax=plt.gca();t_ax.spines['right'].set_color('r');t_ax.spines['top'].set_color('r');t_ax.spines['bottom'].set_color('r');t_ax.spines['left'].set_color('r');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) plt.text(-140,t_min_plot+t_gain*0.2,'Amplitude',fontsize=14,rotation=90) plt.xlabel('Array Index (N)',fontsize=14) ax_t4 = plt.subplot2grid((6,6),(2,4),colspan=2) rdx = 22 t_len = int(len(encoded_t_plot[rdx][0,:,0])/2.0) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(ori_corr[rdx][0,:t_len,0,channel_dx]/57.0 + 0 * 2,color='k') plt.xlim([0,t_len]) plt.title('OCC conv1d_42',fontsize=14) plt.xticks(fontsize=12) plt.yticks(fontsize=12) t_min_plot = 
np.min(ori_corr[rdx])/57.0 t_max_plot = np.max(ori_corr[rdx])/57.0 t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot + t_gain]) #plt.plot([SEqT_P/4.0,SEqT_P/4.0],[-50,50],color='b',linestyle='--',label='S-EqT P') plt.plot([SEqT_S/4.0,SEqT_S/4.0],[-50,50],color='r',linestyle='--',label='S-EqT S') t_ax=plt.gca();t_ax.spines['right'].set_color('r');t_ax.spines['top'].set_color('r');t_ax.spines['bottom'].set_color('r');t_ax.spines['left'].set_color('r');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) ax_s1 = plt.subplot2grid((6,6),(0,2),rowspan=2,colspan=2) for idx in range(3): plt.plot(plot_t,data_s[0:3001,idx]/np.max(np.abs(data_s[0:3001,idx]))+idx*2 + 2,color='k') if idx == '0': plt.text(28, idx*2 + 2 + 0.1, 'E') if idx == '1': plt.text(28, idx*2 + 2 + 0.1, 'N') if idx == '2': plt.text(28, idx*2 + 2 + 0.1, 'Z') plt.yticks([]) #plt.plot([SEqT_P/100.0,SEqT_P/100.0],[1,8],color='b',linestyle='--',label='S-EqT P') plt.plot([SEqT_S/100.0,SEqT_S/100.0],[1,8],color='r',linestyle='--',label='S-EqT S') plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.xlim([0,30]) plt.ylim([1,7.5]) plt.legend(loc='upper right',prop= {'size':14}) #plt.text(0,7.5,'(c)',fontsize=20) plt.title('Searching seismogram from station CI.RIN\nStart Time: 2020-08-08T16:46:00',fontsize=14) plt.xlabel('time (s)',fontsize=14) ax_s2 = plt.subplot2grid((6,6),(2,2),colspan=2) rdx = 22 t_len = int(len(ori_response_list_for_vis[rdx*2+1][0,:,0,0])/2.0) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(ori_response_list_for_vis[rdx*2+1][0,:t_len,0,channel_dx] + 0 * 2,color='k') plt.xlim([0,t_len]) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.title('OSR conv1d_42',fontsize=14) t_min_plot = np.min(ori_response_list_for_vis[rdx*2+1]) t_max_plot = np.max(ori_response_list_for_vis[rdx*2+1]) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot 
+ t_gain]) #plt.plot([SEqT_P/4.0,SEqT_P/4.0],[-50,50],color='b',linestyle='--',label='S-EqT P') plt.plot([SEqT_S/4.0,SEqT_S/4.0],[-50,50],color='r',linestyle='--',label='S-EqT S') t_ax=plt.gca();t_ax.spines['right'].set_color('r');t_ax.spines['top'].set_color('r');t_ax.spines['bottom'].set_color('r');t_ax.spines['left'].set_color('r');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) ax_s3 = plt.subplot2grid((6,6),(3,2),colspan=2) rdx = 22 t_len = int(len(encoded_s[rdx][0,:,0,0])/2.0) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(encoded_s[rdx][0,:t_len,0,channel_dx] + 0 * 2,color='k') plt.xlim([0,t_len]) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.title('ESR conv1d_42',fontsize=14) t_min_plot = np.min(encoded_s[rdx]) t_max_plot = np.max(encoded_s[rdx]) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot + t_gain]) #plt.plot([SEqT_P/4.0,SEqT_P/4.0],[-50,50],color='b',linestyle='--',label='S-EqT P') plt.plot([SEqT_S/4.0,SEqT_S/4.0],[-50,50],color='r',linestyle='--',label='S-EqT S') t_ax=plt.gca();t_ax.spines['right'].set_color('r');t_ax.spines['top'].set_color('r');t_ax.spines['bottom'].set_color('r');t_ax.spines['left'].set_color('r');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) plt.xlabel('Array Index (N)',fontsize=14) ax_s4 = plt.subplot2grid((6,6),(3,4),colspan=2) rdx = 22 t_len = int(len(encoded_s[rdx][0,:,0,0])/2.0) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(corr_res[rdx][0,:,0,channel_dx] + 0 * 2,color='k') plt.xlim([0,t_len]) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.title('ECC conv1d_42',fontsize=14) t_min_plot = np.min(corr_res[rdx]) t_max_plot = np.max(corr_res[rdx]) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot + t_gain]) 
#plt.plot([SEqT_P/4.0,SEqT_P/4.0],[-50,50],color='b',linestyle='--',label='S-EqT P') plt.plot([SEqT_S/4.0,SEqT_S/4.0],[-50,50],color='r',linestyle='--',label='S-EqT S') t_ax=plt.gca();t_ax.spines['right'].set_color('r');t_ax.spines['top'].set_color('r');t_ax.spines['bottom'].set_color('r');t_ax.spines['left'].set_color('r');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) plt.xlabel('Array Index (N)',fontsize=14) ax_eqt_final_resp = plt.subplot2grid((6,6),(4,0),colspan=3) rdx = 24 t_len = int(len(ori_response_list_for_vis[rdx*2+1][0,:,0,0])/2.0) for channel_dx in range(int(RSRN_channels[rdx])): plt.plot(ori_response_list_for_vis[rdx*2+1][0,:t_len,0,channel_dx] + 0 * 2,color='k') plt.xlim([0,t_len]) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.title('OSR conv1d_42',fontsize=14) t_min_plot = np.min(ori_response_list_for_vis[rdx*2+1]) t_max_plot = np.max(ori_response_list_for_vis[rdx*2+1]) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot + t_gain]) #plt.plot([SEqT_P,SEqT_P],[-50,50],color='b',linestyle='--',label='S-EqT P') plt.plot([SEqT_S,SEqT_S],[-50,50],color='r',linestyle='--',label='S-EqT S') #t_ax=plt.gca();t_ax.spines['right'].set_color('r');t_ax.spines['top'].set_color('r');t_ax.spines['bottom'].set_color('r');t_ax.spines['left'].set_color('r');t_ax.spines['right'].set_linewidth(3);t_ax.spines['top'].set_linewidth(3);t_ax.spines['bottom'].set_linewidth(3);t_ax.spines['left'].set_linewidth(3) plt.xlabel('Array Index (N)',fontsize=14) plt.title('Response of the penultimate layer in S branch of EqT model',fontsize=14) plt.ylabel('Amplitude',fontsize=14) ax_seqt_final_resp = plt.subplot2grid((6,6),(4,3),colspan=3) for idx in range(7): plt.plot(search_SEqT_penultimate_response[0,:,0,idx],color='k') t_min_plot = np.min(search_SEqT_penultimate_response) t_max_plot = np.max(search_SEqT_penultimate_response) 
plt.xticks(fontsize=12) plt.yticks(fontsize=12) t_gain = (t_max_plot - t_min_plot)*0.15 plt.ylim([t_min_plot - t_gain,t_max_plot + t_gain]) #plt.plot([SEqT_P,SEqT_P],[-40,40],color='b',linestyle='--',label='S-EqT P') plt.plot([SEqT_S,SEqT_S],[-40,40],color='r',linestyle='--',label='S-EqT S') plt.xlim([0,t_len]) #plt.ylabel('Amplitude',fontsize=14) plt.xlabel('Array Index (N)',fontsize=14) plt.title('Response of the penultimate layer in S branch of S-EqT model',fontsize=14) ax_eqt_final = plt.subplot2grid((6,6),(5,0),colspan=3) plt.plot(plot_t,res_search[2][0,0:3001,0],color='k') plt.ylim([-0.05,0.7]) plt.xlim([0,30]) plt.xticks(fontsize=12) plt.yticks(fontsize=12) #plt.plot([SEqT_P/100.0,SEqT_P/100.0],[-1,1],color='b',linestyle='--',label='S-EqT P') plt.plot([SEqT_S/100.0,SEqT_S/100.0],[-1,1],color='r',linestyle='--',label='S-EqT S') plt.xlabel('time (s)',fontsize=14) plt.ylabel('Probability',fontsize=14) plt.title('S phase probabilities by EqT model',fontsize=14) ax_seqt_final = plt.subplot2grid((6,6),(5,3),colspan=3) plt.plot(plot_t,pred_res[-1][0,0:3001,0,0],color='k') plt.xlim([0,30]) plt.ylim([-0.05,0.7]) plt.xticks(fontsize=12) plt.yticks(fontsize=12) #plt.plot([SEqT_P/100.0,SEqT_P/100.0],[-1,1],color='b',linestyle='--',label='S-EqT P') plt.plot([SEqT_S/100.0,SEqT_S/100.0],[-1,1],color='r',linestyle='--',label='S-EqT S') #plt.xlabel('time (s)',fontsize=14) plt.title('S phase probabilities by S-EqT model',fontsize=14) plt.xlabel('time (s)',fontsize=14) plt.tight_layout() plt.show() plt.close() ```
github_jupyter
# MAML Tutorial with JAX [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.sandbox.google.com/github/google/jax/blob/master/docs/notebooks/maml.ipynb) Eric Jang Blog post: https://blog.evjang.com/2019/02/maml-jax.html 21 Feb 2019 Pedagogical tutorial for implementing Model-Agnostic Meta-Learning with JAX's awesome `grad` and `vmap` and `jit` operators. ## Overview In this notebook we'll go through: - how to take gradients, gradients of gradients. - how to fit a sinusoid function with a neural network (and do auto-batching with vmap) - how to implement MAML and check its numerics - how to implement MAML for sinusoid task (single-task objective, batching task instances). - extending MAML to handle batching at the task-level ``` ### import jax.numpy (almost-drop-in for numpy) and gradient operators. import jax.numpy as jnp from jax import grad ``` ## Gradients of Gradients JAX makes it easy to compute gradients of python functions. Here, we thrice-differentiate $e^x$ and $x^2$ ``` f = lambda x : jnp.exp(x) g = lambda x : jnp.square(x) print(grad(f)(1.)) # = e^{1} print(grad(grad(f))(1.)) print(grad(grad(grad(f)))(1.)) print(grad(g)(2.)) # 2x = 4 print(grad(grad(g))(2.)) # x = 2 print(grad(grad(grad(g)))(2.)) # x = 0 ``` ## Sinusoid Regression and vmap To get you familiar with JAX syntax first, we'll optimize neural network params with fixed inputs on a mean-squared error loss to $f_\theta(x) = sin(x)$. 
``` from jax import vmap # for auto-vectorizing functions from functools import partial # for use with vmap from jax import jit # for compiling functions for speedup from jax import random # stax initialization uses jax.random from jax.experimental import stax # neural network library from jax.experimental.stax import Conv, Dense, MaxPool, Relu, Flatten, LogSoftmax # neural network layers import matplotlib.pyplot as plt # visualization # Use stax to set up network initialization and evaluation functions net_init, net_apply = stax.serial( Dense(40), Relu, Dense(40), Relu, Dense(1) ) rng = random.PRNGKey(0) in_shape = (-1, 1,) out_shape, net_params = net_init(rng, in_shape) def loss(params, inputs, targets): # Computes average loss for the batch predictions = net_apply(params, inputs) return jnp.mean((targets - predictions)**2) # batch the inference across K=100 xrange_inputs = jnp.linspace(-5,5,100).reshape((100, 1)) # (k, 1) targets = jnp.sin(xrange_inputs) predictions = vmap(partial(net_apply, net_params))(xrange_inputs) losses = vmap(partial(loss, net_params))(xrange_inputs, targets) # per-input loss plt.plot(xrange_inputs, predictions, label='prediction') plt.plot(xrange_inputs, losses, label='loss') plt.plot(xrange_inputs, targets, label='target') plt.legend() import numpy as np from jax.experimental import optimizers from jax.tree_util import tree_multimap # Element-wise manipulation of collections of numpy arrays opt_init, opt_update, get_params = optimizers.adam(step_size=1e-2) opt_state = opt_init(net_params) # Define a compiled update step @jit def step(i, opt_state, x1, y1): p = get_params(opt_state) g = grad(loss)(p, x1, y1) return opt_update(i, g, opt_state) for i in range(100): opt_state = step(i, opt_state, xrange_inputs, targets) net_params = get_params(opt_state) # batch the inference across K=100 targets = jnp.sin(xrange_inputs) predictions = vmap(partial(net_apply, net_params))(xrange_inputs) losses = vmap(partial(loss, net_params))(xrange_inputs, 
targets) # per-input loss plt.plot(xrange_inputs, predictions, label='prediction') plt.plot(xrange_inputs, losses, label='loss') plt.plot(xrange_inputs, targets, label='target') plt.legend() ``` ## MAML: Optimizing for Generalization Suppose task loss function $\mathcal{L}$ is defined with respect to model parameters $\theta$, input features $X$, input labels $Y$. MAML optimizes the following: $\mathcal{L}(\theta - \nabla \mathcal{L}(\theta, x_1, y_1), x_2, y_2)$ $x_1, y_2$ and $x_2, y_2$ are identically distributed from $X, Y$. Therefore, MAML objective can be thought of as a differentiable cross-validation error (w.r.t. $x_2, y_2$) for a model that learns (via a single gradient descent step) from $x_1, y_1$. Minimizing cross-validation error provides an inductive bias on generalization. The following toy example checks MAML numerics via parameter $x$ and input $y$. ``` # gradients of gradients test for MAML # check numerics g = lambda x, y : jnp.square(x) + y x0 = 2. y0 = 1. print('grad(g)(x0) = {}'.format(grad(g)(x0, y0))) # 2x = 4 print('x0 - grad(g)(x0) = {}'.format(x0 - grad(g)(x0, y0))) # x - 2x = -2 def maml_objective(x, y): return g(x - grad(g)(x, y), y) print('maml_objective(x,y)={}'.format(maml_objective(x0, y0))) # x**2 + 1 = 5 print('x0 - maml_objective(x,y) = {}'.format(x0 - grad(maml_objective)(x0, y0))) # x - (2x) ``` ## Sinusoid Task + MAML Now let's re-implement the Sinusoidal regression task from Chelsea Finn's [MAML paper](https://arxiv.org/abs/1703.03400). 
``` alpha = .1 def inner_update(p, x1, y1): grads = grad(loss)(p, x1, y1) inner_sgd_fn = lambda g, state: (state - alpha*g) return tree_multimap(inner_sgd_fn, grads, p) def maml_loss(p, x1, y1, x2, y2): p2 = inner_update(p, x1, y1) return loss(p2, x2, y2) x1 = xrange_inputs y1 = targets x2 = jnp.array([0.]) y2 = jnp.array([0.]) maml_loss(net_params, x1, y1, x2, y2) ``` Let's try minimizing the MAML loss (without batching across multiple tasks, which we will do in the next section) ``` opt_init, opt_update, get_params = optimizers.adam(step_size=1e-3) # this LR seems to be better than 1e-2 and 1e-4 out_shape, net_params = net_init(rng, in_shape) opt_state = opt_init(net_params) @jit def step(i, opt_state, x1, y1, x2, y2): p = get_params(opt_state) g = grad(maml_loss)(p, x1, y1, x2, y2) l = maml_loss(p, x1, y1, x2, y2) return opt_update(i, g, opt_state), l K=20 np_maml_loss = [] # Adam optimization for i in range(20000): # define the task A = np.random.uniform(low=0.1, high=.5) phase = np.random.uniform(low=0., high=jnp.pi) # meta-training inner split (K examples) x1 = np.random.uniform(low=-5., high=5., size=(K,1)) y1 = A * np.sin(x1 + phase) # meta-training outer split (1 example). Like cross-validating with respect to one example. x2 = np.random.uniform(low=-5., high=5.) y2 = A * np.sin(x2 + phase) opt_state, l = step(i, opt_state, x1, y1, x2, y2) np_maml_loss.append(l) if i % 1000 == 0: print(i) net_params = get_params(opt_state) # batch the inference across K=100 targets = jnp.sin(xrange_inputs) predictions = vmap(partial(net_apply, net_params))(xrange_inputs) plt.plot(xrange_inputs, predictions, label='pre-update predictions') plt.plot(xrange_inputs, targets, label='target') x1 = np.random.uniform(low=-5., high=5., size=(K,1)) y1 = 1. * np.sin(x1 + 0.) 
for i in range(1,5): net_params = inner_update(net_params, x1, y1) predictions = vmap(partial(net_apply, net_params))(xrange_inputs) plt.plot(xrange_inputs, predictions, label='{}-shot predictions'.format(i)) plt.legend() ``` ## Batching Meta-Gradient Across Tasks Kind of does the job but not that great. Let's reduce the variance of gradients in outer loop by averaging across a batch of tasks (not just one task at a time). vmap is awesome it enables nice handling of batching at two levels: inner-level "intra-task" batching, and outer level batching across tasks. From a software engineering perspective, it is nice because the "task-batched" MAML implementation simply re-uses code from the non-task batched MAML algorithm, without losing any vectorization benefits. ``` def sample_tasks(outer_batch_size, inner_batch_size): # Select amplitude and phase for the task As = [] phases = [] for _ in range(outer_batch_size): As.append(np.random.uniform(low=0.1, high=.5)) phases.append(np.random.uniform(low=0., high=jnp.pi)) def get_batch(): xs, ys = [], [] for A, phase in zip(As, phases): x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1)) y = A * np.sin(x + phase) xs.append(x) ys.append(y) return jnp.stack(xs), jnp.stack(ys) x1, y1 = get_batch() x2, y2 = get_batch() return x1, y1, x2, y2 outer_batch_size = 2 x1, y1, x2, y2 = sample_tasks(outer_batch_size, 50) for i in range(outer_batch_size): plt.scatter(x1[i], y1[i], label='task{}-train'.format(i)) for i in range(outer_batch_size): plt.scatter(x2[i], y2[i], label='task{}-val'.format(i)) plt.legend() x2.shape opt_init, opt_update, get_params = optimizers.adam(step_size=1e-3) out_shape, net_params = net_init(rng, in_shape) opt_state = opt_init(net_params) # vmapped version of maml loss. # returns scalar for all tasks. 
def batch_maml_loss(p, x1_b, y1_b, x2_b, y2_b): task_losses = vmap(partial(maml_loss, p))(x1_b, y1_b, x2_b, y2_b) return jnp.mean(task_losses) @jit def step(i, opt_state, x1, y1, x2, y2): p = get_params(opt_state) g = grad(batch_maml_loss)(p, x1, y1, x2, y2) l = batch_maml_loss(p, x1, y1, x2, y2) return opt_update(i, g, opt_state), l np_batched_maml_loss = [] K=20 for i in range(20000): x1_b, y1_b, x2_b, y2_b = sample_tasks(4, K) opt_state, l = step(i, opt_state, x1_b, y1_b, x2_b, y2_b) np_batched_maml_loss.append(l) if i % 1000 == 0: print(i) net_params = get_params(opt_state) # batch the inference across K=100 targets = jnp.sin(xrange_inputs) predictions = vmap(partial(net_apply, net_params))(xrange_inputs) plt.plot(xrange_inputs, predictions, label='pre-update predictions') plt.plot(xrange_inputs, targets, label='target') x1 = np.random.uniform(low=-5., high=5., size=(10,1)) y1 = 1. * np.sin(x1 + 0.) for i in range(1,3): net_params = inner_update(net_params, x1, y1) predictions = vmap(partial(net_apply, net_params))(xrange_inputs) plt.plot(xrange_inputs, predictions, label='{}-shot predictions'.format(i)) plt.legend() # Comparison of maml_loss for task batch size = 1 vs. task batch size = 8 plt.plot(np.convolve(np_maml_loss, [.05]*20), label='task_batch=1') plt.plot(np.convolve(np_batched_maml_loss, [.05]*20), label='task_batch=4') plt.ylim(0., 1e-1) plt.legend() ```
github_jupyter
## Пожалуйста заполни <a href='https://forms.gle/TRk9jQDSq8pWdzFY6'>форму обратной связи</a> <hr> <a href='https://drive.google.com/file/d/1Gs7EAKYESUg4MLVv-u7Db6y2_Cej5i5O/view?usp=sharing'>Запись лекции</a> ``` import os ``` # Работа с деревом файла Функция `walk` модуля `os` позволяет обходить все папки, начиная с заданной, и получать на каждой итерации списки всех файлов и папок на этом уровне. См. документацию: `os.walk` ``` start_path = 'pykili' for root, dirs, files in os.walk(start_path): print('Где мы сейчас:', root) print('Папки на этом уровне:', dirs) print('Файлы на этом уровне:', files) texts = [] for root, dirs, files in os.walk(start_path): for every_file in files: print(os.path.join(root, every_file)) ``` Вывести все .md файлы ``` texts = [] for root, dirs, files in os.walk(start_path): for every_file in files: if os.path.splitext(every_file)[1] == '.md': print(os.path.join(root, every_file)) ``` Обратите внимание, что одна точка обозначает текущую директорию: ``` start_path = "." print("Что здесь есть:", os.listdir(start_path)) ``` Две точки обозначают родительский каталог: ``` os.getcwd() os.chdir('pykili/pykili.github.io') os.getcwd() os.getcwd() print("На уровень выше:", os.listdir("..")) print('#####') print("А в подпапке _data", os.listdir("../_data/")) os.walk(".") ``` <hr> # Форматированный вывод Форматирование строк — это способ презентации данных в виде строковой переменной. На настоящий момент в Питоне существует целых три синтаксиса для форматирования: синтаксис с использованием метода строк `.format()`, «старый» синтаксис с оператором `%` и самый новый с использованием «f-строк». 
### Базовый пример Метод строк `.format()` заменяет фигурные скобки в строке, к которой применён, на строковое представление своих аргументов: ``` x = 3 print('x = {}'.format(x)) ``` Если фигурных скобок несколько, то у метода .format() должно быть соответствующее число аргументов: ``` x = 27 y = 2 print('{} // {} = {}'.format(x, y, x // y)) ``` В фигурных скобках можно указать номер аргумента, начинаю с нуля: ``` name = 'Алиса' day = 22 s = '''Здраствуйте, {0}. Только по промокоду "{0}{1}" {1} апреля 2019 года вы сможете приобрести гантели в количестве {1} шт. по цене одной!'''.format(name, day) print(s) ``` Для удобства, вместо номеров можно использовать имена аргументов (они не обязаны совпадать с именами переменных, но часто это удобно): ``` city1 = 'А' city2 = 'Б' driver = 'велосипедист' velocity = 22 x = 10.52 s = '''Из пункта {departure} в пункт {destination} выехал {driver} со скоростью {v} км/ч. За {x} км до пункта {destination} {driver} встретил свою бабушку. Сколько пирожков съел {driver} перед тем как продолжить свой путь к {destination}?'''.format( departure=city1,destination=city2,driver=driver,x=x,v=velocity) print(s) ``` ### "Старое" форматирование https://docs.python.org/3/library/stdtypes.html#old-string-formatting ``` name = 'Алиса' day = 22 s = '''Здраствуйте, %s. Только по промокоду "%s%d" %d апреля 2019 года вы сможете приобрести гантели в количестве %d шт. по цене одной!''' % (name, name, day, day, day) print(s) print('''Здраствуйте, %(name)s. Только по промокоду "%(name)s%(day)d" %(day)d апреля 2019 года вы сможете приобрести гантели в количестве %(day)d шт. по цене одной!''' % {'name': "Алиса", "day": 22}) ``` ### "Современный" способ: ``` x = 1 print(f'x = {x}') #print("x = {}".format(x)) z = [(1, 22222), (333, 55)] for x, y in z: print(str(x) + "\t" + str(y) + "\n") name = 'Алиса' day = 22 s = f'''Здраствуйте, {name}. Только по промокоду "{name}{day}" {day} апреля 2019 года вы сможете приобрести гантели в количестве {day} шт. 
по цене одной!''' print(s) import math x = math.sqrt(0.5) print(f"F-score: {x}") print(f"F-score: {x:.2f}") a = "hi" b = "world" f"xxxxx {' '.join([a, b])} xxxx" ``` <hr> # Input'ы Базовый пример инпута ``` a = input() a a = int(input()) a a, b = input().split() print(' and '.join([a, b])) b a, b = map(int, input().split()) print(' and '.join(map(str, [a, b]))) b a, b = map(int, input().split()) print(f'{a} and {b}') ``` Прочитать больше про map -> https://pyneng.readthedocs.io/ru/latest/book/10_useful_functions/map.html ``` a, b = map(lambda a: int(a)**2, input().split()) print(' and '.join(map(str, [a, b]))) ``` Еще про лямбды - https://www.w3schools.com/python/python_lambda.asp <hr> ``` os.getcwd() import re dict_res = {} for root, dirs, files in os.walk(os.getcwd()): for each_file in files: num_from_dtr = None num_from_str = sum(map(int, re.findall(r'\d+', each_file))) if num_from_str is not None: if root in dict_res: dict_res[root] += num_from_str else: dict_res[root] = num_from_str for key, value in dict_res.items(): print(f'В папке {key} сумма цифр в файлах равна {value}') dict_res for root, dirs, files in os.walk(os.getcwd()): print('Где мы сейчас:', root) print('Папки на этом уровне:', dirs) print('Файлы на этом уровне:', files) int('05') ```
github_jupyter
``` # import packages import numpy as np import pandas as pd import matplotlib.pyplot as plt import featuretools as ft import lightgbm as lgb import seaborn as sns from sklearn.model_selection import GridSearchCV from sklearn.model_selection import KFold from sklearn.metrics import roc_auc_score from random import sample import pickle import eli5 from eli5.sklearn import PermutationImportance from lightgbm import LGBMClassifier from sklearn.preprocessing import Imputer from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA %matplotlib inline RSEED = 50 ``` # Load Original Features ``` feat_num = 449 #df_total = pd.read_csv('./data/features%s_add_card1_cnt.csv'%(feat_num)) with open('./data/features%s.pickle'%(feat_num), 'rb') as handle: df_total = pickle.load(handle) df_train = df_total[df_total['isFraud'].notnull()] df_train.shape ``` # Negative Downsampling ``` train_pos = df_train[df_train['isFraud']==1] train_neg = df_train[df_train['isFraud']==0] train_neg = train_neg.sample(int(df_train.shape[0] * 0.2), random_state=42) df_train_sample = pd.concat([train_pos,train_neg]).sort_index() ``` # Prepare Data ``` labels_train = df_train_sample['isFraud'] features_train = df_train_sample.drop(columns = ['isFraud', 'TransactionID']) features_train.shape features_train.head() with open('./data/feat%s_rm_pm_importance100.pickle'%(437), 'rb') as handle: to_drop = pickle.load(handle) for item in to_drop: if not 'V' in item: print(item) features_train = features_train.drop(list(to_drop),axis=1) categorical_raw = ['ProductCD', 'card2', 'card3', 'card4', 'card5','card6', 'addr1','addr2','P_email','R_email','M1','M2','M3', 'M4','M5','M6','M7','M8','M9','DeviceType','DeviceInfo','dow','hour', 'Device_name','Device_version','screen_width','screen_height', 'P_email_suffix','R_email_suffix','id_30_OS','id_30_version', 'is_card_freq_Device','is_wide','is_long','is_zero','is_win8_vista', 'is_windows_otheros','is_card_freq_pdc','is_card_freq_addr1'] # 
# The id_12 .. id_38 identity columns are categorical as well.
ids = [ 'id_%s'%(i) for i in range(12,39)]
categorical_raw = categorical_raw + ids

# LightGBM hyper-parameters (tuned in a separate run; seeds fixed for
# reproducibility).
params = {'num_leaves': 491,
          'min_child_weight': 0.03454472573214212,
          'feature_fraction': 0.3797454081646243,
          'bagging_fraction': 0.4181193142567742,
          'min_data_in_leaf': 106,
          'objective': 'binary',
          'max_depth': -1,
          'learning_rate': 0.006883242363721497,
          "boosting_type": "gbdt",
          "bagging_seed": 11,
          "metric": 'auc',
          "verbosity": -1,
          'reg_alpha': 0.3899927210061127,
          'reg_lambda': 0.6485237330340494,
          'random_state': 47,
          #'num_threads':10
          #'is_unbalance':True
          #'scale_pos_weight':9
         }
```

# Select Features

```
def train_selector(params,train_num,features_train,labels_train,categorical,verbose_eval=500):
    """Train one LightGBM model on a simple ordered holdout split.

    The first `train_num` rows are used for training and the remaining rows
    for validation, with early stopping on validation AUC.

    Returns:
        model: the trained lightgbm Booster.
        valid_results: dict of per-iteration metric values for both sets.
    """
    train_set = lgb.Dataset(features_train.iloc[0:train_num,:],
                            label=labels_train.values[0:train_num],
                            categorical_feature=categorical)
    valid_set = lgb.Dataset(features_train.iloc[train_num:,:],
                            label=labels_train.values[train_num:],
                            categorical_feature=categorical)
    valid_results = {}
    model = lgb.train(params,train_set,num_boost_round = 10000,
                      valid_sets = [train_set, valid_set],
                      verbose_eval= verbose_eval,
                      early_stopping_rounds = 500,
                      evals_result=valid_results)
    return model,valid_results

def select_by_importance(model,features_train,importance=0,num_keep=None):
    """Pick features to drop based on the model's feature importances.

    If `num_keep` is given, keep only the `num_keep` most important
    features; otherwise drop every feature whose importance is <=
    `importance`.

    Returns:
        Series of feature names to drop.
    """
    fi = pd.DataFrame({'feature': features_train.columns,
                       'importance':model.feature_importance()})
    fi = fi.sort_values('importance', ascending = False)
    if num_keep != None:
        to_drop = fi.iloc[num_keep:,:].feature
    else:
        to_drop = fi[fi.importance <= importance].feature
    return to_drop

def fold_train_selector(Nfold,features_train,labels_train,categorical):
    """Train with K-fold cross-validation and return the mean validation AUC.

    NOTE(review): KFold is created without shuffle=True, so random_state has
    no effect and the folds are contiguous blocks -- confirm this is intended
    for this (index-ordered) data.
    """
    splits = Nfold
    ave_auc = 0
    valid_results = {}
    folds = KFold(n_splits = splits,random_state=RSEED)
    for fold_num, (trn_idx, val_idx) in enumerate(folds.split(features_train.values, labels_train.values)):
        print("Fold {}".format(fold_num))
        train_df, y_train_df = features_train.iloc[trn_idx], labels_train.iloc[trn_idx]
        valid_df, y_valid_df = features_train.iloc[val_idx], labels_train.iloc[val_idx]
        trn_data = lgb.Dataset(train_df, label=y_train_df,categorical_feature=categorical)
        val_data = lgb.Dataset(valid_df, label=y_valid_df,categorical_feature=categorical)
        clf = lgb.train(params, trn_data, 10000,
                        valid_sets = [trn_data, val_data],
                        verbose_eval=500,
                        early_stopping_rounds=500,
                        evals_result=valid_results)
        pred = clf.predict(valid_df)
        auc_score = roc_auc_score(y_valid_df, pred)
        # Accumulate the per-fold AUC so the loop total is the mean AUC.
        ave_auc += auc_score / splits
        print( " auc = ", auc_score )
    return ave_auc

def permutation_importance(model,features_valid,labels_valid):
    """Calculate the permutation importance of each feature.

    Each column of the validation set is shuffled in turn and the drop in
    validation AUC relative to the unshuffled baseline is recorded; the
    column is restored afterwards, so `features_valid` is unchanged on
    return.

    Args:
        model: the trained model.
        features_valid: dataframe. The validation set of features.
        labels_valid: labels of validation set.

    Returns:
        dataframe with columns ['feature', 'importance'], where importance
        is (baseline AUC - shuffled AUC); larger means more important.
    """
    base_score = roc_auc_score(labels_valid, model.predict(features_valid))
    list_fimportance = []
    for col in features_valid.columns:
        print(col)
        save = features_valid[col].copy()
        features_valid[col] = np.random.permutation(features_valid[col])
        col_score = roc_auc_score(labels_valid, model.predict(features_valid))
        features_valid[col] = save
        list_fimportance.append([col,base_score - col_score])
    return pd.DataFrame(list_fimportance,columns = ['feature','importance'])
```

## PCA V Features

```
def check_missing(df,cols=None,axis=0):
    """Check the data frame's missing-value situation.

    Args:
        df: data frame.
        cols: list. Optional list of column names to restrict the check to.
        axis: int. 0 counts missing values per column, 1 per row.

    Returns:
        data frame with 'missing_num' and 'missing_percent' columns, sorted
        by missing percentage (descending).
    """
    if cols != None:
        df = df[cols]
    missing_num = df.isnull().sum(axis).to_frame().rename(columns={0:'missing_num'})
    missing_num['missing_percent'] = df.isnull().mean(axis)*100
    return missing_num.sort_values(by='missing_percent',ascending = False)

# Standardize and mean-impute the anonymous V1..V339 columns before PCA.
# NOTE(review): the scaler is fit on the raw (possibly NaN-containing) V
# columns but applied to the imputed matrix -- verify the fitted statistics
# are usable; fitting the scaler after imputation would be safer.
vfeatures = ['V'+str(i) for i in range(1,340)]
scaler = StandardScaler()
scaler.fit(features_train[vfeatures])
imp = Imputer(missing_values=np.nan , strategy='mean', axis=0)
vfeature_impute = imp.fit_transform(features_train[vfeatures])
vfeature_impute_scale = scaler.transform(vfeature_impute)
vfeature_impute_scale = pd.DataFrame(vfeature_impute_scale, columns=vfeatures)

# Full PCA (all components) to inspect the explained-variance spectrum.
pca = PCA()
vfeature_pca = pca.fit_transform(vfeature_impute_scale)

# check components number should be the same as total features
components_total = len(pca.explained_variance_ratio_)
# generate sequence for plotting
components = np.arange(components_total)

# Scree plot: per-component explained variance (bars, left axis) and its
# cumulative sum (line, right axis) for the first 100 components.
fig, ax1 = plt.subplots(figsize=(15,5))
ax1.bar(components[0:100],pca.explained_variance_ratio_[0:100])
ax1.set_ylabel('Explained Variance', color="blue")
ax1.set_xlabel('Number of Components')
ax2 = ax1.twinx()
ax2.plot(np.cumsum(pca.explained_variance_ratio_[0:100]), color="red",marker='o')
ax2.set_ylabel('Cumulative Explained Variance', color="red")
plt.title("Cumulative Explained Variance vs No. of Principal Components")

# Fraction of total variance captured by the first 30 components.
np.cumsum(pca.explained_variance_ratio_[:30])[-1]

# Re-apply PCA to the data while selecting for number of components to retain.
# Keep 30 principal components of the V features and join them back in,
# replacing the original 339 V columns.
pca_50 = PCA(n_components=30)
vfeature_pca_50 = pca_50.fit_transform(vfeature_impute_scale)
vfeature_pca_50_df = pd.DataFrame(vfeature_pca_50,columns= ['PCA'+str(i) for i in range(1,31)])
vfeature_pca_50_df.head()

# Align indices before the join, then swap V columns for PCA columns.
vfeature_pca_50_df.reset_index(drop=True,inplace=True)
features_train.reset_index(drop=True,inplace=True)
features_train.drop(vfeatures,axis=1,inplace=True)
features_train = features_train.join(vfeature_pca_50_df)
features_train.head()
```

## Train with all feature set

```
# 80/20 ordered holdout split for the single-model run.
train_num = int(138771*0.8)#160000
#features_train = features_train.drop(['C8'],axis=1)
categorical = list(set(categorical_raw).intersection(features_train.columns))
model,valid_results = train_selector(params,train_num,features_train,labels_train,
                                     categorical,verbose_eval=500)
model.num_trees()
```

## Permutation Importance

```
lgb.plot_importance(model, max_num_features=50,figsize=(12,10))

# Permutation importance is computed on the holdout part only.
fi_importance = permutation_importance(model,features_train.iloc[train_num:],
                                       labels_train.iloc[train_num:])
fi_importance.sort_values(by='importance',ascending=False)[0:50]
fi_importance.sort_values(by='importance',ascending=False)[-10:]
```

## Feature Test Result

```
features_train.head()
categorical = list(set(categorical_raw).intersection(features_train.columns))
ave_auc = fold_train_selector(3,features_train,labels_train,categorical)

# Experiment log: each comment records which feature-set variant produced
# the `ave_auc` value shown by the following cell.
# feat439 change id 30 --not confirmed from large dataset
ave_auc
# feat439 change device info --not confirmed from large dataset
ave_auc
# feat439 add card_mv_day_fq
ave_auc
# feat438 add addr1 cnt boost performance
ave_auc
# feat437 add card1 cnt boost performance
ave_auc
# feat436 add pdc_amt_std_ratio improve but lower than pdc_amt_ratio
ave_auc
# feat437 add pdc_amt_std_ratio-- lower performance
ave_auc
# feat436 add pdc_amt_ratio-- very effective
ave_auc
# feat435 clean id 33 if not treat as categorical performance drop
ave_auc
# feat435 clean id 33 improve performance
ave_auc
# feat453 clean DeviceInfo and modify Device_name
ave_auc
# feat435 clean DeviceInfo lead to even lower performance
ave_auc
# feat435 clean id30 -- clean id_30 lead to lower performance
ave_auc
# feat434 clean P and R email -- very effective
ave_auc
# feat434 clean id31 -- very effective
ave_auc
# feat434 without any change
ave_auc
```

## Feature selection by Importance

```
# Alternative drop candidates tried in earlier runs:
#to_drop = list(select_by_importance(model,features_train,importance=0))
#to_drop = fi_importance[fi_importance.importance <0].feature
#to_drop = fi_importance.sort_values(by='importance',ascending=False)[-50:].feature
to_drop = ['P_email']
features_train_temp = features_train.drop(to_drop,axis=1)
categorical_temp = list(set(categorical_raw).intersection(features_train_temp.columns))
print(features_train_temp.head())
ave_auc = fold_train_selector(3,features_train_temp,labels_train,categorical_temp)

# Experiment log for the drop experiments above.
# feat439 add pemail fraud rate drop pemail
ave_auc
# # feat437 add card1 cnt drop fi_importance.sort_values(by='importance',ascending=False)[-50:].feature
ave_auc
# feat437 add card1 cnt drop 'V169'
ave_auc
# feat437 add card1 cnt drop 'D5'
ave_auc
# feat437 add card1 cnt drop fi_importance[fi_importance.importance <0].feature
ave_auc
# feat437 add card1 cnt drop fi_importance.sort_values(by='importance',ascending=False)[-100:].feature
ave_auc
# feat437 add card1 cnt drop transactionDT
ave_auc

# Persist the 100 least important features for reuse in later runs.
to_drop = fi_importance.sort_values(by='importance',ascending=False)[-100:].feature
with open('./data/feat437_rm_pm_importance100.pickle', 'wb') as handle:
    pickle.dump(to_drop, handle, protocol=pickle.HIGHEST_PROTOCOL)
```

## Recursive Eliminate Features

```
# Earlier candidate groups and their outcomes:
# to_drop = {'P_email_suffix','R_email_suffix','dow','pdc_D1_ratio'} is useless
# to_drop = {'card_TAmt_ratio','card_TAmt_std_ratio','is_card_freq_pdc','is_card_freq_addr1'}is useless
# to_drop = {'TransactionAmt_decimal','is_wide','is_long','is_zero'} is useless
# to_drop = {'card_D2_mean','card_D15_mean','card_D4_mean','card_id_02_mean'','card_D1_std',
#            'card_D15_std','card_D1_mean','card_id_02_std','card_D3_mean} is useless
# to_drop = {'addr1_D15_mean','addr1_D15_std'} is useless
# to_drop = {'ProductCD_target_mean','M4_target_mean'} is useless
# to_drop = {'card2_fq_enc','card3_fq_enc','card5_fq_enc','P_email_fq_enc','R_email_fq_enc'} is useless
# to_drop = {'addr2_fq_enc',}
# to_drop = {'R_email_fraud_rate','card6_fraud_rate','card4_fraud_rate'} all have boost performance
# to_drop = {'card_addr_fq','card_mv_hour_fq','card_mv_hour_fq_ratio',
#            'card_hour_Amt','card_hour_Amt_ratio',
#            'card_mv_day_fq_ratio','card_day_Amt','card_day_Amt_ratio'} useless
# to_drop = {'D6_fq_enc','D7_fq_enc','D8_fq_enc','D9_fq_enc','DeviceInfo_fq_enc',
#            'id_30_fq_enc','id_31_fq_enc','screen_width_fq_enc'}
# inconsistent results between the small and the large dataset
to_drop = {'DT_hour_Amt_ratio','DT_day_Amt_ratio','DT_month_Amt_ratio',
           'DT_year_Amt_ratio','card2_Amt_ratio',
           'card3_Amt_ratio','card4_Amt_ratio','card5_Amt_ratio','card6_Amt_ratio'}

# Leave-one-in sweep: for each candidate column, drop all OTHER candidates
# and measure the CV AUC with only that column kept.
result = []
for col in to_drop:
    print(col)
    to_drop_temp = list(to_drop - set([col]))
    features_train_temp = features_train.drop(to_drop_temp,axis=1)
    print(features_train_temp.shape)
    categorical_temp = list(set(categorical_raw).intersection(features_train_temp.columns))
    ave_auc = fold_train_selector(3,features_train_temp,labels_train,categorical_temp)
    print(ave_auc)
    result.append([col,ave_auc])

result
result
best = 0.921552900501287
result
best = 0.9213639958310471
result
0.921645405116515
result
```
github_jupyter
# Making work public ## Licensing When a repository with source code, a manuscript or other creative works becomes public, it should include a file LICENSE or LICENSE.txt in the base directory of the repository that clearly states under which license the content is being made available. This is because creative works are automatically eligible for intellectual property (and thus copyright) protection. Reusing creative works without a license is dangerous, because the copyright holders could sue you for copyright infringement. A license solves this problem by granting rights to others (the licensees) that they would otherwise not have. What rights are being granted under which conditions differs, often only slightly, from one license to another. In practice, a few licenses are by far the most popular, and choosealicense.com will help you find a common license that suits your needs. Important considerations include: - Whether you want to address patent rights. - Whether you require people distributing derivative works to also distribute their source code. - Whether the content you are licensing is source code. - Whether you want to license the code at all. Choosing a license that is in common use makes life easier for contributors and users, because they are more likely to already be familiar with the license and don’t have to wade through a bunch of jargon to decide if they’re ok with it. The Open Source Initiative and Free Software Foundation both maintain lists of licenses which are good choices. This article provides an excellent overview of licensing and licensing options from the perspective of scientists who also write code. At the end of the day what matters is that there is a clear statement as to what the license is. Also, the license is best chosen from the get-go, even if for a repository that is not public. 
Pushing off the decision only makes it more complicated later, because each time a new collaborator starts contributing, they, too, hold copyright and will thus need to be asked for approval once a license is chosen. <section class="challenge panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> Can I Use Open License?</h2> </div> <div class="panel-body"> <p>Find out whether you are allowed to apply an open license to your software. Can you do this unilaterally, or do you need permission from someone in your institution? If so, who?</p> </div> </section> <section class="challenge panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> What licenses have I already accepted?</h2> </div> <div class="panel-body"> <p>Many of the software tools we use on a daily basis (including in this workshop) are released as open-source software. Pick a project on GitHub from the list below, or one of your own choosing. Find its license (usually in a file called LICENSE or COPYING) and talk about how it restricts your use of the software. Is it one of the licenses discussed in this session? 
How is it different?</p> <ul> <li>Git, the source-code management tool</li> <li>CPython, the standard implementation of the Python language</li> <li>Jupyter, the project behind the web-based Python notebooks we’ll be using</li> <li>EtherPad, a real-time collaborative editor</li> </ul> </div> </section> <section class="keypoints panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-exclamation-circle"></span> Key Points</h2> </div> <div class="panel-body"> <ul> <li>People who incorporate General Public License (GPL’d) software into their own software must make their software also open under the GPL license; most other open licenses do not require this.</li> <li>The Creative Commons family of licenses allow people to mix and match requirements and restrictions on attribution, creation of derivative works, further sharing, and commercialization.</li> <li>People who are not lawyers should not try to write licenses from scratch.</li> </ul> </div> </section> ## Citation You may want to include a file called CITATION or CITATION.txt that describes how to reference your project; the one for Software Carpentry states: ``` To reference Software Carpentry in publications, please cite both of the following: Greg Wilson: "Software Carpentry: Getting Scientists to Write Better Code by Making Them More Productive". Computing in Science & Engineering, Nov-Dec 2006. Greg Wilson: "Software Carpentry: Lessons Learned". arXiv:1307.5448, July 2013. 
@article{wilson-software-carpentry-2006, author = {Greg Wilson}, title = {Software Carpentry: Getting Scientists to Write Better Code by Making Them More Productive}, journal = {Computing in Science \& Engineering}, month = {November--December}, year = {2006}, } @online{wilson-software-carpentry-2013, author = {Greg Wilson}, title = {Software Carpentry: Lessons Learned}, version = {1}, date = {2013-07-20}, eprinttype = {arxiv}, eprint = {1307.5448} } ``` More detailed advice, and other ways to make your code citable can be found at the Software Sustainability Institute blog and in: > Smith AM, Katz DS, Niemeyer KE, FORCE11 Software Citation Working Group. (2016) Software citation principles. PeerJ Computer Science 2:e86 https://doi.org/10.7717/peerj-cs.86 There is also an `@software{...` BibTeX entry type in case no “umbrella” citation like a paper or book exists for the project you want to make citable. <section class="keypoints panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-exclamation-circle"></span> Key Points</h2> </div> <div class="panel-body"> <ul> <li>Add a CITATION file to a repository to explain how you want your work cited.</li> </ul> </div> </section> --- The material in this notebook is derived from the Software Carpentry lessons &copy; [Software Carpentry](http://software-carpentry.org/) under the terms of the [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/) license.
github_jupyter
## Advanced McStas features: EXTEND and WHEN In this tutorial we will look at two advanced features in McStas, the EXTEND block and WHEN condition. Here we will use them to flag certain neutrons with EXTEND, and only record them in monitors when the flag is set using a WHEN condition. ``` from mcstasscript.interface import instr, functions, plotter instrument = instr.McStas_instr("python_tutorial", input_path="run_folder") ``` ### Set up an example McStas instrument First we set up an example instrument consisting of a source, a guide and a position/divergence monitor. The guide is set up such that it only has mirrors on the left and right side, and absorbs neutrons if they hit the top or bottom. This is done to look at the horizontal behavior independently from the vertical, as this is easier to analyze. ``` src = instrument.add_component("source", "Source_simple") src.xwidth = 0.02 src.yheight = 0.02 src.focus_xw = guide_opening_w = 0.05 src.focus_yh = guide_opening_h = 0.06 src.dist = 1.5 src.flux = 1E13 instrument.add_parameter("wavelength", value=5.0, comment="Wavelength in [Ang]") src.lambda0="wavelength" src.dlambda="0.001*wavelength" guide = instrument.add_component("guide", "Guide_gravity", AT=[0,0,1.5], RELATIVE=src) guide.w1 = guide_opening_w guide.h1 = guide_opening_h guide.w2 = guide_opening_w guide.h2 = guide_opening_h guide.l = guide_length = 15 guide.mleft = 4.0 guide.mright = 4.0 guide.mtop = 0.0 guide.mbottom = 0.0 guide.G = -9.82 acceptance = instrument.add_component("acceptance", "DivPos_monitor") acceptance.set_AT([0,0, guide_length + 0.1], RELATIVE=guide) acceptance.nh = 200 acceptance.ndiv = 200 acceptance.filename = '"acceptance.dat"' acceptance.xwidth = 0.08 acceptance.yheight = 0.05 acceptance.maxdiv_h = 1.5 acceptance.restore_neutron = 1 data = instrument.run_full_instrument(ncount=5E6, foldername="data_folder/mcstas_EXTEND_WHEN", increment_folder_name=True, parameters={"wavelength" : 2.8}) plotter.make_sub_plot(data) ``` ### Interpreting 
the data Here we see an acceptance monitor, with position along the x-axis and divergence along the y-axis. The guide is under-illuminated by the small source, so there are gaps in the acceptance diagram. We see that the position and divergence of the beam consist of a large number of stripes; the ones with the lowest divergence have the largest intensity. ## Add a flag A flag is just a name for a variable that records some information on the neutron during the simulation, and can be used later to make a decision. Here we could check how many times the ray was reflected in the guide. We use an EXTEND block after a component to access variables internal to the component in the instrument scope. We declare a variable in the instrument scope called *n_reflections*. In the component scope, one can use the SCATTERED variable which contains the number of times the ray has encountered the SCATTER keyword within the component. Usually this is done when entering and leaving, and under each scattering / reflection, so the number of reflections is SCATTERED - 2. ``` instrument.add_declare_var("int", "n_reflections") guide.append_EXTEND("n_reflections = SCATTERED - 2;") guide.print_long() ``` ## Use the flag to limit what is recorded in a monitor A WHEN statement can be used to activate / deactivate a component when some condition is true / false. For example we could require 0 reflections in our guide. We add a few monitors similar to the original, with the only difference being WHEN statements requiring 0, 1 or 2 reflections in the guide for the component to be active. We use a for loop to add the similar components, only changing the component instance name, filename and WHEN statement between each. 
```
# One monitor per reflection count; each is gated by a WHEN condition on
# the n_reflections flag set in the guide's EXTEND block.
reflection_numbers = [0, 1, 2]
for reflections in reflection_numbers:
    reflections_string = str(reflections)
    acceptance = instrument.add_component("acceptance_" + reflections_string, "DivPos_monitor")
    acceptance.filename = '"acceptance_' + reflections_string + '.dat"'
    # Monitor is only active when the ray was reflected exactly this many times.
    acceptance.set_WHEN("n_reflections == " + reflections_string)
    acceptance.set_AT([0,0, guide_length + 0.1], RELATIVE=guide)
    acceptance.nh = 200
    acceptance.ndiv = 200
    acceptance.xwidth = 0.08
    acceptance.yheight = 0.05
    acceptance.maxdiv_h = 1.5
    # Leave the ray unchanged so downstream monitors still see it.
    acceptance.restore_neutron = 1
    acceptance.print_long()
    print("")
```

### Running the simulation We now run the simulation with the new monitors to see how they differ from the original version.

```
data = instrument.run_full_instrument(ncount=5E6,
                                      foldername="data_folder/mcstas_EXTEND_WHEN",
                                      increment_folder_name=True,
                                      parameters={"wavelength" : 2.8})
plotter.make_sub_plot(data)
```

### Interpretation of the data The original monitor is unchanged as it was not modified. On the monitors with different numbers of reflections, we see the middle line correspond to zero reflections, the two lines around those are for one reflection and so forth. This explains why the lines further from the center has lower intensity, as they underwent more reflections while also having a larger angle of incidence. ### The McStas instrument file We here show the generated McStas instrument file in order to clarify how this would be accomplished without the McStasScript API.

```
# Print the generated .instr file for reference.
with open("run_folder/python_tutorial.instr") as file:
    instrument_string = file.read()
print(instrument_string)
```
github_jupyter
# Time Series Problem Set: Question 1 ``` import numpy as np import pandas as pd from statsmodels.tsa.stattools import levinson_durbin from scipy.stats import kurtosis, kstat from scipy.special import comb # Read in data YEAR_NUMBER = 2000 df = pd.read_csv(f'../portfolio-analysis/{YEAR_NUMBER}_data.csv', index_col=0) # Cut data to 250 days sp500 = df.SP500[:250] assert len(sp500) == 250 ``` ## Part (a) ``` M = 10 lagged = np.vstack([sp500[i:240+i] for i in range(M + 1)]).T cov = np.cov(lagged.T) eigvals = np.linalg.eigvalsh(cov) # Eigenvalues of a symmetric matrix msg = 'R is positive definite.' if (eigvals > 0).all() else 'R is NOT positive definite!' print(msg) ``` ## Part (b) If all reflection coefficients $k_m$ had magnitude less than 1, then the corresponding polynomial is stable. If all reflection coefficients above a certain order are all 0, then the corresponding system is exactly $AR$. ``` # The Levinson-Durbin and least squares coefficients generally agree, # except for the first coefficient, which represents \delta_{p0}. _, a_lv, _, sigma, _ = levinson_durbin(s=sp500, nlags=10) ar_coeff_lv = np.hstack([1, a_lv]) ar_coeff_ls, _, _, _ = np.linalg.lstsq(np.hstack([np.ones([lagged.shape[0], 1]), lagged[:, 1:]]), lagged[:, 0], rcond=None) print(f'Levinson-Durbin:\n{ar_coeff_lv}\n') print(f'Least Squares:\n{ar_coeff_ls}') ``` ## Part (c) ``` aic = (2/250)*np.log(sigma[1:]) + [2*i/250 for i in range(10)] optimal_lag = np.argmin(aic) + 1 print(f'Optimal lag value: {optimal_lag}') ``` ## Part (d) ``` sp500_diff = sp500.diff()[1:] # Part (a) M = 10 lagged = np.vstack([sp500_diff[i:240+i] for i in range(M)]).T cov = np.cov(lagged.T) eigvals = np.linalg.eigvalsh(cov) # Eigenvalues of a symmetric matrix msg = 'R is positive definite.' if (eigvals > 0).all() else 'R is NOT positive definite!' 
print(msg) print(20*'-') # Part (b) _, a_lv, _, sigma, _ = levinson_durbin(s=sp500_diff, nlags=10) ar_coeff_lv = np.hstack([1, a_lv]) ar_coeff_ls, _, _, _ = np.linalg.lstsq(np.hstack([np.ones([lagged.shape[0], 1]), lagged[:, 1:]]), lagged[:, 0], rcond=None) print(f'Levinson-Durbin:\n{ar_coeff_lv}\n') print(f'Least Squares:\n{ar_coeff_ls}') print(20*'-') # Part (c) aic = (2/250)*np.log(sigma[1:]) + [2*i/250 for i in range(10)] optimal_lag = np.argmin(aic) + 1 print(f'Optimal lag value: {optimal_lag}') ``` ## Part (e) ``` # For the direct model, M = 1 M = 1 lagged = np.vstack([sp500[i:250-M-1+i] for i in range(M + 1)]).T x = lagged[:, 1:] y = lagged[:, 0] _, ar_coeff, _, sigma, _ = levinson_durbin(s=sp500_diff, nlags=M) resid = pd.Series(y - x @ ar_coeff) reflection_coeff = ar_coeff[-1] cov = np.array([resid.autocorr(lag=i) for i in range(1, 11)]) print(f'Reflection coefficient: {reflection_coeff}') print(f'Covariance coefficients: {cov}') # For the direct model, M = 10 M = 10 lagged = np.vstack([sp500[i:250-M-1+i] for i in range(M + 1)]).T x = lagged[:, 1:] y = lagged[:, 0] _, ar_coeff, _, sigma, _ = levinson_durbin(s=sp500_diff, nlags=M) resid = pd.Series(y - x @ ar_coeff) reflection_coeff = ar_coeff[-1] cov = np.array([resid.autocorr(lag=i) for i in range(1, 11)]) print(f'Reflection coefficient: {reflection_coeff}') print(f'Covariance coefficients: {cov}') # For the first difference model, M = 1 M = 1 lagged = np.vstack([sp500_diff[i:250-M-1+i] for i in range(M + 1)]).T x = lagged[:, 1:] y = lagged[:, 0] _, ar_coeff, _, sigma, _ = levinson_durbin(s=sp500_diff, nlags=M) resid = pd.Series(y - x @ ar_coeff) reflection_coeff = ar_coeff[-1] cov = np.array([resid.autocorr(lag=i) for i in range(1, 11)]) print(f'Reflection coefficient: {reflection_coeff}') print(f'Covariance coefficients: {cov}') # For the first difference model, M = 10 M = 10 lagged = np.vstack([sp500_diff[i:250-M-1+i] for i in range(M + 1)]).T x = lagged[:, 1:] y = lagged[:, 0] _, ar_coeff, _, sigma, _ 
= levinson_durbin(s=sp500_diff, nlags=M) resid = pd.Series(y - x @ ar_coeff) reflection_coeff = ar_coeff[-1] cov = np.array([resid.autocorr(lag=i) for i in range(1, 11)]) print(f'Reflection coefficient: {reflection_coeff}') print(f'Covariance coefficients: {cov}') ``` ## Part (f) ``` # Taken from https://www.statsmodels.org/dev/_modules/statsmodels/stats/moment_helpers.html def mnc2cum(mnc): '''convert non-central moments to cumulants recursive formula produces as many cumulants as moments http://en.wikipedia.org/wiki/Cumulant#Cumulants_and_moments ''' mnc = [1] + list(mnc) kappa = [1] for nn,m in enumerate(mnc[1:]): n = nn+1 kappa.append(m) for k in range(1,n): kappa[n] -= comb(n-1,k-1,exact=1) * kappa[k]*mnc[n-k] return kappa[1:] # Kurtosis is not close to 3, which would be expected for a Gaussian variable. # It looks like the residuals are not Gaussian! kurt = kurtosis(resid) non_central_moments = [np.mean(resid**k) for k in range(3, 7)] cumul = mnc2cum(non_central_moments) print(f'Kurtosis: {kurt}') print(f'Cumulants: {cumul}') ```
github_jupyter
# Latent dimension comparisons ## Goal - Compare the `average per sample mse loss` and `average per sample kl loss` for different number of latent dimensions to determine what number of latent dimensions to use for the VAE ### Imports

```
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
```

### Check the validity of the dumped arrays by ensuring that all the samples used for testing are present in the validation for all the latent dimension tests

```
# Latent sizes tested and the matching dump-directory timestamps
# (parallel lists, paired by position).
latent_dims = [16, 32, 64, 128, 256]
dumps = ["20190829_010238", "20190829_010252", "20190829_010339", "20190829_010405", "20190829_010431"]

# First check that all the indices from the test validation set exist in all the dumps
ldump_idx_arr = None  # sorted index array of the first dump, used as reference

# Iterate over the dumps and check the indices
for latent_dim, dump in zip(latent_dims, dumps):
    print("----------------------------------------------------")
    print("Reading metrics from VAE with {0} latent dimensions :".format(latent_dim))
    print("----------------------------------------------------")
    dump_npz_path = "/home/akajal/WatChMaL/VAE/dumps/{0}/test_valid_iteration_metrics.npz".format(dump)
    dump_npz_arr = np.load(dump_npz_path)
    dump_indices = np.sort(dump_npz_arr["indices"])
    if ldump_idx_arr is not None:
        if not np.array_equal(dump_indices, ldump_idx_arr):
            print("Index array for latent dims {0} not equal to all the other.".format(latent_dim))
        else:
            print("Index array equal to the first index array")
    else:
        ldump_idx_arr = dump_indices
```

### For each configuration of the latent dimensions, print the `average per sample mse loss` with its `standard deviation` and `standard error` and print the `average per sample kl loss` with its `standard deviation` and `standard error`

```
# Collect the metrics for plotting as well
recon_loss_values, kl_loss_values = [], []
recon_std_values, kl_std_values = [], []
recon_stderr_values, kl_stderr_values = [], []

# Iterate over the dumps and check the indices
for latent_dim, dump in zip(latent_dims, dumps):
    print("----------------------------------------------------")
    print("Printing metrics for VAE with {0} latent dimensions :".format(latent_dim))
    print("----------------------------------------------------")
    dump_npz_path = "/home/akajal/WatChMaL/VAE/dumps/{0}/test_valid_iteration_metrics.npz".format(dump)
    npz_arr = np.load(dump_npz_path)
    dump_recon_loss, dump_kl_loss = npz_arr["recon_loss"], npz_arr["kl_loss"]

    # Mean, std and standard error (std / sqrt(N)) of the per-sample losses.
    mean_recon_loss, std_recon_loss = np.mean(dump_recon_loss), np.std(dump_recon_loss)
    stderr_recon_loss = std_recon_loss/math.sqrt(dump_recon_loss.shape[0])
    recon_loss_values.append(mean_recon_loss)
    recon_std_values.append(std_recon_loss)
    recon_stderr_values.append(stderr_recon_loss)

    mean_kl_loss, std_kl_loss = np.mean(dump_kl_loss), np.std(dump_kl_loss)
    stderr_kl_loss = std_kl_loss/math.sqrt(dump_kl_loss.shape[0])
    kl_loss_values.append(mean_kl_loss)
    kl_std_values.append(std_kl_loss)
    kl_stderr_values.append(stderr_kl_loss)

    print("Recon Loss metrics")
    print("Mean Recon loss : {0}".format(mean_recon_loss))
    print("Std Recon loss : {0}".format(std_recon_loss))
    print("Stderr Recon loss : {0}\n".format(stderr_recon_loss))
    print("KL Loss metrics")
    print("Mean KL loss : {0}".format(mean_kl_loss))
    print("Std KL loss : {0}".format(std_kl_loss))
    print("Stderr KL loss : {0}".format(stderr_kl_loss))
```

### For each of the latent dimensions, plot the `average per sample mse loss` with its `stderr` as the error bar and plot the `average per sample kl loss` with its `stderr` as the error bar

```
# Plot the metrics for the training subset
# Initialize the plot
fig, ax1 = plt.subplots(figsize=(16,9))

# Set the x-axes ticks for the plot
ax1.set_xticks(latent_dims)

# Use the same x-axis to plot the KL loss
ax2 = ax1.twinx()

# Plot the MSE values collected above
ax1.errorbar(latent_dims, recon_loss_values, yerr=recon_stderr_values,
             linestyle='dashed', marker='o', markersize=10, elinewidth=3,
             capsize=4.0, color="blue", label="Average per sample MSE Loss")

# Plot the KL values collected above
ax2.errorbar(latent_dims, kl_loss_values, yerr=kl_stderr_values,
             linestyle='dashed', marker='o', markersize=10, elinewidth=3,
             capsize=4.0, color="red", label="Average per sample KL Loss")

# Setup plot characteristics
ax1.tick_params(axis="x", labelsize=25)
ax1.set_xlabel("Number of latent dimensions", fontsize=25)
# NOTE(review): basex is the pre-matplotlib-3.3 keyword (newer versions use
# base) -- confirm the pinned matplotlib version before upgrading.
ax1.set_xscale("log", basex=2)
ax1.set_ylabel("MSE Loss", fontsize=25, color="blue")
ax1.tick_params(axis="y", labelsize=25, colors="blue")
ax2.set_ylabel("KL Loss", fontsize=25, color="red")
ax2.tick_params(axis="y", labelsize=25, colors="red")

plt.margins(0.2)
ax1.grid(True)
ax2.grid(True)
ax1.set_facecolor('white')
ax2.set_facecolor('white')
lgd = fig.legend(prop={"size":25}, loc='center', bbox_to_anchor=(0.5, 0.75))

plt.savefig("figures/latent_dimensions_vs_mse_and_kl_loss.pdf", format="pdf", dpi=600, bbox_inches="tight")
```

### From here, we choose the model with 128 dimensions as our reference model ### On a per sample basis, compute the delta difference b/w the MSE Loss and KL Loss for that sample with a model with `x` latent dimensions and a model with 128 dimensions ### Steps : #### 1. Construct 5 dataframes (one for each number of latent dimensions) #### 2. Sort the dataframes along the dataset index axis #### 3. Calculate the delta difference for each of the dataframe w.r.t. the dataframe corresponding to the reference model #### 4. Plot the delta difference values ### Construct 5 dataframes (one for each number of latent dimensions)

```
dump_dfs = []

# Iterate over the dumps and check the indices
for latent_dim, dump in zip(latent_dims, dumps):
    print("----------------------------------------------------")
    print("Reading metrics from VAE with {0} latent dimensions :".format(latent_dim))
    print("----------------------------------------------------")
    dump_npz_path = "/home/akajal/WatChMaL/VAE/dumps/{0}/test_valid_iteration_metrics.npz".format(dump)
    dump_npz_arr = np.load(dump_npz_path)
    dump_dfs.append(pd.DataFrame(data={"index":dump_npz_arr["indices"],
                                       "recon_loss":dump_npz_arr["recon_loss"],
                                       "kl_loss":dump_npz_arr["kl_loss"]}))

print("Done.")
```

### Sort the dataframes along the index axis

```
for df in dump_dfs:
    df.sort_values(by="index", inplace=True)
```

### Set the index of the reference dataframe to use

```
# Position 3 in latent_dims corresponds to the 128-dimension model.
ref_df_idx = 3
```

### Calculate the delta differenced values for each number of latent dimensions

```
# Per-sample difference of each model's losses from the reference model's.
for df in dump_dfs:
    df["delta recon_loss"] = df["recon_loss"].values - dump_dfs[ref_df_idx]["recon_loss"].values
    df["delta kl_loss"] = df["kl_loss"].values - dump_dfs[ref_df_idx]["kl_loss"].values
```

### Find the mean and stderr of the delta differenced values for each number of latent dimensions

```
delta_recon_mean, delta_kl_mean, delta_recon_stderr, delta_kl_stderr = [], [], [], []
for df in dump_dfs:
    delta_recon_loss, delta_kl_loss = df["delta recon_loss"], df["delta kl_loss"]
    delta_recon_mean.append(np.mean(delta_recon_loss.values))
    delta_kl_mean.append(np.mean(delta_kl_loss.values))
    delta_recon_stderr.append(np.std(delta_recon_loss.values)/math.sqrt(df["delta recon_loss"].values.shape[0]))
    delta_kl_stderr.append(np.std(delta_kl_loss.values)/math.sqrt(df["delta kl_loss"].values.shape[0]))

print(delta_recon_mean)
print(delta_kl_mean)
print(delta_recon_stderr)
print(delta_kl_stderr)
```

### Plot the delta differenced values collected above

```
# Plot the metrics for the training subset
# Initialize the plot
fig, ax1 = plt.subplots(figsize=(16,9))

# Set the x-axes ticks for the plot
ax1.set_xticks(latent_dims)

# Use the same x-axis to plot the KL loss
ax2 = ax1.twinx()

# Plot the MSE values collected above
ax1.errorbar(latent_dims, delta_recon_mean, yerr=delta_recon_stderr,
             linestyle='dashed', marker='o', markersize=10, elinewidth=3,
             capsize=4.0, color="blue", label=r"Average per sample $\Delta$ MSE Loss")

# Plot the KL values collected above
ax2.errorbar(latent_dims, delta_kl_mean, yerr=delta_kl_stderr,
             linestyle='dashed', marker='o', markersize=10, elinewidth=3,
             capsize=4.0, color="red", label=r"Average per sample $\Delta$ KL Loss")

# Setup plot characteristics
ax1.tick_params(axis="x", labelsize=25)
ax1.set_xlabel("Number of latent dimensions", fontsize=25)
ax1.set_xscale("log", basex=2)
ax1.set_ylabel(r"$\Delta$ MSE Loss", fontsize=25, color="blue")
ax1.tick_params(axis="y", labelsize=25, colors="blue")
ax2.set_ylabel(r"$\Delta$ KL Loss", fontsize=25, color="red")
ax2.tick_params(axis="y", labelsize=25, colors="red")

plt.margins(0.2)
ax1.grid(True)
ax2.grid(True)
lgd = fig.legend(prop={"size":25}, loc='center', bbox_to_anchor=(0.5, 0.75))

plt.savefig("figures/latent_dimensions_vs_delta_differenced_loss.pdf", format="pdf", dpi=600, bbox_inches="tight")
```

## Done.
github_jupyter
# PARAMETER FITTING PART 2 - OPTIMIZATION This notebook describes optimization algorithms, ways to search for a minimum value without doing exhaustive search. # Preliminaries ``` IS_COLAB = True if IS_COLAB: !pip install -q tellurium pass # Python packages used in this chapter import matplotlib.pyplot as plt import numpy as np import pandas as pd import urllib.request # use this library to download file from GitHub import tellurium as te import seaborn as sns ``` ## Constants and Helper Functions ``` def getSharedCodes(moduleName): """ Obtains common codes from the github repository. Parameters ---------- moduleName: str name of the python module in the src directory """ if IS_COLAB: url = "https://github.com/sys-bio/network-modeling-summer-school-2021/raw/main/src/%s.py" % moduleName local_python = "python.py" _, _ = urllib.request.urlretrieve(url=url, filename=local_python) else: local_python = "../../src/%s.py" % moduleName with open(local_python, "r") as fd: codeStr = "".join(fd.readlines()) print(codeStr) exec(codeStr, globals()) # Acquire codes getSharedCodes("util") # TESTS assert(isinstance(LINEAR_PATHWAY_DF, pd.DataFrame)) ``` # Optimization Essentials <a class="anchor" id="parameter-fitting-optimization-essentials"></a> ## Definitions **Optimization** is the term used by mathematicians to find the *best* value of a function. There are two kinds of "best" - the minimum value and the maximum value. Actually, the two are equivalent. This is because finding the maximum value is easily translated into finding the minimum value by searching for the negative of the maximum value. More formally, we use $f$ to denote a function of many variables. We denote the $n$-th variable by $x_n$, and so $f(x_1, \cdots, x_N)$ produces a scalar result given $N$ values. 
Here is some useful notation: * ${\bf x} = (x_1 , \cdots, x_N)$ * $x_n$ is a parameter * $N$ is the number of parameters to fit * ${\bf x}^{\star}$ is the vector that minimizes $f({\bf x})$ for all permitted values of ${\bf x}$ To expand on the last point, in reaction networks, species concentrations and kinetic constants must be non-negative. Here's an example of an $f$ in the $x$-$y$ plane: $f(x, y) = \sqrt{(x-1)^2 + (y-2)^2}$. The input to $f$ is a point in the plane; the output of $f$ is the distance from that point to $(1, 2)$. In this case, $f$ is always non-negative, and so its minimum value is 0. ``` # Heat map of distances from the point (10, 20) xstar = 10 ystar = 20 x = np.arange(-50, 50, 0.5) y = np.arange(-50, 50, 0.5) xx, yy = np.meshgrid(x, y) xx = xx.flatten() yy = yy.flatten() df = pd.DataFrame({"x": xx.flatten(), "y": yy.flatten()}) df = df.apply(lambda v: np.round(v, 0)) df["distance"] = ((df["x"] - xstar)**2 + (df["y"] - ystar)**2)**0.5 table = pd.pivot_table(df, values='distance', columns=["x"], index=['y']) ax = sns.heatmap(table, vmin=0, vmax =50) ax.set_title("Distances from (10, 20).") type(ax) ``` In the following examples, we mostly use $N = 1$ since the visualizations are easier. But keep in mind that almost always $N$ is much larger than 1 since $N$ is the number of parameters we want to fit. ## Convex Curves A convex function of ${\bf x}$ looks like a bowl. That is, if you repeatedly change ${\bf x}$ so that $f$ gets smaller, you are guaranteed to find ${\bf x}^{\star}$. ``` xv = np.array(range(20)) xv = xv - 6 _ = plt.plot(xv, 3 + (xv - 4)**2 ) _ = plt.title("Convex Curve") ``` This function is convex. We can readily see that the minimum value of this function occurs at $x^{\star} = 4$. A convex curve has the very nice property that its local minimum is also the global minimum. That is, there are no small valleys where the optimizer might get "stuck". For this reason, it's easy to find $x^{\star}$ using gradient descent. 
## Non-Convex Curves Unfortunately, convex curves are rare in practice. Much more common is something like the following. ``` from mpl_toolkits.mplot3d import Axes3D def eggholder(x): return (-(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47)))) -x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47))))) x = np.arange(-512, 513) y = np.arange(-512, 513) xgrid, ygrid = np.meshgrid(x, y) xy = np.stack([xgrid, ygrid]) fig = plt.figure(figsize=(12,10)) ax = fig.add_subplot(111, projection='3d') ax.view_init(45, -45) ax.plot_surface(xgrid, ygrid, eggholder(xy), cmap='terrain') ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('Optimization Landscape(x, y)') ax.set_title("Complex, Non-Convex Curve") plt.show() ``` For curves such as these, heuristic methods are needed such as differential evolution, simulated annealing, and random stochastic gradient descent. # Programming to find minimum values Here, we describe how to use the python ``scipy.optimize.minimize`` library to find the minimum value of a function. First, you need to implement $f({\bf x})$. We implement the distance function in the plane ``` def distance(point, center=(10, 20)): """ Calculate the distance from the center. Parameters ---------- point: tuple-float center: tuple-float Returns ------- float """ return sum((np.array(point) - np.array(center))**2) # TESTS center = (2, 3) assert(distance(center, center=center) == 0) ``` Next, we use the ``LinearConstraint`` class in ``scipy.optimize`` to indicate that we are only interested in points in the upper right quadrant. This isn't essential for the example we're doing now. But it is very useful later on when we want to ensure that kinetic constants are non-negative. The constraints are defined in terms of bounds after a matrix multiplication. Let ${\bf M}$ be the constraint matrix with dimension $N \times N$, ${\bf x}$ be a solution vector. Note that $({\bf M}) ({\bf x})$ is also a vector of dimension $N$. 
The constraints are that each element in $({\bf M}) ({\bf x})$ must be greater than or equal to its corresponding element in ${\bf x}^L$ and less than or equal to its corresponding element in ${\bf x}^U$, where the superscripts $L$, $U$ indicate the lower and upper bound vector. In the example below, $ {\bf M} = \begin{pmatrix} 1 & 0 \\ 0 & 1 \\ \end{pmatrix}$; ${\bf x}^L = [0, 0]$; and ${\bf x}^U = [\infty, \infty]$. So, the constraints are that $0 \leq x_n \leq \infty$. ``` from scipy.optimize import LinearConstraint eye2Mat = np.identity(2) # Two dimensional identity matrix constraints = LinearConstraint(eye2Mat, [0, 0], [np.inf, np.inf]), ``` The code below finds the minimal value for the distance function. Note that it is sometimes helpful to use the optional keyword argument args to pass additional information to the function being minimized. ``` from scipy.optimize import minimize res = minimize( distance, constraints=constraints, x0=(0, 0), ) res.x ``` By default, ``scipy.optimize.minimize`` uses gradient descent. ``minimize`` has a keyword argument ``method`` that provides a way to select other algorithms that are more appropriate for non-convex curves. However, for non-convex curves, we recommend using ``scipy.optimize.differential_evolution`` which has a similar (but not identical) interface as ``minimize``. In particular, note that the bounds are a sequence of min-max pairs for each parameter. ``` from scipy.optimize import differential_evolution bounds = [(0, 100) for _ in range(2)] res = differential_evolution( distance, bounds=bounds, ) res.x ``` # Exercise Apply ``scipy.optimize.minimize`` to the ``LINEAR_PATHWAY_MODEL`` to find values of the parameters ``k1, k2, k3, k4`` that minimize the sum of squares of the residuals for ``LINEAR_PATHWAY_DF``. *Hint*: Create a function that takes as input the values of the parameters of ``LINEAR_PATHWAY_MODEL`` and outputs the sum of squares of the residuals.
github_jupyter
# Introduction to SimpleITKv4 Registration - Continued ## ITK v4 Registration Components <img src="ITKv4RegistrationComponentsDiagram.svg" style="width:700px"/><br><br> Before starting with this notebook, please go over the first introductory notebook found [here](60_Registration_Introduction.ipynb). In this notebook we will visually assess registration by viewing the overlap between images using external viewers. The two viewers we recommend for this task are [ITK-SNAP](http://www.itksnap.org) and [3D Slicer](http://www.slicer.org/). ITK-SNAP supports concurrent linked viewing between multiple instances of the program. 3D Slicer supports concurrent viewing of multiple volumes via alpha blending. ``` import SimpleITK as sitk # If the environment variable SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT is set, this will override the ReadImage # function so that it also resamples the image to a smaller size (testing environment is memory constrained). %run setup_for_testing # Utility method that either downloads data from the network or # if already downloaded returns the file name for reading from disk (cached data). %run update_path_to_download_script from downloaddata import fetch_data as fdata # Always write output to a separate directory, we don't want to pollute the source directory. import os OUTPUT_DIR = "Output" # GUI components (sliders, dropdown...). from ipywidgets import interact, fixed # Enable display of HTML. from IPython.display import display, HTML # Plots will be inlined. %matplotlib inline # Callbacks for plotting registration progress. import registration_callbacks ``` ## Utility functions A number of utility functions, saving a transform and corresponding resampled image, callback for selecting a DICOM series from several series found in the same directory. 
``` def save_transform_and_image(transform, fixed_image, moving_image, outputfile_prefix): """ Write the given transformation to file, resample the moving_image onto the fixed_images grid and save the result to file. Args: transform (SimpleITK Transform): transform that maps points from the fixed image coordinate system to the moving. fixed_image (SimpleITK Image): resample onto the spatial grid defined by this image. moving_image (SimpleITK Image): resample this image. outputfile_prefix (string): transform is written to outputfile_prefix.tfm and resampled image is written to outputfile_prefix.mha. """ resample = sitk.ResampleImageFilter() resample.SetReferenceImage(fixed_image) # SimpleITK supports several interpolation options, we go with the simplest that gives reasonable results. resample.SetInterpolator(sitk.sitkLinear) resample.SetTransform(transform) sitk.WriteImage(resample.Execute(moving_image), outputfile_prefix + ".mha") sitk.WriteTransform(transform, outputfile_prefix + ".tfm") def DICOM_series_dropdown_callback(fixed_image, moving_image, series_dictionary): """ Callback from dropbox which selects the two series which will be used for registration. The callback prints out some information about each of the series from the meta-data dictionary. For a list of all meta-dictionary tags and their human readable names see DICOM standard part 6, Data Dictionary (http://medical.nema.org/medical/dicom/current/output/pdf/part06.pdf) """ # The callback will update these global variables with the user selection. global selected_series_fixed global selected_series_moving img_fixed = sitk.ReadImage(series_dictionary[fixed_image][0]) img_moving = sitk.ReadImage(series_dictionary[moving_image][0]) # There are many interesting tags in the DICOM data dictionary, display a selected few. 
tags_to_print = { "0010|0010": "Patient name: ", "0008|0060": "Modality: ", "0008|0021": "Series date: ", "0008|0031": "Series time:", "0008|0070": "Manufacturer: ", } html_table = [] html_table.append( "<table><tr><td><b>Tag</b></td><td><b>Fixed Image</b></td><td><b>Moving Image</b></td></tr>" ) for tag in tags_to_print: fixed_tag = "" moving_tag = "" try: fixed_tag = img_fixed.GetMetaData(tag) except: # ignore if the tag isn't in the dictionary pass try: moving_tag = img_moving.GetMetaData(tag) except: # ignore if the tag isn't in the dictionary pass html_table.append( "<tr><td>" + tags_to_print[tag] + "</td><td>" + fixed_tag + "</td><td>" + moving_tag + "</td></tr>" ) html_table.append("</table>") display(HTML("".join(html_table))) selected_series_fixed = fixed_image selected_series_moving = moving_image ``` ## Loading Data In this notebook we will work with CT and MR scans of the CIRS 057A multi-modality abdominal phantom. The scans are multi-slice DICOM images. The data is stored in a zip archive which is automatically retrieved and extracted when we request a file which is part of the archive. ``` data_directory = os.path.dirname(fdata("CIRS057A_MR_CT_DICOM/readme.txt")) # 'selected_series_moving/fixed' will be updated by the interact function. selected_series_fixed = "" selected_series_moving = "" # Directory contains multiple DICOM studies/series, store the file names # in dictionary with the key being the series ID. 
reader = sitk.ImageSeriesReader() series_file_names = {} series_IDs = list(reader.GetGDCMSeriesIDs(data_directory)) # list of all series if series_IDs: # check that we have at least one series for series in series_IDs: series_file_names[series] = reader.GetGDCMSeriesFileNames( data_directory, series ) interact( DICOM_series_dropdown_callback, fixed_image=series_IDs, moving_image=series_IDs, series_dictionary=fixed(series_file_names), ) else: print("This is surprising, data directory does not contain any DICOM series.") # Actually read the data based on the user's selection. fixed_image = sitk.ReadImage(series_file_names[selected_series_fixed]) moving_image = sitk.ReadImage(series_file_names[selected_series_moving]) # Save images to file and view overlap using external viewer. sitk.WriteImage(fixed_image, os.path.join(OUTPUT_DIR, "fixedImage.mha")) sitk.WriteImage(moving_image, os.path.join(OUTPUT_DIR, "preAlignment.mha")) ``` ## Initial Alignment A reasonable guesstimate for the initial translational alignment can be obtained by using the CenteredTransformInitializer (functional interface to the CenteredTransformInitializerFilter). The resulting transformation is centered with respect to the fixed image and the translation aligns the centers of the two images. There are two options for defining the centers of the images, either the physical centers of the two data sets (GEOMETRY), or the centers defined by the intensity moments (MOMENTS). Two things to note about this filter, it requires the fixed and moving image have the same type even though it is not algorithmically required, and its return type is the generic SimpleITK.Transform. ``` initial_transform = sitk.CenteredTransformInitializer( sitk.Cast(fixed_image, moving_image.GetPixelID()), moving_image, sitk.Euler3DTransform(), sitk.CenteredTransformInitializerFilter.GEOMETRY, ) # Save moving image after initial transform and view overlap using external viewer. 
save_transform_and_image( initial_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "initialAlignment"), ) ``` Look at the transformation, what type is it? ``` print(initial_transform) ``` ## Final registration ### Version 1 <ul> <li> Single scale (not using image pyramid).</li> <li> Initial transformation is not modified in place.</li> </ul> <ol> <li> Illustrate the need for scaling the step size differently for each parameter: <ul> <li> SetOptimizerScalesFromIndexShift - estimated from maximum shift of voxel indexes (only use if data is isotropic).</li> <li> SetOptimizerScalesFromPhysicalShift - estimated from maximum shift of physical locations of voxels.</li> <li> SetOptimizerScalesFromJacobian - estimated from the averaged squared norm of the Jacobian w.r.t. parameters.</li> </ul> </li> <li> Look at the optimizer's stopping condition to ensure we have not terminated prematurely. </li> </ol> ``` registration_method = sitk.ImageRegistrationMethod() registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50) registration_method.SetMetricSamplingStrategy(registration_method.RANDOM) registration_method.SetMetricSamplingPercentage(0.01) registration_method.SetInterpolator(sitk.sitkLinear) # The learningRate parameter is always required. Using the default # configuration this parameter is ignored because it is overridden # by the default setting of the estimateLearningRate parameter which # is sitk.ImageRegistrationMethod.Once. For the user selected # learningRate to take effect you need to also set the # estimateLearningRate parameter to sitk.ImageRegistrationMethod.Never registration_method.SetOptimizerAsGradientDescent( learningRate=1.0, numberOfIterations=100 ) # Scale the step size differently for each parameter, this is critical!!! 
registration_method.SetOptimizerScalesFromPhysicalShift() registration_method.SetInitialTransform(initial_transform, inPlace=False) registration_method.AddCommand( sitk.sitkStartEvent, registration_callbacks.metric_start_plot ) registration_method.AddCommand( sitk.sitkEndEvent, registration_callbacks.metric_end_plot ) registration_method.AddCommand( sitk.sitkIterationEvent, lambda: registration_callbacks.metric_plot_values(registration_method), ) final_transform_v1 = registration_method.Execute( sitk.Cast(fixed_image, sitk.sitkFloat32), sitk.Cast(moving_image, sitk.sitkFloat32) ) print( f"Optimizer's stopping condition, {registration_method.GetOptimizerStopConditionDescription()}" ) print(f"Final metric value: {registration_method.GetMetricValue()}") # Save moving image after registration and view overlap using external viewer. save_transform_and_image( final_transform_v1, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1"), ) ``` Look at the final transformation, what type is it? ``` print(final_transform_v1) ``` ### Version 1.1 The previous example illustrated the use of the ITK v4 registration framework in an ITK v3 manner. We only referred to a single transformation which was what we optimized. In ITK v4 the registration method accepts three transformations (if you look at the diagram above you will only see two transformations, Moving transform represents $T_{opt} \circ T_m$): <ul> <li> SetInitialTransform, $T_{opt}$ - composed with the moving initial transform, maps points from the virtual image domain to the moving image domain, modified during optimization. </li> <li> SetFixedInitialTransform $T_f$- maps points from the virtual image domain to the fixed image domain, never modified. </li> <li> SetMovingInitialTransform $T_m$- maps points from the virtual image domain to the moving image domain, never modified. 
</li> </ul> The transformation that maps points from the fixed to moving image domains is thus: $^M\mathbf{p} = T_{opt}(T_m(T_f^{-1}(^F\mathbf{p})))$ We now modify the previous example to use $T_{opt}$ and $T_m$. ``` registration_method = sitk.ImageRegistrationMethod() registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50) registration_method.SetMetricSamplingStrategy(registration_method.RANDOM) registration_method.SetMetricSamplingPercentage(0.01) registration_method.SetInterpolator(sitk.sitkLinear) registration_method.SetOptimizerAsGradientDescent( learningRate=1.0, numberOfIterations=100 ) registration_method.SetOptimizerScalesFromPhysicalShift() # Set the initial moving and optimized transforms. optimized_transform = sitk.Euler3DTransform() registration_method.SetMovingInitialTransform(initial_transform) registration_method.SetInitialTransform(optimized_transform) registration_method.AddCommand( sitk.sitkStartEvent, registration_callbacks.metric_start_plot ) registration_method.AddCommand( sitk.sitkEndEvent, registration_callbacks.metric_end_plot ) registration_method.AddCommand( sitk.sitkIterationEvent, lambda: registration_callbacks.metric_plot_values(registration_method), ) registration_method.Execute( sitk.Cast(fixed_image, sitk.sitkFloat32), sitk.Cast(moving_image, sitk.sitkFloat32) ) # Need to compose the transformations after registration. final_transform_v11 = sitk.CompositeTransform(optimized_transform) final_transform_v11.AddTransform(initial_transform) print( f"Optimizer's stopping condition, {registration_method.GetOptimizerStopConditionDescription()}" ) print(f"Final metric value: {registration_method.GetMetricValue()}") # Save moving image after registration and view overlap using external viewer. save_transform_and_image( final_transform_v11, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1.1"), ) ``` Look at the final transformation, what type is it? Why is it different from the previous example? 
``` print(final_transform_v11) ``` ### Version 2 <ul> <li> Multi scale - specify both scale, and how much to smooth with respect to original image.</li> <li> Initial transformation modified in place, so in the end we have the same type of transformation in hand.</li> </ul> ``` registration_method = sitk.ImageRegistrationMethod() registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50) registration_method.SetMetricSamplingStrategy(registration_method.RANDOM) registration_method.SetMetricSamplingPercentage(0.01) registration_method.SetInterpolator(sitk.sitkLinear) registration_method.SetOptimizerAsGradientDescent( learningRate=1.0, numberOfIterations=100 ) # , estimateLearningRate=registration_method.EachIteration) registration_method.SetOptimizerScalesFromPhysicalShift() final_transform = sitk.Euler3DTransform(initial_transform) registration_method.SetInitialTransform(final_transform) registration_method.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1]) registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0]) registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn() registration_method.AddCommand( sitk.sitkStartEvent, registration_callbacks.metric_start_plot ) registration_method.AddCommand( sitk.sitkEndEvent, registration_callbacks.metric_end_plot ) registration_method.AddCommand( sitk.sitkMultiResolutionIterationEvent, registration_callbacks.metric_update_multires_iterations, ) registration_method.AddCommand( sitk.sitkIterationEvent, lambda: registration_callbacks.metric_plot_values(registration_method), ) registration_method.Execute( sitk.Cast(fixed_image, sitk.sitkFloat32), sitk.Cast(moving_image, sitk.sitkFloat32) ) print( f"Optimizer's stopping condition, {registration_method.GetOptimizerStopConditionDescription()}" ) print(f"Final metric value: {registration_method.GetMetricValue()}") # Save moving image after registration and view overlap using external viewer. 
save_transform_and_image( final_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v2"), ) ``` Look at the final transformation, what type is it? ``` print(final_transform) ```
github_jupyter
<a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a> $ \newcommand{\bra}[1]{\langle #1|} $ $ \newcommand{\ket}[1]{|#1\rangle} $ $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ $ \newcommand{\dot}[2]{ #1 \cdot #2} $ $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ $ \newcommand{\mypar}[1]{\left( #1 \right)} $ $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ $ \newcommand{\onehalf}{\frac{1}{2}} $ $ \newcommand{\donehalf}{\dfrac{1}{2}} $ $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ $ \newcommand{\vzero}{\myvector{1\\0}} $ $ \newcommand{\vone}{\myvector{0\\1}} $ $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $ $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ $ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $ $ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $ $ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $ 
$ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $ $ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $ <font style="font-size:28px;" align="left"><b><font color="blue"> Solution for </font>Quantum Tomography </b></font> <br> _prepared by Abuzer Yakaryilmaz_ <br><br> <a id="task1"></a> <h3> Task 1 </h3> You are given 1000 copies of the identical qubits which are in the same quantum state lying in the first or second quadrant of the unit circle. This quantum state is represented by an angle $ \theta \in [0,\pi) $, and your task is to guess this angle. You use the class __unknown_qubit__ and its methods for your experiments. _Remark that the measurement outcomes of the quantum states with angles $ \pi \over 3 $ and $ 2 \pi \over 3 $ are identical even though they are different quantum states. Therefore, getting 1000 qubits and then measuring them does not guarantee the correct answer._ Test your solution at least ten times. <h3> Solution </h3> __Class unknown_qubit__ ``` # class unknown_qubit # available_qubit = 1000 -> you get at most 1000 qubit copies # get_qubits(number_of_qubits) -> you get the specified number of qubits for your experiment # measure_qubits() -> your qubits are measured and the result is returned as a dictionary variable # -> after measurement, these qubits are destroyed # rotate_qubits(angle) -> your qubits are rotated with the specified angle in radian # compare_my_guess(my_angle) -> your guess in radian is compared with the real angle from random import randrange from math import pi from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer class unknown_qubit: def __init__(self): self.__theta = randrange(18000)/18000*pi self.__available_qubits = 1000 self.__active_qubits = 0 print(self.__available_qubits,"qubits are created") def get_qubits(self,number_of_qubits=None): if number_of_qubits is None or isinstance(number_of_qubits,int) is False or number_of_qubits < 1: print() print("ERROR: the method 
'get_qubits' takes the number of qubit(s) as a positive integer, i.e., get_qubits(100)") elif number_of_qubits <= self.__available_qubits: self.__qc = QuantumCircuit(1,1) self.__qc.ry(2 * self.__theta,0) self.__active_qubits = number_of_qubits self.__available_qubits = self.__available_qubits - self.__active_qubits print() print("You have",number_of_qubits,"active qubits that are set to (cos(theta),sin(theta))") self.available_qubits() else: print() print("WARNING: you requested",number_of_qubits,"qubits, but there is not enough available qubits!") self.available_qubits() def measure_qubits(self): if self.__active_qubits > 0: self.__qc.measure(0,0) job = execute(self.__qc,Aer.get_backend('qasm_simulator'),shots=self.__active_qubits) counts = job.result().get_counts(self.__qc) print() print("your",self.__active_qubits,"qubits are measured") print("counts = ",counts) self.__active_qubits = 0 return counts else: print() print("WARNING: there is no active qubits -- you might first execute 'get_qubits()' method") self.available_qubits() def rotate_qubits(self,angle=None): if angle is None or (isinstance(angle,float) is False and isinstance(angle,int) is False): print() print("ERROR: the method 'rotate_qubits' takes a real-valued angle in radian as its parameter, i.e., rotate_qubits(1.2121)") elif self.__active_qubits > 0: self.__qc.ry(2 * angle,0) print() print("your active qubits are rotated by angle",angle,"in radian") else: print() print("WARNING: there is no active qubits -- you might first execute 'get_qubits()' method") self.available_qubits() def compare_my_guess(self,my_angle): if my_angle is None or (isinstance(my_angle,float) is False and isinstance(my_angle,int) is False): print("ERROR: the method 'compare_my_guess' takes a real-valued angle in radian as your guessed angle, i.e., compare_my_guess(1.2121)") else: self.__available_qubits = 0 diff = abs(my_angle-self.__theta) print() print(self.__theta,"is the original",) print(my_angle,"is your guess") 
print("the angle difference between the original theta and your guess is",diff/pi*180,"degree") print("-->the number of available qubits is (set to) zero, and so you cannot make any further experiment") def available_qubits(self): print("--> the number of available unused qubit(s) is",self.__available_qubits) ``` __Single experiment__ A direct measument gives us two candidates. We use 900 copies here. ``` from math import pi, cos, sin, acos, asin my_experiment = unknown_qubit() # we use 900 copies to determine our two candidates my_experiment.get_qubits(900) counts = my_experiment.measure_qubits() number_of_observed_zeros = 0 if '0' in counts: number_of_observed_zeros = counts['0'] probability_of_observing_zeros = number_of_observed_zeros/900 cos_theta = probability_of_observing_zeros ** 0.5 theta = acos(cos_theta) theta_first_candidate = theta theta_second_candidate = pi-theta print("the first candidate is",theta_first_candidate,"in radian and",theta_first_candidate*180/pi,"in degree") print("the second candidate is",theta_second_candidate,"in radian and",theta_second_candidate*180/pi,"in degree") ``` We use remaining 100 copies to test which candidate works better. For this purpose, we rotate qubits with the first candidate angle in reverse direction. If it is the correct guess, then the new quantum state should be very close to the state $ \ket{0} $ and so we observe only '0's. 
``` my_experiment.get_qubits(100) my_experiment.rotate_qubits(-1 * theta_first_candidate) counts = my_experiment.measure_qubits() number_of_observed_zeros = 0 if '0' in counts: number_of_observed_zeros = counts['0'] if number_of_observed_zeros == 100: my_guess = theta_first_candidate else: my_guess = theta_second_candidate my_experiment.compare_my_guess(my_guess) ``` __Multiple Experiments__ ``` for i in range(10): print("Experiment",(i+1)) print("___________") print() my_experiment = unknown_qubit() my_experiment.get_qubits(900) counts = my_experiment.measure_qubits() number_of_observed_zeros = 0 if '0' in counts: number_of_observed_zeros = counts['0'] probability_of_observing_zeros = number_of_observed_zeros/900 cos_theta = probability_of_observing_zeros ** 0.5 theta = acos(cos_theta) theta_first_candidate = theta theta_second_candidate = pi-theta my_experiment.get_qubits(100) my_experiment.rotate_qubits(-1 * theta_first_candidate) counts = my_experiment.measure_qubits() number_of_observed_zeros = 0 if '0' in counts: number_of_observed_zeros = counts['0'] if number_of_observed_zeros == 100: my_guess = theta_first_candidate else: my_guess = theta_second_candidate my_experiment.compare_my_guess(my_guess) print() print() print() ```
github_jupyter
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt plt.style.use('classic') pd.plotting.register_matplotlib_converters() %matplotlib inline ``` # Discussion: Week 4 ## Exercise: The Perpetual Inventory Method The stock of physical capital is a fundamental component of many macroeconomic models but measuring, and therefore acquiring data on, the value of a country's capital stock is hard. The challenge arises because it is hard to estimate the value of something that is not changing ownership in a transaction. What is the value of a high-rise apartment building that has had the same owner for 50 years? One way to overcome this challenge is to use the **perpetual inventory method**. The perpetual inventory method is based on the law of motion of the capital stock: \begin{align} K_{t+1} & = I_t + (1-\delta) K_t \end{align} where $K_t$ denotes physical capital, $I_t$ denotes investment (purchases of new capital), and $\delta$ denotes the rate of depreciation. Given $K_0$ and data on investment $I_0, I_1, \ldots$, the capital stock in periods $1, 2, \ldots$ can be computed iteratively. To obtain $K_0$, the method assumes that the economy is in a steady state in the initial year. Recall that in the steady state: \begin{align} I & = \delta K \end{align} So, set $K_0 = \delta^{-1} I_0$. Steps: 1. Obtain real investment data (https://fred.stlouisfed.org/series/GPDICA) 2. Select a value for the depreciation rate $\delta$. 0.1 is a standard choice. 3. Initialize an array of zeros (or an empty array) to store capital values 4. Set the first value of the capital array equal to $\delta^{-1} I_0$ 5. Iterate over the values in the investment data to compute the remaining values of capital. Read Timothy Kehoe's notes (http://users.econ.umn.edu/~tkehoe/classes/GrowthAccountingNotes.pdf) for more background on the perpetual inventory method if you're interested. 
``` # Import real investment data from FRED into a DataFrame called 'data' (PROVIDED) data = pd.read_csv('https://fred.stlouisfed.org/data/GPDICA.txt',sep='\s+',skiprows=13,index_col=0,parse_dates=True) # Set the columns attribute of data to ['Investment'] data.columns = ['Investment'] # Set the index.name attribute of data to 'Date' (i.e., data.index.name) data.index.name='Date' # Create a variable called 'delta' that stores the value for delta as 0.1 delta = 0.1 # Create a variable called 'capital' equal to an array of zeros with the same length as the variable 'data' capital = np.arange(len(data)) # Set the initial value of the 'capital' variable equal to the initial value of investment divided by delta capital[0] = data['Investment'].iloc[0]/delta # Iterate (i.e., use a for loop) over t in range(len(capital)-1) to construct values in the capital array for t in range(len(capital)-1): capital[t+1] = data['Investment'].iloc[t] + (1-delta)*capital[t] # Add a 'Capital' column to data data['Capital'] = capital # Divide data by 1000 so that the values are in units are in trillions of $ (instead of billions) data = data/1000 # Print the first 5 rows of data print(data.head()) ``` Now construct a figure that contains a plot of capital in the top panel and investment in the bottom panel. 1. The figure should have dimensions $12\times 8$. 2. Y-axis labels for investment and capital should both be "trillions of chained dollars" 3. Each axis should have an appropriate title. 
``` # Create figure fig = plt.figure(figsize=(12,8)) # Construct the plot of capital ax1 = fig.add_subplot(2,1,1) ax1.plot(data.index,data.Capital,lw=4,alpha=0.75) ax1.set_ylabel('Trillions of chained Dollars') ax1.set_title('Capital') ax1.grid() # Construct the plot of investment ax2 = fig.add_subplot(2,1,2) ax2.plot(data.index,data.Investment,lw=4,alpha=0.75) ax2.set_ylabel('Trillions of chained Dollars') ax2.set_xlabel('Date') ax2.set_title('Investment') ax2.grid() # Use fig.tight_layout() to adjust whitespace fig.tight_layout() ``` **Question** 1. Why does the computed capital series fluctuate less than the investment data? 2. Given that there are about 325 million people in the US, does the computed value of capital in the last year look large, small, or about right on a per capita basis? **Answer** 1. Because a 10 percent rate of capital depreciation implies an autoregressive coefficient on capital of 0.9 so changes in capital are highly persistent.<!-- answer --> 2. 26 trillion divided by 325 million is about 80,000 which seems to be in the right ballpark. Note that the amount of capital in the US *per worker* will be an order of magnitude higher since there is a substantial share of people not in the workforce: e.g., children, retired persons, institutionalized persons.<!-- answer -->
github_jupyter
# Creation of a new calibrant In this tutorial we will see how to create a new calibrant. For this example we will use one of the components of most paintings: hydrocerussite. The cell parameters are defined in this document: http://rruff.geo.arizona.edu/AMS/AMC_text_files/11987_amc.txt The first step is to record the cell parameters and provide them to pyFAI to define the cell. ``` import pyFAI print("pyFAI version",pyFAI.version) from pyFAI.calibrant import Cell hydroc_hex = Cell.hexagonal(5.24656, 23.7023) #This is an alternative representation, where extinction rules are already definied ... but that's cheating hydroc_rho = Cell.hexagonal(5.24656, 23.7023, lattice_type="R") ``` Hydrocerussite has a rhombohedral crystal structure with space group R-3m (space group 166). The selection rules are rather complicated and are available in: http://img.chem.ucl.ac.uk/sgp/large/166bz2.gif We will set up a function corresponding to the selection rules. It returns True if the reflection is active and False otherwise. 
``` def reflection_condition_166(h,k,l): """from http://img.chem.ucl.ac.uk/sgp/large/166bz2.htm""" if h == 0 and k == 0: # 00l: 3n return l%3 == 0 elif h == 0 and l == 0: # 0k0: k=3n return k%3 == 0 elif k == 0 and l == 0: # h00: h=3n return h%3 == 0 elif h == k: # hhl: l=3n return l%3 == 0 elif l == 0: # hk0: h-k = 3n return (h-k)%3 == 0 elif k == 0: # h0l: h-l = 3n return ((h - l)%3 == 0) elif h == 0: # 0kl: h+l = 3n return ((k + l)%3 == 0) else: # -h + k + l = 3n return (-h + k + l) % 3 == 0 # Use the actual selection rule, not the short version: #cro.selection_rules.append(lambda h, k, l: ((-h + k + l) % 3 == 0)) hydroc_hex.selection_rules.append(reflection_condition_166) for reflex in hydroc_hex.d_spacing(1).values(): print(reflex[0], reflex[1]) print("length is the same: ", len(hydroc_hex.d_spacing(1)) == len(hydroc_rho.d_spacing(1))) ds_hex=list(hydroc_hex.d_spacing(1).keys()) ds_hex.sort() ds_rho=list(hydroc_rho.d_spacing(1).keys()) ds_rho.sort() print("Content is the same:", ds_hex == ds_rho) hydroc_rho.save("hydrocerussite", "basic lead carbonate (R-3m)", dmin=1, doi="https://doi.org/10.1107/S0108270102006844") ``` ## Conclusion This is an advanced tutorial, most user won't have to define their own calibrant. You can also contact the developers to get your own calibrant integrated into pyFAI which makes things easier for you and other users.
github_jupyter
# Deep Learning with PyTorch Step-by-Step: A Beginner's Guide # Chapter 4 ``` from IPython.core.display import display, HTML display(HTML("<style>.container { width:80% !important; }</style>")) try: import google.colab import requests url = 'https://raw.githubusercontent.com/dvgodoy/PyTorchStepByStep/master/config.py' r = requests.get(url, allow_redirects=True) open('config.py', 'wb').write(r.content) except ModuleNotFoundError: pass from config import * config_chapter4() # This is needed to render the plots in this chapter from plots.chapter4 import * import random import numpy as np from PIL import Image import torch import torch.optim as optim import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader, Dataset, random_split, WeightedRandomSampler, SubsetRandomSampler from torchvision.transforms import Compose, ToTensor, Normalize, ToPILImage, RandomHorizontalFlip, Resize import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') %matplotlib inline from data_generation.image_classification import generate_dataset from stepbystep.v0 import StepByStep from plots.chapter4 import * ``` # Classifying Images ## Data Generation ``` images, labels = generate_dataset(img_size=5, n_images=300, binary=True, seed=13) fig = plot_images(images, labels, n_plot=30) ``` ## Images and Channels ``` image_r = np.zeros((5, 5), dtype=np.uint8) image_r[:, 0] = 255 image_r[:, 1] = 128 image_g = np.zeros((5, 5), dtype=np.uint8) image_g[:, 1] = 128 image_g[:, 2] = 255 image_g[:, 3] = 128 image_b = np.zeros((5, 5), dtype=np.uint8) image_b[:, 3] = 128 image_b[:, 4] = 255 image_gray = .2126*image_r + .7152*image_g + .0722*image_b image_rgb = np.stack([image_r, image_g, image_b], axis=2) fig = image_channels(image_r, image_g, image_b, image_rgb, image_gray, rows=(0, 1)) fig = image_channels(image_r, image_g, image_b, image_rgb, image_gray, rows=(0, 2)) ``` ### NCHW vs NHWC ``` images.shape example = images[7] example example_hwc = np.transpose(example, 
(1, 2, 0)) example_hwc.shape example_hwc ``` ## Torchvision ### Transforms ``` tensorizer = ToTensor() example_tensor = tensorizer(example_hwc) example_tensor.shape example_tensor example_img = ToPILImage()(example_tensor) print(type(example_img)) plt.imshow(example_img, cmap='gray') plt.grid(False) ``` ### Transforms on Images ``` flipper = RandomHorizontalFlip(p=1.0) flipped_img = flipper(example_img) plt.imshow(flipped_img, cmap='gray') plt.grid(False) ``` ### Transforms on Tensor ``` img_tensor = tensorizer(flipped_img) img_tensor ``` #### Normalize Transform $$ \Large \begin{aligned} \text{input} = 0 \implies \frac{0 - \text{mean}}{\text{std}}= \frac{0 - 0.5}{0.5}&=-1 \\ \text{input} = 1 \implies \frac{1 - \text{mean}}{\text{std}}= \frac{1 - 0.5}{0.5}&=1 \end{aligned} $$ ``` normalizer = Normalize(mean=(.5,), std=(.5,)) normalized_tensor = normalizer(img_tensor) normalized_tensor ``` ### Composing Transforms ``` composer = Compose([RandomHorizontalFlip(p=1.0), Normalize(mean=(.5,), std=(.5,))]) composed_tensor = composer(example_tensor) (composed_tensor == normalized_tensor).all() print(example) print(example_tensor) example_tensor = torch.as_tensor(example / 255).float() ``` ## Data Preparation ``` # Builds tensors from numpy arrays BEFORE split x_tensor = torch.as_tensor(images / 255).float() y_tensor = torch.as_tensor(labels.reshape(-1, 1)).float() ``` ### Dataset Transforms ``` class TransformedTensorDataset(Dataset): def __init__(self, x, y, transform=None): self.x = x self.y = y self.transform = transform def __getitem__(self, index): x = self.x[index] if self.transform: x = self.transform(x) return x, self.y[index] def __len__(self): return len(self.x) composer = Compose([RandomHorizontalFlip(p=0.5), Normalize(mean=(.5,), std=(.5,))]) dataset = TransformedTensorDataset(x_tensor, y_tensor, transform=composer) ``` ### SubsetRandomSampler ### Helper Function #4 ``` def index_splitter(n, splits, seed=13): idx = torch.arange(n) # Makes the split argument a 
tensor splits_tensor = torch.as_tensor(splits) # Finds the correct multiplier, so we don't have # to worry about summing up to N (or one) multiplier = n / splits_tensor.sum() splits_tensor = (multiplier * splits_tensor).long() # If there is a difference, throws at the first split # so random_split does not complain diff = n - splits_tensor.sum() splits_tensor[0] += diff # Uses PyTorch random_split to split the indices torch.manual_seed(seed) return random_split(idx, splits_tensor) train_idx, val_idx = index_splitter(len(x_tensor), [80, 20]) train_idx train_idx.indices train_sampler = SubsetRandomSampler(train_idx) val_sampler = SubsetRandomSampler(val_idx) # Builds a loader of each set train_loader = DataLoader(dataset=dataset, batch_size=16, sampler=train_sampler) val_loader = DataLoader(dataset=dataset, batch_size=16, sampler=val_sampler) len(iter(train_loader)), len(iter(val_loader)) ``` ### Data Augmentation Transformations ``` x_train_tensor = x_tensor[train_idx] y_train_tensor = y_tensor[train_idx] x_val_tensor = x_tensor[val_idx] y_val_tensor = y_tensor[val_idx] train_composer = Compose([RandomHorizontalFlip(p=.5), Normalize(mean=(.5,), std=(.5,))]) val_composer = Compose([Normalize(mean=(.5,), std=(.5,))]) train_dataset = TransformedTensorDataset(x_train_tensor, y_train_tensor, transform=train_composer) val_dataset = TransformedTensorDataset(x_val_tensor, y_val_tensor, transform=val_composer) # Builds a loader of each set train_loader = DataLoader(dataset=train_dataset, batch_size=16, shuffle=True) val_loader = DataLoader(dataset=val_dataset, batch_size=16) ``` ### WeightedRandomSampler ``` classes, counts = y_train_tensor.unique(return_counts=True) print(classes, counts) weights = 1.0 / counts.float() weights sample_weights = weights[y_train_tensor.squeeze().long()] print(sample_weights.shape) print(sample_weights[:10]) print(y_train_tensor[:10].squeeze()) generator = torch.Generator() sampler = WeightedRandomSampler( weights=sample_weights, 
num_samples=len(sample_weights), generator=generator, replacement=True ) train_loader = DataLoader(dataset=train_dataset, batch_size=16, sampler=sampler) val_loader = DataLoader(dataset=val_dataset, batch_size=16) ``` ### Helper Function #5 ``` def make_balanced_sampler(y): # Computes weights for compensating imbalanced classes classes, counts = y.unique(return_counts=True) weights = 1.0 / counts.float() sample_weights = weights[y.squeeze().long()] # Builds sampler with compute weights generator = torch.Generator() sampler = WeightedRandomSampler( weights=sample_weights, num_samples=len(sample_weights), generator=generator, replacement=True ) return sampler sampler = make_balanced_sampler(y_train_tensor) ``` ### Seeds and more (seeds) ``` train_loader.sampler.generator.manual_seed(42) random.seed(42) torch.tensor([t[1].sum() for t in iter(train_loader)]).sum() def set_seed(self, seed=42): torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) try: self.train_loader.sampler.generator.manual_seed(seed) except AttributeError: pass setattr(StepByStep, 'set_seed', set_seed) ``` ### Putting It Together ``` # Builds tensors from numpy arrays BEFORE split # Modifies the scale of pixel values from [0, 255] to [0, 1] x_tensor = torch.as_tensor(images / 255).float() y_tensor = torch.as_tensor(labels.reshape(-1, 1)).float() # Uses index_splitter to generate indices for training and # validation sets train_idx, val_idx = index_splitter(len(x_tensor), [80, 20]) # Uses indices to perform the split x_train_tensor = x_tensor[train_idx] y_train_tensor = y_tensor[train_idx] x_val_tensor = x_tensor[val_idx] y_val_tensor = y_tensor[val_idx] # Builds different composers because of data augmentation on training set train_composer = Compose([RandomHorizontalFlip(p=.5), Normalize(mean=(.5,), std=(.5,))]) val_composer = Compose([Normalize(mean=(.5,), std=(.5,))]) # Uses custom dataset to apply composed 
transforms to each set train_dataset = TransformedTensorDataset(x_train_tensor, y_train_tensor, transform=train_composer) val_dataset = TransformedTensorDataset(x_val_tensor, y_val_tensor, transform=val_composer) # Builds a weighted random sampler to handle imbalanced classes sampler = make_balanced_sampler(y_train_tensor) # Uses sampler in the training set to get a balanced data loader train_loader = DataLoader(dataset=train_dataset, batch_size=16, sampler=sampler) val_loader = DataLoader(dataset=val_dataset, batch_size=16) ``` ### Pixels as Features ``` dummy_xs, dummy_ys = next(iter(train_loader)) dummy_xs.shape flattener = nn.Flatten() dummy_xs_flat = flattener(dummy_xs) print(dummy_xs_flat.shape) print(dummy_xs_flat[0]) ``` ## Shallow Model $$ \Large \text{P}(y=1) = \sigma(z) = \sigma(w_0x_0+w_1x_1+\cdots+w_{24}x_{24}) $$ ![](images/classification.png) ### Notation $$ \Large W = \underset{(25 \times 1)}{ \begin{bmatrix} w_0 \\ w_1 \\ \vdots \\ w_{24} \end{bmatrix}}; X = \underset{(25 \times 1)}{ \begin{bmatrix} x_0 \\ x_1 \\ \vdots \\ x_{24} \end{bmatrix}} $$ $$ \Large \begin{aligned} z & = W^T \cdot X =\underset{(1 \times 25)}{ \begin{bmatrix} - & w^{T} & -\\ \end{bmatrix}} \underset{(25 \times 1)}{ \begin{bmatrix} x_0 \\ x_1 \\ \vdots \\ x_{24} \end{bmatrix}} = \underset{(1 \times 25)}{ \begin{bmatrix} w_0 & w_1 & \cdots & w_{24} \end{bmatrix}} \underset{(25 \times 1)}{ \begin{bmatrix} x_0 \\ x_1 \\ \vdots \\ x_{24} \end{bmatrix}} \\ & = w_0x_0 + w_1x_1 + \cdots + w_{24}x_{24} \end{aligned} $$ ### Model Configuration ``` # Sets learning rate - this is "eta" ~ the "n" like Greek letter lr = 0.1 torch.manual_seed(17) # Now we can create a model model_logistic = nn.Sequential() model_logistic.add_module('flatten', nn.Flatten()) model_logistic.add_module('output', nn.Linear(25, 1, bias=False)) model_logistic.add_module('sigmoid', nn.Sigmoid()) # Defines a SGD optimizer to update the parameters optimizer_logistic = optim.SGD(model_logistic.parameters(), lr=lr) # 
Defines a binary cross entropy loss function binary_loss_fn = nn.BCELoss() ``` ### Model Training ``` n_epochs = 100 sbs_logistic = StepByStep(model_logistic, binary_loss_fn, optimizer_logistic) sbs_logistic.set_loaders(train_loader, val_loader) sbs_logistic.train(n_epochs) fig = sbs_logistic.plot_losses() ``` ## Deep-ish Model ![](images/classification_equiv.png) ### Model Configuration ``` # Sets learning rate - this is "eta" ~ the "n" like Greek letter lr = 0.1 torch.manual_seed(17) # Now we can create a model model_nn = nn.Sequential() model_nn.add_module('flatten', nn.Flatten()) model_nn.add_module('hidden0', nn.Linear(25, 5, bias=False)) model_nn.add_module('hidden1', nn.Linear(5, 3, bias=False)) model_nn.add_module('output', nn.Linear(3, 1, bias=False)) model_nn.add_module('sigmoid', nn.Sigmoid()) # Defines a SGD optimizer to update the parameters optimizer_nn = optim.SGD(model_nn.parameters(), lr=lr) # Defines a binary cross entropy loss function binary_loss_fn = nn.BCELoss() ``` ### Model Training ``` n_epochs = 100 sbs_nn = StepByStep(model_nn, binary_loss_fn, optimizer_nn) sbs_nn.set_loaders(train_loader, val_loader) sbs_nn.train(n_epochs) fig = sbs_nn.plot_losses() fig = figure5(sbs_logistic, sbs_nn) ``` ### Show Me the Math! 
$$ \large \begin{array}{rcccccccccccc} \text{Hidden}\ \#0 & & & & & & & & \underset{(5 \times 1)}{ \begin{bmatrix} z_{00} \\ z_{01} \\ z_{02} \\ z_{03} \\ z_{04} \\ \end{bmatrix}} & = & \underset{(5 \times 25)}{ \begin{bmatrix} - & w^{T}_{00} & -\\ - & w^{T}_{01} & -\\ - & w^{T}_{02} & -\\ - & w^{T}_{03} & -\\ - & w^{T}_{04} & - \end{bmatrix}} & & \underset{(25 \times 1)}{ \begin{bmatrix} x_0 \\ \vdots \\ x_{11} \\ \vdots \\ x_{24} \end{bmatrix}} \\ \text{Hidden}\ \#1 & & & & \underset{(3 \times 1)}{ \begin{bmatrix} z_{10} \\ z_{11} \\ z_{12} \\ \end{bmatrix}} & = & \underset{(3 \times 5)}{ \begin{bmatrix} - & w^{T}_{10} & -\\ - & w^{T}_{11} & -\\ - & w^{T}_{12} & -\\ \end{bmatrix}} & & \underset{(5 \times 1)}{ \begin{bmatrix} z_{00} \\ z_{01} \\ z_{02} \\ z_{03} \\ z_{04} \\ \end{bmatrix}} \\ \text{Output} & \underset{(1 \times 1)}{ \begin{bmatrix} z_{2} \end{bmatrix}} & = & \underset{(1 \times 3)}{ \begin{bmatrix} - & w^{T}_{20} & -\\ \end{bmatrix}} & \underset{(3 \times 1)}{ \begin{bmatrix} z_{10} \\ z_{11} \\ z_{12} \\ \end{bmatrix}} \\ \hline \text{substituting } z's... 
& \underset{(1 \times 1)}{ \begin{bmatrix} z_{2} \end{bmatrix}} & = & \underbrace{ \underset{(1 \times 3)}{ \begin{bmatrix} - & w^{T}_{20} & -\\ \end{bmatrix}}}_{\text{Output Layer}} & & & \underbrace{ \underset{(3 \times 5)}{ \begin{bmatrix} - & w^{T}_{10} & -\\ - & w^{T}_{11} & -\\ - & w^{T}_{12} & -\\ \end{bmatrix}}}_{\text{Hidden Layer #1}} & & & & \underbrace{ \underset{(5 \times 25)}{ \begin{bmatrix} - & w^{T}_{00} & -\\ - & w^{T}_{01} & -\\ - & w^{T}_{02} & -\\ - & w^{T}_{03} & -\\ - & w^{T}_{04} & - \end{bmatrix}}}_{\text{Hidden Layer #0}} & & \underset{(25 \times 1)}{ \begin{bmatrix} x_0 \\ \vdots \\ x_{11} \\ \vdots \\ x_{24} \end{bmatrix}} \\ \text{multiplying...} & & = & \underbrace{ \underset{(1 \times 25)}{ \begin{bmatrix} - & w^{T} & -\\ \end{bmatrix}}}_{\text{Matrices Multiplied}} & & & & & & & & & \underset{(25 \times 1)}{ \begin{bmatrix} x_0 \\ \vdots \\ x_{11} \\ \vdots \\ x_{24} \end{bmatrix}} \end{array} $$ ### Show Me the Code! ``` w_nn_hidden0 = model_nn.hidden0.weight.detach() w_nn_hidden1 = model_nn.hidden1.weight.detach() w_nn_output = model_nn.output.weight.detach() w_nn_hidden0.shape, w_nn_hidden1.shape, w_nn_output.shape w_nn_equiv = w_nn_output @ w_nn_hidden1 @ w_nn_hidden0 w_nn_equiv.shape w_nn_equiv = w_nn_output.mm(w_nn_hidden1.mm(w_nn_hidden0)) w_logistic_output = model_logistic.output.weight.detach() w_logistic_output.shape fig = weights_comparison(w_logistic_output, w_nn_equiv) def count_parameters(self): return sum(p.numel() for p in self.model.parameters() if p.requires_grad) setattr(StepByStep, 'count_parameters', count_parameters) sbs_logistic.count_parameters(), sbs_nn.count_parameters() ``` ### Weights as Pixels ``` w_nn_hidden0.shape fig = figure7(w_nn_hidden0) ``` ## Activation Functions ### Sigmoid $$ \Large \sigma(z) = \frac{1}{1 + e^{-z}} $$ ``` fig = plot_activation(torch.sigmoid) dummy_z = torch.tensor([-3., 0., 3.]) torch.sigmoid(dummy_z) nn.Sigmoid()(dummy_z) ``` ### Hyperbolic Tangent (Tanh) $$ \Large \sigma(z) = 
\frac{e^z - e^{-z}}{e^z + e^{-z}} $$ ``` fig = plot_activation(torch.tanh) dummy_z = torch.tensor([-3., 0., 3.]) torch.tanh(dummy_z) nn.Tanh()(dummy_z) ``` ### Rectified Linear Unit (ReLU) $$ \Large \begin{aligned} \sigma(z) &= \begin{cases} z,\ \text{if } z \ge 0 \\ 0,\ \text{if } z < 0 \end{cases} \\ & \text{or} \\ \sigma(z) &= \text{max}(0, z) \end{aligned} $$ ``` fig = plot_activation(torch.relu) dummy_z = torch.tensor([-3., 0., 3.]) F.relu(dummy_z) nn.ReLU()(dummy_z) dummy_z.clamp(min=0) ``` ### Leaky ReLU $$ \Large \begin{aligned} \sigma(z) =& \begin{cases} z,\ \text{if } z \ge 0 \\ 0.01z,\ \text{if } z < 0 \end{cases} \\ \text{or}& \\ \sigma(z)=&\text{max}(0,z)+0.01\ \text{min}(0,z) \end{aligned} $$ ``` fig = plot_activation(nn.LeakyReLU(), name='Leaky ReLU') dummy_z = torch.tensor([-3., 0., 3.]) F.leaky_relu(dummy_z, negative_slope=0.01) nn.LeakyReLU(negative_slope=0.02)(dummy_z) ``` ### Parametric ReLU (PReLU) $$ \Large \begin{aligned} \sigma(z) =& \begin{cases} z,\ \text{if } z \ge 0 \\ az,\ \text{if } z < 0 \end{cases} \\ \text{or}& \\ \sigma(z)=&\text{max}(0,z)+a\ \text{min}(0,z) \end{aligned} $$ ``` fig = plot_activation(nn.PReLU(), name='Parametric ReLU') dummy_z = torch.tensor([-3., 0., 3.]) F.prelu(dummy_z, weight=torch.tensor(0.25)) nn.PReLU(init=0.25)(dummy_z) ``` ## Deep Model ![](images/classification_relu2.png) ### Model Configuration ``` # Sets learning rate - this is "eta" ~ the "n" like Greek letter lr = 0.1 torch.manual_seed(17) # Now we can create a model model_relu = nn.Sequential() model_relu.add_module('flatten', nn.Flatten()) model_relu.add_module('hidden0', nn.Linear(25, 5, bias=False)) model_relu.add_module('activation0', nn.ReLU()) model_relu.add_module('hidden1', nn.Linear(5, 3, bias=False)) model_relu.add_module('activation1', nn.ReLU()) model_relu.add_module('output', nn.Linear(3, 1, bias=False)) model_relu.add_module('sigmoid', nn.Sigmoid()) # Defines a SGD optimizer to update the parameters optimizer_relu = 
optim.SGD(model_relu.parameters(), lr=lr) # Defines a binary cross entropy loss function binary_loss_fn = nn.BCELoss() ``` ### Model Training ``` n_epochs = 50 sbs_relu = StepByStep(model_relu, binary_loss_fn, optimizer_relu) sbs_relu.set_loaders(train_loader, val_loader) sbs_relu.train(n_epochs) fig = sbs_relu.plot_losses() fig = figure5b(sbs_logistic, sbs_nn, sbs_relu) ``` ### Show Me the Math Again! $$ \large \begin{array}{rcccccccccccc} \text{Hidden }\#0 & & & & & & & & \underset{(5 \times 1)}{ \begin{bmatrix} z_{00} \\ z_{01} \\ z_{02} \\ z_{03} \\ z_{04} \\ \end{bmatrix}} & = & \underset{(5 \times 25)}{ \begin{bmatrix} - & w^{T}_{00} & -\\ - & w^{T}_{01} & -\\ - & w^{T}_{02} & -\\ - & w^{T}_{03} & -\\ - & w^{T}_{04} & - \end{bmatrix}} & & \underset{(25 \times 1)}{ \begin{bmatrix} x_0 \\ \vdots \\ x_{11} \\ \vdots \\ x_{24} \end{bmatrix}} \\ \text{Hidden }\#1 & & & & \underset{(3 \times 1)}{ \begin{bmatrix} z_{10} \\ z_{11} \\ z_{12} \\ \end{bmatrix}} & = & \underset{(3 \times 5)}{ \begin{bmatrix} - & w^{T}_{10} & -\\ - & w^{T}_{11} & -\\ - & w^{T}_{12} & -\\ \end{bmatrix}} & & \underbrace{ f_0 \underset{(5 \times 1)}{ \left( \begin{bmatrix} z_{00} \\ z_{01} \\ z_{02} \\ z_{03} \\ z_{04} \\ \end{bmatrix} \right)}}_{\text{Activation #0}} \\ \text{Output} & \underset{(1 \times 1)}{ \begin{bmatrix} z_{2} \end{bmatrix}} & = & \underset{(1 \times 3)}{ \begin{bmatrix} - & w^{T}_{20} & -\\ \end{bmatrix}} & \underbrace{ f_1 \underset{(3 \times 1)}{ \left( \begin{bmatrix} z_{10} \\ z_{11} \\ z_{12} \\ \end{bmatrix} \right)}}_{\text{Activation #1}} \\ \hline \text{substituting z's...} & \underset{(1 \times 1)}{ \begin{bmatrix} z_{2} \end{bmatrix}} & = & \underbrace{ \underset{(1 \times 3)}{ \begin{bmatrix} - & w^{T}_{20} & -\\ \end{bmatrix}}}_{\text{Output Layer}} & f_1 & & \left( \underbrace{ \underset{(3 \times 5)}{ \begin{bmatrix} - & w^{T}_{10} & -\\ - & w^{T}_{11} & -\\ - & w^{T}_{12} & -\\ \end{bmatrix}}}_{\text{Hidden Layer #1}} \right. 
& & f_0 & & \left( \underbrace{ \underset{(5 \times 25)}{ \begin{bmatrix} - & w^{T}_{00} & -\\ - & w^{T}_{01} & -\\ - & w^{T}_{02} & -\\ - & w^{T}_{03} & -\\ - & w^{T}_{04} & - \end{bmatrix}}}_{\text{Hidden Layer #0}} \right. & & \left. \left. \underbrace{ \underset{(25 \times 1)}{ \begin{bmatrix} x_0 \\ \vdots \\ x_{11} \\ \vdots \\ x_{24} \end{bmatrix}}}_{\text{Inputs}} \right) \right) \end{array} $$ ## Putting It All Together ``` class TransformedTensorDataset(Dataset): def __init__(self, x, y, transform=None): self.x = x self.y = y self.transform = transform def __getitem__(self, index): x = self.x[index] if self.transform: x = self.transform(x) return x, self.y[index] def __len__(self): return len(self.x) def index_splitter(n, splits, seed=13): idx = torch.arange(n) # Makes the split argument a tensor splits_tensor = torch.as_tensor(splits) # Finds the correct multiplier, so we don't have # to worry about summing up to N (or one) multiplier = n / splits_tensor.sum() splits_tensor = (multiplier * splits_tensor).long() # If there is a difference, throws at the first split # so random_split does not complain diff = n - splits_tensor.sum() splits_tensor[0] += diff # Uses PyTorch random_split to split the indices torch.manual_seed(seed) return random_split(idx, splits_tensor) def make_balanced_sampler(y): # Computes weights for compensating imbalanced classes classes, counts = y.unique(return_counts=True) weights = 1.0 / counts.float() sample_weights = weights[y.squeeze().long()] # Builds sampler with compute weights generator = torch.Generator() sampler = WeightedRandomSampler( weights=sample_weights, num_samples=len(sample_weights), generator=generator, replacement=True ) return sampler # Builds tensors from numpy arrays BEFORE split # Modifies the scale of pixel values from [0, 255] to [0, 1] x_tensor = torch.as_tensor(images / 255).float() y_tensor = torch.as_tensor(labels.reshape(-1, 1)).float() # Uses index_splitter to generate indices for training and # 
validation sets train_idx, val_idx = index_splitter(len(x_tensor), [80, 20]) # Uses indices to perform the split x_train_tensor = x_tensor[train_idx] y_train_tensor = y_tensor[train_idx] x_val_tensor = x_tensor[val_idx] y_val_tensor = y_tensor[val_idx] # Builds different composers because of data augmentation on training set train_composer = Compose([RandomHorizontalFlip(p=.5), Normalize(mean=(.5,), std=(.5,))]) val_composer = Compose([Normalize(mean=(.5,), std=(.5,))]) # Uses custom dataset to apply composed transforms to each set train_dataset = TransformedTensorDataset(x_train_tensor, y_train_tensor, transform=train_composer) val_dataset = TransformedTensorDataset(x_val_tensor, y_val_tensor, transform=val_composer) # Builds a weighted random sampler to handle imbalanced classes sampler = make_balanced_sampler(y_train_tensor) # Uses sampler in the training set to get a balanced data loader train_loader = DataLoader(dataset=train_dataset, batch_size=16, sampler=sampler) val_loader = DataLoader(dataset=val_dataset, batch_size=16) # Sets learning rate - this is "eta" ~ the "n" like Greek letter lr = 0.1 torch.manual_seed(11) # Now we can create a model model_relu = nn.Sequential() model_relu.add_module('flatten', nn.Flatten()) model_relu.add_module('hidden0', nn.Linear(25, 5, bias=False)) model_relu.add_module('activation0', nn.ReLU()) model_relu.add_module('hidden1', nn.Linear(5, 3, bias=False)) model_relu.add_module('activation1', nn.ReLU()) model_relu.add_module('output', nn.Linear(3, 1, bias=False)) model_relu.add_module('sigmoid', nn.Sigmoid()) # Defines a SGD optimizer to update the parameters # (now retrieved directly from the model) optimizer_relu = optim.SGD(model_relu.parameters(), lr=lr) # Defines a binary cross entropy loss function binary_loss_fn = nn.BCELoss() n_epochs = 50 sbs_relu = StepByStep(model_relu, binary_loss_fn, optimizer_relu) sbs_relu.set_loaders(train_loader, val_loader) sbs_relu.train(n_epochs) ```
github_jupyter
```
!pip install transformers
import tensorflow as tf
import numpy as np
import re
import string
import pandas as pd
import tensorflow_datasets as tfds
from transformers import T5Tokenizer, TFT5Model, TFT5ForConditionalGeneration
```
## T5 Tokenizer and Config
```
# T5 tokenizer (leverages SentencePiece and Unicode normalization)
tokenizer = T5Tokenizer.from_pretrained('t5-small')
# End-of-sequence token (eos) used to terminate generated sequences
end_token = tokenizer.eos_token
# Pad token; used below as the decoder start-of-sequence token
start_token = tokenizer.pad_token
```
## Load Data
```
# Load data from Google Drive
# NOTE(review): the column names below carry trailing spaces ('Source ', 'Time ') —
# presumably matching the spreadsheet headers; verify against news.xlsx
news = pd.read_excel("/content/drive/My Drive/news.xlsx")
news.drop(['Source ', 'Time ', 'Publish Date'], axis=1, inplace=True)
print(news.head(100))
```
## Creating Training Examples 
train_data = { 'input_ids': [], 'attention_mask':[], 'decoder_inputs_ids':[], "decoder_attention_mask":[] } # Preprocess and Tokenize for i in news['Short']: # Clean text trainText = preprocessText(i) # From text to tensor ids= tokenizer.encode_plus(trainText) # Find longest text if (max_len_doc < len(ids['input_ids'])): max_len_doc = len(ids['input_ids']) # Get input tokens train_data['input_ids'].append(ids['input_ids']) # Get attention mask train_data['attention_mask'].append(ids['attention_mask']) for i in news['Headline']: # Add start token <pad> in front of summary labelsText= start_token+ " "+ preprocessText(i) decoder_ids = tokenizer.encode_plus(labelsText) if(max_len_sum < len(decoder_ids['input_ids'])): max_len_sum = len(decoder_ids['input_ids']) train_data['decoder_inputs_ids'].append(decoder_ids['input_ids']) train_data['decoder_attention_mask'].append(decoder_ids['attention_mask']) #Convert to array of lists for key in train_data: train_data[key]= np.array(train_data[key]) #Pad sequence to max len train_data['input_ids'] = tf.keras.preprocessing.sequence.pad_sequences(train_data['input_ids'], maxlen= max_len_doc, padding= 'post', truncating='post' ) train_data['attention_mask'] = tf.keras.preprocessing.sequence.pad_sequences(train_data['attention_mask'], maxlen= max_len_doc, padding= 'post', truncating='post') train_data['decoder_inputs_ids'] = tf.keras.preprocessing.sequence.pad_sequences(train_data['decoder_inputs_ids'], maxlen= max_len_sum, padding= 'post', truncating='post') train_data['decoder_attention_mask'] = tf.keras.preprocessing.sequence.pad_sequences(train_data['decoder_attention_mask'], maxlen= max_len_sum, padding= 'post', truncating='post') print ("Number of training examples: ", len(train_data['input_ids']) print ("Max length of tokens of main text:" max_len_doc) print ("Max length of token of sumamry: ", max_len_sum) ``` ## Create Keras Model ``` def createModel (): # T5 Model - Hugging Face T5 = 
TFT5ForConditionalGeneration.from_pretrained('t5-small') task_specific_params = T5.config.task_specific_params if task_specific_params is not None: T5.config.update(task_specific_params.get("summarization", {})) # Inputs input_ids = tf.keras.Input(shape=(max_len_doc,),dtype=tf.int32) decoder_inputs_ids = tf.keras.Input(shape=(max_len_sum-1,),dtype=tf.int32) attention_mask = tf.keras.Input(shape=(max_len_doc,),dtype=tf.int32) decoder_attention_mask = tf.keras.Input(shape=(max_len_sum-1,),dtype=tf.int32) # Get T5 output logits = T5(input_ids, attention_mask = attention_mask, decoder_input_ids=decoder_inputs_ids, decoder_attention_mask= decoder_attention_mask)[0] # return Keras model return tf.keras.Model(inputs= [input_ids, attention_mask, decoder_inputs_ids,decoder_attention_mask], outputs=logits) ``` ## HyperParameters ``` BATCH_SIZE = 64 EPOCHS = 4 LEARNING_RATE = 3e-5 ``` ## Use TPU ``` use_tpu = True if use_tpu: # Create distribution strategy tpu = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) # Create model with strategy.scope(): model = createModel() else: model = createModel() model.summary() ``` ## Train Model ``` loss = tf.losses.SparseCategoricalCrossentropy(from_logits=True) adam_opt = tf.optimizers.Adam (learning_rate= LEARNING_RATE) model.compile(adam_opt, loss= loss, metrics=[loss]) model.fit(x=[train_data['input_ids'], train_data['attention_mask'], train_data['decoder_inputs_ids'][:,:-1], train_data['decoder_attention_mask'][:,:-1]], y= train_data['decoder_inputs_ids'][:,1:], batch_size= BATCH_SIZE, epochs=EPOCHS, verbose=2) ``` ## Test Model using Greedy Algorithm ``` def getSummary(text): # Start summary with <pad> token summary = start_token # Preprocess text text = preprocessText(text) # Convert text to tensor ids = tokenizer.encode_plus(text) input_ids = ids['input_ids'] attention_mask = 
ids['attention_mask'] # Pad text sequence input_ids = tf.keras.preprocessing.sequence.pad_sequences([input_ids], maxlen= max_len_doc, padding= 'post', truncating='post' ) attention_mask = tf.keras.preprocessing.sequence.pad_sequences([attention_mask], maxlen= max_len_doc, padding= 'post', truncating='post') counter = 1 prev_summary ="" while (counter < max_len_sum and len(prev_summary)!=len(summary)): # Convert summary to decoder_ids = tokenizer.encode_plus(summary) decoder_input_ids = decoder_ids['input_ids'] decoder_attention_mask = decoder_ids['attention_mask'] #Pad sequence to max len decoder_inputs_ids = tf.keras.preprocessing.sequence.pad_sequences([decoder_input_ids[:-1]], maxlen= max_len_sum, padding= 'post', truncating='post') decoder_attention_mask = tf.keras.preprocessing.sequence.pad_sequences([decoder_attention_mask[:-1]], maxlen= max_len_sum, padding= 'post', truncating='post') # Decoder of T5 predicts the next word pred = model.predict([input_ids,attention_mask, decoder_inputs_ids, decoder_attention_mask]) # Decode text new_summary = tokenizer.decode(np.argmax(pred, axis=-1)[0,:counter]) prev_summary = summary # Get new summary and prepare it for the next prediction summary = start_token +" "+ new_summary counter+=1 #remove <pad> token return re.sub(r'<pad>',"",summary) getSummary("With your permission we and our partners may use precise geolocation\ data and identification through device scanning. You may click to consent to our\ and our partners’ processing as described above. Alternatively you may access more\ detailed information and change your preferences before consenting or to refuse consenting.") ```
github_jupyter
``` %matplotlib inline ``` # Async optimization Loop Bayesian optimization is used to tune parameters for walking robots or other experiments that are not a simple (expensive) function call. Tim Head, February 2017. Reformatted by Holger Nahrstaedt 2020 .. currentmodule:: skopt They often follow a pattern a bit like this: 1. ask for a new set of parameters 2. walk to the experiment and program in the new parameters 3. observe the outcome of running the experiment 4. walk back to your laptop and tell the optimizer about the outcome 5. go to step 1 A setup like this is difficult to implement with the ***_minimize()** function interface. This is why **scikit-optimize** has a ask-and-tell interface that you can use when you want to control the execution of the optimization loop. This notebook demonstrates how to use the ask and tell interface. ``` print(__doc__) import numpy as np np.random.seed(1234) import matplotlib.pyplot as plt ``` The Setup --------- We will use a simple 1D problem to illustrate the API. This is a little bit artificial as you normally would not use the ask-and-tell interface if you had a function you can call to evaluate the objective. 
```
from skopt.learning import ExtraTreesRegressor
from skopt import Optimizer

# Standard deviation of the observation noise added to the objective.
noise_level = 0.1
```

Our 1D toy problem, this is the function we are trying to minimize

```
def objective(x, noise_level=noise_level):
    """Noisy 1D toy objective; `x` is a 1-element list of floats."""
    return np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2))\
        + np.random.randn() * noise_level
```

Here a quick plot to visualize what the function looks like:

```
# Plot f(x) + contours
plt.set_cmap("viridis")
x = np.linspace(-2, 2, 400).reshape(-1, 1)
fx = np.array([objective(x_i, noise_level=0.0) for x_i in x])
plt.plot(x, fx, "r--", label="True (unknown)")
# Shaded band: +/- 1.96 sigma, i.e. the ~95% noise interval.
plt.fill(np.concatenate([x, x[::-1]]),
         np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx],
                         [fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])),
         alpha=.2, fc="r", ec="None")
plt.legend()
plt.grid()
plt.show()
```

Now we setup the :class:`Optimizer` class. The arguments follow the meaning and naming of the ***_minimize()** functions. An important difference is that you do not pass the objective function to the optimizer.

```
# "ET" = extra-trees surrogate; acquisition optimized by random sampling.
opt = Optimizer([(-2.0, 2.0)], "ET", acq_optimizer="sampling")

# To obtain a suggestion for the point at which to evaluate the objective
# you call the ask() method of opt:
next_x = opt.ask()
print(next_x)
```

In a real world use case you would probably go away and use this parameter in your experiment and come back a while later with the result. In this example we can simply evaluate the objective function and report the value back to the optimizer:

```
f_val = objective(next_x)
opt.tell(next_x, f_val)
```

Like ***_minimize()** the first few points are random suggestions as there is no data yet with which to fit a surrogate model.

```
# Repeat the ask/evaluate/tell cycle a few times to seed the surrogate.
for i in range(9):
    next_x = opt.ask()
    f_val = objective(next_x)
    opt.tell(next_x, f_val)
```

We can now plot the random suggestions and the first model that has been fit:

```
from skopt.acquisition import gaussian_ei


def plot_optimizer(opt, x, fx):
    """Plot the true function, the surrogate model, observations and EI.

    `opt` is a fitted Optimizer; `x` is a column vector of grid points and
    `fx` the noise-free objective values at those points.
    """
    model = opt.models[-1]
    x_model = opt.space.transform(x.tolist())

    # Plot true function.
    plt.plot(x, fx, "r--", label="True (unknown)")
    plt.fill(np.concatenate([x, x[::-1]]),
             np.concatenate([fx - 1.9600 * noise_level,
                             fx[::-1] + 1.9600 * noise_level]),
             alpha=.2, fc="r", ec="None")

    # Plot Model(x) + contours (surrogate mean +/- 1.96 sigma)
    y_pred, sigma = model.predict(x_model, return_std=True)
    plt.plot(x, y_pred, "g--", label=r"$\mu(x)$")
    plt.fill(np.concatenate([x, x[::-1]]),
             np.concatenate([y_pred - 1.9600 * sigma,
                             (y_pred + 1.9600 * sigma)[::-1]]),
             alpha=.2, fc="g", ec="None")

    # Plot sampled points
    plt.plot(opt.Xi, opt.yi,
             "r.", markersize=8, label="Observations")

    # Expected-improvement acquisition over the grid
    acq = gaussian_ei(x_model, model, y_opt=np.min(opt.yi))
    # shift down to make a better plot
    acq = 4 * acq - 2
    plt.plot(x, acq, "b", label="EI(x)")
    plt.fill_between(x.ravel(), -2.0, acq.ravel(), alpha=0.3, color='blue')

    # Adjust plot layout
    plt.grid()
    plt.legend(loc='best')


plot_optimizer(opt, x, fx)
```

Let us sample a few more points and plot the optimizer again:

```
for i in range(10):
    next_x = opt.ask()
    f_val = objective(next_x)
    opt.tell(next_x, f_val)

plot_optimizer(opt, x, fx)
```

By using the :class:`Optimizer` class directly you get control over the optimization loop.

You can also pickle your :class:`Optimizer` instance if you want to end the process running it and resume it later. This is handy if your experiment takes a very long time and you want to shutdown your computer in the meantime:

```
import pickle

with open('my-optimizer.pkl', 'wb') as f:
    pickle.dump(opt, f)

with open('my-optimizer.pkl', 'rb') as f:
    opt_restored = pickle.load(f)
```
github_jupyter
# Week 01, Worksheet 0: Python syntax (`if` statements) <div class="alert alert-block alert-info"> This worksheet will invite you to tinker with the examples, as they are live code cells. Instead of the normal fill-in-the-blank style of notebook, feel free to mess with the code directly. Remember that -- to test things out -- the <a href = "../sandbox/CMPSC%20100%20-%20Week%2000%20-%20Sandbox.ipynb"><b>Sandbox</b></a> is available to you as well. </div> <div class="alert alert-block alert-warning"> While grading, you will see a line referencing the following characters:<br/><br/> <pre>\ud83c\udf89\ud83c\udf89\ud83c\udf89</pre></br> These refer to the characters:</br></br> <pre>🎉🎉🎉</pre> </div> ## `if` I complete this worksheet... `if` statements depart a bit from our traditional Python syntax. Whereas we've been focusing on assignment, and now relative equality, our work takes us into an area of programming which contemplates _how code actually_ runs. Why talk about this now instead of last week? Because we're going to mess with it a bit. ## Flow of control (or control flow) How do we understand the following code snippet to run? ```python # We have five widgets widgets = 5 # The Professor gives us five more widgets += 5 # Due to a complex social situation, we owe 9/10 of our widgets to friends widget -= .90 * widgets # While once rich with widgets, we now have... print(widgets) ``` I can hear you through the internet: TOP TO BOTTOM! You're right. And code will _generally_ still follow this rule, which implies (for the above code): * Variables must be created before we can use them * If the value of a variable changes over time, the most recent assignments "wins" * Whatever the value of the variable is at the end of the code is the final value These will all still be true, but sometimes (depending on circumstances), we can jump ahead a bit. 
## Back to `if` statements Sometimes we want make different decisions in our code based on whether or not a condition is `True`. In this case, we engage branching logic to assist us in our programmatic decision-making by using the `if` statement. This statemet takes the general form of: ```python if CONDITION: # Functionality if true ``` Here, `CONDITION` substitutes for some `boolean` value or expression which _must be true_. Notice also that the line proceeding the `if` portion of the statement is **_indented 4 spaces_**. Indentation is an important part of the Python language: it identifies what _belongs to_ this branch of our "branching logic": the `# Functionality if true` portion should only work if the `CONDITION` true. If not, it skips it. So: ```python if widgets > 5: print("We're better off than we were before!") ``` But, as we know from our example, we're not better off -- we actually only have `1` widget left! We need to be able to accomodate this. ```python if widgets > 5: print("We're better off than we were before!") else: print("Somewhere we lost some widgets...") ``` Here, we use an `else` clause to indicate what to do if the `CONDITION` (in this case, `widgets > 5`) isn't true. Let's say, for sake of example, if we're completely out of widgets we want to do something else. We have the following situation: * If `widgets > 5`, we're rich * If `widgets < 5` but still `widgets > 0`, at least we have a widget left * If `widgets == 0`, we're probably sad How do we model that in code? ```python if widgets > 5: print("We're rich!") elif widgets < 5 and widgets > 0: print("At least we still have one.") else: print("Oh no! No widgets. But, look! The Professor gave us one!") widgets +=1 print(widgets) ``` Okay, okay, so I was nice. But, there are two things to notice about the above code: * There's this new thing called the `elif` or "else if" * We can use as many statements as we want in a branch Both are important. 
First, we can always add as many conditions as we want at any point. We already know about `relational` and `logical` combinations (`widgets < 5 and widgets > 0`), but the `elif` or "else if" allows us to do something else _in very specific cases_. In addition, we can write as many statements or expressions as we want in an `if` clause, as we see in the `else` branch of the statement above. Of course, we could go overboard: ``` # TINKER AWAY! # We have five widgets widgets = 5 # The Professor gives us five more widgets += 5 # Due to a complex social situation, we owe 9/10 of our widgets to friends widgets -= .90 * widgets if widgets > 5: print("We're rich!") elif widgets == 4: print("Not as many as before, but not so bad.") elif widgets > 1: print("Hm. We lost several somewhere...") else: print("Oh no! We only have the 1 widget! But, look -- The Professor gave us another one!") widgets +=1 print("In the end, we have " + str(int(widgets)) + " widgets.") ``` Answer these questions about the above example. Feel free to modify the code above to check your work. #### 1. How many widgets do we need to have to display the message `Not as many as before, but not so bad.`? `TODO` #### 2. How many widgets will trigger the message `Hm. We lost several somewhere...`? `TODO` #### 3. Given what the final value of `widgets` is, what message will ultimately display, and what is the final value of `widgets`? (Hint: I'm nice. Sometimes.)`TODO` `TODO` #### 3. Assign various numbers to the first statement (`widgets = 5`). Is there a way to trick the statement? Why or why not? `TODO` ### What does this have to do with that "flow of control" thing? Good question. Thanks for asking. As we see in our examples above, our code still runs from top to bottom. However, we skip portions of it that do or don't run based on various conditions that vary as to their relative "truthiness." So, we can think of this as a frustration of the flow of control, not a negation of it. 
It diagrams like this: ![If this_diagram](https://cs.allegheny.edu/sites/dluman/cmpsc100/cmpsc-100-if-flow.png) Each of the paths ends at `print(widgets)`, but as we can see the value of `widgets` at that moment is contingent on which "branch" the statement follows. #### A note on assignments This means that, occasionally, we need to assign variables _outside_ of our structures in order to use them -- this follows the rule that variables have to _exist_ before we can modify or call on them. Imagine the following: ```python # We can't do this; truthiness hasn't been created yet if True: truthiness += 1 ``` Here, we have a new options to assign variables to either `0` or `""` values. We can also assign variables to `None` -- essentially, _nothing_. It's kind of like someone in Congress voting "present." To do this, all we have to do is remember our data types, and work accordingly: ```python a_number = 0 a_number = None a_string = "" a_string = None a_boolean = None ```
github_jupyter
# Catalyst segmentation tutorial Authors: [Roman Tezikov](https://github.com/TezRomacH), [Dmitry Bleklov](https://github.com/Bekovmi), [Sergey Kolesnikov](https://github.com/Scitator) [![Catalyst logo](https://raw.githubusercontent.com/catalyst-team/catalyst-pics/master/pics/catalyst_logo.png)](https://github.com/catalyst-team/catalyst) ### Colab setup First of all, do not forget to change the runtime type to GPU. <br/> To do so click `Runtime` -> `Change runtime type` -> Select `"Python 3"` and `"GPU"` -> click `Save`. <br/> After that you can click `Runtime` -> `Run` all and watch the tutorial. ## Requirements Download and install the latest version of catalyst and other libraries required for this tutorial. ``` # this variable will be used in `runner.train` and by default we disable FP16 mode is_fp16_used = False is_alchemy_used = False # for augmentations !pip install albumentations==0.4.3 # for pretrained segmentation models fo PyTorch !pip install segmentation-models-pytorch==0.1.0 # for TTA !pip install ttach==0.0.2 ################ # Catalyst itself !pip install -U catalyst # For specific version of catalyst, uncomment: # ! 
pip install git+http://github.com/catalyst-team/catalyst.git@{master/commit_hash}
################

# for tensorboard
!pip install tensorflow

# for alchemy experiment logging integration, uncomment the 2 lines below
# !pip install -U alchemy
# is_alchemy_used = True

# if your machine supports Apex FP16, uncomment the 3 lines below
# !git clone https://github.com/NVIDIA/apex
# !pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./apex
# is_fp16_used = True
```

### Colab extras – Plotly

To integrate the visualization library `plotly` into colab, run

```
import IPython

def configure_plotly_browser_state():
    """Inject the plotly JS loader into the notebook before every cell run."""
    display(IPython.core.display.HTML('''
        <script src="/static/components/requirejs/require.js"></script>
        <script>
          requirejs.config({
            paths: {
              base: '/static/base',
              plotly: 'https://cdn.plot.ly/plotly-latest.min.js?noext',
            },
          });
        </script>
        '''))

IPython.get_ipython().events.register('pre_run_cell', configure_plotly_browser_state)
```

## Setting up GPUs

```
from typing import Callable, List, Tuple

import os
import torch
import catalyst
from catalyst.dl import utils

print(f"torch: {torch.__version__}, catalyst: {catalyst.__version__}")

# os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # "" - CPU, "0" - 1 GPU, "0,1" - MultiGPU

# Fix seeds and cuDNN determinism for reproducibility.
SEED = 42
utils.set_global_seed(SEED)
utils.prepare_cudnn(deterministic=True)
```

# Reproducibility

[![Alchemy logo](https://raw.githubusercontent.com/catalyst-team/catalyst-pics/master/pics/alchemy_logo.png)](https://github.com/catalyst-team/alchemy)

To make your research more reproducible and easy to monitor, Catalyst has an integration with [Alchemy](https://alchemy.host) – experiment tracking tool for deep learning.

To use monitoring, goto [Alchemy](https://alchemy.host/) and get your personal token.

```
# for alchemy experiment logging integration, uncomment the 2 lines below
# !pip install -U alchemy
# is_alchemy_used = True

if is_alchemy_used:
    monitoring_params = {
        "token": None,  # insert your personal token here
        "project": "segmentation_example",
        "group": "first_trials",
        "experiment": "first_experiment",
    }
    # Fail fast if the token was not filled in.
    assert monitoring_params["token"] is not None
else:
    monitoring_params = None
```

-------

## Dataset

As a dataset we will take Carvana - binary segmentation for the "car" class.

> If you are on MacOS and you don’t have `wget`, you can install it with: `brew install wget`.

After Catalyst installation, `download-gdrive` function become available to download objects from Google Drive.

usage: `download-gdrive {FILE_ID} {FILENAME}`

```
%%bash
download-gdrive 1iYaNijLmzsrMlAdMoUEhhJuo-5bkeAuj segmentation_data.zip
extract-archive segmentation_data.zip &>/dev/null

from pathlib import Path

# Dataset layout after extraction.
ROOT = Path("segmentation_data/")
train_image_path = ROOT / "train"
train_mask_path = ROOT / "train_masks"
test_image_path = ROOT / "test"
```

Collect images and masks into variables.
```
ALL_IMAGES = sorted(train_image_path.glob("*.jpg"))
len(ALL_IMAGES)

ALL_MASKS = sorted(train_mask_path.glob("*.gif"))
len(ALL_MASKS)

import random

import matplotlib.pyplot as plt
import numpy as np
from skimage.io import imread as gif_imread
from catalyst import utils


def show_examples(name: str, image: np.ndarray, mask: np.ndarray):
    """Render an image and its mask side by side in one figure."""
    plt.figure(figsize=(10, 14))
    plt.subplot(1, 2, 1)
    plt.imshow(image)
    plt.title(f"Image: {name}")

    plt.subplot(1, 2, 2)
    plt.imshow(mask)
    plt.title(f"Mask: {name}")


def show(index: int, images: List[Path], masks: List[Path], transforms=None) -> None:
    """Load the image/mask pair at `index`, optionally augment, and display it."""
    image_path = images[index]
    name = image_path.name

    image = utils.imread(image_path)
    mask = gif_imread(masks[index])

    if transforms is not None:
        # albumentations-style callable: returns a dict with transformed pair
        temp = transforms(image=image, mask=mask)
        image = temp["image"]
        mask = temp["mask"]

    show_examples(name, image, mask)


def show_random(images: List[Path], masks: List[Path], transforms=None) -> None:
    """Display a randomly chosen image/mask pair."""
    length = len(images)
    index = random.randint(0, length - 1)
    show(index, images, masks, transforms)
```

You can restart the cell below to see more examples.

```
show_random(ALL_IMAGES, ALL_MASKS)
```

The dataset below reads images and masks and optionally applies augmentation to them.
```
from typing import List

from torch.utils.data import Dataset


class SegmentationDataset(Dataset):
    """Dataset of image paths (and optional mask paths) with optional transforms.

    __getitem__ returns a dict with keys "image", "filename" and,
    when masks are provided, "mask".
    """

    def __init__(
        self,
        images: List[Path],
        masks: List[Path] = None,
        transforms=None
    ) -> None:
        self.images = images
        self.masks = masks
        self.transforms = transforms

    def __len__(self) -> int:
        return len(self.images)

    def __getitem__(self, idx: int) -> dict:
        image_path = self.images[idx]
        image = utils.imread(image_path)

        result = {"image": image}

        # Masks are optional so the same class serves train and test.
        if self.masks is not None:
            mask = gif_imread(self.masks[idx])
            result["mask"] = mask

        if self.transforms is not None:
            result = self.transforms(**result)

        result["filename"] = image_path.name

        return result
```

-------

### Augmentations

[![Albumentation logo](https://albumentations.readthedocs.io/en/latest/_static/logo.png)](https://github.com/albu/albumentations)

The [albumentation](https://github.com/albu/albumentations) library works with images and masks at the same time, which is what we need.

```
import albumentations as albu
from albumentations.pytorch import ToTensor


def pre_transforms(image_size=224):
    """Deterministic resize used for validation/inference."""
    return [albu.Resize(image_size, image_size, p=1)]


def hard_transforms():
    """Aggressive train-time augmentations (rotations, cutout, color jitter)."""
    result = [
        albu.RandomRotate90(),
        albu.Cutout(),
        albu.RandomBrightnessContrast(
            brightness_limit=0.2, contrast_limit=0.2, p=0.3
        ),
        albu.GridDistortion(p=0.3),
        albu.HueSaturationValue(p=0.3)
    ]

    return result


def resize_transforms(image_size=224):
    """Randomly pick one of three ways to get a square image_size crop."""
    BORDER_CONSTANT = 0
    pre_size = int(image_size * 1.5)

    random_crop = albu.Compose([
        albu.SmallestMaxSize(pre_size, p=1),
        albu.RandomCrop(
            image_size, image_size, p=1
        )
    ])

    rescale = albu.Compose([albu.Resize(image_size, image_size, p=1)])

    random_crop_big = albu.Compose([
        albu.LongestMaxSize(pre_size, p=1),
        albu.RandomCrop(
            image_size, image_size, p=1
        )
    ])

    # Converts the image to a square of size image_size x image_size
    result = [
        albu.OneOf([
            random_crop,
            rescale,
            random_crop_big
        ], p=1)
    ]

    return result


def post_transforms():
    # we use ImageNet image normalization
    # and convert it to torch.Tensor
    return [albu.Normalize(), ToTensor()]


def compose(transforms_to_compose):
    # combine all augmentations into one single pipeline
    result = albu.Compose([
        item for sublist in transforms_to_compose for item in sublist
    ])
    return result


train_transforms = compose([
    resize_transforms(),
    hard_transforms(),
    post_transforms()
])
valid_transforms = compose([pre_transforms(), post_transforms()])

# No normalization/tensor conversion here — kept displayable for plotting.
show_transforms = compose([resize_transforms(), hard_transforms()])
```

Let's look at the augmented results. <br/>
You can restart the cell below to see more examples of augmentations.

```
show_random(ALL_IMAGES, ALL_MASKS, transforms=show_transforms)
```

-------

## Loaders

```
import collections

from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader


def get_loaders(
    images: List[Path],
    masks: List[Path],
    random_state: int,
    valid_size: float = 0.2,
    batch_size: int = 32,
    num_workers: int = 4,
    train_transforms_fn = None,
    valid_transforms_fn = None,
) -> dict:
    """Split images/masks into train/valid and wrap them in DataLoaders.

    Returns an OrderedDict with "train" and "valid" loaders, as Catalyst expects.
    """
    indices = np.arange(len(images))

    # Let's divide the data set into train and valid parts.
    train_indices, valid_indices = train_test_split(
        indices, test_size=valid_size, random_state=random_state, shuffle=True
    )

    np_images = np.array(images)
    np_masks = np.array(masks)

    # Creates our train dataset
    train_dataset = SegmentationDataset(
        images = np_images[train_indices].tolist(),
        masks = np_masks[train_indices].tolist(),
        transforms = train_transforms_fn
    )

    # Creates our valid dataset
    valid_dataset = SegmentationDataset(
        images = np_images[valid_indices].tolist(),
        masks = np_masks[valid_indices].tolist(),
        transforms = valid_transforms_fn
    )

    # Catalyst uses normal torch.data.DataLoader
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        drop_last=True,
    )

    # NOTE(review): drop_last=True on the valid loader discards the last
    # partial batch of validation data — confirm this is intentional.
    valid_loader = DataLoader(
        valid_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        drop_last=True,
    )

    # And excpect to get an OrderedDict of loaders
    loaders = collections.OrderedDict()
    loaders["train"] = train_loader
    loaders["valid"] = valid_loader

    return loaders


# Larger batches fit in memory when FP16 is enabled.
if is_fp16_used:
    batch_size = 64
else:
    batch_size = 32

print(f"batch_size: {batch_size}")

loaders = get_loaders(
    images=ALL_IMAGES,
    masks=ALL_MASKS,
    random_state=SEED,
    train_transforms_fn=train_transforms,
    valid_transforms_fn=valid_transforms,
    batch_size=batch_size
)
```

-------

## Experiment

### Model

Catalyst has [several segmentation models](https://github.com/catalyst-team/catalyst/blob/master/catalyst/contrib/models/segmentation/__init__.py#L16) (Unet, Linknet, FPN, PSPnet and their versions with pretrain from Resnet).

> You can read more about them in [our blog post](https://github.com/catalyst-team/catalyst-info#catalyst-info-1-segmentation-models).

But for now let's take the model from [segmentation_models.pytorch](https://github.com/qubvel/segmentation_models.pytorch) (SMP for short). The same segmentation architectures have been implemented in this repository, but there are many more pre-trained encoders.
[![Segmentation Models logo](https://raw.githubusercontent.com/qubvel/segmentation_models.pytorch/master/pics/logo-small-w300.png)](https://github.com/qubvel/segmentation_models.pytorch)

```
import segmentation_models_pytorch as smp

# We will use Feature Pyramid Network with pre-trained ResNeXt50 backbone
model = smp.FPN(encoder_name="resnext50_32x4d", classes=1)
```

### Model training

We will optimize loss as the sum of IoU, Dice and BCE, specifically this function: $IoU + Dice + 0.8*BCE$.

```
from torch import nn

from catalyst.contrib.nn import DiceLoss, IoULoss

# we have multiple criterions
criterion = {
    "dice": DiceLoss(),
    "iou": IoULoss(),
    "bce": nn.BCEWithLogitsLoss()
}

from torch import optim

from catalyst.contrib.nn import RAdam, Lookahead

learning_rate = 0.001
encoder_learning_rate = 0.0005

# Since we use a pre-trained encoder, we will reduce the learning rate on it.
layerwise_params = {"encoder*": dict(lr=encoder_learning_rate, weight_decay=0.00003)}

# This function removes weight_decay for biases and applies our layerwise_params
model_params = utils.process_model_params(model, layerwise_params=layerwise_params)

# Catalyst has new SOTA optimizers out of box
base_optimizer = RAdam(model_params, lr=learning_rate, weight_decay=0.0003)
optimizer = Lookahead(base_optimizer)

# Reduce LR by 4x after 2 epochs without improvement.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.25, patience=2)

from catalyst.dl import SupervisedRunner

num_epochs = 3
logdir = "./logs/segmentation"

device = utils.get_device()
print(f"device: {device}")

if is_fp16_used:
    fp16_params = dict(opt_level="O1")  # params for FP16
else:
    fp16_params = None

print(f"FP16 params: {fp16_params}")

# by default SupervisedRunner uses "features" and "targets",
# in our case we get "image" and "mask" keys in dataset __getitem__
runner = SupervisedRunner(device=device, input_key="image", input_target_key="mask")
```

### Monitoring in tensorboard

If you do not have a Tensorboard opened after you have run the cell below, try running the cell again.

```
%load_ext tensorboard
%tensorboard --logdir {logdir}
```

### Running train-loop

```
from catalyst.dl.callbacks import DiceCallback, IouCallback, \
  CriterionCallback, MetricAggregationCallback

callbacks = [
    # Each criterion is calculated separately.
    CriterionCallback(
        input_key="mask",
        prefix="loss_dice",
        criterion_key="dice"
    ),
    CriterionCallback(
        input_key="mask",
        prefix="loss_iou",
        criterion_key="iou"
    ),
    CriterionCallback(
        input_key="mask",
        prefix="loss_bce",
        criterion_key="bce"
    ),

    # And only then we aggregate everything into one loss.
    MetricAggregationCallback(
        prefix="loss",
        mode="weighted_sum", # can be "sum", "weighted_sum" or "mean"
        # because we want weighted sum, we need to add scale for each loss
        metrics={"loss_dice": 1.0, "loss_iou": 1.0, "loss_bce": 0.8},
    ),

    # metrics
    DiceCallback(input_key="mask"),
    IouCallback(input_key="mask"),
]

if is_alchemy_used:
    from catalyst.dl import AlchemyLogger
    callbacks.append(AlchemyLogger(**monitoring_params))

runner.train(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    scheduler=scheduler,
    # our dataloaders
    loaders=loaders,
    # We can specify the callbacks list for the experiment;
    callbacks=callbacks,
    # path to save logs
    logdir=logdir,
    num_epochs=num_epochs,
    # save our best checkpoint by IoU metric
    main_metric="iou",
    # IoU needs to be maximized.
    minimize_metric=False,
    # for FP16. It uses the variable from the very first cell
    fp16=fp16_params,
    # prints train logs
    verbose=True,
)
```

### Training analysis

The `utils.plot_metrics` method reads tensorboard logs from the logdir and plots beautiful metrics with `plotly` package.

```
# tensorboard should be enought, uncomment to check plotly version
# it can take a while (colab issue)
# utils.plot_metrics(
#     logdir=logdir,
#     # specify which metrics we want to plot
#     metrics=["loss", "accuracy01", "auc/_mean", "f1_score", "_base/lr"]
# )
```

## Model inference

Let's look at model predictions.
```
TEST_IMAGES = sorted(test_image_path.glob("*.jpg"))

# create test dataset (no masks available for test images)
test_dataset = SegmentationDataset(
    TEST_IMAGES,
    transforms=valid_transforms
)

num_workers: int = 4

infer_loader = DataLoader(
    test_dataset,
    batch_size=batch_size,
    shuffle=False,
    num_workers=num_workers
)

# this get predictions for the whole loader
predictions = np.vstack(list(map(
    lambda x: x["logits"].cpu().numpy(),
    runner.predict_loader(loader=infer_loader, resume=f"{logdir}/checkpoints/best.pth")
)))

print(type(predictions))
print(predictions.shape)

threshold = 0.5
max_count = 5

for i, (features, logits) in enumerate(zip(test_dataset, predictions)):
    image = utils.tensor_to_ndimage(features["image"])

    # Binarize the predicted probability map at `threshold`.
    mask_ = torch.from_numpy(logits[0]).sigmoid()
    mask = utils.detach(mask_ > threshold).astype("float")

    show_examples(name="", image=image, mask=mask)
    if i >= max_count:
        break
```

## Model tracing

Catalyst allows you to use Runner to make [tracing](https://pytorch.org/docs/stable/jit.html) models.

> How to do this in the Config API, we wrote in [our blog (issue \#2)](https://github.com/catalyst-team/catalyst-info#catalyst-info-2-tracing-with-torchjit)

For this purpose it is necessary to pass in a method `trace ` model and a batch on which `predict_batch ` will be executed:

```
batch = next(iter(loaders["valid"]))
# saves to `logdir` and returns a `ScriptModule` class
runner.trace(model=model, batch=batch, logdir=logdir, fp16=is_fp16_used)

!ls {logdir}/trace/
```

After this, you can easily load the model and predict anything!

```
from catalyst.dl.utils import trace

if is_fp16_used:
    model = trace.load_traced_model(
        f"{logdir}/trace/traced-forward-opt_O1.pth",
        device="cuda",
        opt_level="O1"
    )
else:
    model = trace.load_traced_model(
        f"{logdir}/trace/traced-forward.pth",
        device="cpu"
    )

model_input = batch["image"].to("cuda" if is_fp16_used else "cpu")
model(model_input)
```

### Advanced: Custom Callbacks

Let's plot the heatmap of predicted masks.
``` import collections from catalyst.dl import Callback, CallbackOrder, IRunner class CustomInferCallback(Callback): def __init__(self): super().__init__(CallbackOrder.Internal) self.heatmap = None self.counter = 0 def on_loader_start(self, runner: IRunner): self.predictions = None self.counter = 0 def on_batch_end(self, runner: IRunner): # data from the Dataloader # image, mask = runner.input["image"], runner.input["mask"] logits = runner.output["logits"] probabilities = torch.sigmoid(logits) self.heatmap = ( probabilities if self.heatmap is None else self.heatmap + probabilities ) self.counter += len(probabilities) def on_loader_end(self, runner: IRunner): self.heatmap = self.heatmap.sum(axis=0) self.heatmap /= self.counter from collections import OrderedDict from catalyst.dl.callbacks import CheckpointCallback infer_loaders = {"infer": loaders["valid"]} model = smp.FPN(encoder_name="resnext50_32x4d", classes=1) device = utils.get_device() if is_fp16_used: fp16_params = dict(opt_level="O1") # params for FP16 else: fp16_params = None runner = SupervisedRunner(device=device, input_key="image", input_target_key="mask") runner.infer( model=model, loaders=infer_loaders, callbacks=OrderedDict([ ("loader", CheckpointCallback(resume=f"{logdir}/checkpoints/best.pth")), ("infer", CustomInferCallback()) ]), fp16=fp16_params, ) import matplotlib %matplotlib inline import matplotlib.pyplot as plt heatmap = utils.detach(runner.runner.callbacks["infer"].heatmap[0]) plt.figure(figsize=(20, 9)) plt.imshow(heatmap, cmap="hot", interpolation="nearest") plt.show() ``` ### Advanced: test-time augmentations (TTA) There is [ttach](https://github.com/qubvel/ttach) is a new awesome library for test-time augmentation for segmentation or classification tasks. ``` import ttach as tta # D4 makes horizontal and vertical flips + rotations for [0, 90, 180, 270] angels. 
# and then merges the result masks with merge_mode="mean" tta_model = tta.SegmentationTTAWrapper(model, tta.aliases.d4_transform(), merge_mode="mean") tta_runner = SupervisedRunner( model=tta_model, device=utils.get_device(), input_key="image" ) infer_loader = DataLoader( test_dataset, batch_size=1, shuffle=False, num_workers=num_workers ) batch = next(iter(infer_loader)) # predict_batch will automatically move the batch to the Runner's device tta_predictions = tta_runner.predict_batch(batch) # shape is `batch_size x channels x height x width` print(tta_predictions["logits"].shape) ``` Let's see our mask after TTA ``` threshold = 0.5 image = utils.tensor_to_ndimage(batch["image"][0]) mask_ = tta_predictions["logits"][0, 0].sigmoid() mask = utils.detach(mask_ > threshold).astype("float") show_examples(name="", image=image, mask=mask) ```
github_jupyter
# Starbucks Capstone Challenge ### Introduction This data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks. Not all users receive the same offer, and that is the challenge to solve with this data set. Your task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products. Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement. You'll be given transactional data showing user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer. Keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer. ### Example To give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. 
If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer. However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the "buy 10 dollars get 2 dollars off offer", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer. ### Cleaning This makes data cleaning especially important and tricky. You'll also want to take into account that some demographic groups will make purchases even if they don't receive an offer. From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. You'll want to try to assess what a certain demographic group will buy when not receiving any offers. ### Final Advice Because this is a capstone project, you are free to analyze the data any way you see fit. For example, you could build a machine learning model that predicts how much someone will spend based on demographics and offer type. Or you could build a model that predicts whether or not someone will respond to an offer. Or, you don't need to build a machine learning model at all. You could develop a set of heuristics that determine what offer you should send to each customer (i.e., 75 percent of women customers who were 35 years old responded to offer A vs 40 percent from the same demographic to offer B, so send offer A). # Data Sets The data is contained in three files: * portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.) 
* profile.json - demographic data for each customer
* transcript.json - records for transactions, offers received, offers viewed, and offers completed

Here is the schema and explanation of each variable in the files:

**portfolio.json**
* id (string) - offer id
* offer_type (string) - type of offer, i.e. BOGO, discount, informational
* difficulty (int) - minimum required spend to complete an offer
* reward (int) - reward given for completing an offer
* duration (int) - time for offer to be open, in days
* channels (list of strings)

**profile.json**
* age (int) - age of the customer
* became_member_on (int) - date when customer created an app account
* gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)
* id (str) - customer id
* income (float) - customer's income

**transcript.json**
* event (str) - record description (i.e. transaction, offer received, offer viewed, etc.)
* person (str) - customer id
* time (int) - time in hours since start of test. The data begins at time t=0
* value - (dict of strings) - either an offer id or transaction amount depending on the record

**Note:** If you are using the workspace, you will need to go to the terminal and run the command `conda update pandas` before reading in the files. This is because the version of pandas in the workspace cannot read in the transcript.json file correctly, but the newest version of pandas can. You can access the terminal from the orange icon in the top left of this notebook. You can see how to access the terminal and how the install works using the two images below. First you need to access the terminal: <img src="pic1.png"/> Then you will want to run the above command: <img src="pic2.png"/> Finally, when you enter back into the notebook (use the jupyter icon again), you should be able to run the below cell without any errors. ``` %%capture !pip install flat-table ``` # 0.
Import libraries and load Starbucks data ``` # standard libraries import pandas as pd pd.options.display.max_columns = None import numpy as np import math, json , re, os import matplotlib.pyplot as plt import seaborn as sns from IPython.display import Image, HTML, display from tqdm import tqdm from os.path import exists from joblib import dump, load from pprint import pprint import flat_table # ML specific classes through sklearn from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer, LabelEncoder, OneHotEncoder, MinMaxScaler from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, RepeatedStratifiedKFold from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.metrics import accuracy_score, f1_score, fbeta_score, make_scorer, confusion_matrix, plot_confusion_matrix # jupyter magic to display plots directly in the notebook %matplotlib inline # use vector graphics format for nicer plots %config Inline.Backend.figure_format = 'svg' plt.style.use('seaborn') # read in the json files portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True) profile = pd.read_json('data/profile.json', orient='records', lines=True) transcript = pd.read_json('data/transcript.json', orient='records', lines=True) ``` # 1. Exploratory Analysis #### Lets look at the data and check out inconsistencies ## 1.1 Profile Dataset ``` display(profile.head(), profile.isna().sum().to_frame().rename(columns={0:'Null count'})) profile.query("age == 118") ``` #### We find there are null values in Gender (along with'O') and Income columns. As we can see above, age is 118 for all rows where null value is present in gender and income columns ``` # let's first rename id to customer_id for more clarity profile = profile.rename(columns={'id':'customer_id'}) # No. 
of users print('profile size: {}'.format(profile['customer_id'].unique().shape[0])) # gender distribution display(profile['gender'].value_counts().to_frame()) # income distribution by gender plt.figure(figsize=(10,6)) sns.histplot(data=profile, x="income", hue="gender", kde=True); plt.tight_layout() # plt.savefig('plots/profile_income_dist.svg') plt.show() # age distribution by gender plt.figure(figsize=(10,6)) sns.histplot(data=profile, x="age", hue="gender", kde=True); plt.tight_layout() # plt.savefig('plots/profile_age_dist.svg') plt.show() # Convert "became_member_on" to standard python datetime format profile['became_member_on'] = pd.to_datetime(profile['became_member_on'], format = '%Y%m%d') profile['became_member_in'] = profile['became_member_on'].apply(lambda x: x.year) # Compute how long the user has been a member of the Starbucks app - let's say the anchor point is say 30-Sep-2020 profile['member_since_in_months'] = (pd.to_datetime("30-Sep-2020") - profile['became_member_on']).astype('timedelta64[M]').astype('int') profile.head() # "became member in" distribution by gender - how many customers are new and how many are long term members? 
members_by_year = profile.groupby('became_member_in')['customer_id'].count().to_frame().rename(columns={'customer_id':'no_of_customers'}) plt.figure(figsize=(10,6)) sns.barplot(data = members_by_year, x=members_by_year.index, y='no_of_customers') plt.tight_layout() # plt.savefig('plots/profile_member_joined_dist.svg') plt.show() # "loyalty" distribution by gender plt.figure(figsize=(10,6)) sns.histplot(data=profile, x="member_since_in_months", kde=True); plt.tight_layout() # plt.savefig('plots/profile_loyalty_dist.svg') plt.show() ``` ## 1.2 Portfolio Dataset ``` display(portfolio.head(), portfolio.isna().sum().to_frame().rename(columns={0:'Null count'})) # let's first rename id to offer_id for more clarity portfolio = portfolio.rename(columns={'id':'offer_id'}) # let's check the number of outstanding offers portfolio_stats = portfolio.groupby('offer_type')['offer_id'].count().to_frame() portfolio_stats.loc["total"] = portfolio_stats.sum(axis=0) portfolio_stats # we can see multiple channels through which offers have been delivered to users - let's check them portfolio['channels'].explode().unique().tolist() # we also see that each offer type has a reward and difficulty columns - would be interesting to see if you get more reward for spending more? :) plt.figure(figsize=(10,6)) sns.scatterplot(data=portfolio, x="difficulty", y="reward", hue="offer_type") plt.tight_layout() # plt.savefig('plots/portfolio_reward_vs_difficulty.svg') plt.show() ``` ## 1.3 Transcript Dataset ``` display(transcript.head(), transcript.isna().sum().to_frame().rename(columns={0:'Null count'})) # let's first rename person to customer_id for more clarity transcript = transcript.rename(columns={'person':'customer_id'}) # No. of users print('transcript size: {}'.format(transcript['customer_id'].unique().shape[0])) ``` #### It looks as though the number of people in transcript are the same as the number of people in the profile dataset, so that is good news. 
``` # what are the different types of values - we're mainly interested in the dictionary keys dict_keys = [list(value.keys()) for value in transcript['value']] # need to flatten list of lists that may be present - occurs when a dictionary has multiple keys set([item for sublist in dict_keys for item in sublist]) # event distribution event_dist = transcript.groupby('event')['customer_id'].count().to_frame() event_dist = event_dist.div(event_dist.sum(axis=0), axis=1).multiply(100) ax = event_dist.rename(columns={'customer_id':'pct'}).sort_values(by='pct', ascending=True).plot(kind='barh', figsize=(10,6), legend=None); plt.title('Events distribution', fontsize=14) plt.xlabel('percentage of total events') plt.tight_layout() # plt.savefig('plots/transcript_event_dist.svg', dpi=300) plt.show() # let's expand values column and get keys as column headers and get rid of the dictionary transcript = pd.concat([transcript.drop(['value'], axis=1), transcript['value'].apply(pd.Series)], axis=1) # there are duplicate offer id and offer_id columns - need to clean them up transcript['offer_id'] = transcript['offer_id'].fillna(transcript['offer id']) transcript = transcript.drop(columns=['offer id']) transcript.head() # check if no. 
of users in transcript is the same as in the profile dataset users_in_transcript = list(transcript['customer_id'].unique()) users_in_profile = list(profile['customer_id'].unique()) in_transcript_but_not_in_profile = [x for x in users_in_transcript if x not in set(users_in_profile)] in_profile_but_not_in_transcript = [x for x in users_in_profile if x not in set(users_in_transcript)] print('Number of users in transcript but not in profile dataset: '+str(len(in_transcript_but_not_in_profile))) print('Number of users in profile but not in transcript dataset: '+str(len(in_profile_but_not_in_transcript))) ``` #### Create a temporary dataset to analyze events and event_type to see which offers do the users prefer ``` temp = transcript.copy() temp = temp.merge(portfolio[['offer_id','offer_type']], how='left', on='offer_id') temp.head() temp_group = temp.groupby(['event','offer_type'])['event'].count().to_frame().rename(columns={'event':'count'}) # get percentages event temp_group['percentage'] = temp_group.div(temp_group.sum(level=0), level=0) temp_group['percentage'].unstack(level=0).plot(kind='barh', subplots=True, figsize=(10,8)); plt.tight_layout() # plt.savefig('plots/transcript_portfolio_offer_preference.svg', dpi=300) plt.show() ``` # 2. 
Data cleaning and preprocessing for machine learning algorithms #### Now lets clean up all three dataframes into a usable format and join them in one single dataframe ### 2.1 Clean profile dataset ``` def clean_preprocess_profile(profile): """ Takes the profile dataframe and cleans it by creating one-hot encodings as well as handling null values """ # We can safely remove null values since they form a very small part of the dataset # Remove customers with null income data profile_copy = profile.copy() profile_copy = profile_copy.dropna(subset=['gender','income']) # Specifying age range min_age = np.int(np.floor(np.min(profile_copy['age'])/10)*10) max_age = np.int(np.ceil(np.max(profile_copy['age'])/10)*10) age_bins = range(min_age, max_age + 10, 10) profile_copy['age_range'] = pd.cut(x=profile_copy['age'], bins=age_bins, right=True).astype('str') # One-hot encode age_range column age_dummies = pd.get_dummies(profile_copy['age_range']) # No need to convert gender to a number, directly one-hot encode it - the order of number does not signify importance customer_gender_map = dict(zip(profile_copy['customer_id'], profile_copy['gender'])) # one-hot encode gender to dummies since its a categorical feature gender_dummies = pd.get_dummies(profile_copy['gender']) # We will also not one-hot encode income range since its a continuous variable - keep original values as feature # the year that a customer became a member in is not uniformly distributed (6th cell) - potential customer differentiator # let's add dummies for that as well year_dummies = pd.get_dummies(profile_copy['became_member_in']) # concat the dummies to the profile dataset and drop the original columns cleaned_profile = (pd.concat([profile_copy, gender_dummies, age_dummies, year_dummies], axis=1, sort=False) .drop(columns=['age','age_range','gender','became_member_on','became_member_in','member_since_in_months'])) return cleaned_profile, customer_gender_map cleaned_profile, customer_gender_map = 
clean_preprocess_profile(profile) cleaned_profile.head() len(list(customer_gender_map.keys())) cleaned_profile.columns ``` ### 2.2 Clean portfolio dataset ``` def clean_preprocess_portfolio(portfolio): """ Takes the portfolio dataframe and cleans it by creating one-hot encodings """ # No null handling required since there are no NULL values # One-hot encode channels column - using sckit-learn module multi_label_binary = MultiLabelBinarizer() multi_label_binary.fit(portfolio['channels']) channels_dummies = pd.DataFrame(multi_label_binary.transform(portfolio['channels']), columns=multi_label_binary.classes_) # One-hot encode offer_type column offer_type_dummies = pd.get_dummies(portfolio['offer_type']) # that's it - concat now portfolio = pd.concat([portfolio, channels_dummies, offer_type_dummies], axis=1, sort=False) \ .drop(columns=['offer_type', 'channels']) return portfolio cleaned_portfolio = clean_preprocess_portfolio(portfolio) cleaned_portfolio.head() ``` ### 2.3 Clean transcript dataset ``` def clean_preprocess_transcript(transcript, profile): """ Transforms the transcript dataframe and clean it by creating one-hot encodings Also split the dataframe into seperate offers and transaction datasets """ # we need to only keep users that are in the cleaned profile (after removal of nulls and age 118) # even though original datasets have the same number of customer ids transcript_copy = transcript.copy() transcript_copy = transcript_copy[transcript_copy['customer_id'].isin(list(profile['customer_id'].unique()))] # Convert time column from hours to days transcript_copy['time'] = transcript_copy['time'] / 24.0 # let's first get dummies for events event_dummies = pd.get_dummies(transcript_copy['event']) event_dummies.columns = [col.replace(' ','_') for col in event_dummies.columns] # now concat to get final dataframe cleaned_transcript = pd.concat([transcript_copy, event_dummies], axis=1, sort=False).drop(columns=['event']) offer_cols = 
['customer_id','offer_id','time','offer_completed','offer_received','offer_viewed'] transac_cols = ['customer_id','time','amount','reward'] cleaned_offer_dataset = cleaned_transcript.query("offer_completed==1 or offer_received==1 or offer_viewed==1")[offer_cols] cleaned_transaction_dataset = cleaned_transcript.query("transaction==1")[transac_cols] return cleaned_transcript, cleaned_offer_dataset, cleaned_transaction_dataset cleaned_transcript, cleaned_offer_dataset, cleaned_transaction_dataset = clean_preprocess_transcript(transcript, cleaned_profile) cleaned_transcript.head() len(cleaned_transcript['customer_id'].unique().tolist()) cleaned_offer_dataset.head() # so, out of 17000 users, 2175 had null data for gender/income => so, 14825 users. Now, out of these, 5 had no offers but they still transacted. # We'll remove these from our offer dataset since they do not add value users_with_no_offers = cleaned_transcript[~cleaned_transcript['customer_id'].isin(list(cleaned_offer_dataset['customer_id'].unique()))] users_with_no_offers.head() print("Unique users with no offers, only transactions: "+str(len(users_with_no_offers['customer_id'].unique().tolist()))) cleaned_transaction_dataset.head() ``` # 3. Data preparation/Feature Engineering for model input ## Observations #### What is an effective offer? 1. For BOGO and discount offers, an effective offer would be defined if the events are defined in this chronological order: **offer received > offer viewed > transaction > offer completed** #### What is not counted as an effective offer? 1. User recieved and viewed the offer but did not transact: **offer received > offer viewed** (no offer completed/transaction events) 2. User received offer but did not do anything: **offer received** (did not even view it) 3. Users who transacted w/o receiving an offer, after the offer is completed, before the offer is received or before the the offer is viewed. 
The different timelines can be summarized as: - **transaction** - **offer received > "do nothing"** (did not even view the offer) - **offer received > transaction > offer completed > offer viewed** - **transaction > offer received > offer completed > offer viewed** - **offer received > transaction > offer viewed > offer completed** - **offer received > transaction** (only applicable to informational offer) - **offer received > transaction > offer viewed** (only applicable to informational offer) (3) can be summarized as - if an offer completed or transaction event occurs before an offer viewed event occurs. ### 3.1 Let's iterate through each customer id and offer_id to label the dataset (computationally intensive) ``` def construct_effective_offer_label(profile, portfolio, offers, transactions): # define the unique customer_ids to loop over customer_ids = offers['customer_id'].unique().tolist() events = ['offer_completed','offer_received','offer_viewed'] all_offers_labeled = [] for _user in tqdm(range(len(customer_ids))): # get customer_id from the list user_id = customer_ids[_user] # get profile data for the user user_profile = profile.query("customer_id == @user_id").drop(columns='customer_id').reset_index(drop=True) # get offer data for the user user_offers_data = offers.query("customer_id == @user_id").drop(columns='customer_id').reset_index(drop=True) # get transaction data for the user user_transactions_data = transactions.query("customer_id == @user_id").drop(columns='customer_id').reset_index(drop=True) offer_received = user_offers_data.query("offer_received == 1").drop(columns=events).reset_index(drop=True) offer_viewed = user_offers_data.query("offer_viewed == 1").drop(columns=events).reset_index(drop=True) offer_completed = user_offers_data.query("offer_completed == 1").drop(columns=events).reset_index(drop=True) # loop over each offer for the particular user all_offers_user = [] for offer in range(len(offer_received)): # fetch offer id for the offer 
offer_id = offer_received.iloc[offer]['offer_id'] # extract offer data from portfolio offer_details = portfolio.query("offer_id == @offer_id").drop(columns=['offer_id']).reset_index(drop=True) # extract offer duration from offer row offer_duration = offer_details['duration'].values[0] # compute offer start and offer end times (in days) start_offer = offer_received.iloc[offer]['time'] end_offer = start_offer + offer_duration # check if offer was viewed/completed and was transcated for the duration of the offer transaction_mask = (user_transactions_data['time'] >= start_offer) & (user_transactions_data['time'] <= end_offer) offer_transactions = user_transactions_data[transaction_mask] offer_viewed_mask = (offer_viewed['time'] >= start_offer) & (offer_viewed['time'] <= end_offer) offer_completed_mask = (offer_completed['time'] >= start_offer) & (offer_completed['time'] <= end_offer) offer_successful_mask = (offer_viewed_mask.sum() > 0) & (offer_completed_mask.sum() > 0) # make a dictionary that describes the current user offer offer_summary = {'effective_offer': int(offer_successful_mask), 'offer_id': offer_id, 'customer_id': user_id, 'time': start_offer, 'total_amount_transacted_for_offer': offer_transactions['amount'].sum()} offer_summary_df = pd.DataFrame([offer_summary]) # prepare to concat the features now from offer_details and user_offers_data offer_labeled = (pd.concat([offer_summary_df, offer_details, user_profile], axis=1, sort=False)) all_offers_user.append(offer_labeled) # aggregate all offers for the user user_offers_labeled = pd.concat(all_offers_user) all_offers_labeled.append(user_offers_labeled) # aggregate for all users/customer_ids all_offers_labeled_df = pd.concat(all_offers_labeled) return all_offers_labeled_df %%time if exists('data/labeled_dataset_with_effective_offer.pkl'): effective_offer_labeled_dataset = pd.read_pickle('data/labeled_dataset_with_effective_offer.pkl') else: effective_offer_labeled_dataset = 
(construct_effective_offer_label(cleaned_profile, cleaned_portfolio, cleaned_offer_dataset, cleaned_transaction_dataset)) effective_offer_labeled_dataset.to_pickle('data/labeled_dataset_with_effective_offer.pkl') effective_offer_labeled_dataset.head() # let's do a quick sanity check on the dataset to see if any nulls creeped in effective_offer_labeled_dataset.isna().sum() # let's check how many offers were sent out to users and how many were actually effective by gender effective_offers = effective_offer_labeled_dataset.copy() effective_offers = effective_offers[['offer_id', 'effective_offer', 'customer_id']] effective_offers['gender'] = effective_offers['customer_id'].map(customer_gender_map) effective_offers = effective_offers.groupby(['offer_id','gender']) \ .agg({'effective_offer': 'sum','customer_id':'count'}) \ .rename(columns={'effective_offer':'total_effective_offers','customer_id':'total_offers'}) effective_offers['pct_effective_offers'] = (effective_offers['total_effective_offers'] / effective_offers['total_offers'])*100 effective_offers # let's make the offer labels a bit more readable offer_label_synthetic = [] for idx in range(0,len(list(set(effective_offers.index.get_level_values('offer_id')))),1): offer_label_synthetic.append("Offer ID "+str(idx+1)) offer_id_dict = dict(zip(list(set(effective_offers.index.get_level_values('offer_id'))),offer_label_synthetic)) effective_offers = effective_offers[['pct_effective_offers']].reset_index().replace({'offer_id':offer_id_dict}) \ .set_index('offer_id').sort_index().reset_index() effective_offers.head() g = sns.catplot(data=effective_offers, x='offer_id', y='pct_effective_offers', hue='gender', kind='bar', legend_out=False) g.fig.set_size_inches(10,8) g.set_xticklabels(rotation=45, horizontalalignment='right').despine(left=True) plt.title("Percentage of effective offers by Gender", fontsize=14) plt.tight_layout() plt.savefig('plots/labeled_dataset_pct_effective_offers.svg') plt.show() ``` ## 3.2 Feature 
Engineering/Data Transformation #### Normalize Numerical Features Our labeled dataset includes both numeric and one hot encoded categorical variables. We will apply minimum/maximum scaling to numeric variables to bring all numeric variables on one scale. This is done so that the size of numeric value does not result in model bias ``` final_dataset = effective_offer_labeled_dataset.copy() # we'll drop columns/features which are not useful for the modeling part columns_to_remove = ['total_amount_transacted_for_offer','time','offer_id','customer_id','informational'] final_dataset = final_dataset.drop(columns=columns_to_remove) final_dataset.head() numeric_columns = ['reward','income','difficulty','duration'] target_label = 'effective_offer' class_label = final_dataset.filter([target_label]) features = final_dataset.drop(columns=target_label) scaler = MinMaxScaler() features[numeric_columns] = scaler.fit_transform(features[numeric_columns]) features_names = features.columns features.head() print("Number of total features: {}\n============================\n".format(len(features.columns))) # Split the features dataset into training and testing sets X_train, X_test, y_train, y_test = train_test_split(features, class_label, test_size = 0.10, # 90/10 split between training/test random_state = 42, # the usual integer value that works as "seed" value for reproduce the same split on each run stratify = class_label) # Display result after split print("results of the split\n============================") print("Training set has {} samples.".format(X_train.shape[0])) print("Testing set has {} samples.".format(X_test.shape[0])) print("\nclass distribution\n============================") print('y_train class distribution\n----------------------------') print(y_train.value_counts(normalize=True)) print('\ny_test class distribution\n----------------------------') print(y_test.value_counts(normalize=True)) y_train = y_train.values.ravel() y_test = y_test.values.ravel() ``` # 4. 
Evaluating/training models for predictions ### 4.1 Benchmark model - Naive Predictor A naive predictor assumes that all customer offers were successful ``` train_accuracy = accuracy_score(y_train,np.ones(len(y_train))) train_f1score = f1_score(y_train,np.ones(len(y_train))) test_accuracy = accuracy_score(y_test,np.ones(len(y_test))) test_f1score = f1_score(y_test,np.ones(len(y_test))) naive_pred_perf = {} naive_pred_perf['NaivePredictor'] = {} naive_pred_perf['NaivePredictor']['train'] = {} naive_pred_perf['NaivePredictor']['test'] = {} naive_pred_perf['NaivePredictor']['train']['accuracy'] = train_accuracy naive_pred_perf['NaivePredictor']['train']['f1score'] = train_f1score naive_pred_perf['NaivePredictor']['test']['accuracy'] = test_accuracy naive_pred_perf['NaivePredictor']['test']['f1score'] = test_f1score pprint(naive_pred_perf) model_dir = "./models" if not os.path.exists(model_dir): os.mkdir(model_dir) scorer = make_scorer(fbeta_score, beta=0.5) ``` ### 4.2 Logistic Regression Perform Random Search Cross Validation on model hyperparameter space. Grid search is a brute force way of finding the optimal parameters because it train and test every possible combination. 
``` lr_model_path = os.path.join(model_dir, 'logistic_regression_model.joblib') if exists(lr_model_path): # if model already exists lr_random = load(lr_model_path) else: # instantiate a logistic regression classifer object lr_classifier = LogisticRegression(random_state=42, solver='liblinear') # grid dictionary grid_parameters = {'penalty': ['l1', 'l2'], 'C': [0.0001,0.001,0.01,0.1,1,10,100,1000]} # smaller C results in a stronger regularization cv = RepeatedStratifiedKFold(n_splits = 10, n_repeats = 3, random_state = 42) lr_random = RandomizedSearchCV(estimator = lr_classifier, param_distributions = grid_parameters, scoring = scorer, cv = cv, verbose = 2, random_state = 42, n_jobs = 4) # use 4 processors for parallel computation lr_random.fit(X_train, y_train) dump(lr_random, lr_model_path) def model_peformance_statistics(classifier, x_train, y_train, x_test, y_test): best_classifier = classifier.best_estimator_ classifier_name = str(best_classifier.__class__).replace("'>""",'').split('.')[-1] predict_y_train = best_classifier.predict(x_train) predict_y_test = best_classifier.predict(x_test) model_perf_stats = {} model_perf_stats[classifier_name] = {} model_perf_stats[classifier_name]['train'] = {} model_perf_stats[classifier_name]['test'] = {} model_perf_stats[classifier_name]['train']['accuracy'] = accuracy_score(y_train, predict_y_train) model_perf_stats[classifier_name]['train']['f1score'] = f1_score(y_train, predict_y_train) model_perf_stats[classifier_name]['test']['accuracy'] = accuracy_score(y_test, predict_y_test) model_perf_stats[classifier_name]['test']['f1score'] = f1_score(y_test, predict_y_test) return classifier.best_params_, model_perf_stats lr_params, lr_perf = model_peformance_statistics(lr_random, X_train, y_train, X_test, y_test) print("Logistic Regression best fit parameters:\n======================================") pprint(lr_params) print("\nModel Performance:\n======================================") pprint(lr_perf) ``` ### 4.3 Random 
# Forest Classifier

rf_model_path = os.path.join(model_dir, 'random_forest_model.joblib')
if exists(rf_model_path):
    # if model already exists, reuse the persisted randomized search
    rf_random = load(rf_model_path)
else:
    # instantiate a random forest classifier obj
    rf_classifier = RandomForestClassifier(random_state=42)
    # Number of trees in random forest - ideal range: log scale from 10 to 1,000.
    n_estimators = [10, 100, 250, 400, 650, 1000]  # [np.int(x) for x in np.logspace(1,3,15)]
    # Number of features to consider at every split
    max_features = ['log2', 'sqrt']
    # Maximum number of levels in tree
    max_depth = [int(x) for x in np.arange(3, 11)]
    max_depth.append(None)
    # Minimum number of samples required to split a node
    min_samples_split = [2, 5, 10]
    # Minimum number of samples required at each leaf node
    min_samples_leaf = [1, 2, 4]
    # grid dictionary
    grid_parameters = {'n_estimators': n_estimators,
                       'max_features': max_features,
                       'max_depth': max_depth,
                       'min_samples_split': min_samples_split,
                       'min_samples_leaf': min_samples_leaf}
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=42)
    rf_random = RandomizedSearchCV(estimator=rf_classifier,
                                   param_distributions=grid_parameters,
                                   scoring=scorer,
                                   n_iter=50,
                                   cv=cv,
                                   verbose=2,
                                   random_state=42,
                                   n_jobs=-1)  # use all processors for parallel computation
    rf_random.fit(X_train, y_train)
    dump(rf_random, rf_model_path)

# (sic: the helper is named model_peformance_statistics elsewhere in the notebook)
rf_params, rf_perf = model_peformance_statistics(rf_random, X_train, y_train, X_test, y_test)
print("Random Forest Classifier best fit hyperparameters:\n======================================")
pprint(rf_params)
print("\nModel Performance:\n======================================")
pprint(rf_perf)

# ### 4.4 Gradient Boosting Classifier

gb_model_path = os.path.join(model_dir, 'gradient_boosting_model.joblib')
if exists(gb_model_path):
    # if model already exists, reuse the persisted randomized search
    gb_random = load(gb_model_path)
else:
    # instantiate a gradient boosting classifier obj
    gb_classifier = GradientBoostingClassifier(random_state=42)
    # Minimum number of samples required to split a node
    min_samples_split = [2, 5, 10]
    # Minimum number of samples required at each leaf node
    min_samples_leaf = [1, 2, 4]
    # grid dictionary
    # NOTE(review): 'deviance'/'exponential' were renamed in newer scikit-learn
    # ('deviance' -> 'log_loss' from 1.1) - confirm against the pinned version.
    grid_parameters = {'loss': ['deviance', 'exponential'],
                       'learning_rate': [0.1, 0.01, 0.001],
                       'n_estimators': [10, 100, 250, 400, 650, 1000],  # [np.int(x) for x in np.logspace(1,3,15)]
                       'max_depth': [3, 7, 9],
                       'subsample': [0.5, 0.7, 1.0],
                       'min_samples_leaf': min_samples_leaf,
                       'min_samples_split': min_samples_split}
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=42)
    gb_random = RandomizedSearchCV(estimator=gb_classifier,
                                   param_distributions=grid_parameters,
                                   scoring=scorer,
                                   n_iter=50,
                                   # FIX: pass the RepeatedStratifiedKFold defined
                                   # above (was hard-coded cv=3, leaving `cv` unused
                                   # and inconsistent with the Random Forest search)
                                   cv=cv,
                                   verbose=2,
                                   random_state=42,
                                   n_jobs=-1)  # use all processors for parallel computation
    gb_random.fit(X_train, y_train)
    dump(gb_random, gb_model_path)

gb_params, gb_perf = model_peformance_statistics(gb_random, X_train, y_train, X_test, y_test)
print("Gradient Boosting Classifier best fit hyperparameters:\n======================================")
pprint(gb_params)
print("\nModel Performance:\n======================================")
pprint(gb_perf)

# ### 4.5 Support Vector Machines (SVM)

svm_model_path = os.path.join(model_dir, 'support_vector_machines_model.joblib')
if exists(svm_model_path):
    # if model already exists, reuse the persisted randomized search
    svm_random = load(svm_model_path)
else:
    # instantiate a svm obj
    svm_classifier = SVC(random_state=42)
    # grid dictionary
    grid_parameters = {'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
                       'C': [100, 10, 1.0, 0.1, 0.001],
                       'gamma': ['scale']}
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=42)
    svm_random = RandomizedSearchCV(estimator=svm_classifier,
                                    param_distributions=grid_parameters,
                                    scoring=scorer,
                                    n_iter=20,
                                    # FIX: pass the defined CV splitter (was cv=3; see note above)
                                    cv=cv,
                                    verbose=2,
                                    random_state=42,
                                    n_jobs=-1)  # use all processors for parallel computation
    svm_random.fit(X_train, y_train)
    dump(svm_random, svm_model_path)

svm_params, svm_perf = model_peformance_statistics(svm_random, X_train, y_train, X_test, y_test)
print("SVM best fit hyperparameters:\n======================================")
pprint(svm_params)
print("\nModel Performance:\n======================================")
pprint(svm_perf)

# ### 4.6 K-Nearest Neighbors (KNN)

knn_model_path = os.path.join(model_dir, 'k_nearest_neighbors_model.joblib')
if exists(knn_model_path):
    # if model already exists, reuse the persisted randomized search
    knn_random = load(knn_model_path)
else:
    # instantiate a KNN obj
    knn_classifier = KNeighborsClassifier()
    # grid dictionary
    grid_parameters = {'n_neighbors': range(1, 21, 2),
                       'weights': ['uniform', 'distance'],
                       'metric': ['euclidean', 'manhattan', 'minkowski']}
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=42)
    knn_random = RandomizedSearchCV(estimator=knn_classifier,
                                    param_distributions=grid_parameters,
                                    scoring=scorer,
                                    n_iter=50,
                                    # FIX: pass the defined CV splitter (was cv=3; see note above)
                                    cv=cv,
                                    verbose=2,
                                    random_state=42,
                                    n_jobs=-1)  # use all processors for parallel computation
    knn_random.fit(X_train, y_train)
    dump(knn_random, knn_model_path)

knn_params, knn_perf = model_peformance_statistics(knn_random, X_train, y_train, X_test, y_test)
print("K-Nearest Neighbors best fit hyperparameters:\n======================================")
pprint(knn_params)
print("\nModel Performance:\n======================================")
pprint(knn_perf)

# 5.
# Model Performance Evaluation and Feature Importance Checks
# ## 5.1 Model Performance - on Accuracy and F1 Score

# Merge the per-model performance dicts into one comparison table.
model_performance_dict = {**naive_pred_perf, **lr_perf, **rf_perf, **gb_perf, **svm_perf, **knn_perf}
model_performance_stats = flat_table.normalize(pd.DataFrame(model_performance_dict).T) \
    .rename(columns={'index': 'Machine Learning Algorithms',
                     'train.f1score': 'Train - F1 Score',
                     'train.accuracy': 'Train - Accuracy',
                     'test.f1score': 'Test - F1 Score',
                     'test.accuracy': 'Test - Accuracy'}) \
    .set_index('Machine Learning Algorithms')
order_of_columns = ['Train - Accuracy', 'Train - F1 Score', 'Test - Accuracy', 'Test - F1 Score']
model_performance_stats = model_performance_stats[order_of_columns].sort_values(by='Test - Accuracy', ascending=True)
model_performance_stats


def evaluate_feature_importance(estimator):
    """Plot and save (to plots/) the relative feature importances of the best
    estimator found by a fitted RandomizedSearchCV object.

    Relies on the module-level ``features_names`` holding the training feature
    names in model column order.
    """
    best_classifier = estimator.best_estimator_
    # FIX: derive the class name robustly instead of string-slicing the repr.
    # The old str(best_classifier.__class__).replace("'>", '').split('.')[-1]
    # produces the same value today but breaks if the repr format changes.
    classifier_name = type(best_classifier).__name__
    title = classifier_name + ' Estimated Feature Importance'
    if classifier_name == 'LogisticRegression':
        # We can look at the coefficients in the coef_ attribute of the fitted
        # Logistic Regression model to see which features are most important.
        # For LogisticRegression, all transform is doing is looking at which
        # coefficients are highest in absolute value.
        # Source: https://stackoverflow.com/questions/24255723/sklearn-logistic-regression-important-features
        classifier_feature_importance = np.abs(best_classifier.coef_)
    else:
        classifier_feature_importance = best_classifier.feature_importances_
    # normalize so importances sum to 1 and are comparable across models
    classifier_feature_importance = classifier_feature_importance / np.sum(classifier_feature_importance)
    # each model has different formats - some even have features that are nested (list of lists)
    if classifier_name == 'LogisticRegression':
        classifier_feature_importance = classifier_feature_importance.tolist()
        classifier_feature_importance = [item for sublist in classifier_feature_importance for item in sublist]
    elif classifier_name == 'RandomForestClassifier':
        classifier_feature_importance = classifier_feature_importance.tolist()
    elif classifier_name == 'GradientBoostingClassifier':
        pass  # already a flat 1-d array
    feature_importance = pd.DataFrame(list(zip(features_names.tolist(), classifier_feature_importance)),
                                      columns=['feature_name', 'classifier_importance']) \
        .sort_values('classifier_importance', ascending=False) \
        .reset_index(drop=True)
    plt.figure(figsize=(10, 6))
    sns.barplot(x='classifier_importance', y='feature_name', data=feature_importance)
    plt.xlabel('Relative Importance of feature for classifier')
    plt.ylabel('Feature Name')
    plt.title(title)
    plt.tight_layout()
    plt.savefig('plots/' + title.lower().replace(' ', '_') + '.svg')

# ## 5.2 Evaluate feature importance for all models (except naive of course)
# Note: Feature Importance for a fitted SVM classifier can be extracted using the
# same method we used for Logistic Regression - features with highest importance
# are the ones whose coef_ coefficients are highest in absolute value. However,
# it only works for SVM classifiers with a linear kernel - but our best fitted
# model uses an 'rbf' kernel. For other kernels it is not possible, because the
# data is transformed by the kernel method to another space, which is not
# related to input space.
# Source: 1.
https://stackoverflow.com/questions/41592661/determining-the-most-contributing-features-for-svm-classifier-in-sklearn 2. https://stackoverflow.com/questions/21260691/scikits-learn-how-to-obtain-features-weight Also, feature importance is not defined for the KNN Classification algorithm. There is no easy way to compute the features responsible for a classification for a KNN fitted model. A hacky, brute force method can work in the following steps - 1. Evaluate the model accuracy based on the original dataset 2. For each feature in the dataset: 1. Make a copy of the dataset 2. Randomly shuffle the current target feature 3. Evaluate the model accuracy based on the dataset with the shuffled feature 4. Compute the difference in the accuracies - this is the feature importance, where higher is better More info here - https://github.com/scikit-learn/scikit-learn/issues/8898
```
# Render and save the feature-importance bar charts for the three models that
# expose importances (KNN and the rbf-kernel SVM do not, as explained above).
evaluate_feature_importance(lr_random)
evaluate_feature_importance(gb_random)
evaluate_feature_importance(rf_random)
```
## 5.3 Check confusion matrix
```
# Map the estimator's numeric class labels (0/1) to human-readable names for
# the plot axes.
class_label_map = {0:'ineffective_offer', 1:'effective_offer'}
_classes = lr_random.best_estimator_.classes_.tolist()
mapped_classes = [class_label_map.get(l) for l in _classes]

# NOTE(review): plot_confusion_matrix was deprecated in scikit-learn 0.24 and
# removed in 1.2 - if upgrading, switch to ConfusionMatrixDisplay.from_estimator.
# normalize='true' normalizes each row (true class), so cells show rates.
fig, ax = plt.subplots(figsize=(10, 6))
disp = plot_confusion_matrix(lr_random.best_estimator_, X_test, y_test,
                             ax = ax,
                             display_labels = mapped_classes,
                             cmap = plt.cm.Blues,normalize = 'true')
disp.ax_.set_title('Confusion Matrix - Logistic Regression')
disp.ax_.grid(False)  # seaborn's whitegrid lines would clutter the heatmap
plt.tight_layout()
plt.savefig('plots/confusion_matrix_logistic_regression.svg')

fig, ax = plt.subplots(figsize=(10, 6))
disp = plot_confusion_matrix(gb_random.best_estimator_, X_test, y_test,
                             ax = ax,
                             display_labels = mapped_classes,
                             cmap = plt.cm.Blues,normalize = 'true')
disp.ax_.set_title('Confusion Matrix - Gradient Boosting Classifier')
disp.ax_.grid(False)
plt.tight_layout()
plt.savefig('plots/confusion_matrix_gradient_boosting_classifier.svg')

fig, ax = plt.subplots(figsize=(10, 6))
disp = plot_confusion_matrix(svm_random.best_estimator_, X_test, y_test,
                             ax = ax,
                             display_labels = mapped_classes,
                             cmap = plt.cm.Blues,normalize = 'true')
disp.ax_.set_title('Confusion Matrix - Support Vector (SV) Classifier')
disp.ax_.grid(False)
plt.tight_layout()
plt.savefig('plots/confusion_matrix_support_vector_classifier.svg')

fig, ax = plt.subplots(figsize=(10, 6))
disp = plot_confusion_matrix(rf_random.best_estimator_, X_test, y_test,
                             ax = ax,
                             display_labels = mapped_classes,
                             cmap = plt.cm.Blues,normalize = 'true')
disp.ax_.set_title('Confusion Matrix - Random Forest Classifier')
disp.ax_.grid(False)
plt.tight_layout()
plt.savefig('plots/confusion_matrix_random_forest_classifier.svg')
```
# 6. Conclusion and Future Improvements ## 6.1 Analysing the results: As we can see from the table in cell 56, in terms of both accuracy and F1 score, the Random Forest model has the best performance, albeit with a narrow margin over the other models, with KNN model performing best on the training dataset but fails to have a high accuracy over the test dataset. We also performed a random search for the best fit hperparameters from a set of values so that we select the best performing parameters for each model. Even then, as you can see, we had a very narrow band of accuracy figures for each model - ranging from 72% to 74%. We know from other examples over the internet that the ensemble methods like Random Forest and Gradient Boosting show a very good performance over the other models. This means that model tuning might not help much here, so we should look into improving the feature selection of the model, i.e. removing sparse features.
The results would indicate that the Random Forest and Logistic Regression models have done the best at not misclassifying negative events as positive (meaning, misclassifying people on which offers are ineffective, as people on which offers would be effective). Gradient Boosting has the highest rate of misclassifying negative events as positive - however, it also has the lowest rate of misclassifying positive events as negative (from the confusion matrix). It (Gradient Boosting) also performs worse than Random Forest, which needs a bit more digging. **Why does Gradient Boosting have worse accuracy/performance than Random Forest?**:\ Conceptually, both random forest and gradient boosting models are a combination of multiple decision trees. A random forest classifier randomly samples the training data with replacement to construct a set of decision trees that are combined using majority voting. In contrast, gradient boosting iteratively constructs a set of decision trees with the goal of reducing the number of misclassified training data samples from the previous iteration. A consequence of these model construction strategies is that the depth of decision trees generated during random forest model training is typically greater than gradient boosting weak learner depth to minimize model variance. Typically, gradient boosting performs better than a random forest classifier. However, gradient boosting may overfit the training data and requires additional effort to tune. A random forest classifier is less prone to overfitting because it constructs decision trees from random training data samples. Also, a random forest classifier's hyperparameters are easier to optimize (Source: https://www.quora.com/How-can-the-performance-of-a-Gradient-Boosting-Machine-be-worse-than-Random-Forests) ## 6.2 Conclusion: All our models except Gradient Boosting are predicting the positive case (i.e. where an offer is effective) more accurately compared to predicting the negative cases (i.e.
where an offer is ineffective), which is expected given the uneven classes. We are not much concerned with the misclassification cases since we don't mind sending people more offers than they would have liked; we would rather not miss anyone on which an offer would have been effective. Given the analysis, we would select Random Forest as the best model to use. We also look at important features for each algorithm - I have detailed parts where the important features can't be extracted due to technical limitations along with their source. "Feature importance" refers to a numerical value that describes a feature's contribution to building a model that maximizes its evaluation metric. A random forest classifier is an example of a model that estimates feature importance during training. The analysis suggests that the top five features based on their importance for an effective offer are: 1. Offer difficulty (how much money a customer must spend to complete an offer) 2. Offer reward 3. Offer duration 4. Customer income 5. Whether a customer created an account on the Starbucks rewards app in 2018 Since the top three features are associated with a customer offer, it may be possible to improve the performance of a random forest model by creating features that describe an offer's success rate as a function of offer difficulty, duration, and reward. These additional features should provide a random forest classifier the opportunity to construct a better decision boundary that separates successful and unsuccessful customer offers. We can also extract features by their relative importance for other models as shown in the analysis section 5.2 ## 6.3 Future Improvements: 1. **Feature engineering step** - To improve model performance, we can try to drop some dummy variables/one-hot encoded features and see how it will affect the model performance. There is also an ongoing debate on the use of one hot encoding when using tree and regression models.
For regression classification models (e.g. logistic regression), we should typically remove one level of the variable in order to prevent multicollinearity between variables. Typically, we should not run into this issue with tree-based models like random forest or gradient boosting. However, there is some debate as to whether one should do it or not. According to some articles (like here: https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/), it is generally not advisable to encode categorical variables as they would generate sparse matrices. The resulting sparsity virtually ensures that continuous variables are assigned higher feature importance. A single level of a categorical variable must meet a very high bar in order to be selected for splitting early in the tree building. This can degrade predictive performance. 2. **Introduce Polynomial features** - A low accuracy score for the models is likely due to the model underfitting, since we have very sparse features. We can try to transform the features into polynomial form (using PolynomialFeatures from the sklearn.preprocessing module), which might further improve model performance. 3. **Add more data** - More data with a better distribution of effective/ineffective offers would have helped achieve a higher accuracy, as is the case with most machine learning algorithms.
github_jupyter
```
# noexport
import os
os.system('export_notebook_pyx tmilib_cython.ipynb')

import numpy
cimport numpy
import tmilib

def dataset_to_feature_vectors(double[:,:] dataset, enabled_feat=None):
    """Convert a raw (label, sinceprev, tonext, fromdomain, todomain) dataset
    into dense feature vectors, keeping only the features flagged on in
    enabled_feat (a '0'/'1' string, a 0/1 sequence, or None for all features).
    """
    # Domain ids of the 20 most-visited domains; comparisons against these
    # yield one indicator feature per top domain.
    cdef long[:] topdomains = numpy.asarray([tmilib.domain_to_id(x) for x in tmilib.top_n_domains_by_visits(20)], dtype=int)
    cdef long num_topdomains = len(topdomains)
    cdef long[:] domain_id_to_productivity = numpy.array(tmilib.get_domain_id_to_productivity(), dtype=numpy.int64)
    cdef long[:] rescuetime_productivity_levels = numpy.array(tmilib.get_rescuetime_productivity_levels(), dtype=int)
    cdef long num_rescuetime_productivity_levels = len(rescuetime_productivity_levels)
    # 3 scalar features + from/to top-domain indicators + from/to productivity indicators
    cdef long num_features = 3 + 2*num_topdomains + 2*num_rescuetime_productivity_levels
    cdef long[:] enabled_features
    if isinstance(enabled_feat, str):
        # FIX: wrap map() in list() - on Python 3, numpy.array(map(...)) builds
        # a 0-d object array (map is an iterator), not the intended 1-d int array.
        enabled_features = numpy.array(list(map(int, enabled_feat)), dtype=int)
    elif enabled_feat is None:
        # FIX: `is None` instead of `== None` - equality raises an ambiguous
        # truth-value error when enabled_feat is a numpy array.
        enabled_features = numpy.array([1]*num_features, dtype=int)
    else:
        enabled_features = numpy.array(enabled_feat, dtype=int)
    cdef long num_enabled_features = len([x for x in enabled_features if x == 1])
    # One output row per dataset row; only the enabled features get columns.
    cdef double[:,:] output = numpy.zeros((len(dataset), num_enabled_features), dtype=float)
    cdef long feature_num, fromdomain_productivity, todomain_productivity
    cdef long label, fromdomain, todomain
    cdef double sinceprev, tonext
    cdef long productivity_idx, domain_idx
    cdef long output_idx
    cdef long cur_idx = 0
    cdef long dataset_len = len(dataset)
    for output_idx in range(dataset_len):
        # dataset columns: label, sinceprev, tonext, fromdomain, todomain
        label = <long>dataset[output_idx, 0]  # read for column-layout clarity; not itself a feature
        sinceprev = dataset[output_idx, 1]
        tonext = dataset[output_idx, 2]
        fromdomain = <long>dataset[output_idx, 3]
        todomain = <long>dataset[output_idx, 4]
        # cur_idx walks the packed output columns; feature_num walks the full
        # (unpacked) feature numbering used by enabled_features.
        cur_idx = 0
        if enabled_features[0]:
            output[output_idx, cur_idx] = sinceprev
            cur_idx += 1
        if enabled_features[1]:
            output[output_idx, cur_idx] = tonext
            cur_idx += 1
        if enabled_features[2]:
            output[output_idx, cur_idx] = fromdomain == todomain
            cur_idx += 1
        feature_num = 3
        for domain_idx in range(num_topdomains):
            if enabled_features[feature_num + domain_idx]:
                output[output_idx, cur_idx] = fromdomain == topdomains[domain_idx]
                cur_idx += 1
        feature_num += num_topdomains
        for domain_idx in range(num_topdomains):
            if enabled_features[feature_num + domain_idx]:
                output[output_idx, cur_idx] = todomain == topdomains[domain_idx]
                cur_idx += 1
        feature_num += num_topdomains
        fromdomain_productivity = domain_id_to_productivity[fromdomain]
        todomain_productivity = domain_id_to_productivity[todomain]
        for productivity_idx in range(num_rescuetime_productivity_levels):
            if enabled_features[feature_num + productivity_idx]:
                output[output_idx, cur_idx] = fromdomain_productivity == rescuetime_productivity_levels[productivity_idx]
                cur_idx += 1
        feature_num += num_rescuetime_productivity_levels
        for productivity_idx in range(num_rescuetime_productivity_levels):
            if enabled_features[feature_num + productivity_idx]:
                output[output_idx, cur_idx] = todomain_productivity == rescuetime_productivity_levels[productivity_idx]
                cur_idx += 1
    return output
```
github_jupyter
# Function Practice Exercises - Solutions
#
# Problems are arranged in increasing difficulty:
# * Warmup - these can be solved using basic comparisons and methods
# * Level 1 - these may involve if/then conditional statements and simple methods
# * Level 2 - these may require iterating over sequences, usually with some kind of loop
# * Challenging - these will take some creativity to solve

# ## WARMUP SECTION:

# #### LESSER OF TWO EVENS: Write a function that returns the lesser of two
# given numbers *if* both numbers are even, but returns the greater if one or
# both numbers are odd
#     lesser_of_two_evens(2,4) --> 2
#     lesser_of_two_evens(2,5) --> 5

def lesser_of_two_evens(a, b):
    """Return the smaller of a and b when both are even, else the larger."""
    if a % 2 or b % 2:  # at least one number is odd
        return max(a, b)
    return min(a, b)

# Check
lesser_of_two_evens(2, 4)
# Check
lesser_of_two_evens(2, 5)

# #### ANIMAL CRACKERS: Write a function takes a two-word string and returns
# True if both words begin with same letter
#     animal_crackers('Levelheaded Llama') --> True
#     animal_crackers('Crazy Kangaroo') --> False

def animal_crackers(text):
    """Return True when both words of *text* begin with the same letter."""
    words = text.split()
    print(words)  # same debug output as the original
    return words[0][0] == words[1][0]

# Check
animal_crackers('Levelheaded Llama')
# Check
animal_crackers('Crazy Kangaroo')

# #### MAKES TWENTY: Given two integers, return True if the sum of the
# integers is 20 *or* if one of the integers is 20. If not, return False
#     makes_twenty(20,10) --> True
#     makes_twenty(12,8) --> True
#     makes_twenty(2,3) --> False

def makes_twenty(n1, n2):
    """Return True if either integer is 20 or the pair sums to 20."""
    return 20 in (n1, n2, n1 + n2)

# Check
makes_twenty(20, 10)
# Check
makes_twenty(12, 8)
# Check
makes_twenty(2, 3)

# # LEVEL 1 PROBLEMS

# #### OLD MACDONALD: Write a function that capitalizes the first and fourth
# letters of a name
#     old_macdonald('macdonald') --> MacDonald
# Note: 'macdonald'.capitalize() returns 'Macdonald'

def old_macdonald(name):
    """Capitalize the first and fourth letters of *name*."""
    if len(name) <= 3:  # too short to have a fourth letter
        return 'Name is too short!'
    return name[:3].capitalize() + name[3:].capitalize()
# Check old_macdonald('macdonald') ``` #### MASTER YODA: Given a sentence, return a sentence with the words reversed master_yoda('I am home') --> 'home am I' master_yoda('We are ready') --> 'ready are We' ``` def master_yoda(text): return ' '.join(text.split()[::-1]) # Check master_yoda('I am home') # Check master_yoda('We are ready') ``` #### ALMOST THERE: Given an integer n, return True if n is within 10 of either 100 or 200 almost_there(90) --> True almost_there(104) --> True almost_there(150) --> False almost_there(209) --> True NOTE: `abs(num)` returns the absolute value of a number ``` def almost_there(n): return ((abs(100 - n) <= 10) or (abs(200 - n) <= 10)) # Check almost_there(90) # Check almost_there(104) # Check almost_there(150) # Check almost_there(209) ``` # LEVEL 2 PROBLEMS #### FIND 33: Given a list of ints, return True if the array contains a 3 next to a 3 somewhere. has_33([1, 3, 3]) → True has_33([1, 3, 1, 3]) → False has_33([3, 1, 3]) → False ``` def has_33(nums): for i in range(0, len(nums)-1): b # nicer looking alternative in commented code #if nums[i] == 3 and nums[i+1] == 3: if nums[i:i+2] == [3,3]: return True return False # Check has_33([1, 3, 3]) # Check has_33([1, 3, 1, 3]) # Check has_33([3, 1, 3]) ``` #### PAPER DOLL: Given a string, return a string where for every character in the original there are three characters paper_doll('Hello') --> 'HHHeeellllllooo' paper_doll('Mississippi') --> 'MMMiiissssssiiippppppiii' ``` def paper_doll(text): result = '' for char in text: result += char * 3 return result # Check paper_doll('Hello') # Check paper_doll('Mississippi') ``` #### BLACKJACK: Given three integers between 1 and 11, if their sum is less than or equal to 21, return their sum. If their sum exceeds 21 *and* there's an eleven, reduce the total sum by 10. 
# Finally, if the sum (even after adjustment) exceeds 21, return 'BUST'
#     blackjack(5,6,7) --> 18
#     blackjack(9,9,9) --> 'BUST'
#     blackjack(9,9,11) --> 19

def blackjack(a, b, c):
    """Score a three-card hand, counting one 11 as 1 when that avoids a bust."""
    total = a + b + c
    if total <= 21:
        return total
    if 11 in (a, b, c) and total - 10 <= 21:
        return total - 10
    return 'BUST'

# Check
blackjack(5, 6, 7)
# Check
blackjack(9, 9, 9)
# Check
blackjack(9, 9, 11)

# #### SUMMER OF '69: Return the sum of the numbers in the array, except
# ignore sections of numbers starting with a 6 and extending to the next 9
# (every 6 will be followed by at least one 9). Return 0 for no numbers.
#     summer_69([1, 3, 5]) --> 9
#     summer_69([4, 5, 6, 7, 8, 9]) --> 9
#     summer_69([2, 1, 6, 9, 11]) --> 14

def summer_69(arr):
    """Sum arr, skipping each run that starts at a 6 and ends at the next 9."""
    total = 0
    skipping = False
    for value in arr:
        if skipping:
            if value == 9:  # the 9 ends the ignored section (and is not added)
                skipping = False
        elif value == 6:
            skipping = True
        else:
            total += value
    return total

# Check
summer_69([1, 3, 5])
# Check
summer_69([4, 5, 6, 7, 8, 9])
# Check
summer_69([2, 1, 6, 9, 11])

# # CHALLENGING PROBLEMS

# #### SPY GAME: Write a function that takes in a list of integers and
# returns True if it contains 007 in order
#     spy_game([1,2,4,0,0,7,5]) --> True
#     spy_game([1,0,2,4,0,5,7]) --> True
#     spy_game([1,7,2,0,4,5,0]) --> False

def spy_game(nums):
    """Return True if 0, 0, 7 appear in nums in order (not necessarily adjacent)."""
    pattern = (0, 0, 7)
    matched = 0  # how much of the pattern has been seen so far
    for num in nums:
        if matched < 3 and num == pattern[matched]:
            matched += 1
    return matched == 3

# Check
spy_game([1, 2, 4, 0, 0, 7, 5])
# Check
spy_game([1, 0, 2, 4, 0, 5, 7])
# Check
spy_game([1, 7, 2, 0, 4, 5, 0])

# #### COUNT PRIMES: Write a function that returns the *number* of prime
# numbers that exist up to and including a given number
#     count_primes(100) --> 25
# By convention, 0 and 1 are not prime.
def count_primes(num):
    """Count primes up to and including num by trial division over odd factors."""
    if num < 2:  # for the case of num = 0 or 1
        return 0
    primes = [2]
    candidate = 3
    while candidate <= num:
        # prime iff no odd factor in 3..candidate-1 divides it
        if all(candidate % factor for factor in range(3, candidate, 2)):
            primes.append(candidate)
        candidate += 2  # only odd candidates can be prime past 2
    print(primes)
    return len(primes)

def count_factor(num):
    """Print every factor of num and how many there are."""
    factor = [divisor for divisor in range(1, num + 1) if num % divisor == 0]
    print(factor)
    print("The total number of factors are", len(factor))

count_factor(100)
count_factor(50)

# Check
count_primes(11)

# BONUS: Here's a faster version that makes use of the prime numbers we're
# collecting as we go!

def count_primes2(num):
    """Count primes up to num, testing candidates only against earlier primes."""
    if num < 2:
        return 0
    primes = [2]
    candidate = 3
    while candidate <= num:
        if all(candidate % prime for prime in primes):  # use the primes list!
            primes.append(candidate)
        candidate += 2
    print(primes)
    return len(primes)

count_primes2(100)

# -----
# ### Just for fun, not a real problem :)
# #### PRINT BIG: Write a function that takes in a single letter, and returns
# a 5x5 representation of that letter.
# HINT: Consider making a dictionary of possible patterns, and mapping the
# alphabet to specific 5-line combinations of patterns.
# For purposes of this exercise, it's ok if your dictionary stops at "E".

def print_big(letter):
    """Print a 5-row asterisk rendering of a letter (dictionary covers A-E)."""
    patterns = {1:' * ',2:' * * ',3:'* *',4:'*****',5:'**** ',6:' * ',7:' * ',8:'* * ',9:'* '}
    alphabet = {'A':[1,2,4,3,3],'B':[5,3,5,3,5],'C':[4,9,9,9,4],'D':[5,3,3,3,5],'E':[4,9,4,9,4]}
    for row in alphabet[letter.upper()]:
        print(patterns[row])

print_big('d')

x = [1, 2, 3]
y = [i ** 2 for i in x]
print(y)

# ## Great Job!
github_jupyter
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> <script> window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); </script> # Generating C Code for the Scalar Wave Equation in Cartesian Coordinates ## Authors: Zach Etienne & Thiago Assumpção ### Formatting improvements courtesy Brandon Clark ## This module generates the C Code for the Scalarwave in Cartesian coordinates and sets up either monochromatic plane wave or spherical Gaussian [Initial Data](https://en.wikipedia.org/wiki/Initial_value_problem). **Notebook Status:** <font color='green'><b> Validated </b></font> **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented below ([right-hand-side expressions](#code_validation1); [initial data expressions](#code_validation2)). In addition, all expressions have been validated against a trusted code (the [original SENR/NRPy+ code](https://bitbucket.org/zach_etienne/nrpy)). ### NRPy+ Source Code for this module: * [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py) * [ScalarWave/InitialData.py](../edit/ScalarWave/InitialData.py) ## Introduction: ### Problem Statement We wish to numerically solve the scalar wave equation as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem) in Cartesian coordinates: $$\partial_t^2 u = c^2 \nabla^2 u \text{,}$$ where $u$ (the amplitude of the wave) is a function of time and space: $u = u(t,x,y,...)$ (spatial dimension as-yet unspecified) and $c$ is the wave speed, subject to some initial condition $$u(0,x,y,...) = f(x,y,...)$$ and suitable spatial boundary conditions. As described in the next section, we will find it quite useful to define $$v(t,x,y,...) 
= \partial_t u(t,x,y,...).$$ In this way, the second-order PDE is reduced to a set of two coupled first-order PDEs \begin{align} \partial_t u &= v \\ \partial_t v &= c^2 \nabla^2 u. \end{align} We will use NRPy+ to generate efficient C codes capable of generating both initial data $u(0,x,y,...) = f(x,y,...)$; $v(0,x,y,...)=g(x,y,...)$, as well as finite-difference expressions for the right-hand sides of the above expressions. These expressions are needed within the *Method of Lines* to "integrate" the solution forward in time. ### The Method of Lines Once we have initial data, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and 2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs)**, so long as the initial value problem PDE can be written in the form $$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$ where $\mathbf{M}$ is an $N\times N$ matrix filled with differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$. The scalar wave equation as written in the [previous module](Tutorial-ScalarWave.ipynb) \begin{equation} \partial_t \begin{bmatrix} u \\ v \end{bmatrix}= \begin{bmatrix} 0 & 1 \\ c^2 \nabla^2 & 0 \end{bmatrix} \begin{bmatrix} u \\ v \end{bmatrix} \end{equation} satisfies this requirement. Thus we can treat the spatial derivatives $\nabla^2 u$ of the scalar wave equation using **standard finite-difference approaches**, and the temporal derivatives $\partial_t u$ and $\partial_t v$ using **standard approaches for solving ODEs**. 
In [the next module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will apply the highly robust [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4), used widely for numerically solving ODEs, to "march" (integrate) the solution vector $\vec{f}$ forward in time from its initial value ("initial data"). ### Basic Algorithm The basic algorithm for solving the scalar wave equation [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem), based on the Method of Lines (see section above) is outlined below, with NRPy+-based components highlighted in <font color='green'>green</font>. We will review how NRPy+ generates these core components in this module. 1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration. 1. <font color='green'>Set gridfunction values to initial data.</font> 1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. <font color='green'>Evaluate scalar wave RHS expressions.</font> 1. Apply boundary conditions. **We refer to the right-hand side of the equation $\partial_t \vec{f} = \mathbf{M}\ \vec{f}$ as the RHS. In this case, we refer to the $\mathbf{M}\ \vec{f}$ as the "scalar wave RHSs".** In the following sections we will 1. Use NRPy+ to cast the scalar wave RHS expressions -- in finite difference form -- into highly efficient C code, 1. first in one spatial dimension with fourth-order finite differences, 1. and then in three spatial dimensions with tenth-order finite differences. 1. Use NRPy+ to generate monochromatic plane-wave initial data for the scalar wave equation, where the wave propagates in an arbitrary direction. As for the $\nabla^2 u$ term, spatial derivatives are handled in NRPy+ via [finite differencing](https://en.wikipedia.org/wiki/Finite_difference). We will sample the solution $\{u,v\}$ at discrete, uniformly-sampled points in space and time. 
For simplicity, let's assume that we consider the wave equation in one spatial dimension. Then the solution at any sampled point in space and time is given by $$u^n_i = u(t_n,x_i) = u(t_0 + n \Delta t, x_0 + i \Delta x),$$ where $\Delta t$ and $\Delta x$ represent the temporal and spatial resolution, respectively. $v^n_i$ is sampled at the same points in space and time. <a id='toc'></a> # Table of Contents $$\label{toc}$$ 1. [Step 1](#initializenrpy): Initialize core NRPy+ modules 1. [Step 2](#rhss1d): Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing 1. [Step 3](#rhss3d): Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing 1. [Step 3.a](#code_validation1): Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module 1. [Step 4](#id): Setting up Initial Data for the Scalar Wave Equation 1. [Step 4.a](#planewave): The Monochromatic Plane-Wave Solution 1. [Step 4.b](#sphericalgaussian): The Spherical Gaussian Solution (*Courtesy Thiago Assumpção*) 1. [Step 5](#code_validation2): Code Validation against `ScalarWave.InitialData` NRPy+ module 1. [Step 6](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file <a id='initializenrpy'></a> # Step 1: Initialize core NRPy+ modules \[Back to [top](#toc)\] $$\label{initializenrpy}$$ Let's start by importing all the needed modules from NRPy+: ``` # Step P1: Import needed NRPy+ core modules: import NRPy_param_funcs as par # NRPy+: Parameter interface import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) 
support import grid as gri # NRPy+: Functions having to do with numerical grids import finite_difference as fin # NRPy+: Finite difference C code generation module from outputC import lhrh # NRPy+: Core C code output module import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends ``` <a id='rhss1d'></a> # Step 2: Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing \[Back to [top](#toc)\] $$\label{rhss1d}$$ To minimize complication, we will first restrict ourselves to solving the wave equation in one spatial dimension, so $$\nabla^2 u = \partial_x^2 u.$$ Extension of this operator to higher spatial dimensions is straightforward, particularly when using NRPy+. As was discussed in [the finite difference section of the tutorial](Tutorial-Finite_Difference_Derivatives.ipynb), NRPy+ approximates derivatives using [finite difference methods](), the second-order derivative $\partial_x^2$ accurate to fourth-order in uniform grid spacing $\Delta x$ (from fitting the unique 4th-degree polynomial to 5 sample points of $u$) is given by \begin{equation} \left[\partial_x^2 u(t,x)\right]_j = \frac{1}{(\Delta x)^2} \left( -\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right) - \frac{5}{2} u_j \right) + \mathcal{O}\left((\Delta x)^4\right). \end{equation} ``` # Step P2: Define the C parameter wavespeed. The `wavespeed` # variable is a proper SymPy variable, so it can be # used in below expressions. In the C code, it acts # just like a usual parameter, whose value is # specified in the parameter file. thismodule = "ScalarWave" wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0) # Step 1: Set the spatial dimension parameter, and then read # the parameter as DIM. par.set_parval_from_str("grid::DIM",1) DIM = par.parval_from_str("grid::DIM") # Step 2: Set the finite differencing order to 4. 
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4) # Step 3: Register gridfunctions that are needed as input # to the scalar wave RHS expressions. uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"]) # Step 4: Declare the rank-2 indexed expression \partial_{ij} u, # which is symmetric about interchange of indices i and j # Derivative variables like these must have an underscore # in them, so the finite difference module can parse the # variable name properly. uu_dDD = ixp.declarerank2("uu_dDD","sym01") # Step 5: Define right-hand sides for the evolution. uu_rhs = vv vv_rhs = 0 for i in range(DIM): vv_rhs += wavespeed*wavespeed*uu_dDD[i][i] vv_rhs = sp.simplify(vv_rhs) # Step 6: Generate C code for scalarwave evolution equations, # print output to the screen (standard out, or stdout). fin.FD_outputC("stdout", [lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs), lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)]) ``` **Success!** Notice that indeed NRPy+ was able to compute the spatial derivative operator, \begin{equation} \left[\partial_x^2 u(t,x)\right]_j \approx \frac{1}{(\Delta x)^2} \left( -\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right) - \frac{5}{2} u_j \right), \end{equation} correctly (easier to read in the "Original SymPy expressions" comment block at the top of the C output. Note that `invdx0`$=1/\Delta x_0$, where $\Delta x_0$ is the (uniform) grid spacing in the zeroth, or $x_0$ direction. <a id='rhss3d'></a> # Step 3: Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing \[Back to [top](#toc)\] $$\label{rhss3d}$$ Let's next repeat the same process, only this time at **10th** finite difference order, for the **3-spatial-dimension** scalar wave equation, with SIMD enabled: ``` # Step 1: Define the C parameter wavespeed. The `wavespeed` # variable is a proper SymPy variable, so it can be # used in below expressions. 
In the C code, it acts # just like a usual parameter, whose value is # specified in the parameter file. wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0) # Step 2: Set the spatial dimension parameter # to *FOUR* this time, and then read # the parameter as DIM. par.set_parval_from_str("grid::DIM",3) DIM = par.parval_from_str("grid::DIM") # Step 3: Set the finite differencing order to 10. par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",10) # Step 4a: Reset gridfunctions registered in 1D case above, # to avoid NRPy+ throwing an error about double- # registering gridfunctions, which is not allowed. gri.glb_gridfcs_list = [] # Step 4b: Register gridfunctions that are needed as input # to the scalar wave RHS expressions. uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"]) # Step 5: Declare the rank-2 indexed expression \partial_{ij} u, # which is symmetric about interchange of indices i and j # Derivative variables like these must have an underscore # in them, so the finite difference module can parse the # variable name properly. uu_dDD = ixp.declarerank2("uu_dDD","sym01") # Step 6: Define right-hand sides for the evolution. uu_rhs = vv vv_rhs = 0 for i in range(DIM): vv_rhs += wavespeed*wavespeed*uu_dDD[i][i] # Step 7: Simplify the expression for c^2 \nabla^2 u (a.k.a., vv_rhs): vv_rhs = sp.simplify(vv_rhs) # Step 8: Generate C code for scalarwave evolution equations, # print output to the screen (standard out, or stdout). fin.FD_outputC("stdout", [lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs), lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)],params="SIMD_enable=True") ``` <a id='code_validation1'></a> ## Step 3.a: Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module \[Back to [top](#toc)\] $$\label{code_validation1}$$ Here, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the three-spatial-dimension Scalar Wave equation (i.e., `uu_rhs` and `vv_rhs`) between 1. 
this tutorial and 2. the [NRPy+ ScalarWave.ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) module. ``` # Step 10: We already have SymPy expressions for uu_rhs and vv_rhs in # terms of other SymPy variables. Even if we reset the list # of NRPy+ gridfunctions, these *SymPy* expressions for # uu_rhs and vv_rhs *will remain unaffected*. # # Here, we will use the above-defined uu_rhs and vv_rhs to # validate against the same expressions in the # ScalarWave/ScalarWave_RHSs.py module, # to ensure consistency between this tutorial # (historically speaking, the tutorial was written first) # and the ScalarWave_RHSs.py module itself. # # Reset the list of gridfunctions, as registering a gridfunction # twice will spawn an error. gri.glb_gridfcs_list = [] # Step 11: Call the ScalarWave_RHSs() function from within the # ScalarWave/ScalarWave_RHSs.py module, # which should do exactly the same as in Steps 1-10 above. import ScalarWave.ScalarWave_RHSs as swrhs swrhs.ScalarWave_RHSs() # Step 12: Consistency check between the tutorial notebook above # and the ScalarWave_RHSs() function from within the # ScalarWave/ScalarWave_RHSs.py module. print("Consistency check between ScalarWave tutorial and NRPy+ module:") print("uu_rhs - swrhs.uu_rhs = "+str(sp.simplify(uu_rhs - swrhs.uu_rhs))+"\t\t (should be zero)") print("vv_rhs - swrhs.vv_rhs = "+str(sp.simplify(vv_rhs - swrhs.vv_rhs))+"\t\t (should be zero)") ``` <a id='id'></a> # Step 4: Setting up Initial Data for the Scalar Wave Equation \[Back to [top](#toc)\] $$\label{id}$$ <a id='planewave'></a> ## Step 4.a: The Monochromatic Plane-Wave Solution \[Back to [top](#toc)\] $$\label{planewave}$$ The solution to the scalar wave equation for a monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction is $$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$ where $\hat{k}$ is a unit vector. 
We choose $f(\hat{k}\cdot\vec{x} - c t)$ to take the form $$ f(\hat{k}\cdot\vec{x} - c t) = \sin\left(\hat{k}\cdot\vec{x} - c t\right) + 2, $$ where we add the $+2$ to ensure that the exact solution never crosses through zero. In places where the exact solution passes through zero, the relative error (i.e., the measure of error to compare numerical with exact results) is undefined. Also, $f(\hat{k}\cdot\vec{x} - c t)$ plus a constant is still a solution to the wave equation. ``` # Step 1: Set parameters defined in other modules xx = gri.xx # Sets the Cartesian coordinates xx[0]=x; xx[1]=y; xx[2]=z # Step 2: Declare free parameters intrinsic to these initial data time = par.Cparameters("REAL", thismodule, "time",0.0) kk = par.Cparameters("REAL", thismodule, ["kk0", "kk1", "kk2"],[1.0,1.0,1.0]) # Step 3: Normalize the k vector kk_norm = sp.sqrt(kk[0]**2 + kk[1]**2 + kk[2]**2) # Step 4: Compute k.x dot_product = sp.sympify(0) for i in range(DIM): dot_product += xx[i]*kk[i] dot_product /= kk_norm # Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID. uu_ID_PlaneWave = sp.sin(dot_product - wavespeed*time)+2 vv_ID_PlaneWave = sp.diff(uu_ID_PlaneWave, time) ``` Next we verify that $f(\hat{k}\cdot\vec{x} - c t)$ satisfies the wave equation, by computing $$\left(c^2 \nabla^2 - \partial_t^2 \right)\ f\left(\hat{k}\cdot\vec{x} - c t\right),$$ and confirming the result is exactly zero. 
``` sp.simplify(wavespeed**2*(sp.diff(uu_ID_PlaneWave,xx[0],2) + sp.diff(uu_ID_PlaneWave,xx[1],2) + sp.diff(uu_ID_PlaneWave,xx[2],2)) - sp.diff(uu_ID_PlaneWave,time,2)) ``` <a id='sphericalgaussian'></a> ## Step 4.b: The Spherical Gaussian Solution \[Back to [top](#toc)\] $$\label{sphericalgaussian}$$ Here we will implement the spherical Gaussian solution, which consists of ingoing and outgoing wave fronts: \begin{align} u(r,t) &= u_{\rm out}(r,t) + u_{\rm in}(r,t),\ \ \text{where}\\ u_{\rm out}(r,t) &=\frac{r-ct}{r} \exp\left[\frac{-(r-ct)^2}{2 \sigma^2}\right] \\ u_{\rm in}(r,t) &=\frac{r+ct}{r} \exp\left[\frac{-(r+ct)^2}{2 \sigma^2}\right] \\ \end{align} where $c$ is the wavespeed, and $\sigma$ is the width of the Gaussian (i.e., the "standard deviation"). ``` # Step 1: Set parameters defined in other modules xx = gri.xx # Sets the Cartesian coordinates xx[0]=x; xx[1]=y; xx[2]=z # Step 2: Declare free parameters intrinsic to these initial data time = par.Cparameters("REAL", thismodule, "time",0.0) sigma = par.Cparameters("REAL", thismodule, "sigma",3.0) # Step 4: Compute r r = sp.sympify(0) for i in range(DIM): r += xx[i]**2 r = sp.sqrt(r) # Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID. uu_ID_SphericalGaussianOUT = +(r - wavespeed*time)/r * sp.exp( -(r - wavespeed*time)**2 / (2*sigma**2) ) uu_ID_SphericalGaussianIN = +(r + wavespeed*time)/r * sp.exp( -(r + wavespeed*time)**2 / (2*sigma**2) ) uu_ID_SphericalGaussian = uu_ID_SphericalGaussianOUT + uu_ID_SphericalGaussianIN vv_ID_SphericalGaussian = sp.diff(uu_ID_SphericalGaussian, time) ``` Since the wave equation is linear, both the ingoing and outgoing waves must satisfy the wave equation, which implies that their sum also satisfies the wave equation.
Next we verify that $u(r,t)$ satisfies the wave equation, by confirming that $$\left(c^2 \nabla^2 - \partial_t^2 \right)\left\{u_{\rm out}(r,t)\right\},$$ and $$\left(c^2 \nabla^2 - \partial_t^2 \right)\left\{u_{\rm in}(r,t)\right\},$$ are separately zero. We do this because SymPy has difficulty simplifying the combined expression. ``` print(sp.simplify(wavespeed**2*(sp.diff(uu_ID_SphericalGaussianOUT,xx[0],2) + sp.diff(uu_ID_SphericalGaussianOUT,xx[1],2) + sp.diff(uu_ID_SphericalGaussianOUT,xx[2],2)) - sp.diff(uu_ID_SphericalGaussianOUT,time,2)) ) print(sp.simplify(wavespeed**2*(sp.diff(uu_ID_SphericalGaussianIN,xx[0],2) + sp.diff(uu_ID_SphericalGaussianIN,xx[1],2) + sp.diff(uu_ID_SphericalGaussianIN,xx[2],2)) - sp.diff(uu_ID_SphericalGaussianIN,time,2))) ``` <a id='code_validation2'></a> # Step 5: Code Validation against `ScalarWave.InitialData` NRPy+ module \[Back to [top](#toc)\] $$\label{code_validation2}$$ As a code validation check, we will verify agreement in the SymPy expressions for plane-wave initial data for the Scalar Wave equation between 1. this tutorial and 2. the NRPy+ [ScalarWave.InitialData](../edit/ScalarWave/InitialData.py) module. ``` # We just defined SymPy expressions for uu_ID and vv_ID in # terms of other SymPy variables. Here, we will use the # above-defined uu_ID and vv_ID to validate against the # same expressions in the ScalarWave/InitialData.py # module, to ensure consistency between this tutorial # (historically speaking, the tutorial was written first) # and the PlaneWave ID module itself. # # Step 6: Call the InitialData(Type="PlaneWave") function from within the # ScalarWave/InitialData.py module, # which should do exactly the same as in Steps 1-5 above. import ScalarWave.InitialData as swid swid.InitialData(Type="PlaneWave") # Step 7: Consistency check between the tutorial notebook above # and the PlaneWave option from within the # ScalarWave/InitialData.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module: PlaneWave Case") if sp.simplify(uu_ID_PlaneWave - swid.uu_ID) != 0: print("TEST FAILED: uu_ID_PlaneWave - swid.uu_ID = "+str(sp.simplify(uu_ID_PlaneWave - swid.uu_ID))+"\t\t (should be zero)") sys.exit(1) if sp.simplify(vv_ID_PlaneWave - swid.vv_ID) != 0: print("TEST FAILED: vv_ID_PlaneWave - swid.vv_ID = "+str(sp.simplify(vv_ID_PlaneWave - swid.vv_ID))+"\t\t (should be zero)") sys.exit(1) print("TESTS PASSED!") # Step 8: Consistency check between the tutorial notebook above # and the SphericalGaussian option from within the # ScalarWave/InitialData.py module. swid.InitialData(Type="SphericalGaussian") print("Consistency check between ScalarWave tutorial and NRPy+ module: SphericalGaussian Case") if sp.simplify(uu_ID_SphericalGaussian - swid.uu_ID) != 0: print("TEST FAILED: uu_ID_SphericalGaussian - swid.uu_ID = "+str(sp.simplify(uu_ID_SphericalGaussian - swid.uu_ID))+"\t\t (should be zero)") sys.exit(1) if sp.simplify(vv_ID_SphericalGaussian - swid.vv_ID) != 0: print("TEST FAILED: vv_ID_SphericalGaussian - swid.vv_ID = "+str(sp.simplify(vv_ID_SphericalGaussian - swid.vv_ID))+"\t\t (should be zero)") sys.exit(1) print("TESTS PASSED!") ``` <a id='latex_pdf_output'></a> # Step 6: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] $$\label{latex_pdf_output}$$ The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-ScalarWave.pdf](Tutorial-ScalarWave.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) ``` import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-ScalarWave") ```
github_jupyter
``` import pandas as pd import numpy as np import re from re import sub import multiprocessing from unidecode import unidecode from gensim.models.phrases import Phrases, Phraser from gensim.models import Word2Vec from gensim.test.utils import get_tmpfile from gensim.models import KeyedVectors from time import time from collections import defaultdict import logging # Setting up the loggings to monitor gensim logging.basicConfig(format="%(levelname)s - %(asctime)s: %(message)s", datefmt= '%H:%M:%S', level=logging.INFO) file = pd.read_csv("polish_sentiment_dataset.csv") file_cleaned = file.dropna().drop_duplicates().reset_index(drop=True).rename(columns={'description':'title'}) file_cleaned.rate.value_counts()/len(file_cleaned) file_cleaned[file_cleaned.rate==0] file_cleaned = file_cleaned[file_cleaned.rate!=0] file_cleaned.rate.value_counts()/len(file_cleaned) def text_to_word_list(text, remove_polish_letters): ''' Pre process and convert texts to a list of words method inspired by method from eliorc github repo: https://github.com/eliorc/Medium/blob/master/MaLSTM.ipynb''' text = remove_polish_letters(text) text = str(text) text = text.lower() # Clean the text text = sub(r"[^A-Za-z0-9^,!?.\/'+]", " ", text) text = sub(r"\+", " plus ", text) text = sub(r",", " ", text) text = sub(r"\.", " ", text) text = sub(r"!", " ! ", text) text = sub(r"\?", " ? 
", text) text = sub(r"'", " ", text) text = sub(r":", " : ", text) text = sub(r"\s{2,}", " ", text) text = text.split() return text file_cleaned.title = file_cleaned.title.apply(lambda x: text_to_word_list(x, unidecode)) file_model = file_cleaned.copy() file_model = file_model[file_model.title.str.len()>1] sent = [row for row in file_model.title] phrases = Phrases(sent, min_count=1, progress_per=50000) bigram = Phraser(phrases) sentences = bigram[sent] sentences[1] ``` - min count = 3 - remove most unusual words from training embeddings, like words 'ssssuuuuuuuppppppeeeeeerrrr', which actually stands for 'super', and doesn't need additional training - window = 4 - Word2Vec model will learn to predict given word from up to 4 words to the left, and up to 4 words to the right - size = 300 - size of hidden layer used to predict surroundings of embedded word, which also stands for dimensions of trained embeddings - sample = 1e-5 - probability baseline for subsampling most frequent words from surrounding of embedded word - negative = 20 - number of negative (ones that shouldn't have been predicted while modeling selected pair of words) words that will have their corresponding weights updated while training on specific training example, along with positive word ``` w2v_model = Word2Vec(min_count=3, window=4, size=300, sample=1e-5, alpha=0.03, min_alpha=0.0007, negative=20, workers=multiprocessing.cpu_count()-1) start = time() w2v_model.build_vocab(sentences, progress_per=50000) print('Time to build vocab: {} mins'.format(round((time() - start) / 60, 2))) start = time() w2v_model.train(sentences, total_examples=w2v_model.corpus_count, epochs=30, report_delay=1) print('Time to train the model: {} mins'.format(round((time() - start) / 60, 2))) w2v_model.init_sims(replace=True) w2v_model.save("word2vec.model") ``` Exporting preprocessed dataset for further steps (with replaced bigrams) ``` file_export = file_model.copy() file_export['old_title'] = file_export.title 
file_export.old_title = file_export.old_title.str.join(' ') file_export.title = file_export.title.apply(lambda x: ' '.join(bigram[x])) file_export.rate = file_export.rate.astype('int8') file_export[['title', 'rate']].to_csv('cleaned_dataset.csv', index=False) ```
github_jupyter
# Lab 1: MNIST Data Loader This notebook is the first lab of the "Deep Learning Explained" course. It is derived from the tutorial numbered CNTK_103A in the CNTK repository. This notebook is used to download and pre-process the [MNIST][] digit images to be used for building different models to recognize handwritten digits. ** Note: ** This notebook must be run to completion before the other course notebooks can be run. [MNIST]: http://yann.lecun.com/exdb/mnist/ ``` # Import the relevant modules to be used later from __future__ import print_function import gzip import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import os import shutil import struct import sys try: from urllib.request import urlretrieve except ImportError: from urllib import urlretrieve # Config matplotlib for inline plotting %matplotlib inline ``` ## Data download We will download the data onto the local machine. The MNIST database is a standard set of handwritten digits that has been widely used for training and testing of machine learning algorithms. It has a training set of 60,000 images and a test set of 10,000 images with each image being 28 x 28 grayscale pixels. This set is easy to use visualize and train on any computer. ``` # Functions to load MNIST images and unpack into train and test set. # - loadData reads image data and formats into a 28x28 long array # - loadLabels reads the corresponding labels data, 1 for each image # - load packs the downloaded image and labels data into a combined format to be read later by # CNTK text reader def loadData(src, cimg): print ('Downloading ' + src) gzfname, h = urlretrieve(src, './delete.me') print ('Done.') try: with gzip.open(gzfname) as gz: n = struct.unpack('I', gz.read(4)) # Read magic number. if n[0] != 0x3080000: raise Exception('Invalid file: unexpected magic number.') # Read number of entries. 
n = struct.unpack('>I', gz.read(4))[0] if n != cimg: raise Exception('Invalid file: expected {0} entries.'.format(cimg)) crow = struct.unpack('>I', gz.read(4))[0] ccol = struct.unpack('>I', gz.read(4))[0] if crow != 28 or ccol != 28: raise Exception('Invalid file: expected 28 rows/cols per image.') # Read data. res = np.fromstring(gz.read(cimg * crow * ccol), dtype = np.uint8) finally: os.remove(gzfname) return res.reshape((cimg, crow * ccol)) def loadLabels(src, cimg): print ('Downloading ' + src) gzfname, h = urlretrieve(src, './delete.me') print ('Done.') try: with gzip.open(gzfname) as gz: n = struct.unpack('I', gz.read(4)) # Read magic number. if n[0] != 0x1080000: raise Exception('Invalid file: unexpected magic number.') # Read number of entries. n = struct.unpack('>I', gz.read(4)) if n[0] != cimg: raise Exception('Invalid file: expected {0} rows.'.format(cimg)) # Read labels. res = np.fromstring(gz.read(cimg), dtype = np.uint8) finally: os.remove(gzfname) return res.reshape((cimg, 1)) def try_download(dataSrc, labelsSrc, cimg): data = loadData(dataSrc, cimg) labels = loadLabels(labelsSrc, cimg) return np.hstack((data, labels)) ``` # Download the data In the following code, we use the functions defined above to download and unzip the MNIST data into memory. The training set has 60000 images while the test set has 10000 images. 
``` # URLs for the train image and labels data url_train_image = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz' url_train_labels = 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz' num_train_samples = 60000 print("Downloading train data") train = try_download(url_train_image, url_train_labels, num_train_samples) url_test_image = 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz' url_test_labels = 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz' num_test_samples = 10000 print("Downloading test data") test = try_download(url_test_image, url_test_labels, num_test_samples) ``` # Visualize the data Here, we use matplotlib to display one of the training images and its associated label. ``` # Plot a random image sample_number = 5001 plt.imshow(train[sample_number,:-1].reshape(28,28), cmap="gray_r") plt.axis('off') print("Image Label: ", train[sample_number,-1]) ``` # Save the images Save the images in a local directory. While saving the data we flatten the images to a vector (28x28 image pixels becomes an array of length 784 data points). ![mnist-input](https://www.cntk.ai/jup/cntk103a_MNIST_input.png) The labels are encoded as [1-hot][] encoding (label of 3 with 10 digits becomes `0001000000`, where the first index corresponds to digit `0` and the last one corresponds to digit `9`).
![mnist-label](https://www.cntk.ai/jup/cntk103a_onehot.png) [1-hot]: https://en.wikipedia.org/wiki/One-hot ``` # Save the data files into a format compatible with CNTK text reader def savetxt(filename, ndarray): dir = os.path.dirname(filename) if not os.path.exists(dir): os.makedirs(dir) if not os.path.isfile(filename): print("Saving", filename ) with open(filename, 'w') as f: labels = list(map(' '.join, np.eye(10, dtype=np.uint).astype(str))) for row in ndarray: row_str = row.astype(str) label_str = labels[row[-1]] feature_str = ' '.join(row_str[:-1]) f.write('|labels {} |features {}\n'.format(label_str, feature_str)) else: print("File already exists", filename) # Save the train and test files (prefer our default path for the data) data_dir = os.path.join("..", "Examples", "Image", "DataSets", "MNIST") if not os.path.exists(data_dir): data_dir = os.path.join("data", "MNIST") print ('Writing train text file...') savetxt(os.path.join(data_dir, "Train-28x28_cntk_text.txt"), train) print ('Writing test text file...') savetxt(os.path.join(data_dir, "Test-28x28_cntk_text.txt"), test) print('Done') ``` **Optional: Suggested Explorations** One can do data manipulations to improve the performance of a machine learning system. I suggest you first use the data generated so far and complete Lab 2- 4 labs. Once you have a baseline with classifying the data in its original form, now use the different data manipulation techniques to further improve the model. There are several ways data alterations can be performed. CNTK readers automate a lot of these actions for you. However, to get a feel for how these transforms can impact training and test accuracies, I strongly encourage individuals to try one or more of data perturbation. - Shuffle the training data rows to create a different set of training images. Be sure to shuffle each image in the same way. Hint: Use `permute_indices = np.random.permutation(train.shape[0])`. Then run Lab 2-4 with this newly permuted data. 
- Adding noise to the data can often improve (lower) the [generalization error][]. You can augment the training set by adding noise (generated with numpy, hint: use `numpy.random`) to the training images. - Distort the images with [affine transformation][] (translations or rotations) [generalization error]: https://en.wikipedia.org/wiki/Generalization_error [affine transformation]: https://en.wikipedia.org/wiki/Affine_transformation
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Introduction:-Logistic-regression" data-toc-modified-id="Introduction:-Logistic-regression-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Introduction: Logistic regression</a></span></li><li><span><a href="#Simple-logistic-regression" data-toc-modified-id="Simple-logistic-regression-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Simple logistic regression</a></span></li><li><span><a href="#Polynomial-logistic-regression" data-toc-modified-id="Polynomial-logistic-regression-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Polynomial logistic regression</a></span></li><li><span><a href="#Implementation-using-SKlearn" data-toc-modified-id="Implementation-using-SKlearn-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Implementation using SKlearn</a></span></li><li><span><a href="#Implementation-using-Statsmodel" data-toc-modified-id="Implementation-using-Statsmodel-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Implementation using Statsmodel</a></span><ul class="toc-item"><li><span><a href="#Influence-analysis-performed-by-Statsmodel" data-toc-modified-id="Influence-analysis-performed-by-Statsmodel-5.1"><span class="toc-item-num">5.1&nbsp;&nbsp;</span>Influence analysis performed by Statsmodel</a></span></li></ul></li></ul></div> # Introduction: Logistic regression This notebook explores a Python implementation of logistic regression in 2D, first with linear terms, and then with multiple polynomial terms. These are Python implementations of exercise 2) from Andrew Ng's course: Machine Learning on coursera. 
Some similar notebooks and sources: [1][1] [2][2] ### Libraries [1]: http://nbviewer.jupyter.org/github/JWarmenhoven/Machine-Learning/blob/master/notebooks/Programming%20Exercise%202%20-%20Logistic%20Regression.ipynb [2]: https://github.com/kaleko/CourseraML/blob/a815ac95ba3d863b7531926b1edcdb4f5dd0eb6b/ex2/ex2.ipynb ``` import os import math as m import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from scipy import stats import scipy.optimize as opt import scipy.stats as stats from sklearn.preprocessing import PolynomialFeatures from sklearn import linear_model import statsmodels.api as sm %matplotlib inline sns.set(style='white') ``` ### Import data set and visualize the data ``` data1 = pd.read_csv("/Users/User/Desktop/Computer_Science/stanford_ml/machine-learning-ex2/ex2/ex2data1.txt", header = None, names = ["Exam1", "Exam2", "Admission"]) display(data1.head()) sns.lmplot("Exam1", "Exam2", data = data1, hue = "Admission",fit_reg = False) plt.title("Scatter plot: exam results and admission") plt.show() ``` # Simple logistic regression Only linear terms in the function $h_\theta(x) = \theta^Tx$ ### Initialize the training data: design matrix X, output vector y, theta vector ``` X = data1.iloc[:,0:2].values m,n = X.shape X = np.concatenate((np.ones(m)[:,np.newaxis],X),1) y = data1.iloc[:,-1].values[:,np.newaxis] initial_theta = np.zeros((n+1)) ``` ### Defining the cost and gradient function #### Logistic regression hypothesis $$ h_{\theta}(x) = g(\theta^{T}x)$$ $$ g(z)=\frac{1}{1+e^{−z}} $$ #### Cost Function $$ J(\theta) = \frac{1}{m}\sum_{i=1}^{m}\big[-y^{(i)}\, log\,( h_\theta\,(x^{(i)}))-(1-y^{(i)})\,log\,(1-h_\theta(x^{(i)}))\big]$$ #### Vectorized Cost Function $$ J(\theta) = \frac{1}{m}\big((\,log\,(g(X\theta))^Ty+(\,log\,(1-g(X\theta))^T(1-y)\big)$$ #### Partial derivative $$ \frac{\delta J(\theta)}{\delta\theta_{j}} = \frac{1}{m}\sum_{i=1}^{m} ( h_\theta (x^{(i)})-y^{(i)})x^{(i)}_{j} $$ #### Vectorized $$ 
\frac{\delta J(\theta)}{\delta\theta_{j}} = \frac{1}{m} X^T(g(X\theta)-y)$$ ``` def sigmoid(x): return 1 / (1 + np.exp(-x)) def CostFunc(theta,X,y): '''Computes the cost of using theta as the parameter for logistic regression''' #Initializing variables m = len(y) J = 0 grad = np.zeros(theta.shape) #Vectorized computations z = X @ theta h = sigmoid(z) J = (1/m) * ( (-y.T @ np.log(h)) - (1 - y).T @ np.log(1-h)); return J def Gradient(theta,X,y): '''Computes the gradient of the cost w.r.t. to the parameters.''' #Initializing variables m = len(y) theta = theta[:,np.newaxis] grad = np.zeros(theta.shape) #Vectorized computations z = X @ theta h = sigmoid(z) grad = (1/m)*(X.T @ ( h - y)); return grad.ravel() def Predictions(theta, X): #Return predicted values given X and optimal theta. Returned value in form 0, 1 #Based on assumption that Threshold T = 0.5 res = np.sign(X @ theta[:,np.newaxis]) np.putmask(res,res == -1,0) return res def Accuracy(y_pred, y): diff = y_pred - y return 1.0 - (float(np.count_nonzero(diff)) / len(diff)) ``` ### Optimizing the cost function using Scipy ``` model = opt.minimize(fun = CostFunc, x0 = initial_theta, args = (X, y), method = 'TNC', jac = Gradient) theta_pred = model.x y_pred = Predictions(theta_pred,X) Accuracy(y_pred,y) ``` ### Predicted probability of admission for a student with 45, 85 exam grades ``` sigmoid(model.x @ np.array([1,45,85])) ``` ### Visualizing the decision boundary $$ g(\theta_0 + \theta_1x_1+...+\theta_n x_n) = T$$ - Positive decision when $g(\theta^Tx) \geq T$ - Negative decision when $g(\theta^Tx) < T$ In our case, $g(\theta^Tx)$ is the sigmoid function whose inverse is the log odds function, so the decision boundary can be re-written as: $$ \theta_0 + \theta_1x_1+...+\theta_n x_n = \log \left( \frac{T}{1 - T} \right)$$ For the simple 2-D case and a threshold value of 0.5 this gives: \begin{aligned} \theta_0 + \theta_1 x_1 + \theta_2 x_2 = 0 \\ x_2 = - \frac{(\theta_0 + \theta_1 x_1)}{\theta_2} \end{aligned} ``` 
# --- Straight-line decision boundary over the admissions scatter ---
if __name__ == "__main__":
    plot_x_1 = np.array([min(data1.Exam1) - 2, max(data1.Exam1) + 2])
    # boundary at T = 0.5: x2 = -(t0 + t1*x1) / t2
    plot_x_2 = -(theta_pred[0] + theta_pred[1] * plot_x_1) / theta_pred[2]
    sns.lmplot("Exam1", "Exam2", data=data1, hue="Admission",
               fit_reg=False, legend=False, size=6)
    plt.plot(plot_x_1, plot_x_2, color='r', label='Decision boundary')
    plt.title("Scatter plot: exam results and admission")
    plt.legend(loc=0)
    plt.show()

    # # Polynomial logistic regression — load and plot the second data set
    data2 = pd.read_csv("/Users/User/Desktop/Computer_Science/stanford_ml/machine-learning-ex2/ex2/ex2data2.txt",
                        header=None, names=["test1", "test2", "output"])
    display(data2.head())
    pos = data2[data2.output == 1]
    neg = data2[data2.output == 0]
    sns.lmplot("test1", "test2", data=data2, hue="output", fit_reg=False)
    plt.title("Scatter plot")
    plt.show()


# ### Adding non-linear features to the design matrix (markdown)
# X := x1 + x2 + x1^2 + x2^2 + x1 x2 + x1^2 x2^2 + ...
def mapFeature(X1, X2, degree=6):
    """Map two input feature vectors to polynomial features.

    Returns an (m, k) array with a leading column of ones followed by
    X1, X2, X1^2, X1*X2, X2^2, ..., up to total degree `degree`
    (generalized from the original hard-coded degree of 6; the default
    preserves the original behavior).  X1 and X2 must be the same size.
    """
    X1 = X1[:, np.newaxis]
    X2 = X2[:, np.newaxis]
    m, n = X1.shape
    out = np.ones(m)[:, np.newaxis]
    for i in range(1, degree + 1):
        for j in range(0, i + 1):
            out = np.concatenate((out, (X1 ** (i - j)) * (X2 ** j)), 1)
    return out


if __name__ == "__main__":
    # ### Initialize the training data: design matrix X, output vector y, theta
    X = data2.iloc[:, 0:2].values
    m, n = X.shape
    X = np.concatenate((np.ones(m)[:, np.newaxis], X), 1)  # NOTE: overwritten just below
    y = data2.iloc[:, -1][:, np.newaxis]
    # Generate polynomial design matrix
    X = mapFeature(data2.test1, data2.test2)
    m, n = X.shape
    initial_theta = np.ones(n)
    lamda = 10

    # ### Alternative polynomial method using Sklearn
    # (PolynomialFeatures also inserts the intercept column of ones)
    poly = PolynomialFeatures(6)
    XX = poly.fit_transform(data2.iloc[:, 0:2].values)
    display(XX.shape)
    display(XX[0:5, 1])
    display(X[0:5, 1])


# ### Regularized cost and gradient (markdown)
# J(theta) = (1/m) * sum[-y log(h) - (1-y) log(1-h)] + (lambda/2m) * sum_{j>=1} theta_j^2
# dJ/dtheta_j = (1/m) X'(g(X theta) - y) + (lambda/m) theta_j
# Note: the intercept parameter theta_0 is not regularized.
def costFunctionReg(theta, X, y, lamda):
    """Logistic-regression cost with L2 regularization (theta_0 excluded)."""
    theta = theta[:, np.newaxis]
    m = len(y)
    n, _ = theta.shape
    # L zeroes the first component so the intercept is not penalized
    L = np.eye(n)
    L[0, 0] = 0
    z = X @ theta
    h = sigmoid(z)
    J_reg = (lamda / (2 * m)) * ((L @ theta).T @ theta)
    J = (1 / m) * ((-y.T @ np.log(h)) - (1 - y).T @ np.log(1 - h)) + J_reg
    return J


def GradientReg(theta, X, y, lamda):
    """Gradient of the regularized cost (intercept unpenalized), flattened."""
    theta = theta[:, np.newaxis]
    m = len(y)
    n, _ = theta.shape
    L = np.eye(n)
    L[0, 0] = 0
    z = X @ theta
    h = sigmoid(z)
    grad_reg = (lamda / m) * (L @ theta)
    grad = (1 / m) * (X.T @ (h - y)) + grad_reg
    return grad.ravel()


if __name__ == "__main__":
    # ### Running the optimization using Scipy optimize
    model2 = opt.minimize(fun=costFunctionReg, x0=initial_theta, args=(X, y, 1),
                          method='BFGS', jac=GradientReg)
    display(model2.x[0:5])
    res2 = model2.x

    # ### Predicting values and accuracy
    y_pred = Predictions(model2.x, X)
    Accuracy(y_pred, y)

    # ### Plotting the polynomial decision boundary for different lambda values
    u = np.linspace(-1, 1.5, 50)
    v = np.linspace(-1, 1.5, 50)
    xx1, xx2 = np.meshgrid(u, v)
    fig, axes = plt.subplots(1, 3, sharey=True, figsize=(17, 5))
    lamda = [0, 1, 100]
    for l in range(3):
        res = opt.minimize(fun=costFunctionReg, x0=initial_theta,
                           args=(X, y, lamda[l]), method='BFGS', jac=GradientReg)
        h = sigmoid(poly.fit_transform(np.c_[xx1.ravel(), xx2.ravel()]).dot(res.x))
        h = h.reshape(xx1.shape)
        axes[l].scatter(pos.test1, pos.test2, marker='o', label='y = 1')
        axes[l].scatter(neg.test1, neg.test2, marker='x', label='y = 0')
        axes[l].contour(xx1, xx2, h, [0.5], linewidths=1, colors='g')
        axes[l].set_title('Lambda = {}, Accuracy = {}'.format(
            lamda[l], round(Accuracy(Predictions(res.x, X), y), 2)))
        axes[l].legend()
    plt.show()

# # Implementation using SKlearn (markdown)
# Note that in SKlearn the regularization parameter C works as 1 / lambda,
# i.e. it is the inverse of regularization strength; must be a positive float.
# (markdown continuation) Like in support vector machines, smaller values
# specify stronger regularization.
if __name__ == "__main__":
    model_SK = linear_model.LogisticRegression(fit_intercept=False, C=1,
                                               penalty='l2', solver='liblinear')
    model_SK.fit(X, y.ravel())
    model_SK.coef_

    # Sweep over C (inverse regularization strength) and plot each boundary.
    u = np.linspace(-1, 1.5, 50)
    v = np.linspace(-1, 1.5, 50)
    xx1, xx2 = np.meshgrid(u, v)
    fig, axes = plt.subplots(1, 3, sharey=True, figsize=(17, 5))
    C = [1e9, 1, 0.05]
    for l in range(3):
        model_SK = linear_model.LogisticRegression(fit_intercept=False, C=C[l],
                                                   penalty='l2', solver='liblinear')
        model_SK.fit(X, y.ravel())
        h = sigmoid(poly.fit_transform(np.c_[xx1.ravel(), xx2.ravel()]).dot(model_SK.coef_.T))
        h = h.reshape(xx1.shape)
        axes[l].scatter(pos.test1, pos.test2, marker='o', label='y = 1')
        axes[l].scatter(neg.test1, neg.test2, marker='x', label='y = 0')
        axes[l].contour(xx1, xx2, h, [0.5], linewidths=1, colors='g')
        axes[l].pcolormesh(xx1, xx2, h, alpha=.1, cmap='jet', edgecolors='none')
        axes[l].grid(False)
        # BUG FIX: the original title used Accuracy(Predictions(res.x, X), y),
        # where `res` was the scipy result left over from the previous lambda
        # loop — every subplot showed the same (unrelated) accuracy.  Report
        # the accuracy of the sklearn model fitted in THIS iteration instead.
        acc = Accuracy(Predictions(model_SK.coef_.ravel(), X), y)
        axes[l].set_title('C = {}, Accuracy = {}'.format(C[l], round(acc, 2)))
        axes[l].legend()
    plt.show()

    # # Implementation using Statsmodel
    # Workaround needed to print the summary method in statsmodels, see
    # https://github.com/statsmodels/statsmodels/issues/3931
    model_sm = sm.Logit(y, X)
    results = model_sm.fit()
    from scipy import stats
    stats.chisqprob = lambda chisq, df: stats.chi2.sf(chisq, df)
    print(results.summary())

    # ### These coefficients correspond to those calculated by SKlearn with a
    # very large C parameter, i.e. very little regularization.
    model_SK = linear_model.LogisticRegression(fit_intercept=False, C=1e11,
                                               penalty='l2', solver='liblinear')
    model_SK.fit(X, y.ravel())
    model_SK.coef_

    ((176.75 - 175.95)**2 + (168.25 - 175.95)**2 + (182.875 - 175.95)**2) * 8
github_jupyter
# ======================================================================
# Differentiable mesh rendering: render an object under Euler-angle
# rotations with tf_mesh_renderer and recover the angles by gradient
# descent (TensorFlow 1.x graph-mode notebook).
# ======================================================================
import mesh_renderer
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import camera_utils
import rasterize_triangles
from IPython.display import Image, HTML
import PIL
from stl import mesh
from io import BytesIO


def drawarray(a, fmt="png"):
    """Encode a numeric HxWxC image array as an inline IPython image."""
    a = np.uint8(a)
    buf = BytesIO()
    PIL.Image.fromarray(a).save(buf, fmt)
    return Image(data=buf.getvalue())


def drawarrays(arrays):
    """Return a matplotlib animation cycling through a list of image arrays."""
    ratio = arrays[0].shape[1] / arrays[0].shape[0]
    fig, ax = plt.subplots(figsize=(5, 5 * ratio))
    im = ax.imshow(arrays[0])

    def update(i):
        im.set_data(arrays[i])

    plt.axis('off')
    ani = animation.FuncAnimation(fig, update,
                                  frames=np.arange(0, len(arrays)),
                                  interval=50)
    plt.close()  # suppress the static figure; only the animation is wanted
    return ani


def get_triangles(bust):
    """Convert an STL triangle soup into indexed (vertices, triangles).

    Duplicate vertex coordinates are merged via a dict lookup; returns the
    unique vertex list and per-triangle index triples.  (The original carried
    an unused enumerate index and a redundant else-branch.)
    """
    indices = {}
    indices_list = []
    triangles = []
    for tri in bust.vectors:
        triangle = []
        for v in tri:
            val = tuple(v)
            if val not in indices:
                indices[val] = len(indices)
                indices_list.append(val)
            triangle.append(indices[val])
        triangles.append(triangle)
    return indices_list, triangles


# Axis-aligned cube used as the test object for angle fitting.
cube_vertices = np.array([[-1, -1, 1], [-1, -1, -1], [-1, 1, -1], [-1, 1, 1],
                          [1, -1, 1], [1, -1, -1], [1, 1, -1], [1, 1, 1]])
cube_triangles = np.array([[0, 1, 2], [2, 3, 0], [3, 2, 6], [6, 7, 3],
                           [7, 6, 5], [5, 4, 7], [4, 5, 1], [1, 0, 4],
                           [5, 6, 2], [2, 1, 5], [7, 4, 0], [0, 3, 7]],
                          dtype=np.int32)


def render_cube(*, vertices, normals, triangles, angles,
                image_height=300, image_width=300):
    """Render the object rotated by `angles` (1x3 Euler angles); HxWx4 RGBA.

    The removed local `vertices_world_space` of the original was computed
    identically to `vertex_positions` and never used.
    """
    model_rotation = camera_utils.euler_matrices(angles)[0, :3, :3]
    num_v = vertices.get_shape()[0].value
    # camera position plus a ring of point lights around it
    eye = tf.constant([[0.0, 0.0, 6.0]], dtype=tf.float32)
    lightbulb = tf.constant([[0.0, 0.0, 6.0]] +
                            [[0.0, 5.0, 6.0], [0.0, -5.0, 6.0],
                             [-5.0, 0.0, 6.0], [5.0, 0.0, 6.0]],
                            dtype=tf.float32)
    center = tf.constant([[0.0, 0.0, 0.0]], dtype=tf.float32)
    world_up = tf.constant([[0.0, 1.0, 0.0]], dtype=tf.float32)
    # color each vertex by its min-max-normalized position, which makes the
    # rotation visible in the rendering
    min_vertex = tf.reduce_min(vertices, axis=0, keepdims=True)
    max_vertex = tf.reduce_max(vertices, axis=0, keepdims=True)
    vertex_diffuse_colors = tf.reshape(
        (vertices - min_vertex) / (max_vertex - min_vertex), [1, num_v, 3])
    light_positions = tf.expand_dims(lightbulb, axis=0)
    light_intensities = tf.ones([1, 1, 3], dtype=tf.float32)
    ambient_color = tf.constant([[0.0, 0.0, 0.0]])
    vertex_positions = tf.reshape(
        tf.matmul(vertices, model_rotation, transpose_b=True), [1, num_v, 3])
    desired_normals = tf.reshape(
        tf.matmul(normals, model_rotation, transpose_b=True), [1, num_v, 3])
    render = mesh_renderer.mesh_renderer(
        vertex_positions, triangles, desired_normals, vertex_diffuse_colors,
        eye, center, world_up, light_positions, light_intensities,
        image_width, image_height, ambient_color=ambient_color)
    return tf.reshape(tf.minimum(render, 1.0), [image_height, image_width, 4])


def build_nodes(*, object_vertices, object_triangles,
                image_height=300, image_width=300):
    """Build a graph that regresses rotation angles from the target render
    with a small CNN; loss is MSE directly on the angles."""
    object_vertices = tf.constant(object_vertices, dtype=tf.float32)
    object_normals = tf.nn.l2_normalize(object_vertices, dim=1)
    target_angles = tf.placeholder(tf.float32, shape=[None, 3])
    starting_angles = tf.placeholder(tf.float32, shape=[None, 3])
    training = tf.placeholder_with_default(False, shape=())

    def _render_batch(angle_batch):
        # render each row of angles independently
        return tf.map_fn(
            lambda x: render_cube(vertices=object_vertices,
                                  normals=object_normals,
                                  triangles=object_triangles,
                                  angles=x[None, :],
                                  image_height=image_height,
                                  image_width=image_width),
            angle_batch)

    desired_render = _render_batch(target_angles)
    starting_render = _render_batch(starting_angles)  # built but unused downstream

    # conv tower over the RGB channels of the target image
    # (the original wrapped the slice in a 1-element tf.concat — a no-op)
    sizes = [(128, 5), (128, 5), (128, 5)]
    out = desired_render[:, :, :, :3]
    for filters, kernel_size in sizes:
        out = tf.nn.leaky_relu(tf.layers.conv2d(
            out, filters, kernel_size, strides=(2, 2), activation=None,
            kernel_initializer=tf.random_uniform_initializer(-0.002, 0.002),
            bias_initializer=tf.zeros_initializer()))
    flat = tf.reshape(out, [tf.shape(out)[0],
                            out.get_shape()[1].value *
                            out.get_shape()[2].value *
                            out.get_shape()[3].value])
    predicted_angles = tf.contrib.layers.fully_connected(
        flat, 3, activation_fn=None,
        biases_initializer=tf.zeros_initializer(),
        weights_initializer=tf.random_uniform_initializer(-0.002, 0.002))
    render = _render_batch(predicted_angles)

    # supervised on angles, not on rendered pixels
    loss = tf.reduce_mean(tf.square(predicted_angles - target_angles))
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
    trainable = tf.trainable_variables()
    grads = tf.gradients(loss, trainable)
    grads, _ = tf.clip_by_global_norm(grads, 1.0)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.apply_gradients(zip(grads, trainable))

    return {
        "loss": loss,
        "train_op": train_op,
        "render": render,
        "desired_render": desired_render,
        "predicted_angles": predicted_angles,
        "target_angles": target_angles,
        "starting_angles": starting_angles,
        "training": training,
    }


def build_nodes_simple(*, object_vertices, object_triangles,
                       image_height=300, image_width=300, init_val=None):
    """Build a graph that optimizes the angles directly (a tf.Variable) to
    match the target render; pixel-space L1 loss flows through the
    differentiable renderer."""
    if init_val is None:
        init_val = [[-1.16, 0.0, 3.48]]
    object_vertices = tf.constant(object_vertices, dtype=tf.float32)
    object_normals = tf.nn.l2_normalize(object_vertices, dim=1)
    target_angles = tf.placeholder(tf.float32, shape=[None, 3])
    starting_angles = tf.placeholder(tf.float32, shape=[None, 3])
    training = tf.placeholder_with_default(False, shape=())

    def _render_batch(angle_batch):
        return tf.map_fn(
            lambda x: render_cube(vertices=object_vertices,
                                  normals=object_normals,
                                  triangles=object_triangles,
                                  angles=x[None, :],
                                  image_height=image_height,
                                  image_width=image_width),
            angle_batch)

    desired_render = _render_batch(target_angles)
    starting_render = _render_batch(starting_angles)  # built but unused downstream
    predicted_angles = tf.Variable(init_val, dtype=tf.float32)
    render = _render_batch(predicted_angles)

    loss = tf.reduce_mean(tf.abs(render - desired_render))
    optimizer = tf.train.MomentumOptimizer(0.7, 0.1)
    trainable = tf.trainable_variables()
    grads = tf.gradients(loss, trainable)
    grads, _ = tf.clip_by_global_norm(grads, 1.0)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.apply_gradients(zip(grads, trainable))

    return {
        "loss": loss,
        "train_op": train_op,
        "render": render,
        "desired_render": desired_render,
        "predicted_angles": predicted_angles,
        "target_angles": target_angles,
        "starting_angles": starting_angles,
        "training": training,
    }


if __name__ == "__main__":
    # Load and normalize the STL bust (kept for experiments with a real mesh;
    # the optimization below uses the cube).
    bust = mesh.Mesh.from_file("../data/Slic3r_friendly_pt5_scale_tilted.stl")
    vertices, triangles = get_triangles(bust)
    triangles = np.array(triangles)
    vertices = np.array(vertices)
    vertices -= vertices.mean(axis=0, keepdims=True)
    vertices = (vertices / np.array(vertices).max()) * 2

    tf.reset_default_graph()
    sess = tf.InteractiveSession()
    nodes = build_nodes_simple(object_triangles=cube_triangles,
                               object_vertices=cube_vertices,
                               image_height=300, image_width=300,
                               init_val=[[-1.16, 0.00, 0.0]])
    sess.run(tf.global_variables_initializer())
    desire = sess.run(nodes["desired_render"],
                      {nodes["target_angles"]: [[-1.16, 0.00, 3.48]],
                       nodes["training"]: True})
    drawarray(desire[0] * 255)

    errors = []
    renders = []
    for i in range(300):
        l, _, temp, desire = sess.run(
            [nodes["loss"], nodes["train_op"], nodes["render"],
             nodes["desired_render"]],
            {nodes["target_angles"]: [[-1.16, 0.00, 3.48]],
             nodes["training"]: True})
        print("{} Loss {}".format(i, l))
        errors.append(l)
        renders.append(temp[0])
        if l < 0.01:
            break

    # NOTE: two large duplicated commented-out CNN-regression experiments
    # (noise-schedule training with build_nodes) and a commented error plot
    # were removed here — dead code.
    tf.trainable_variables()

    l, pred = sess.run([nodes["loss"], nodes["predicted_angles"]],
                       {nodes["target_angles"]: [[-1.16, 0.00, 3.48]]})
    print(pred)

    # single evaluation at a different target (the original wrapped this in
    # `for i in range(400): ...; break`, which ran exactly once)
    l, pred, temp, desire = sess.run(
        [nodes["loss"], nodes["predicted_angles"], nodes["render"],
         nodes["desired_render"]],
        {nodes["target_angles"]: [[-20.0, 0.00, 3.14]]})
    renders.append(temp[0])

    drawarray(np.minimum(desire[0], 1.0) * 255)
    HTML(drawarrays(renders).to_html5_video())
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl %matplotlib inline plt.rcParams['figure.figsize'] = (10,10) plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' import sys caffe_root = '/Users/Bato/Documents/caffe/' sys.path.insert(0, caffe_root+'python') import caffe caffe.set_mode_cpu() model_def = caffe_root + 'models/fine_tune_CaffeNet/Tune_fc78_1500iters/test2_4/deploy.prototxt' model_weight = caffe_root + 'models/fine_tune_CaffeNet/Tune_fc78_1500iters/test2_4/caffenet_train_iter_1500.caffemodel' net = caffe.Net(model_def, model_weight, caffe.TEST) ``` Set up input preprocessing. (We'll use Caffe's caffe.io.Transformer to do this, but this step is independent of other parts of Caffe, so any custom preprocessing code may be used). Our default CaffeNet is configured to take images in BGR format. Values are expected to start in the range [0, 255] and then have the mean ImageNet pixel value subtracted from them. In addition, the channel dimension is expected as the first (outermost) dimension. As matplotlib will load images with values in the range [0, 1] in RGB format with the channel as the innermost dimension, we are arranging for the needed transformations here. 
``` # set the size of input net.blobs['data'].reshape(50, # batch size 3, # 3-channel(BGR) images 227, 227) # image size mu = np.load(caffe_root+'python/caffe/imagenet/ilsvrc_2012_mean.npy') # mu:(3, 256, 256) mu = mu.mean(1).mean(1) # Obtain the mean (BGR) pixel values # print 'mean-substracted values:',zip('BGR',mu) transformer = caffe.io.Transformer({'data':net.blobs['data'].data.shape}) transformer.set_transpose('data', (2,0,1)) # move image channels to outermost dimension transformer.set_mean('data', mu) # subtract the dataset-mean value in each channel transformer.set_raw_scale('data', 255) # rescale from [0, 1] to [0, 255] transformer.set_channel_swap('data', (2,1,0)) # swap channels from RGB to BGR image = caffe.io.load_image(caffe_root + 'examples/images/cat.jpg') transformed_image = transformer.preprocess('data',image) # plt.imshow(image) # plt.grid('off') net.blobs['data'].data[...] = transformed_image output = net.forward() output_prob = output['prob'][0] print 'predicted class is:',output_prob.argmax() labels_file = caffe_root + 'data/ilsvrc12/synset_words.txt' labels = np.loadtxt(labels_file, str, delimiter='\t') print 'output label:',labels[output_prob.argmax()] top_inds = output_prob.argsort()[::-1][:5] print 'probabilities and labels:' zip(output_prob[top_inds], labels[top_inds]) for layer_name, blob in net.blobs.iteritems(): print layer_name + '\t' + str(blob.data.shape) for layer_name, param in net.params.iteritems(): print layer_name + '\t' + str(param[0].data.shape), str(param[1].data.shape) def vis_square(data): data = (data - data.min())/(data.max() - data.min()) n = int(np.ceil(np.sqrt(data.shape[0]))) padding = (((0, n**2 - data.shape[0]), (0, 1), (0, 1)) + ((0, 0),) * (data.ndim - 3)) data = np.pad(data, padding, mode = 'constant', constant_values=1) data = data.reshape((n, n) + data.shape[1:]).transpose((0,2,1,3)+tuple(range(4, data.ndim+1))) data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:]) plt.imshow(data); 
plt.axis('off') # filters = net.params['conv1'][0].data # vis_square(filters.transpose(0, 2, 3, 1)) # feat = net.blobs['pool5'].data[0, :] # vis_square(feat) import scipy.io as scio Data = scio.loadmat('/Users/Bato/Dropbox/tsne_python_5.25/All_PC_DataforBato.mat') I = Data['images']; Ans = Data['state'] # shape of I: (101, 101, 842), shape of Ans:(1, 842) plt.imshow(I[:,:,1]) # This part is done for fine-tuning. Prepare for the txt file needed to write lmdb file. # # def write_image(I): # # for i in range(0,I.shape[2]): # # II = (I[:,:,i] - np.mean(I[:,:,i]))/np.mean(I[:,:,i]) # # # II = np.dstack((np.dstack((II,II)),II)) # # plt.imsave('/Users/Bato/Documents/caffe/data/phase_contrast/phase_contrast%s.jpeg' % i,II,cmap='gray') # # return None # # write_image(I) def write_text(a, b, I): #a,b is the testing patients number p = [0, 192, 387, 507, 677, 842] ar = np.zeros((1, 842)) ar[:,p[a-1]:p[a]] = 1 ar[:,p[b-1]:p[b]] = 1 test_set=[] train_set=[] test_index = np.where(ar==1)[1] train_index = np.where(ar==0)[1] train_set=open("/Users/Bato/Documents/caffe/data/phase_contrast/trainingset.txt", "w") test_set=open("/Users/Bato/Documents/caffe/data/phase_contrast/testingset.txt", "w") for j in train_index: train_set.write('/phase_contrast'+str(j)+'.jpeg '+str(int(Ans[:,j]))+'\n') train_set.close() for i in test_index: test_set.write('/phase_contrast'+str(i)+'.jpeg '+str(int(Ans[:,i]))+'\n') train_images = I[:,:,train_index] test_images = I[:,:,test_index] train_labels = Ans[:,train_index] test_labels = Ans[:,test_index] #np.savetxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc8_1500iters/testset_label/'+ # str(a)+'_'+str(b)+'.txt',test_labels) return train_images, test_images, train_labels, test_labels train_I, test_I, train_Ans, test_Ans = write_text(2,4, I) def read_med_fine_tuning(images): output_matrix = np.zeros([images.shape[2],4096]) for i in range(0,images.shape[2]): image = images[:,:,i] image = (image - np.mean(image))/np.mean(image) image_3d = 
np.dstack((np.dstack((image,image)),image)) transform_image = transformer.preprocess('data', image_3d) print 'Processing the {0} image'.format(i) net.blobs['data'].data[...] = transform_image output = net.forward() feature = net.blobs['fc6'].data[0,:] # output_prob = output['prob'][0]#.argmax() output_matrix[i,:]= feature#output_prob return output_matrix output_matrix = read_med_fine_tuning(test_I) np.savetxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc78_1500iters/test2_4/fc7_matrix_2_4.csv',output_matrix) # Data preparation for computing the AUC for fine-tuning from numpy import loadtxt out1_3 = loadtxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc78_1500iters/test1_3/pro_matrix_1_3.csv') out1_4 = loadtxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc78_1500iters/test1_4/pro_matrix_1_4.csv') out1_5 = loadtxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc78_1500iters/test1_5/pro_matrix_1_5.csv') out2_3 = loadtxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc78_1500iters/test2_3/pro_matrix_2_3.csv') out2_4 = loadtxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc78_1500iters/test2_4/pro_matrix_2_4.csv') out2_5 = loadtxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc78_1500iters/test2_5/pro_matrix_2_5.csv') output_matrixs = [out1_3,out1_4,out1_5,out2_3,out2_4,out2_5] Ans1_3 = loadtxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc8_1500iters/testset_label/1_3.txt') Ans1_4 = loadtxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc8_1500iters/testset_label/1_4.txt') Ans1_5 = loadtxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc8_1500iters/testset_label/1_5.txt') Ans2_3 = loadtxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc8_1500iters/testset_label/2_3.txt') Ans2_4 = loadtxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc8_1500iters/testset_label/2_4.txt') Ans2_5 = 
loadtxt('/Users/Bato/Documents/caffe/models/fine_tune_CaffeNet/Tune_fc8_1500iters/testset_label/2_5.txt') ans_matrixs = [Ans1_3, Ans1_4, Ans1_5, Ans2_3, Ans2_4,Ans2_5] # This part is to computer the AUC when we use fine-tuning. import sklearn.svm as svm from sklearn.metrics import roc_curve, auc import seaborn as sns from scipy import interp name = ['P1&3','P1&4','P1&5','P2&3','P2&4','P2&5'] acc = [1.0000,0.9944, 0.7704, 0.5484, 0.6188, 0.7594] acc7 = [1.0000,0.9961, 0.6556, 0.5454, 0.616, 0.7592 ] mean_acc = np.mean(acc) mean_tpr = 0.0 mean_fpr = np.linspace(0, 1, 100) all_tpr = [] sns.set_palette('muted') palette = np.array(sns.color_palette("hls", 10)) mean_tpr = 0.0 mean_fpr = np.linspace(0, 1, 100) all_tpr = [] for i in range(0, len(output_matrixs)): fpr, tpr, thresholds = roc_curve(ans_matrixs[i], output_matrixs[i][:,1]) mean_tpr += interp(mean_fpr, fpr, tpr) mean_tpr[0] = 0.0 roc_auc = auc(fpr, tpr) plt.plot(fpr, tpr, lw=2,c=palette[i], label='testing %s,AUC=%0.4f,acc=%0.4f' %(name[i],roc_auc,acc7[i])) mean_tpr /= len(output_matrixs) mean_tpr[-1] = 1.0 mean_auc = auc(mean_fpr, mean_tpr) plt.xlim([-0.05, 1.05]) plt.ylim([-0.05, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC curve of fine-tuning fc8') plt.legend(loc=4) plt.grid('off') plt.plot(mean_fpr, mean_tpr, 'k--', label='Mean ROC (AUC = %0.4f),Mean acc=%0.4f' % (mean_auc,mean_acc), lw=2) plt.legend(loc=4) plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck') plt.savefig('/Users/Bato/Dropbox/tsne_python_5.25/Feature/CaffeNet/fine-tuning/fc78/AUC.png') # This part is to generate off-the-shell CNN features. 
def read_med_off_the_shell(images): feat_matrix = np.zeros((images.shape[2],4096)) for i in range(0, images.shape[2]): image = images[:,:,i] image = (image - np.mean(image))/np.mean(image) image_3d = np.dstack((np.dstack((image,image)),image)) transform_image = transformer.preprocess('data', image_3d) print 'Processing the {0} image'.format(i) net.blobs['data'].data[...] = transform_image output = net.forward() feature = net.blobs['fc6'].data[0,:] # feat_matrix[i, :] = np.mean(np.mean(feature, axis=1),axis=1) feat_matrix[i,:] = feature return feat_matrix feature_matrix = read_med_off_the_shell(I) np.savetxt('/Users/Bato/Dropbox/tsne_python_5.25/Feature/CaffeNet/fine-tuning/OFF-the-shell/test1_3_fc6.csv',feature_matrix,delimiter=",") # Split the whole feature matrix into training/testing sets patient-wise. feature_matrix =np.loadtxt('/Users/Bato/Dropbox/tsne_python_5.25/Feature/CaffeNet/fine-tuning/OFF-the-shell/test1_3/test1_3_fc6.csv',delimiter=",") # feature_matrix =np.loadtxt('/Users/Bato/Dropbox/tsne_python_5.25/Feature/inception3/off-the-shelf/mix1/mix1.csv',delimiter=",") def split_matrix_to_patient(feature_matrix): p1 = np.concatenate((feature_matrix[0:192,:], Ans.T[0:192]),axis=1) p2 = np.concatenate((feature_matrix[192:387,:], Ans.T[192:387]),axis=1) p3 = np.concatenate((feature_matrix[387:507,:], Ans.T[387:507]),axis=1) p4 = np.concatenate((feature_matrix[507:677,:], Ans.T[507:677]),axis=1) p5 = np.concatenate((feature_matrix[677:,:], Ans.T[677:]),axis=1) tr1 = np.concatenate((p2,p4,p5), axis=0) tr2 = np.concatenate((p2,p3,p5), axis=0) tr3 = np.concatenate((p2,p3,p4), axis=0) tr4 = np.concatenate((p1,p4,p5), axis=0) tr5 = np.concatenate((p1,p3,p5), axis=0) tr6 = np.concatenate((p1,p3,p4), axis=0) t1 = np.concatenate((p1,p3),axis=0) t2 = np.concatenate((p1,p4),axis=0) t3 = np.concatenate((p1,p5),axis=0) t4 = np.concatenate((p2,p3),axis=0) t5 = np.concatenate((p2,p4),axis=0) t6 = np.concatenate((p2,p5),axis=0) return tr1, tr2, tr3, tr4, tr5, tr6, t1, t2, 
t3, t4, t5, t6 tr1, tr2, tr3, tr4, tr5, tr6, t1, t2, t3, t4, t5, t6 = split_matrix_to_patient(feature_matrix) # This part is to compute the AUC when we use off-the-shell CNN features to classify. # This features are all high-dimensional features. import sklearn.svm as svm from sklearn.metrics import roc_curve, auc import seaborn as sns from scipy import interp sns.set_palette('muted') palette = np.array(sns.color_palette("hls", 10)) random_state = np.random.RandomState(0) clf = svm.SVC(gamma=0.001, C=100., probability=True, random_state=random_state) train = [tr1,tr2,tr3,tr4,tr5,tr6] test = [t1,t2,t3,t4,t5,t6] name = ['P1&3','P1&4','P1&5','P2&3','P2&4','P2&5'] mean_tpr3 = 0.0 mean_fpr3 = np.linspace(0, 1, 100) mean_accuracy = [] std_AUC = [] for i,val in enumerate(train): clf.fit(train[i][:,:-1],np.ravel(train[i][:,-1:])) probas = clf.predict_proba(test[i][:,:-1]) fpr, tpr, thresholds = roc_curve(test[i][:,-1:], probas[:,1]) mean_tpr3 += interp(mean_fpr3, fpr, tpr) mean_tpr3[0] = 0.0 roc_auc = auc(fpr, tpr) std_AUC.append(roc_auc) auccracy = clf.score(test[i][:,:-1], test[i][:,-1:]) mean_accuracy.append(auccracy) plt.plot(fpr, tpr, lw=2,c=palette[i], label='testing %s,AUC=%0.4f,acc=%0.4f' % (name[i],roc_auc,auccracy)) mpl.rc('figure',figsize=(15,8)) mean_tpr3 /= len(train) mean_tpr3[-1] = 1.0 mean_auc3 = auc(mean_fpr3, mean_tpr3) std_accuracy = np.std(mean_accuracy) std_AUC = np.std(std_AUC) mean_accuracy = np.sum(mean_accuracy)/len(mean_accuracy) # plt.plot(mean_fpr1, mean_tpr1, 'k:', # label='Mean ROC of $Caffe_6$ (AUC = %0.2f)' % mean_auc1, lw=3,c=palette[0]) # plt.plot(mean_fpr, mean_tpr, 'k-.', # label='Mean ROC of $Caffe_5$ (AUC = %0.2f)' % mean_auc, lw=3, c=palette[1]) plt.plot(mean_fpr3, mean_tpr3, 'k-', label='Mean ROC of fc6 layer (AUC = %0.2f$\pm$%0.2f, acc=%0.2f$\pm$%0.2f)' % (mean_auc3,std_AUC,mean_accuracy,std_accuracy), lw=3, c='black') plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck') plt.xlim([-0.05, 1.05]) plt.ylim([-0.05, 1.05]) 
plt.xlabel('False Positive Rate',fontsize=20) plt.ylabel('True Positive Rate',fontsize=20) plt.title('ROC curves for fc6 layer',fontsize=20) plt.legend(loc=4,prop={'size':11}) plt.grid('off') plt.savefig('/Users/Bato/Dropbox/tsne_python_5.25/Feature/CaffeNet/fine-tuning/OFF-the-shell/test1_3/fc6/fc6-AUC.png') plt.show() layers = ['conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8'] x = np.arange(len(layers)) mean_auc = [0.56, 0.77, 0.80, 0.83, 0.91, 0.81, 0.90, 0.91] mpl.rc('xtick',labelsize=15) mpl.rc('figure',figsize = (6,6)) plt.xticks(x, layers) plt.plot(x, mean_auc) plt.xlabel('Layers of CaffeNet') plt.ylabel('Mean AUC') plt.savefig('/Users/Bato/Dropbox/tsne_python_5.25/Feature/CaffeNet/all_mean_auc.png') plt.show() ```
github_jupyter
This notebook is part of the $\omega radlib$ documentation: https://docs.wradlib.org. Copyright (c) $\omega radlib$ developers. Distributed under the MIT License. See LICENSE.txt for more info. # Example for georeferencing a radar dataset ``` import wradlib.georef as georef import numpy as np import matplotlib.pyplot as pl import matplotlib as mpl from matplotlib.patches import Rectangle import warnings warnings.filterwarnings('ignore') try: get_ipython().magic("matplotlib inline") except: pl.ion() ``` **1st step:** Compute centroid coordinates and vertices of all radar bins in WGS84 (longitude and latitude). ``` # Define the polar coordinates and the site coordinates in lat/lon r = np.arange(1, 129) * 1000 az = np.linspace(0, 360, 361)[0:-1] # Site coordinates for different DWD radar locations (you choose) # LAT: drs: 51.12527778 ; fbg: 47.87444444 ; tur: 48.58611111 ; # muc: 48.3372222 # LON: drs: 13.76972222 ; fbg: 8.005 ; tur: 9.783888889 ; muc: 11.61277778 sitecoords = (9.7839, 48.5861) ``` We can now generate the polgon vertices of the radar bins - with **each vertex in lon/lat coordinates**. ``` proj_wgs84 = georef.epsg_to_osr(4326) polygons = georef.spherical_to_polyvert(r, az, 0, sitecoords, proj=proj_wgs84) polygons = polygons[..., 0:2] polygons.shape ``` ... or we can compute the corresponding centroids of all bins - - with **each centroid in lon/lat coordinates**. ``` cent_coords = georef.spherical_to_centroids(r, az, 0, sitecoords, proj=proj_wgs84) cent_coords = np.squeeze(cent_coords) cent_lon = cent_coords[..., 0] cent_lat = cent_coords[..., 1] ``` In order to understand how vertices and centroids correspond, we can plot them together. 
``` fig = pl.figure(figsize=(16, 16)) aspect = ( cent_lon.max()-cent_lon.min() ) / ( cent_lat.max()-cent_lat.min() ) ax = fig.add_subplot(121, aspect = aspect ) polycoll = mpl.collections.PolyCollection(polygons, closed=True, facecolors='None', linewidth=0.1) ax.add_collection(polycoll, autolim=True) #ax.plot(cent_lon, cent_lat, 'r+') pl.title('Zoom in\n(only possible for interactive plots).') ax.add_patch(Rectangle((sitecoords[0]+0.25, sitecoords[1]+0.25), 0.2, 0.2/aspect, edgecolor="red", facecolor="None", zorder=3)) pl.xlim(cent_lon.min(), cent_lon.max()) pl.ylim(cent_lat.min(), cent_lat.max()) ax = fig.add_subplot(122, aspect = aspect) polycoll = mpl.collections.PolyCollection(polygons, closed=True, facecolors='None') ax.add_collection(polycoll, autolim=True) ax.plot(cent_lon, cent_lat, 'r+') pl.title('Zoom into red box of left plot') pl.xlim(sitecoords[0]+0.25, sitecoords[0]+0.25+0.2) pl.ylim(sitecoords[1]+0.25, sitecoords[1]+0.25+0.2/aspect) ``` **2nd step:** Reproject the centroid coordinates to Gauss-Krueger Zone 3 (i.e. EPSG-Code 31467). ``` proj_gk3 = georef.epsg_to_osr(31467) x, y = georef.reproject(cent_lon, cent_lat, projection_target=proj_gk3) ```
github_jupyter
Lowercase + remove symbol + stopwords Hoax ``` filehoax = open('all_hoax.txt').read() hoaxlow = filehoax.lower() open('all_hoax_low.txt', 'w').write(hoaxlow) import re string = open('all_hoax_low.txt').read() new_str = re.sub('[^a-zA-Z\n]', ' ', string) open('all_hoax_nosym.txt', 'w').write(new_str) #removing stopwords f1 = open('all_hoax_nosym.txt', 'r') f2 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/stopwords_id.txt', 'r') f3 = open("all_hoax_stprmvd.txt","w") first_words=[] second_words=[] for line in f1: words = line.split() for w in words: first_words.append(w) for line in f2: w = line.split() for i in w: second_words.append(i) for word1 in first_words : for word2 in second_words: if word1 == word2: while True: try: first_words.remove(word2) except: break for word in first_words: f3.write(word) f3.write(' ') f1.close() f2.close() f3.close() ``` facts ``` filefacts = open('all_facts.txt').read() factslow = filefacts.lower() open('all_facts_low.txt', 'w').write(factslow) stringf = open('all_facts_low.txt').read() new_strf = re.sub('[^a-zA-Z\n]', ' ', stringf) open('all_facts_nosym.txt', 'w').write(new_strf) #removing stopwords f1f = open('all_facts_nosym.txt', 'r') f2f = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/stopwords_id.txt', 'r') f3f = open("all_facts_stprmvd.txt","w") first_wordsf=[] second_wordsf=[] for line in f1f: words = line.split() for w in words: first_wordsf.append(w) for line in f2f: w = line.split() for i in w: second_wordsf.append(i) for word1 in first_wordsf : for word2 in second_wordsf: if word1 == word2: while True: try: first_wordsf.remove(word2) except: break for word in first_wordsf: f3f.write(word) f3f.write(' ') f1f.close() f2f.close() f3f.close() ``` stemming hoax ``` from Sastrawi.Stemmer.StemmerFactory import StemmerFactory factory = StemmerFactory() stemmer = factory.create_stemmer() hoax = open('all_hoax_stprmvd.txt').read() hoax_stemmed = stemmer.stem(hoax) 
open('all_hoax_stemmed.txt', 'w').write(hoax_stemmed) ``` facts ``` fact = open('all_facts_stprmvd.txt').read() fact_stemmed = stemmer.stem(fact) open('all_facts_stemmed.txt', 'w').write(fact_stemmed) ```
github_jupyter
## Welcome to the BioProv tutorials! ### Tutorial index * <a href="./introduction.ipynb">Introduction to BioProv</a> * <a href="./w3c-prov.ipynb">W3C-PROV projects</a> * <a href="./workflows_and_presets.ipynb">Presets and Workflows</a> ## Introduction to BioProv BioProv is a library to record provenance information of bioinformatics workflows. If you work with genomics, you've probably encountered the situation where you have several different files for a number of biological samples, and each file concerns a certain aspect of your data. As you develop your analysis workflow, it is challenging to keep track of the **provenance** of your data: how, when and why each file was created and/or modified. There are many tools to aid in this task, such as [version control](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004668), [scientific workflow management systems](https://www.researchgate.net/profile/Jonathan_Mercier/publication/331422146_Evaluating_Workflow_Management_Systems_A_Bioinformatics_Use_Case/links/5d961df9299bf1c363f577b4/Evaluating-Workflow-Management-Systems-A-Bioinformatics-Use-Case.pdf), or even simply keeping a [tidy computational notebook](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004385). Although these practices are certainly helpful and [we recommend that you employ them](http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005510), it is not trivial to integrate and share provenance information across different people, research groups and even computing environments. A solution to this has been the development of [W3C-PROV](https://www.w3.org/TR/prov-overview/), a standard created by the W3C organization to facilitate the exchange of provenance data in the web. 
The W3C-PROV is composed of a set of [13 documents](https://www.w3.org/TR/prov-overview/), of which maybe the most pertinent to us is the [W3C-PROV-DM](https://www.w3.org/TR/2013/REC-prov-dm-20130430/), which describes a data model to represent provenance information. Although this model is widely implemented in a range of domain applications, including to [scientific workflows](http://www.vldb.org/pvldb/vol11/p2082-sousa.pdf), to the best of our knowledge, there is not yet a software tool specialized in the provenance of biological data structures and bioinformatics workflows. To extract provenance attributes of common file formats and common project organization patterns in bioinformatics, generic provenance extraction systems must be extended or customized, which can be a costly task for both the domain specialist and the developers of said systems. In order to fulfill this gap, we present BioProv, which aims to facilitate the provenance extraction in bioinformatics workflows by providing a Python library which integrates two open source libraries: [BioPython](https://academic.oup.com/bioinformatics/article/25/11/1422/330687) and [Prov](https://prov.readthedocs.io/en/latest/). ### How it works BioProv is **project-based**, where each **Project** contains a number of **Samples** which have associated **Files**. **Files** may also be associated directly with the **Project**, if they contain information about zero or multiple samples. BioProv also stores information about **Programs** used create new and modify existing **Files**. **Programs** may contain **Parameters** which will determine how they will be run. Once a **Program** has been run, information about the process will be stored as a **Run**. Therefore, these are the main classes of the BioProv library: * **Project** * **Sample** * **Files** * **Programs** * **Parameters** * **Runs** See an example on how to make a BioProv Project. A **Project** is composed of **Samples**. 
We are going to create a **Project** with only 1 **Sample**, which is going to be the genome sequence of [*Synechococcus elongatus* PCC 6301](https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=269084), a cyanobacteria. We are going to add attributes to this **Sample** using the *attributes* argument, which takes a Python `dict`. We are going to fill it with information about [its location on NCBI](https://www.ncbi.nlm.nih.gov/assembly/GCF_000010065.1). We then create a **Project** using a list of **Samples**. ``` import bioprov as bp sample = bp.Sample("Synechococcus_elongatus_PCC_6301", attributes={"ncbi_accession": "GCF_000010065.1", "ncbi_database": "assembly"} ) project = bp.Project(samples=[sample,], tag="introduction") ``` ### Adding Files and Programs Now we have a **Project** containing 1 **Sample**. However, our sample has no associated **Files** nor **Programs**. Let's add a **File** to our **Sample** and run a program on it. BioProv comes with an auxiliary `data` subpackage, which contains some preset data for us to experiment with. The `synechococcus_genome` variable is an instance of `pathlib.PosixPath`, which is used to hold file paths. ``` from bioprov.data import synechococcus_genome # We create a File object based on a path or a string representing a path. assembly_file = bp.File(synechococcus_genome, tag="assembly") # We can add this File to our Sample sample.add_files(assembly_file) sample.files ``` Now our instance of `Sample` holds a `File` object. Files can be accessed by the attribute `.files`, which is a dictionary composed of `{file.tag: File instance}`. We can now run a **Program** in our **Sample**. The sample's **Files** can be used as **Parameter** to the program. Programs are processed by the UNIX shell. Here we are setting up a program using UNIX's `grep` to count the occurrences of a particular kmer in our sample. We are then going to write the results to a new **File**. 
To write our program, we start with an instance of the **Program** class and add **Parameters** to it. ``` grep = bp.Program("grep") ``` Now that we have a **Program**, let's set the other variables we'll need. The first is an easy one, the kmer we are going to count: ``` kmer = "GATTACA" ``` Then, we need to create a new **File** in our **Sample**. The files associated with a **Sample** can be accessed in the `Sample.files` dictionary: ``` sample.files ``` We are going to create a new **File** in this dictionary. To set the path of this **File**, we can use attributes from the existing files. Each item in the `Sample.files` dictionary is an instance of `bioprov.File`, so there are several attributes which may be useful: ``` print(sample.files['assembly'].__class__, "\n") sample.files['assembly'].__dict__ ``` For example, if we want to create the new file in the same directory as the `'assembly'` file, we can use its `File.directory` attribute: ``` newfilepath = sample.files['assembly'].directory.joinpath(f"{kmer}_count.txt") ``` We can now set a new file in the `Sample.files` dictionary based on our kmer and the directory of the existing `'assembly'` **File**: ``` sample.files[f'{kmer}_count'] = bp.File(newfilepath) # which is the same as: sample.add_files(bp.File(newfilepath, tag=f"{kmer}_count")) ``` ### Running Programs We now have a new **File** with the name of our kmer in the `Sample.files` dictionary. Now, we must create the parameters to be added to the `grep` program. Parameters are strings which are added to the program's command-line. We can just put a string with all of our parameters, but creating them one by one and enclosing them with the `bp.Parameter` class will allow for querying later. Parameters are added to a **Program** with the `Program.add_parameter()` method. We then bind the **Program** to the **Sample** using the `Sample.add_programs()` method. 
It's important to remember these two methods: `Program.add_parameter()` and `Sample.add_programs()`. They allow BioProv to resolve internal relationships between each class. Finally, we check our command is correct: each `bioprov.Program` instance has a `Program.cmd` attribute which shows the exact command-line which will be run on the UNIX shell. ``` count = bp.Parameter("-c") kmer_param = bp.Parameter(f"'{kmer}'") in_file = bp.Parameter(str(sample.files['assembly'])) pipe_out = bp.Parameter(">", str(sample.files[f'{kmer}_count'])) for param in (count, kmer_param, in_file, pipe_out): grep.add_parameter(param) sample.add_programs(grep) grep.cmd ``` Now we want to run our program. We use the `Program.run()` method. ``` grep.run() ``` When we run a **Program**, we create a new **Run**. The `bioprov.Run` class holds information about a process, such as the start time and end time. Runs are stored in the `Program.runs` attribute: ``` grep.runs # Each Run has useful attributes such as stdout, stderr and status grep.runs['1'].__dict__ ``` ### Exporting Projects We now have a simple, yet complete, **BioProv Project**. We have a **Project** with 1 or more associated **Samples**, and 1 or more **Programs** have been **run** on the sample. We can export this **Project** as a JSON file. ``` project['Synechococcus_elongatus_PCC_6301'].files project.to_json("./introduction.json") ``` This project can be easily retrieved with the `bioprov.from_json()` function. ``` project = bp.from_json("./introduction.json") ``` This allows us to read and write Projects as JSON files, so we can store and/or query them. ### What about the provenance? We've learned the basics of BioProv, like the main classes, how to create **Projects**, **Samples**, and **Programs.** However, the point of BioProv is to be able to convert these elements to the W3C-PROV format. 
You can couple BioProv Projects (or any other BioProv object, for that matter) to W3C-PROV elements, allowing them to be exported as W3C-PROV documents, implemented with the [Prov](https://github.com/trungdong/prov) library. Continue to the <a href="./w3c-prov.ipynb">W3C-PROV tutorial.</a>
github_jupyter
# CarND Object Detection Lab Let's get started! ``` import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from PIL import Image from PIL import ImageDraw from PIL import ImageColor import time from scipy.stats import norm %matplotlib inline plt.style.use('ggplot') ``` ## MobileNets [*MobileNets*](https://arxiv.org/abs/1704.04861), as the name suggests, are neural networks constructed for the purpose of running very efficiently (high FPS, low memory footprint) on mobile and embedded devices. *MobileNets* achieve this with 3 techniques: 1. Perform a depthwise convolution followed by a 1x1 convolution rather than a standard convolution. The 1x1 convolution is called a pointwise convolution if it's following a depthwise convolution. The combination of a depthwise convolution followed by a pointwise convolution is sometimes called a separable depthwise convolution. 2. Use a "width multiplier" - reduces the size of the input/output channels, set to a value between 0 and 1. 3. Use a "resolution multiplier" - reduces the size of the original input, set to a value between 0 and 1. These 3 techniques reduce the size of cumulative parameters and therefore the computation required. Of course, generally models with more parameters achieve a higher accuracy. *MobileNets* are no silver bullet; while they perform very well, larger models will outperform them. ** *MobileNets* are designed for mobile devices, NOT cloud GPUs**. The reason we're using them in this lab is automotive hardware is closer to mobile or embedded devices than beefy cloud GPUs. ### Convolutions #### Vanilla Convolution Before we get into the *MobileNet* convolution block let's take a step back and recall the computational cost of a vanilla convolution. There are $N$ kernels of size $D_k * D_k$. Each of these kernels goes over the entire input which is a $D_f * D_f * M$ sized feature map or tensor (if that makes more sense).
The computational cost is: $$ D_f * D_f * M * N * D_k * D_k $$ Let $D_g * D_g$ be the size of the output feature map. Then a standard convolution takes in a $D_f * D_f * M$ input feature map and returns a $D_g * D_g * N$ feature map as output. ![Standard Convolution](assets/standard_conv.png) #### Depthwise Convolution A depthwise convolution acts on each input channel separately with a different kernel. $M$ input channels implies there are $M$ $D_k * D_k$ kernels. Also notice this results in $N$ being set to 1. If this doesn't make sense, think about the shape a kernel would have to be to act upon an inidividual channel. Computation cost: $$ D_f * D_f * M * D_k * D_k $$ ![Depthwise Convolution](assets/depthwise_conv.png) #### Pointwise Convolution A pointwise convolution performs a 1x1 convolution, it's the same as a vanilla convolution except the kernel size is $1 * 1$. Computation cost: $$ D_k * D_k * D_f * D_f * M * N = 1 * 1 * D_f * D_f * M * N = D_f * D_f * M * N $$ ![Pointwise Convolution](assets/pointwise_conv.png) Thus the total computation cost is for separable depthwise convolution: $$ D_f * D_f * M * D_k * D_k + D_f * D_f * M * N $$ which results in $\frac{1}{N} + \frac{1}{D_k^2}$ reduction in computation: $$ \frac {D_f * D_f * M * D_k * D_k + D_f * D_f * M * N} {D_f * D_f * M * N * D_k * D_k} = \frac {D_k^2 + N} {D_k^2*N} = \frac {1}{N} + \frac{1}{D_k^2} $$ *MobileNets* use a 3x3 kernel, so assuming a large enough $N$, separable depthwise convnets are ~9x more computationally efficient than vanilla convolutions! ### Width Multiplier The 2nd technique for reducing the computational cost is the "width multiplier" which is a hyperparameter inhabiting the range [0, 1] denoted here as $\alpha$. 
$\alpha$ reduces the number of input and output channels proportionally: $$ D_f * D_f * \alpha M * D_k * D_k + D_f * D_f * \alpha M * \alpha N $$ ### Resolution Multiplier The 3rd technique for reducing the computational cost is the "resolution multiplier" which is a hyperparameter inhabiting the range [0, 1] denoted here as $\rho$. $\rho$ reduces the size of the input feature map: $$ \rho D_f * \rho D_f * M * D_k * D_k + \rho D_f * \rho D_f * M * N $$ Combining the width and resolution multipliers results in a computational cost of: $$ \rho D_f * \rho D_f * a M * D_k * D_k + \rho D_f * \rho D_f * a M * a N $$ Training *MobileNets* with different values of $\alpha$ and $\rho$ will result in different speed vs. accuracy tradeoffs. The folks at Google have run these experiments, the result are shown in the graphic below: ![MobileNets Graphic](https://github.com/tensorflow/models/raw/master/research/slim/nets/mobilenet_v1.png) MACs (M) represents the number of multiplication-add operations in the millions. ### Exercise 1 - Implement Separable Depthwise Convolution In this exercise you'll implement a separable depthwise convolution block and compare the number of parameters to a standard convolution block. For this exercise we'll assume the width and resolution multipliers are set to 1. Docs: * [depthwise convolution](https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d) ``` def vanilla_conv_block(x, kernel_size, output_channels): """ Vanilla Conv -> Batch Norm -> ReLU """ x = tf.layers.conv2d( x, output_channels, kernel_size, (2, 2), padding='SAME') x = tf.layers.batch_normalization(x) return tf.nn.relu(x) # TODO: implement MobileNet conv block def mobilenet_conv_block(x, kernel_size, output_channels): """ Depthwise Conv -> Batch Norm -> ReLU -> Pointwise Conv -> Batch Norm -> ReLU """ pass ``` **[Sample solution](./exercise-solutions/e1.py)** Let's compare the number of parameters in each block. 
``` # constants but you can change them so I guess they're not so constant :) INPUT_CHANNELS = 32 OUTPUT_CHANNELS = 512 KERNEL_SIZE = 3 IMG_HEIGHT = 256 IMG_WIDTH = 256 with tf.Session(graph=tf.Graph()) as sess: # input x = tf.constant(np.random.randn(1, IMG_HEIGHT, IMG_WIDTH, INPUT_CHANNELS), dtype=tf.float32) with tf.variable_scope('vanilla'): vanilla_conv = vanilla_conv_block(x, KERNEL_SIZE, OUTPUT_CHANNELS) with tf.variable_scope('mobile'): mobilenet_conv = mobilenet_conv_block(x, KERNEL_SIZE, OUTPUT_CHANNELS) vanilla_params = [ (v.name, np.prod(v.get_shape().as_list())) for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'vanilla') ] mobile_params = [ (v.name, np.prod(v.get_shape().as_list())) for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'mobile') ] print("VANILLA CONV BLOCK") total_vanilla_params = sum([p[1] for p in vanilla_params]) for p in vanilla_params: print("Variable {0}: number of params = {1}".format(p[0], p[1])) print("Total number of params =", total_vanilla_params) print() print("MOBILENET CONV BLOCK") total_mobile_params = sum([p[1] for p in mobile_params]) for p in mobile_params: print("Variable {0}: number of params = {1}".format(p[0], p[1])) print("Total number of params =", total_mobile_params) print() print("{0:.3f}x parameter reduction".format(total_vanilla_params / total_mobile_params)) ``` Your solution should show the majority of the parameters in *MobileNet* block stem from the pointwise convolution. ## *MobileNet* SSD In this section you'll use a pretrained *MobileNet* [SSD](https://arxiv.org/abs/1512.02325) model to perform object detection. You can download the *MobileNet* SSD and other models from the [TensorFlow detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md). [Paper](https://arxiv.org/abs/1611.10012) describing comparing several object detection models. Alright, let's get into SSD! 
### Single Shot Detection (SSD) Many previous works in object detection involve more than one training phase. For example, the [Faster-RCNN](https://arxiv.org/abs/1506.01497) architecture first trains a Region Proposal Network (RPN) which decides which regions of the image are worth drawing a box around. RPN is then merged with a pretrained model for classification (classifies the regions). The image below is an RPN: ![Faster-RCNN Visual](./assets/faster-rcnn.png) The SSD architecture is a single convolutional network which learns to predict bounding box locations and classify the locations in one pass. Put differently, SSD can be trained end to end while Faster-RCNN cannot. The SSD architecture consists of a base network followed by several convolutional layers: ![SSD Visual](./assets/ssd_architecture.png) **NOTE:** In this lab the base network is a MobileNet (instead of VGG16). #### Detecting Boxes SSD operates on feature maps to predict bounding box locations. Recall a feature map is of size $D_f * D_f * M$. For each feature map location $k$ bounding boxes are predicted. Each bounding box carries with it the following information: * 4 corner bounding box **offset** locations $(cx, cy, w, h)$ * $C$ class probabilities $(c_1, c_2, ..., c_p)$ SSD **does not** predict the shape of the box, rather just where the box is. The $k$ bounding boxes each have a predetermined shape. This is illustrated in the figure below: ![](./assets/ssd_feature_maps.png) The shapes are set prior to actual training. For example, in figure (c) in the above picture there are 4 boxes, meaning $k$ = 4. ### Exercise 2 - SSD Feature Maps It would be a good exercise to read the SSD paper prior to answering the following questions.
***Q: Why does SSD use several differently sized feature maps to predict detections?*** A: Your answer here **[Sample answer](./exercise-solutions/e2.md)** The current approach leaves us with thousands of bounding box candidates, clearly the vast majority of them are nonsensical. ### Exercise 3 - Filtering Bounding Boxes ***Q: What are some ways in which we can filter nonsensical bounding boxes?*** A: Your answer here **[Sample answer](./exercise-solutions/e3.md)** #### Loss With the final set of matched boxes we can compute the loss: $$ L = \frac {1} {N} * ( L_{class} + L_{box}) $$ where $N$ is the total number of matched boxes, $L_{class}$ is a softmax loss for classification, and $L_{box}$ is an L1 smooth loss representing the error of the matched boxes with the ground truth boxes. L1 smooth loss is a modification of L1 loss which is more robust to outliers. In the event $N$ is 0 the loss is set to 0. ### SSD Summary * Starts from a base model pretrained on ImageNet. * The base model is extended by several convolutional layers. * Each feature map is used to predict bounding boxes. Diversity in feature map size allows object detection at different resolutions. * Boxes are filtered by IoU metrics and hard negative mining. * Loss is a combination of classification (softmax) and detection (smooth L1) * Model can be trained end to end. ## Object Detection Inference In this part of the lab you'll detect objects using pretrained object detection models. You can download the pretrained models from the [model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md). ``` # Frozen inference graph files. NOTE: change the path to where you saved the models.
SSD_GRAPH_FILE = 'ssd_mobilenet_v1_coco_11_06_2017/frozen_inference_graph.pb' RFCN_GRAPH_FILE = 'rfcn_resnet101_coco_11_06_2017/frozen_inference_graph.pb' FASTER_RCNN_GRAPH_FILE = 'faster_rcnn_inception_resnet_v2_atrous_coco_11_06_2017/frozen_inference_graph.pb' ``` Below are utility functions. The main purpose of these is to draw the bounding boxes back onto the original image. ``` # Colors (one for each class) cmap = ImageColor.colormap print("Number of colors =", len(cmap)) COLOR_LIST = sorted([c for c in cmap.keys()]) # # Utility funcs # def filter_boxes(min_score, boxes, scores, classes): """Return boxes with a confidence >= `min_score`""" n = len(classes) idxs = [] for i in range(n): if scores[i] >= min_score: idxs.append(i) filtered_boxes = boxes[idxs, ...] filtered_scores = scores[idxs, ...] filtered_classes = classes[idxs, ...] return filtered_boxes, filtered_scores, filtered_classes def to_image_coords(boxes, height, width): """ The original box coordinate output is normalized, i.e [0, 1]. This converts it back to the original coordinate based on the image size. """ box_coords = np.zeros_like(boxes) box_coords[:, 0] = boxes[:, 0] * height box_coords[:, 1] = boxes[:, 1] * width box_coords[:, 2] = boxes[:, 2] * height box_coords[:, 3] = boxes[:, 3] * width return box_coords def draw_boxes(image, boxes, classes, thickness=4): """Draw bounding boxes on the image""" draw = ImageDraw.Draw(image) for i in range(len(boxes)): bot, left, top, right = boxes[i, ...] 
class_id = int(classes[i]) color = COLOR_LIST[class_id] draw.line([(left, top), (left, bot), (right, bot), (right, top), (left, top)], width=thickness, fill=color) def load_graph(graph_file): """Loads a frozen inference graph""" graph = tf.Graph() with graph.as_default(): od_graph_def = tf.GraphDef() with tf.gfile.GFile(graph_file, 'rb') as fid: serialized_graph = fid.read() od_graph_def.ParseFromString(serialized_graph) tf.import_graph_def(od_graph_def, name='') return graph ``` Below we load the graph and extract the relevant tensors using [`get_tensor_by_name`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name). These tensors reflect the input and outputs of the graph, or least the ones we care about for detecting objects. ``` detection_graph = load_graph(SSD_GRAPH_FILE) # detection_graph = load_graph(RFCN_GRAPH_FILE) # detection_graph = load_graph(FASTER_RCNN_GRAPH_FILE) # The input placeholder for the image. # `get_tensor_by_name` returns the Tensor with the associated name in the Graph. image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') # Each box represents a part of the image where a particular object was detected. detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0') # Each score represent how level of confidence for each of the objects. # Score is shown on the result image, together with the class label. detection_scores = detection_graph.get_tensor_by_name('detection_scores:0') # The classification of the object (integer id). detection_classes = detection_graph.get_tensor_by_name('detection_classes:0') ``` Run detection and classification on a sample image. ``` # Load a sample image. image = Image.open('./assets/sample1.jpg') image_np = np.expand_dims(np.asarray(image, dtype=np.uint8), 0) with tf.Session(graph=detection_graph) as sess: # Actual detection. 
(boxes, scores, classes) = sess.run([detection_boxes, detection_scores, detection_classes], feed_dict={image_tensor: image_np}) # Remove unnecessary dimensions boxes = np.squeeze(boxes) scores = np.squeeze(scores) classes = np.squeeze(classes) confidence_cutoff = 0.8 # Filter boxes with a confidence score less than `confidence_cutoff` boxes, scores, classes = filter_boxes(confidence_cutoff, boxes, scores, classes) # The current box coordinates are normalized to a range between 0 and 1. # This converts the coordinates actual location on the image. width, height = image.size box_coords = to_image_coords(boxes, height, width) # Each class with be represented by a differently colored box draw_boxes(image, box_coords, classes) plt.figure(figsize=(12, 8)) plt.imshow(image) ``` ## Timing Detection The model zoo comes with a variety of models, each its benefits and costs. Below you'll time some of these models. The general tradeoff being sacrificing model accuracy for seconds per frame (SPF). ``` def time_detection(sess, img_height, img_width, runs=10): image_tensor = sess.graph.get_tensor_by_name('image_tensor:0') detection_boxes = sess.graph.get_tensor_by_name('detection_boxes:0') detection_scores = sess.graph.get_tensor_by_name('detection_scores:0') detection_classes = sess.graph.get_tensor_by_name('detection_classes:0') # warmup gen_image = np.uint8(np.random.randn(1, img_height, img_width, 3)) sess.run([detection_boxes, detection_scores, detection_classes], feed_dict={image_tensor: gen_image}) times = np.zeros(runs) for i in range(runs): t0 = time.time() sess.run([detection_boxes, detection_scores, detection_classes], feed_dict={image_tensor: image_np}) t1 = time.time() times[i] = (t1 - t0) * 1000 return times with tf.Session(graph=detection_graph) as sess: times = time_detection(sess, 600, 1000, runs=10) # Create a figure instance fig = plt.figure(1, figsize=(9, 6)) # Create an axes instance ax = fig.add_subplot(111) plt.title("Object Detection Timings") 
plt.ylabel("Time (ms)") # Create the boxplot plt.style.use('fivethirtyeight') bp = ax.boxplot(times) ``` ### Exercise 4 - Model Tradeoffs Download a few models from the [model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) and compare the timings. ## Detection on a Video Finally run your pipeline on [this short video](https://s3-us-west-1.amazonaws.com/udacity-selfdrivingcar/advanced_deep_learning/driving.mp4). ``` # Import everything needed to edit/save/watch video clips from moviepy.editor import VideoFileClip from IPython.display import HTML HTML(""" <video width="960" height="600" controls> <source src="{0}" type="video/mp4"> </video> """.format('driving.mp4')) ``` ### Exercise 5 - Object Detection on a Video Run an object detection pipeline on the above clip. ``` clip = VideoFileClip('driving.mp4') # TODO: Complete this function. # The input is an NumPy array. # The output should also be a NumPy array. def pipeline(img): pass ``` **[Sample solution](./exercise-solutions/e5.py)** ``` with tf.Session(graph=detection_graph) as sess: image_tensor = sess.graph.get_tensor_by_name('image_tensor:0') detection_boxes = sess.graph.get_tensor_by_name('detection_boxes:0') detection_scores = sess.graph.get_tensor_by_name('detection_scores:0') detection_classes = sess.graph.get_tensor_by_name('detection_classes:0') new_clip = clip.fl_image(pipeline) # write to file new_clip.write_videofile('result.mp4') HTML(""" <video width="960" height="600" controls> <source src="{0}" type="video/mp4"> </video> """.format('result.mp4')) ``` ## Further Exploration Some ideas to take things further: * Finetune the model on a new dataset more relevant to autonomous vehicles. Instead of loading the frozen inference graph you'll load the checkpoint. * Optimize the model and get the FPS as low as possible. * Build your own detector. There are several base model pretrained on ImageNet you can choose from. 
[Keras](https://keras.io/applications/) is probably the quickest way to get setup in this regard.
github_jupyter
# Distributed KernelSHAP ## Introduction In this example, KernelSHAP is used to explain a batch of instances on multiple cores. To run this example, please run ``pip install alibi[ray]`` first. ``` import pprint import shap import ray shap.initjs() import matplotlib.pyplot as plt import numpy as np import pandas as pd from alibi.explainers import KernelShap from alibi.datasets import fetch_adult from collections import defaultdict from scipy.special import logit from sklearn.compose import ColumnTransformer from sklearn.impute import SimpleImputer from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, confusion_matrix, plot_confusion_matrix from sklearn.model_selection import cross_val_score, train_test_split from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler, OneHotEncoder from timeit import default_timer as timer from typing import Dict, List, Tuple ``` ## Data preparation ### Load and split The `fetch_adult` function returns a `Bunch` object containing the features, the targets, the feature names and a mapping of categorical variables to numbers. ``` adult = fetch_adult() adult.keys() data = adult.data target = adult.target target_names = adult.target_names feature_names = adult.feature_names category_map = adult.category_map ``` Note that for your own datasets you can use our utility function `gen_category_map` to create the category map. ``` from alibi.utils.data import gen_category_map np.random.seed(0) data_perm = np.random.permutation(np.c_[data, target]) data = data_perm[:,:-1] target = data_perm[:,-1] idx = 30000 X_train,y_train = data[:idx,:], target[:idx] X_test, y_test = data[idx+1:,:], target[idx+1:] ``` ### Create feature transformation pipeline Create feature pre-processor. Needs to have 'fit' and 'transform' methods. Different types of pre-processing can be applied to all or part of the features. 
In the example below we will standardize ordinal features and apply one-hot-encoding to categorical features. Ordinal features: ``` ordinal_features = [x for x in range(len(feature_names)) if x not in list(category_map.keys())] ordinal_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('scaler', StandardScaler())]) ``` Categorical features: ``` categorical_features = list(category_map.keys()) categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('onehot', OneHotEncoder(drop='first', handle_unknown='error'))]) ``` Note that in order to be able to interpret the coefficients corresponding to the categorical features, the option `drop='first'` has been passed to the `OneHotEncoder`. This means that for a categorical variable with `n` levels, the length of the code will be `n-1`. This is necessary in order to avoid introducing feature multicolinearity, which would skew the interpretation of the results. For more information about the issue about multicolinearity in the context of linear modelling see [[1]](#References). <a id='src_1'></a> Combine and fit: ``` preprocessor = ColumnTransformer(transformers=[('num', ordinal_transformer, ordinal_features), ('cat', categorical_transformer, categorical_features)]) preprocessor.fit(X_train) ``` ### Preprocess the data ``` X_train_proc = preprocessor.transform(X_train) X_test_proc = preprocessor.transform(X_test) ``` Applying the `sklearn` processing pipeline modifies the column order of the original dataset. 
The new feature ordering is necessary in order to correctly plot visualisations
To do so, the dimensionality of each categorical variable is extracted from the preprocessor: ``` # get feature names for the encoded categorical features ohe = preprocessor.transformers_[1][1].named_steps['onehot'] fts = [feature_names[x] for x in categorical_features] cat_enc_feat_names = ohe.get_feature_names(fts) # compute encoded dimension; -1 as ohe is setup with drop='first' feat_enc_dim = [len(cat_enc) - 1 for cat_enc in ohe.categories_] d = {'feature_names': fts , 'encoded_dim': feat_enc_dim} df = pd.DataFrame(data=d) print(df) total_dim = df['encoded_dim'].sum() print("The dimensionality of the encoded categorical features is {}.".format(total_dim)) assert total_dim == len(cat_enc_feat_names) ``` ### Select a subset of test instances to explain ``` def split_set(X, y, fraction, random_state=0): """ Given a set X, associated labels y, splits a fraction y from X. """ _, X_split, _, y_split = train_test_split(X, y, test_size=fraction, random_state=random_state, ) print("Number of records: {}".format(X_split.shape[0])) print("Number of class {}: {}".format(0, len(y_split) - y_split.sum())) print("Number of class {}: {}".format(1, y_split.sum())) return X_split, y_split fraction_explained = 0.05 X_explain, y_explain = split_set(X_test, y_test, fraction_explained, ) X_explain_proc = preprocessor.transform(X_explain) ``` Create a version of the dataset to be explained that has the same feature ordering as that of the feature matrix after applying the preprocessing (for plotting purposes). 
``` perm_X_explain, _ = permute_columns(X_explain, feature_names, perm_feat_names) ``` ## Fit a binary logistic regression classifier to the Adult dataset ### Training ``` classifier = LogisticRegression(multi_class='multinomial', random_state=0, max_iter=500, verbose=0, ) classifier.fit(X_train_proc, y_train) ``` ### Model assessment ``` y_pred = classifier.predict(X_test_proc) cm = confusion_matrix(y_test, y_pred) title = 'Confusion matrix for the logistic regression classifier' disp = plot_confusion_matrix(classifier, X_test_proc, y_test, display_labels=target_names, cmap=plt.cm.Blues, normalize=None, ) disp.ax_.set_title(title) print('Test accuracy: ', accuracy_score(y_test, classifier.predict(X_test_proc))) ``` ## Running KernelSHAP in sequential mode A background dataset is selected. ``` start_example_idx = 0 stop_example_idx = 100 background_data = slice(start_example_idx, stop_example_idx) ``` Groups are specified by creating a list where each sublist contains the column indices that a given variable occupies in the preprocessed feature matrix. ``` def make_groups(num_feats_names: List[str], cat_feats_names: List[str], feat_enc_dim: List[int]) -> Tuple[List[str], List[List[int]]]: """ Given a list with numerical feat. names, categorical feat. names and a list specifying the lengths of the encoding for each cat. varible, the function outputs a list of group names, and a list of the same len where each entry represents the column indices that the corresponding categorical feature """ group_names = num_feats_names + cat_feats_names groups = [] cat_var_idx = 0 for name in group_names: if name in num_feats_names: groups.append(list(range(len(groups), len(groups) + 1))) else: start_idx = groups[-1][-1] + 1 if groups else 0 groups.append(list(range(start_idx, start_idx + feat_enc_dim[cat_var_idx] ))) cat_var_idx += 1 return group_names, groups def sparse2ndarray(mat, examples=None): """ Converts a scipy.sparse.csr.csr_matrix to a numpy.ndarray. 
If specified, examples is a slice object which selects a number of rows from mat, and only the respective slice is converted.
np.round(np.mean(raw_times), 3) distrib_min_times[n_cpu] = np.round(np.min(raw_times), 3) distrib_max_times[n_cpu] = np.round(np.max(raw_times), 3) ray.shutdown() ``` ## Results analysis ### Timing ``` print(f"Distributed average times for {n_runs} runs (n_cpus: avg_time):") print(distrib_avg_times) print("") print(f"Sequential average time for {n_runs} runs:") print(np.round(np.mean(s_times), 3), "s") ``` Running KernelSHAP in a distributed fashion improves the runtime as the results above show. However, the results above should not be interpreted as performance measurements since they were not run in a controlled environment. See our [blog post](https://www.seldon.io/how-seldons-alibi-and-ray-make-model-explainability-easy-and-scalable/) for a more thorough analysis. ### Explanations comparison ``` cls = 0 # class of prediction explained run = 1 # which run to compare the result for # sequential shap.summary_plot(s_explanations[run][cls], perm_X_explain, perm_feat_names) # distributed n_cpu = 3 shap.summary_plot(d_explanations[n_cpu][run][cls], perm_X_explain, perm_feat_names) ``` Comparing the results above one sees that the running the algorithm across multiple cores gave identical results, indicating its correctness. ## Conclusion This example showed that batches of explanations can be explained much faster by simply passing ``distributed_opts={'n_cpus': k}`` to the ``KernelShap`` constructor (here ``k`` is the number of physical cores available). The significant runtime reduction makes it possible to explain larger datasets faster and combine shap values estimated with KernelSHAP into global explanations or use larger background datasets.
github_jupyter
# Solutions ## Question 1 > `1`. Simplify the following expressions: > $\frac{3}{\sqrt{3}}$: ``` import sympy as sym expression = sym.S(3) / sym.sqrt(3) sym.simplify(expression) ``` > $\frac{2 ^ {78}}{2 ^ {12}2^{-32}}$: ``` sym.S(2) ** 78 / (sym.S(2) ** 12 * sym.S(2) ** (-32)) ``` > $8^0$: ``` sym.S(8) ** 0 ``` > $a^4b^{-2}+a^{3}b^{2}+a^{4}b^0$: ``` a = sym.Symbol("a") b = sym.Symbol("b") sym.factor(a ** 4 * b ** (-2) + a ** 3 * b ** 2 + a ** 4 * b ** 0) ``` ## Question 2 > `2`. Solve the following equations: > $x + 3 = -1$: ``` x = sym.Symbol("x") equation = sym.Eq(x + 3, -1) sym.solveset(equation, x) ``` > $3 x ^ 2 - 2 x = 5$: ``` equation = sym.Eq(3 * x ** 2 - 2 * x, 5) sym.solveset(equation, x) ``` > $x (x - 1) (x + 3) = 0$: ``` equation = sym.Eq(x * (x - 1) * (x + 3), 0) sym.solveset(equation, x) ``` > $4 x ^3 + 7x - 24 = 1$: ``` equation = sym.Eq(4 * x ** 3 + 7 * x - 24, 1) sym.solveset(equation, x) ``` ## Question 3 > `3`. Consider the equation: $x ^ 2 + 4 - y = \frac{1}{y}$: > Find the solution to this equation for $x$. ``` y = sym.Symbol("y") equation = sym.Eq(x ** 2 + 4 - y, 1 / y) solution = sym.solveset(equation, x) solution ``` > Obtain the specific solution when $y = 5$. Do this in two ways: > substitute the value in to your equation and substitute the value in to > your solution. ``` solution.subs({y: 5}) solution = sym.solveset(equation.subs({y: 5}), x) solution ``` ## Question 4 > `4`. Consider the quadratic: $f(x)=4x ^ 2 + 16x + 25$: > Calculate the discriminant of the quadratic equation $4x ^ 2 + 16x + 25 = > 0$. What does this tell us about the solutions to the equation? What > does this tell us about the graph of $f(x)$? ``` quadratic = 4 * x ** 2 + 16 * x + 25 sym.discriminant(quadratic) ``` This is negative so we know that the equation does not have any real solutions and hence the graph does not cross the x-axis. Since the coefficient of $x^2$ is positive it means that the graph is above the $y=0$ line. 
> By completing the square, show that the minimum point of $f(x)$ is > $\left(-2, 9\right)$ ``` a, b, c = sym.Symbol("a"), sym.Symbol("b"), sym.Symbol("c") completed_square = a * (x - b) ** 2 + c sym.expand(completed_square) ``` This gives $a=4$. ``` completed_square = completed_square.subs({a: 4}) sym.expand(completed_square) ``` Comparing the coefficients of $x$ we have the equation: $$ - 8 b = 16 $$ ``` equation = sym.Eq(-8 * b, 16) sym.solveset(equation, b) ``` Substituting: ``` completed_square = completed_square.subs({b: -2}) sym.expand(completed_square) ``` Comparing the coefficients of $x^0$ this gives: $$c+16=25$$ ``` equation = sym.Eq(c + 16, 25) sym.solveset(equation, c) completed_square = completed_square.subs({c: 9}) completed_square ``` The lowest value of $f(x)$ is for $x=-2$ which gives: $f(-2)=9$ as expected. ## Question 5 > `5`. Consider the quadratic: $f(x)=-3x ^ 2 + 24x - 97$: > Calculate the discriminant of the quadratic equation $-3x ^ 2 + 24x - 97 = > 0$. What does this tell us about the solutions to the equation? What > does this tell us about the graph of $f(x)$? ``` quadratic = -3 * x ** 2 + 24 * x - 97 sym.discriminant(quadratic) ``` This is negative so we know that the equation does not have any real solutions and hence the graph does not cross the x-axis. Since the coefficient of $x^2$ is negative it means that the graph is below the $y=0$ line. > By completing the square, show that the maximum point of $f(x)$ is > $\left(4, -49\right)$ ``` a, b, c = sym.Symbol("a"), sym.Symbol("b"), sym.Symbol("c") completed_square = a * (x - b) ** 2 + c sym.expand(completed_square) ``` This gives $a=-3$. 
``` completed_square = completed_square.subs({a: -3}) sym.expand(completed_square) ``` Comparing the coefficients of $x$ we have the equation: $$ 6 b = 24 $$ ``` equation = sym.Eq(6 * b, 24) sym.solveset(equation, b) ``` Substituting: ``` completed_square = completed_square.subs({b: 4}) sym.expand(completed_square) ``` Comparing the coefficients of $x^0$ this gives: $$c-48=-97$$ ``` equation = sym.Eq(c - 48, -97) sym.solveset(equation, c) completed_square = completed_square.subs({c: -49}) completed_square ``` The highest value of $f(x)$ is for $x=4$ which gives: $f(4)=-49$ as expected. ## Question 6 `6`. Consider the function $f(x) = x^ 2 + a x + b$. > Given that $f(0) = 0$ and $f(3) = 0$ obtain the values of $a$ and $b$. Substituting 0 in to $f$ gives: ``` expression = x ** 2 + a * x + b expression.subs({x: 0}) ``` This implies that $b=0$. Substituting back in to the expression: ``` expression = expression.subs({b: 0}) expression ``` Substituting $x=3$ in to this expression gives: ``` expression.subs({x: 3}) ``` This gives the equation: $$ 3 a + 9 = 0 $$ ``` sym.solveset(expression.subs({x: 3}), a) ``` Our expression is thus: ``` expression = expression.subs({a: -3}) expression ``` > By completing the square confirm that graph of $f(x)$ has a line of symmetry > at $x=\frac{3}{2}$ ``` completed_square = a * (x - b) ** 2 + c sym.expand(completed_square) ``` We see that $a=1$ and. Substituting: ``` completed_square = completed_square.subs({a: 1}) sym.expand(completed_square) ``` This gives: $$ -2b=-3 $$ ``` equation = sym.Eq(-2 * b, -3) sym.solveset(equation, b) ``` Substituting: ``` completed_square = completed_square.subs({b: sym.S(3) / 2}) sym.expand(completed_square) ``` Which gives: $$ c + 9 / 4 = 0 $$ ``` equation = sym.Eq(c + sym.S(9) / 4, 0) sym.solveset(equation, c) ``` Substituting: ``` completed_square = completed_square.subs({c: -sym.S(9) / 4}) completed_square ``` Thus $x=3/2$ is a line of symmetry.
github_jupyter
############################################################ ### This file is used to generate Table 1-3, Fig 1 ### ############################################################ - [Verify Assumption 1](#Verify-Assumption-1) - [Table 1](#Table-1) - [Table 2](#Table-2) - [Verify Assumption 2](#Verify-Assumption-2) - [Table 3](#Table-3) - [Left plot in Figure 1](#Left-plot-in-Figure-1) - [Middle plot in Figure 1](#Middle-plot-in-Figure-1) - [Right plot in Figure 1](#Right-plot-in-Figure-1) ``` import os import numpy as np from scipy.stats import norm from scipy.stats import gaussian_kde as kde import ipywidgets as widgets from ipywidgets import interact, interactive, fixed, interact_manual import matplotlib.pyplot as plt %matplotlib inline ####### Plot Formatting ###### plt.rc('lines', linewidth = 4) plt.rc('xtick', labelsize = 18) plt.rc('ytick', labelsize = 18) plt.rc('legend',fontsize=20) # plt.rcParams["font.family"] = "serif" plt.rcParams['axes.labelsize'] = 20 plt.rcParams['axes.titlesize'] = 28 plt.rcParams['lines.markersize'] = 6 plt.rcParams['figure.figsize'] = (7.0, 5.0) ``` An almost example * $\Lambda=[-1,1]$. * $Q(\lambda) = \lambda^p$ for $p=5$. * $\mathcal{D} = Q(\Lambda) = [-1,1]$. 
___ * $\pi_{\Lambda}^{init} \sim U([-1,1])$ * $\pi_{\mathcal{D}}^{obs} \sim N(0.25,0.1^2)$ * $\pi_{\mathcal{D}}^{Q(init)}$ ``` def QoI(lam,p): '''Defing a QoI mapping function''' q = lam**p return q def QoI_approx(lam,p,n): '''Definie a QoI approximation with n+2 knots for piecewise linear spline''' lam_knots = np.linspace(-1,1,n+2) q_knots = QoI(lam_knots,p) q = np.interp(lam, lam_knots, q_knots) return q # number of samples from init and observed mean (mu) and std (sigma) N, mus, sigma = int(1E5), [0.5,0.25,1], 0.1 lam = np.random.uniform(low=-1,high=1,size=N) # sample set of the init qvals_nonlinear = QoI(lam,5) # Evaluate lam^5 samples # Estimate the push-forward density for the QoI q_nonlinear_kde = kde(qvals_nonlinear) ``` # Verify Assumption 1 ``` #### Use plot to show approximate pushforward ##### fig = plt.figure() J = int(1E3) qplot = np.linspace(-1,1, num=100) lam = np.random.uniform(low=-1,high=1,size=J) # sample set of the init # Evaluate the QoI map on this init sample set #qvals_nonlinear = QoI(lam,5) # Evaluate lam^5 samples for n in [1,2,4,8,16]: qvals_approx_nonlinear = QoI_approx(lam,5,n) # Evaluate lam^5 samples # Estimate the push-forward density for the QoI q_nonlinear_kde1 = kde( qvals_approx_nonlinear ) pf_init_approx_plot = plt.plot(qplot,q_nonlinear_kde1(qplot),'--', linewidth=4, label="$n=$" + str(n)) plt.title('J = %1.0E' %(J)) plt.ylim([0,6.6]) plt.legend(); # fig.savefig("images/concept_example_pf_approxes_J_1E3") # Failing Lipschitz as a function of approximate map fig = plt.figure() def plot_func(n,J): fig.clear() qplot = np.linspace(-1,1, num=100) #observed_plot = plt.plot(qplot,norm.pdf(qplot, loc=mu, scale=sigma), 'r-', linewidth=4, label="$\pi_\mathcal{D}^{obs}$") pf_init_plot = plt.plot(qplot,1/10*np.abs(qplot)**(-4/5),'b-', linewidth=4, label="$\pi_\mathcal{D}^{Q(init)}$") np.random.seed(123456) lam = np.random.uniform(low=-1,high=1,size=J) # sample set of the init # Evaluate the QoI map on this init sample set 
qvals_approx_nonlinear = QoI_approx(lam,5,n) # Evaluate lam^5 samples # Estimate the push-forward density for the QoI q_nonlinear_kde = kde( qvals_approx_nonlinear ) pf_init_approx_plot = plt.plot(qplot,q_nonlinear_kde(qplot),'b--', linewidth=4, label="$\pi_{\mathcal{D},N}^{Q(init)}$") plt.title('Lipschitz const. = %4.2f and Bound = %2.2f' %(np.max(np.abs(np.gradient(q_nonlinear_kde(qplot), qplot))), np.max(q_nonlinear_kde(qplot)))); interact(plot_func, n = widgets.IntSlider(value=int(1),min=int(0),max=int(1E2),step=1), J = widgets.IntSlider(value=int(1E3), min=int(1E3), max=int(1E5), step=int(1E3))) ##### Generate data in Table 1 and 2 ##### def assumption1(n, J): np.random.seed(123456) x = np.linspace(-1, 1, 100) lam = np.random.uniform(low=-1, high=1, size=J) # sample set of the init qvals_approx_nonlinear = QoI_approx(lam, 5, n) # Evaluate lam^5 samples q_nonlinear_kde = kde(qvals_approx_nonlinear) return np.round(np.max(np.abs(np.gradient(q_nonlinear_kde(x), x))), 2), np.round(np.max(q_nonlinear_kde(x)), 2) size_J = [int(1E3), int(1E4), int(1E5)] degree_n = [1, 2, 4, 8, 16] Bound_matrix, Lip_Bound_matrix = np.zeros((3, 5)), np.zeros((3, 5)) for i in range(3): for j in range(5): n, J = degree_n[j], size_J[i] Lip_Bound_matrix[i, j] = assumption1(n, J)[0] Bound_matrix[i, j] = assumption1(n, J)[1] ``` ## Table 1 ``` ########################################### ############## Table 1 ################## ########################################### print('Table 1') print('Bound under certain n and J values') print(Bound_matrix) ``` ## Table 2 ``` ########################################### ############## Table 2 ################## ########################################### print('Table 2') print('Lipschitz bound under certain n and J values') print(Lip_Bound_matrix) ``` # Verify Assumption 2 ``` #### Use plot to show the difference between the observed and approximate pushforward ##### #### m: case number; n: approximate map index; J: sample size #### fig = 
plt.figure() def plot_func(n, J, m): fig.clear() qplot = np.linspace(-1,1, num=100) observed_plot = plt.plot(qplot,norm.pdf(qplot, loc=mus[m], scale=sigma), 'r-', linewidth=4, label="$\pi_\mathcal{D}^{obs}$") np.random.seed(123456) lam = np.random.uniform(low=-1,high=1,size=J) # sample set of the init # Evaluate the QoI map on this init sample set qvals_approx_nonlinear = QoI_approx(lam,5,n) # Evaluate lam^5 samples # Estimate the push-forward density for the QoI q_nonlinear_kde = kde(qvals_approx_nonlinear) # Plot Aproximate push forward pf_init_approx_plot = plt.plot(qplot,q_nonlinear_kde(qplot),'b--', linewidth=4, label="$\pi_{\mathcal{D}}^{Q_n(init)}$") obs_vals_nonlinear = norm.pdf(qvals_approx_nonlinear, loc=mus[m], scale=sigma) # Compute r r = np.divide(obs_vals_nonlinear,q_nonlinear_kde(qvals_approx_nonlinear)) plt.title('$\mathbb{E}(r) =$ %3.2f' %(np.mean(r))) plt.legend(); interact(plot_func, n = widgets.IntSlider(value=int(1),min=int(0),max=int(1E2),step=1), J = widgets.IntSlider(value=int(1E4), min=int(1E3), max=int(1E5), step=int(1E3)), m = widgets.IntSlider(vlaue=int(0),min=int(0),max=int(2), step=1)) # The expected r value def Meanr(n, J, m): ''' n: index of approximating mapping J: sample size of sample generated from parameter space m: index of mu ''' np.random.seed(123456) lam = np.random.uniform(low=-1, high=1, size=J) # sample set of the init qvals_approx_nonlinear = QoI_approx(lam, 5, n) # Evaluate lam^5 samples q_nonlinear_kde = kde(qvals_approx_nonlinear) obs_vals_nonlinear = norm.pdf(qvals_approx_nonlinear, loc=mus[m], scale=sigma) r = np.divide(obs_vals_nonlinear, q_nonlinear_kde(qvals_approx_nonlinear)) return np.round(np.mean(r), 2) meanr_matrix = np.zeros((3, 5)) for i in range(3): for j in range(5): J = int(1E4) meanr_matrix[i, j] = Meanr(degree_n[j], J, i) ``` ## Table 3 ``` ########################################### ############## Table 3 ################## ########################################### print('Table 3') print('Expected 
ratio for verifying Assumption 2') print(meanr_matrix) #### To make it cleaner, create Directory "images" to store all the figures #### imagepath = os.path.join(os.getcwd(),"images") os.makedirs(imagepath,exist_ok=True) fig = plt.figure() def plot_all(i): fig.clear() case = ['Case I', 'Case II', 'Case III'] qplot = np.linspace(-1, 1, num=100) observed_plot = plt.plot(qplot, norm.pdf( qplot, loc=mus[i], scale=sigma), 'r-.', label="$\pi_\mathcal{D}$") pf_init_plot = plt.plot(qplot, 1/10*np.abs(qplot)**(-4/5), 'b-', label="$\pi_\mathcal{D}^{Q}$") pf_init_plot = plt.plot(qplot, q_nonlinear_kde(qplot), 'b--', label="$\pi_{\mathcal{D},m}^{Q}$") plt.xlim([-1, 1]) plt.xlabel("$\mathcal{D}$") plt.legend() plt.title(case[i]); loc = ['Left','Mid','Right'] if loc[i]: filename = os.path.join(os.getcwd(), "images", "Fig1(%s).png"%(loc[i])) plt.savefig(filename) ``` ## Left plot in Figure 1 ``` ########################################### ####### The left plot of Fig 1 ########## ########################################### plot_all(0) ``` ## Middle plot in Figure 1 ``` ############################################# ####### The middle plot of Fig 1 ########## ############################################# plot_all(1) ``` ## Right plot in Figure 1 ``` ############################################ ####### The right plot of Fig 1 ########## ############################################ plot_all(2) ```
github_jupyter
![HPEDEV](Pictures/LogoHPEDEV.png) # HPE DISCOVER Hack Shack Challenges Provided by [HPEDEV Team](hpedev.io) Welcome to the Hack Shack Challenges. If you are reading this, you have already registered to one of the following four challenges that will be offered over the course of this HPE Discover Virtual Experience: 1. The HPE OneView Challenge 2. The Redfish Challenge 3. The Grommet Challenge 4. The Container Challenge > Note: If you have enough time, we encourage you to take all of them (in this order). At the very least, you’ll want to participate in those where the subject matter is relevant to your job. ## The HPE OneView Challenge Your mission, should you choose to accept it, is to implement vSphere VSAN on an HPE Synergy Composable Infrastructure. You will be asked to prepare a server profile template according to published best practices, and do it in a scripted way so that you can deploy it across your enterprise in different datacenters and different Synergy environments. - Languages & Tools: PowerShell, GitHub - Estimated time to complete: 1 hour ## The Redfish Challenge Your mission here will be to write the smallest possible program (in terms of number of Redfish API calls) that retrieves the complete Device Inventory list from an HPE iLO 5, and to display the following properties for each device: Location, Product Name, Firmware Version (if applicable) and Status. Remember, you only have so much time. Be quick and be sharp! - Languages & Tools: PowerShell, Python or Bash, GitHub - Estimated time to complete: 1 hour ## The Grommet Challenge A new mission awaits! In this challenge, you will take on a UX designer persona and show your creative side using Grommet to design your own little web app UI. You will start with the Grommet Designer, generate code from your design, push that code to GitHub, and finally deploy the app on Netlify. Beginner through expert designers and developers are all welcome. 
This challenge is all about unleashing your creativity! - Languages & Tools: Grommet Designer, GitHub, Netlifly - Estimated time to complete: 2 hours ## The Container Challenge This challenge is undoubtedly the most complicated one as it's an end-to-end developer experience. Your mission here will be to build a Grommet sample app (or reuse the one from the Grommet Challenge), package it in a container, and then run that container in a Kubernetes Cluster. This challenge touches on several technologies, such as front-end web application design, Docker containers and Kubernetes. Be ready for an intense moment when all the pieces of the puzzle will suddenly make total sense. - Languages & Tools: Grommet Designer, Docker, DockerHub, Kubectl, GitHub - Estimated time to complete: 4 hours # Getting started with your Challenge ![HPEDEV](Pictures/ChallengeProcess.png) Every HPE Discover Virtual Experience 2020 Hack Shack Challenge uses GitHub and Jupyter Notebooks to provide instructions for the challenge and to accept the answers submitted by participants. You will need to go through the following steps: Step 1: Fork the repo of the challenge you'd like to take from https://github.com/HPEDevCom Step 2: Clone your copy of the challenge repo into your Jupyter environment. Do do this, use the Terminal session from the Jupyter Launcher. If you don't have any Launcher pane, click on the `+` sign in the upper left of the left pane. ![HPEDEV](Pictures/Launcher.png) - In the Terminal session, change folder to Discover/TheXXXChallenge where XXX is the challenge you are taking (Oneview, Redfish, Grommet, Container) - Use git CLI clone from there Step 3: Open the challenge notebook in your Jupyter environment (now visible in the left pane) and start working on it > Note: Use the [GIT101 notebook](./WKSHP-GIT101/1-WKSHP-GIT-Basics.ipynb) if you need assistance with these steps Some of the challenges have workshop prerequisites. 
The notebooks for these workshops have been copied in your Jupyter environment within folders called WKSHP-NameOfWorkshop. Don't hesitate to refer to these notebooks if you missed the live workshop. You may also refer to the recorded version of the workshop. # Racing against the clock Remember that you are working against the clock. You have 4 hours after we provided you access to the Jupyter account to submit your response. If you find you are running out of time, make sure you commit and push your changes to your copy of the repo in order to avoid losing any work. # Submitting your response Submitting your response to the challenge will be done by submitting your edited challenge notebook (in some cases there might be additional edited files in the repo). When you are ready, we will ask you to leverage GitHub again and do the following: - Step 1: Commit changes in your Jupyter environment (again using your Launcher terminal window) - Step 2: Push changes to your repo (still using your Launcher terminal window) - Step 3: Open a Pull Request (PR) on our original repo. Make sure you provide an email in the description of the PR so we can contact you if your submission has been chosen. (This step is done from your GitHub account). You should submit your response within the 4-hour time slot. We will reset all the Jupyter accounts right after the time is up, so make sure you have at least committed and pushed your changes back to your repo prior to that. You will then have more time to submit your PR. > Note: Use the [GIT101 notebook](./WKSHP-GIT101/1-WKSHP-GIT-Basics.ipynb) if you need assistance with these steps # Looking for help? Don't hesitate to refer back to the notebooks in your Jupyter accounts for help in answering questions that come up in the challenge. We have set up a Slack channel in the HPE DEV Slack Workspace. Feel free to [join the workspace](https://slack.hpedev.io/) then [start asking questions there](https://hpedev.slack.com/archives/C015CLE2QTT). 
We will be around to help you. Finally, in case of a real emergency, you can contact the [HPEDEV Team](mailto:hpedev.hackshack@hpe.com). Please note that during the period of June 30th, until July 17th the Slack channel will be opened 24x7, with monitoring from the HPE DEV Team from 9AM-6PM CET. # Providing feedback We would love to get feedback about your experience with the Hack Shack Challenges, so please take a moment to fill out that [short survey](https://forms.office.com/Pages/ResponsePage.aspx?id=YSBbEGm2MUuSrCTTBNGV3KHzFdd987lBoQATWJq45DdUNEVQTzdQV1NZM1MxNVVDMDRPRlFGUTlaQi4u) for us: # Challenge rewards We will select and reward the best response per challenge subject at the end of the HPE Discover Virtual Experience Challenges period. In addition to this, we will have a drawing amongst all the challengers who participated in all four challenges and also reward this super-challenger. [Review the terms & conditions](https://hackshack.hpedev.io/challengetermsconditions) # Ready? Set! Go! [Let's get started with your challenge](Start-Here.ipynb)
github_jupyter
# Using multi-armed bandits to choose the best model for predicting credit card default ## Dependencies - [helm](https://github.com/helm/helm) - [s2i](https://github.com/openshift/source-to-image) - Kaggle account to download data. - Python packages: ``` !pip install -r requirements.txt ``` ## Getting data Either head to https://www.kaggle.com/uciml/default-of-credit-card-clients-dataset or use the Kaggle API (instructions at https://github.com/Kaggle/kaggle-api) to download the dataset: ``` !kaggle datasets download -d uciml/default-of-credit-card-clients-dataset !unzip -o default-of-credit-card-clients-dataset.zip ``` ## Load and inspect data ``` import pandas as pd data = pd.read_csv("UCI_Credit_Card.csv") data.shape data.columns target = "default.payment.next.month" data[target].value_counts() ``` Note that we have a class imbalance, so if we use accuracy as the performance measure of a classifier, we need to be able to beat the "dummy" model that classifies every instance as 0 (no default): ``` data[target].value_counts().max() / data.shape[0] ``` ## Case study for using multi-armed bandits In deploying a new ML model, it is rarely the case that the existing (if any) model is decommissioned immediately in favour of the new one. More commonly the new model is deployed alongside the existing one(s) and the incoming traffic is shared between the models. Typically A/B testing is performed in which traffic is routed between existing models randomly, this is called the experiment stage. After a set period of time performance statistics are calculated and the best-performing model is chosen to serve 100% of the requests while the other model(s) are decommissioned. An alternative method is to route traffic dynamically to the best performing model using multi-armed bandits. This avoids the opportunity cost of consistently routing a lot of traffic to the worst performing model(s) during an experiment as in A/B testing. 
This notebook is a case study in deploying two models in parallel and routing traffic between them dynamically using multi-armed bandits (Epsilon-greedy and Thompson sampling in particular). We will use the dataset to simulate a real-world scenario consisting of several steps: 1. Split the data set in half (15K samples in each set) and treat the first half as the only data observes so far 2. Split the first half of the data in proportion 10K:5K samples to use as train:test sets for a first simple model (Random Forest) 3. After training the first model, simulate a "live" environment on the first 5K of data in the second half of the dataset 4. Use the so far observed 20K samples to train a second model (XGBoost) 5. Deploy the second model alongside the first together with a multi-armed bandit and simulate a "live" environment on the last 10K of the unobserved data, routing requests between the two models The following diagram illustrates the proposed simulation design: ![data-split](assets/split.png) ## Data preparation ``` import numpy as np from sklearn.model_selection import train_test_split OBSERVED_DATA = 15000 TRAIN_1 = 10000 TEST_1 = 5000 REST_DATA = 15000 RUN_DATA = 5000 ROUTE_DATA = 10000 # get features and target X = data.loc[:, data.columns != target].values y = data[target].values # observed/unobserved split X_obs, X_rest, y_obs, y_rest = train_test_split( X, y, random_state=1, test_size=REST_DATA ) # observed split into train1/test1 X_train1, X_test1, y_train1, y_test1 = train_test_split( X_obs, y_obs, random_state=1, test_size=TEST_1 ) # unobserved split into run/route X_run, X_route, y_run, y_route = train_test_split( X_rest, y_rest, random_state=1, test_size=ROUTE_DATA ) # observed+run split into train2/test2 X_rest = np.vstack((X_run, X_route)) y_rest = np.hstack((y_run, y_route)) X_train2 = np.vstack((X_train1, X_test1)) X_test2 = X_run y_train2 = np.hstack((y_train1, y_test1)) y_test2 = y_run ``` ## Model training We will train both models at once, 
but defer evaluation of the second model until simulating the live environment. ``` from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(random_state=1) rf.fit(X_train1, y_train1) ``` Now let's see how good our first model is on the test1 set: ``` from sklearn.metrics import ( accuracy_score, classification_report, confusion_matrix, f1_score, precision_score, recall_score, ) y_preds1 = rf.predict(X_test1) print(classification_report(y_test1, y_preds1, target_names=["No default", "Default"])) for score in [ accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, ]: print(score.__name__ + ":\n", score(y_test1, y_preds1)) %matplotlib inline from utils import plot_confusion_matrix cm = confusion_matrix(y_test1, y_preds1) plot_confusion_matrix(cm, classes=["No default", "Default"], normalize=True) ``` So a simple random forest model without any optimizations is able to outperform random guessing on accuracy and achieves a baseline F1 score of ~0.44. However, it is a poor predictor of default as it only achieves a recall of ~0.34. 
Train the second model in advance, but defer evaluation: ``` from xgboost import XGBClassifier xgb = XGBClassifier(random_state=1) xgb.fit(X_train2, y_train2) y_preds1 = xgb.predict(X_test1) print(classification_report(y_test1, y_preds1, target_names=["No default", "Default"])) for score in [ accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, ]: print(score.__name__ + ":\n", score(y_test1, y_preds1)) %matplotlib inline from utils import plot_confusion_matrix cm = confusion_matrix(y_test1, y_preds1) plot_confusion_matrix(cm, classes=["No default", "Default"], normalize=True) ``` Save trained models to disk: ``` import joblib joblib.dump(rf, "models/rf_model/RFModel.sav") joblib.dump(xgb, "models/xgb_model/XGBModel.sav") ``` ## Set up Kubernetes for live simulation Use the setup notebook to [Setup Cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html) to setup Seldon Core with an ingress - either Ambassador or Istio. Then port-forward to that ingress on localhost:8003 in a separate terminal either with: * Ambassador: `kubectl port-forward $(kubectl get pods -n seldon-system -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon-system 8003:8080` * Istio: `kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80` ### Wrap model and router images with s2i We have prepared the model classes under ```models/rf_model/RFModel.py``` and ```models/xgb_model/XGBModel.py``` for wrapping the trained models as docker images using s2i. The structure of the files is as follows: ``` !pygmentize models/rf_model/RFModel.py ``` Note that we define our own custom metrics which are the entries of the confusion matrix that will be exposed to Prometheus and visualized in Grafana as the model runs in the simulated live environment. 
If Minikube is used: create Docker images for the trained models and routers inside Minikube using s2i.
To make things interesting, we will actually deploy 2 parallel deployments with the same 2 models but a different router in front (Epsilon-greedy and Thompson sampling) to compare the performance of two very different multi-armed bandit algorithms. One can think of the first deployment as a production deployment and the second parallel one as a shadow deployment whose responses are used for testing only. But first, let's see what the performance of the new XGBoost model is on its test2 data: ``` y_preds2 = xgb.predict(X_test2) print(classification_report(y_test2, y_preds2, target_names=["No default", "Default"])) for score in [ accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, ]: print(score.__name__ + ":\n", score(y_test2, y_preds2)) cm = confusion_matrix(y_test2, y_preds2) plot_confusion_matrix(cm, classes=["No default", "Default"], normalize=True) ``` So the XGBoost model is slightly better than the old RFModel, so we expect any decent multi-armed bandit router to pick this up on live data, let's try this out. 
First, delete the existing deployment of the old RFModel: ``` !kubectl delete sdep rf-deployment ``` Deploy the following two deployments: ``` %%writefile eg.yaml apiVersion: machinelearning.seldon.io/v1 kind: SeldonDeployment metadata: name: eg-experiment spec: predictors: - componentSpecs: - spec: containers: - image: seldonio/credit_default_rf_model:0.2 name: rf-model - image: seldonio/credit_default_xgb_model:0.2 name: xgb-model - image: seldonio/mab_epsilon_greedy:1.6.0-dev name: eg-router graph: children: - name: rf-model type: MODEL - name: xgb-model type: MODEL name: eg-router parameters: - name: n_branches type: INT value: '2' - name: epsilon type: FLOAT value: '0.1' - name: verbose type: BOOL value: '1' - name: branch_names type: STRING value: rf:xgb - name: seed type: INT value: '1' type: ROUTER name: eg-2 replicas: 1 svcOrchSpec: env: - name: SELDON_ENABLE_ROUTING_INJECTION value: 'true' %%writefile ts.yaml apiVersion: machinelearning.seldon.io/v1 kind: SeldonDeployment metadata: name: ts-experiment spec: name: poc-ts predictors: - componentSpecs: - spec: containers: - image: seldonio/credit_default_rf_model:0.2 name: rf-model - image: seldonio/credit_default_xgb_model:0.2 name: xgb-model - image: seldonio/mab_thompson_sampling:1.6.0-dev name: ts-router graph: children: - name: rf-model type: MODEL - name: xgb-model type: MODEL name: ts-router parameters: - name: n_branches type: INT value: '2' - name: verbose type: BOOL value: '1' - name: branch_names type: STRING value: rf:xgb - name: seed type: INT value: '1' type: ROUTER name: ts-2 replicas: 1 svcOrchSpec: env: - name: SELDON_ENABLE_ROUTING_INJECTION value: 'true' !kubectl apply -f eg.yaml -n seldon !kubectl apply -f ts.yaml -n seldon !kubectl rollout status deploy/$(kubectl get deploy -n seldon -l seldon-deployment-id=eg-experiment -o jsonpath='{.items[0].metadata.name}') -n seldon !kubectl rollout status deploy/$(kubectl get deploy -n seldon -l seldon-deployment-id=ts-experiment -o 
jsonpath='{.items[0].metadata.name}') -n seldon ``` ## Simulate both deployments in parellel with the remaining 10000 data samples Here we send request and feedback to both parallel deployments, thus assessing the performance of the Epsilon-greedy router versus Thompson sampling as a method of routing to the best performing model. ``` for i in range(X_route.shape[0]): if i % 1000 == 0: print(f"Processed {i}/{X_route.shape[0]} samples", flush=True) # fetch sample and make a request payload x = X_route[i].reshape(1, -1).tolist() request = {"data": {"ndarray": x}} # send request to both deployments eg_response = rest_request_ambassador("eg-experiment", "seldon", request) ts_response = rest_request_ambassador("ts-experiment", "seldon", request) # extract predictions eg_probs = eg_response.get("data").get("ndarray")[0] ts_probs = ts_response.get("data").get("ndarray")[0] eg_pred = np.argmax(eg_probs) ts_pred = np.argmax(ts_probs) # send feedback to the model informing it if it made the right decision truth_val = int(y_route[i]) eg_reward = int(eg_pred == truth_val) ts_reward = int(ts_pred == truth_val) truth = [truth_val] _ = send_feedback_rest( "eg-experiment", "seldon", request, eg_response, eg_reward, truth ) _ = send_feedback_rest( "ts-experiment", "seldon", request, ts_response, ts_reward, truth ) ``` We can see the model performance on the Grafana dashboard: http://localhost:3000/dashboard/db/mab?refresh=5s&orgId=1 (refresh to update) We note that both the Epsilon greedy and Thompson sampling allocate more traffic to the better performing model (XGBoost) over time, but Thompson Sampling does so at a quicker rate as evidenced by the superior metrics (F1 score in particular). ### Persistent MAB We also show an example of a TS Router which uses Redis for persistence to ensure that the state is shared consistently across multiple replicas. 
``` %%writefile ts-persistent.yaml apiVersion: machinelearning.seldon.io/v1 kind: SeldonDeployment metadata: name: ts-experiment-persistent spec: predictors: - componentSpecs: - spec: containers: - image: seldonio/credit_default_rf_model:0.2 name: rf-model env: - name: REDIS_SERVICE_HOST value: redis-master-0 - image: seldonio/credit_default_xgb_model:0.2 name: xgb-model env: - name: REDIS_SERVICE_HOST value: redis-master-0 - image: seldonio/mab_thompson_sampling_persistent:1.6.0-dev name: ts-router env: - name: REDIS_SERVICE_HOST value: redis-master-0 graph: children: - name: rf-model type: MODEL - name: xgb-model type: MODEL name: ts-router parameters: - name: n_branches type: INT value: '2' - name: verbose type: BOOL value: '1' - name: branch_names type: STRING value: rf:xgb - name: seed type: INT value: '1' type: ROUTER name: ts-2 replicas: 3 svcOrchSpec: env: - name: SELDON_ENABLE_ROUTING_INJECTION value: 'true' !kubectl apply -n seldon -f ts-persistent.yaml for i in range(X_route.shape[0]): if i % 1000 == 0: print(f"Processed {i}/{X_route.shape[0]} samples", flush=True) # fetch sample and make a request payload x = X_route[i].reshape(1, -1).tolist() request = {"data": {"ndarray": x}} # send request to both deployments ts_response = rest_request_ambassador("ts-experiment-persistent", "seldon", request) # extract predictions ts_probs = ts_response.get("data").get("ndarray")[0] ts_pred = np.argmax(ts_probs) # send feedback to the model informing it if it made the right decision truth_val = int(y_route[i]) ts_reward = int(ts_pred == truth_val) truth = [truth_val] _ = send_feedback_rest( "ts-experiment-persistent", "seldon", request, ts_response, ts_reward, truth ) ``` ## Clean-up ``` # delete data !rm default-of-credit-card-clients-dataset.zip !rm UCI_Credit_Card.csv # delete trained models !rm models/rf_model/RFModel.sav !rm models/xgb_model/XGBModel.sav # delete Seldon deployment from the cluster !kubectl delete sdep --all ```
github_jupyter
###1. Set up training environment ``` #Get necessary packages downloaded #May take a few minutes #This does not have to be run again if you restart a runtime, only if you terminate, are disconnected, or factory reset it !pip install pytorch_lightning !pip install torchsummaryX !pip install webdataset !pip install datasets !pip install wandb !git clone https://github.com/black0017/MedicalZooPytorch.git !pip install -r MedicalZooPytorch/installation/requirements.txt !pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html !git clone https://github.com/McMasterAI/Radiology-and-AI.git ``` #####If you make a change to a branch of the repo and want this script to reflect those changes, restart the runtime and run the code here, then the below imports again ``` !rm -r ./Radiology-and-AI !git clone --branch <branchname> https://github.com/McMasterAI/Radiology-and-AI.git ``` #####Import libraries ``` from google.colab import drive drive.mount('/content/drive', force_remount=True) import sys sys.path.append('./Radiology-and-AI/Radiology_and_AI') sys.path.append('./MedicalZooPytorch') import os import torch import numpy as np from torch.utils.data import Dataset, DataLoader, random_split from pytorch_lightning.loggers import WandbLogger import pytorch_lightning as pl import sys import nibabel as nb from skimage import transform import matplotlib.pyplot as plt import webdataset as wds from collators.brats_collator import col_img from lightning_modules.segmentation import TumourSegmentation cd drive/MyDrive ``` ###2. Load Data ``` #Loading data, must have shortcut to the shared macai_datasets folder in the base level of your google drive train_dataset = wds.Dataset("macai_datasets/brats/train/brats_train.tar.gz") eval_dataset = wds.Dataset("macai_datasets/brats/validation/brats_validation.tar.gz") ``` ###3. 
Find optimal learning rate ``` #Finding Learning rate trainer = pl.Trainer( precision=16, gpus = 1, accumulate_grad_batches=20 #Shammo change this values ) model = TumourSegmentation(learning_rate = 1e-4, collator=col_img, batch_size=5, train_dataset=train_dataset, eval_dataset=eval_dataset) lr_finder = trainer.tuner.lr_find(model,min_lr=1e-7,max_lr=1e-2,mode='linear') print(lr_finder.suggestion()) ``` ###4. Train ``` wandb_logger = WandbLogger(project='macai',name='test_run', offline = True) #Training trainer = pl.Trainer( accumulate_grad_batches = 20, gpus = 1, max_epochs = 10, precision=16, check_val_every_n_epoch = 1, logger = wandb_logger, log_every_n_steps=10, ) model = TumourSegmentation(learning_rate = lr_finder.suggestion(), collator=col_img, batch_size=5, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.fit(model) #Run after youre done training and the run will be uploaded to wandb !wandb sync --sync-all ``` ###5. Evaluate ``` # Example code for crudely showing some images # Probably best in another notebook, but w/e model = TumourSegmentation.load_from_checkpoint('lightning_logs/version_1/checkpoints/epoch=1-step=598.ckpt').cuda().half() # 1 for the tumor core # 2 for edema # 4 for surrounding model.cuda().half() for z in train_dataloader: print('======================================================') prediction = model.forward(torch.unsqueeze(z[0], axis=0).cuda().half()) sl = z[1][0, :, 100] sl[sl==4] = 1 plt.title('Label') plt.imshow(sl, vmin = 0, vmax=2) plt.show() prediction = prediction[0].cpu().detach().numpy().astype('float32') plt.title('Prediction class 0') plt.imshow(prediction[0, :, 100], vmin = 0, vmax=1) plt.show() plt.title('Prediction class 1') plt.imshow(prediction[1, :, 100], vmin = 0, vmax=1) plt.show() ``` def bbox2_3D(img): r = np.any(img, axis=(1, 2)) c = np.any(img, axis=(0, 2)) z = np.any(img, axis=(0, 1)) rmin, rmax = np.where(r)[0][[0, -1]] cmin, cmax = np.where(c)[0][[0, -1]] zmin, zmax = np.where(z)[0][[0, -1]] 
return rmin, rmax, cmin, cmax, zmin, zmax xmins = [] xmaxs = [] ymins = [] ymaxs = [] zmins = [] zmaxs = [] for img in [f_flair, f_seg, f_t1ce, f_t1, f_t2]: xmin, xmax, ymin, ymax, zmin, zmax = bbox2_3D(img) xmins.append(xmin) xmaxs.append(xmax) ymins.append(ymin) ymaxs.append(ymax) zmins.append(zmin) zmaxs.append(zmax) xmin = np.min(xmin) ymin = np.min(ymin) zmin = np.min(zmin) xmax = np.max(xmax) ymax = np.max(ymax) zmax = np.max(zmax) f_flair = f_flair[xmin:xmax, ymin:ymax, zmin:zmax] f_seg = f_seg[xmin:xmax, ymin:ymax, zmin:zmax] f_t1ce = f_t1ce[xmin:xmax, ymin:ymax, zmin:zmax] f_t1 = f_t1[xmin:xmax, ymin:ymax, zmin:zmax] f_t2 = f_t2[xmin:xmax, ymin:ymax, zmin:zmax] print(f_flair.shape)
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt np.random.seed(10) # Load in the SST data ds_train_np = np.load('NOAA_Train_Snapshots.npy') ds_test_np = np.load('NOAA_Test_Snapshots.npy') ds_mask = np.load('NOAA_Mask.npy').astype('bool') # Snapshots numbers num_train = np.shape(ds_train_np)[0] num_test = np.shape(ds_test_np)[0] num_points = np.shape(ds_train_np)[1]*np.shape(ds_train_np)[2] print('Total number of points in the snapshot:',num_points) # Select a random subsample of them num_sensors = 800 idx = np.arange(num_points,dtype='int') # Need to remove the masked points idx = idx[ds_mask.flatten()[:]] # Shuffle np.random.shuffle(idx) idx = idx[:num_sensors] # Lets plot some of the measurements at these locations with time ds_train_flat = ds_train_np.reshape(np.shape(ds_train_np)[0],np.shape(ds_train_np)[1]*np.shape(ds_train_np)[2]) ds_test_flat = ds_test_np.reshape(np.shape(ds_test_np)[0],np.shape(ds_train_np)[1]*np.shape(ds_train_np)[2]) plt.figure() plt.plot(ds_train_flat[:,idx[0]],label='Sensor 1') plt.plot(ds_train_flat[:,idx[1]],label='Sensor 2') plt.plot(ds_train_flat[:,idx[2]],label='Sensor 3') plt.legend() plt.show() # Load in the latitude and longitude information for sensors alone lat_grid = np.load('Lat_grid.npy').flatten() lon_grid = np.load('Lon_grid.npy').flatten() lon_lon, lat_lat = np.meshgrid(lon_grid,lat_grid) sensor_lats = lat_lat.flatten()[idx[:]] sensor_lons = lon_lon.flatten()[idx[:]] # Find Euclidean distance from neighbors lon_lat_coods = np.concatenate((sensor_lons.reshape(-1,1),sensor_lats.reshape(-1,1)),axis=-1) dist_mat = np.zeros(shape=(num_sensors,num_sensors),dtype='double') for i in range(num_sensors): for j in range(num_sensors): dist_mat[i,j] = np.sqrt((lon_lat_coods[i,0]-lon_lat_coods[j,0])**2 + (lon_lat_coods[i,1]-lon_lat_coods[j,1])**2) # Build adjacency matrix for the given sensors based on nearest neighbor interpolation print('The mean distance is:',np.mean(dist_mat)) print('The standard deviation of the distance 
is:',np.std(dist_mat)) # Connection threshold thresh = np.mean(dist_mat) - np.std(dist_mat) adj_mat = np.copy(dist_mat) adj_mat[adj_mat>thresh] = 0 adj_mat[adj_mat>0] = 1 np.fill_diagonal(adj_mat,val=1) from scipy import sparse adj_mat = adj_mat.astype('int') A = np.zeros(shape=(num_train,num_sensors,num_sensors)) A[:,:,:] = adj_mat[None,:,:] # Now stack features at each node - these features will be the past 4 weeks of weekly means and the lat lon num_features = 8 # weeks of history X = np.zeros(shape=(1,num_sensors,num_features),dtype='double') Y = np.zeros(shape=(1,num_sensors,num_features),dtype='double') start_id = 0 end_id = start_id + num_features while end_id != num_train-num_features: X_temp = np.expand_dims(np.transpose(ds_train_flat[start_id:end_id,idx[:]]),axis=0) X = np.concatenate((X,X_temp),axis=0) Y_temp = np.expand_dims(np.transpose(ds_train_flat[start_id+num_features:end_id+num_features,idx[:]]),axis=0) Y = np.concatenate((Y,Y_temp),axis=0) start_id+=1 end_id+=1 X = X[1:] Y = Y[1:] # Lets also shuffle shuffle_idx = np.arange(X.shape[0]) np.random.shuffle(shuffle_idx) X = X[shuffle_idx] Y = Y[shuffle_idx] # Get rid of superfluous A data A = A[:X.shape[0]] print('Final shape of training data inputs:',X.shape) print('Final shape of training data outputs:',Y.shape) print('Final shape of adjacency matrices:',A.shape) # Lets scale from sklearn.preprocessing import MinMaxScaler, StandardScaler scaler = StandardScaler() X = scaler.fit_transform(X.reshape(-1,num_sensors*num_features)) X = X.reshape(-1,num_sensors,num_features) Y = scaler.transform(Y.reshape(-1,num_sensors*num_features)) Y = Y.reshape(-1,num_sensors,num_features) from spektral.layers import GraphConv from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.layers import Input, Dropout from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam from tensorflow.keras.regularizers import l2 # Parameters channels = 128 # Number of channels in the 
first layer N = X.shape[1] # Number of nodes in the graph F = X.shape[2] # Original size of node features num_outputs = Y.shape[2] # Number of outputs dropout = 0.1 # Dropout rate for the features l2_reg = 0.0 # L2 regularization rate learning_rate = 1e-4 # Learning rate epochs = 2000 # Number of training epochs es_patience = 100 # Patience for early stopping # Preprocessing operations to scale by degree of vertex fltr = GraphConv.preprocess(A).astype('f4') # Model definition X_in = Input(shape=(N, F,)) fltr_in = Input((N, N,), sparse=False) dropout_1 = Dropout(dropout)(X_in) graph_conv_1 = GraphConv(channels, activation='relu', kernel_regularizer=l2(l2_reg), use_bias=False)([dropout_1, fltr_in]) dropout_1 = Dropout(dropout)(graph_conv_1) graph_conv_1 = GraphConv(channels, activation='relu', kernel_regularizer=l2(l2_reg), use_bias=False)([dropout_1, fltr_in]) dropout_1 = Dropout(dropout)(graph_conv_1) graph_conv_1 = GraphConv(channels, activation='relu', kernel_regularizer=l2(l2_reg), use_bias=False)([dropout_1, fltr_in]) dropout_1 = Dropout(dropout)(graph_conv_1) graph_conv_1 = GraphConv(channels, activation='relu', kernel_regularizer=l2(l2_reg), use_bias=False)([dropout_1, fltr_in]) dropout_1 = Dropout(dropout)(graph_conv_1) graph_conv_1 = GraphConv(channels, activation='relu', kernel_regularizer=l2(l2_reg), use_bias=False)([dropout_1, fltr_in]) dropout_2 = Dropout(dropout)(graph_conv_1) graph_conv_2 = GraphConv(num_outputs, activation=None, use_bias=False)([dropout_2, fltr_in]) # Build model model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2) optimizer = Adam(lr=learning_rate) model.compile(optimizer=optimizer, loss='mean_squared_error') model.summary() model.fit([X, fltr], Y, epochs=2000, batch_size=32, shuffle=False, # Shuffling data means shuffling the whole graph callbacks=[ EarlyStopping(patience=10, restore_best_weights=True) ], validation_split=0.1) # Lets do evaluations on testing - first set up data X_test = 
np.zeros(shape=(1,num_sensors,num_features),dtype='double') Y_test = np.zeros(shape=(1,num_sensors,num_features),dtype='double') start_id = 0 end_id = start_id + num_features while end_id < num_test-2*num_features: X_temp = np.expand_dims(np.transpose(ds_test_flat[start_id:end_id,idx[:]]),axis=0) X_test = np.concatenate((X_test,X_temp),axis=0) Y_temp = np.expand_dims(np.transpose(ds_test_flat[start_id+num_features:end_id+num_features,idx[:]]),axis=0) Y_test = np.concatenate((Y_test,Y_temp),axis=0) start_id+=num_features end_id+=num_features X_test = X_test[1:] Y_test = Y_test[1:] # Test adjacency matrix A_test = np.zeros(shape=(num_test,num_sensors,num_sensors)) A_test[:,:,:] = adj_mat[None,:,:] A_test = A_test[:X_test.shape[0]] print('Final shape of testing data inputs:',X_test.shape) print('Final shape of testing data outputs:',Y_test.shape) print('Final shape of testing adjacency matrix:',A_test.shape) # Preprocessing operations to scale by degree of vertex fltr = GraphConv.preprocess(A_test).astype('f4') # Reshape X_test = scaler.transform(X_test.reshape(-1,num_sensors*num_features)) X_test = X_test.reshape(-1,num_sensors,num_features) Y_test = scaler.transform(Y_test.reshape(-1,num_sensors*num_features)) Y_test = Y_test.reshape(-1,num_sensors,num_features) # Evaluate model print('Evaluating model.') eval_results = model.evaluate([X_test, fltr], Y_test, batch_size=X_test.shape[0]) print(eval_results) # Visualize forecasts Y_pred = model.predict([X_test,fltr]) # Rescale Y_test = scaler.inverse_transform(Y_test.reshape(-1,num_sensors*num_features)) Y_pred = scaler.inverse_transform(Y_pred.reshape(-1,num_sensors*num_features)) Y_test = Y_test.reshape(-1,num_sensors,num_features) Y_pred = Y_pred.reshape(-1,num_sensors,num_features) # Some reshaping before plotting Y_test = np.einsum('ijk->jik', Y_test) Y_pred = np.einsum('ijk->jik', Y_pred) Y_test = Y_test.reshape(num_sensors,np.shape(Y_test)[1]*np.shape(Y_test)[2]) Y_pred = 
Y_pred.reshape(num_sensors,np.shape(Y_pred)[1]*np.shape(Y_pred)[2]) for sensor_loc in range(num_sensors): plt.figure() plt.plot(Y_test[sensor_loc,:],label='True') plt.plot(Y_pred[sensor_loc,:],label='Predicted') plt.legend() plt.show() ```
github_jupyter
# Time evolution Algorithms for time evolution of quantum mechanical systems. The following is a wrapper around various solution methods. - `eig` diagonalizes the Hamiltonian and uses that operator to compute the exponential. - `zvode` solves $i\partial_t\psi = H(t)\psi$ using an ODE integrator (ZVODE). - `expm_multiply` relies on `scipy.linalg.expm_multiply()` for approximating the exponential. - `expm` relies on `scipy.linalg.expm()` for approximating the exponential. - `chebyshev` uses the Chebyshev algorithm for approximating the exponential. - `lanczos` uses the Lanczos algorithm for approximating the exponential. ``` # file: seeq/evolution.py import numpy as np import scipy.sparse as sp import scipy.integrate import seeq.chebyshev import seeq.lanczos def evolve(ψ, Hamiltonian, times, method='chebyshev', observables=[], constant=False, **kwdargs): """ Time evolution of a quantum state `ψ` under `Hamiltonian`. Arguments: ---------- ψ -- Either a complex vector for the initial state, or a matrix with columns formed by different initial states. Hamiltonian -- Hamiltonian for the Schrödinger equation. It may be a `dxd` dense or sparse matrix or callable object H(t, ψ) that returns the product of the Hamiltonian times the vectors `ψ`. observables -- A list of `dxd` Hermitian matrices representing observables. constant -- True if Hamiltonian is a callable object but always applies the same operator. method -- One of 'eig', 'expm', 'expm_multiply', 'chebyshev', 'lanczos', 'zvode' Output: ------- If `observables` is empty (default) this function generates pairs of times and states (t, ψt). Otherwise it returns (t, Ot) where Ot is a list of expected values, commencing by the norm of the state. """ times = np.array(times) if isinstance(Hamiltonian, (np.ndarray, np.matrix)) or sp.issparse(Hamiltonian): constant = True # # Time integrator. 
# d = len(ψ) lastt = times[0] gen = _evolve_dict.get((method, constant), None) if gen is None: raise ValueError(f'Unknown method "{method}" in evolve():') else: gen = gen(Hamiltonian, ψ, lastt, **kwdargs) for t in times: δt = t - lastt lastt = t if δt: ψ = gen(t, δt, ψ) if observables: yield t, np.array([np.sum(ψ.conj() * (op @ ψ), 0).real for op in observables]) else: yield t, ψ def to_matrix(H, t, d): if isinstance(H, (np.ndarray, np.matrix)): return H if sp.issparse(H): return H.todense() return H(t, np.eye(d)) def ct_eig_gen(H, ψ0, t0): # Constant Hamiltonian, exact diagonalization d = ψ0.shape[0] ω, V = scipy.linalg.eigh(to_matrix(H, 0.0, ψ0.shape[0])) Vt = V.conj().T if ψ0.ndim == 1: return lambda t, δt, ψ: V @ (np.exp((-1j*δt) * ω) * (Vt @ ψ)) else: return lambda t, δt, ψ: V @ (np.exp((-1j*δt) * ω).reshape(d,1) * (Vt @ ψ)) def eig_gen(H, ψ0, t0): # Time-dependent Hamiltonian, exact diagonalization d = ψ0.shape[0] def step(t, δt, ψ): ω, V = scipy.linalg.eigh(to_matrix(H, t, d)) if ψ.ndim == 2: ω = ω.reshape(d,1) return V @ (np.exp((-1j*δt)*ω) * (V.conj().T @ ψ)) return step def expm_gen(H, ψ0, t0): # Any Hamiltonian, exact diagonalization d = ψ0.shape[0] return lambda t, δt, ψ: scipy.linalg.expm((-1j*δt)*to_matrix(H, t, d)) @ ψ def ct_chebyshev_gen(H, ψ0, t0, bandwidth=None, tol=1e-10, order=100): # Constant Hamiltonian, Chebyshev method d = ψ0.shape[0] U = seeq.chebyshev.ChebyshevExpm(H, d=d, bandwidth=bandwidth) return lambda t, δt, ψ: U.apply(ψ, dt=δt, tol=tol, order=order) def chebyshev_gen(H, ψ0, t0, bandwidth=None, tol=1e-10, order=100): # Time-dependent Hamiltonian, Chebyshev method d = ψ0.shape[0] def step(t, δt, ψ): U = seeq.chebyshev.ChebyshevExpm(scipy.sparse.linalg.LinearOperator((d,d), matvec=lambda ψ: H(t, ψ)), bandwidth=bandwidth) return U.apply(ψ, dt=δt, tol=tol, order=order) return step def ct_lanczos_gen(H, ψ0, t0, tol=1e-10, order=100): # Constant Hamiltonian, Chebyshev method U = seeq.lanczos.LanczosExpm(H, d=ψ0.shape[0]) return lambda t, 
δt, ψ: U.apply(ψ, dt=δt, tol=tol, order=order) def lanczos_gen(H, ψ0, t0, largestEigenvalue=0.0, tol=1e-10, order=100): # Time-dependent Hamiltonian, Chebyshev method d = ψ0.shape[0] def step(t, δt, ψ): U = seeq.lanczos.LanczosExpm(scipy.sparse.linalg.LinearOperator((d,d), matvec=lambda ψ: H(t, ψ))) return U.apply(ψ, dt=δt, tol=tol, order=order) return step def expm_multiply_gen(H, ψ0, t0): # Time-dependent Hamiltonian, Scipy's method d = ψ0.shape[0] def step(t, δt, ψ): if callable(H): aux = lambda ψ: -1j * δt * H(t, ψ) else: aux = lambda ψ: -1j * δt * (H @ ψ) aux = scipy.sparse.linalg.LinearOperator((d,d), matvec=aux) return scipy.sparse.linalg.expm_multiply(aux, ψ) return step def ct_expm_multiply_gen(H, ψ0, t0): # Constant Hamiltonian, Scipy's method if callable(H): aux = lambda ψ: -1j * δt * H(0, ψ) else: aux = lambda ψ: -1j * δt * (H @ ψ) # Time-dependent Hamiltonian, Chebyshev method return lambda t, δt, ψ: scipy.sparse.linalg.expm_multiply(aux, ψ) def zvode_gen(H, ψ0, t0, rtol=1e-10, atol=1e-10): # Time-dependent Hamiltonian, Chebyshev method if callable(H): dydt = lambda t, ψ: -1j * H(t, ψ) else: dydt = lambda t, ψ: -1j * (H @ ψ) integrator = scipy.integrate.ode(dydt) integrator.set_integrator('zvode', method='adams', rtol=rtol, atol=atol) integrator.set_initial_value(ψ0, t0) return lambda t, δt, ψ: integrator.integrate(t) _evolve_dict = { ('eig', False): eig_gen, ('expm', False): expm_gen, ('expm_multiply', False): expm_multiply_gen, ('zvode', False): zvode_gen, ('chebyshev', False): chebyshev_gen, ('lanczos', False): lanczos_gen, ('eig', True): ct_eig_gen, ('expm', True): expm_gen, ('expm_multiply', True): ct_expm_multiply_gen, ('zvode', True): zvode_gen, ('chebyshev', True): ct_chebyshev_gen, ('lanczos', True): ct_lanczos_gen, } ``` ## Example of application ``` import numpy as np import scipy.sparse.linalg as sla import math import matplotlib.pyplot as plt import timeit import numpy as np import scipy.sparse.linalg as sla import math import 
matplotlib.pyplot as plt import timeit def test_evolve(size, T=1.0, steps=101): # # Create a random Hermitian matrix and a vector # A = np.random.rand(size,size) ω, U = scipy.linalg.eigh(A @ A.T) A = U @ np.diag(ω / np.max(np.abs(ω))) @ U.conj().T A = 0.5 * (A + A.T) v = np.eye(size) times = np.linspace(0, T, steps) # # Numerically exact exponential # ψx = [scipy.linalg.expm(-1j * A * t) @ v for t in times] # # Benchmarks # figs, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12,4)) methods = ['expm', 'eig', 'chebyshev', 'lanczos', 'zvode'] styles = ['--', '-', 'o', '*', '^'] for (method, s) in zip(methods, styles): t = timeit.default_timer() err = [1.0 - np.abs(np.vdot(ψxt, ψt))/size for (ψxt, (t, ψt)) in zip(ψx, evolve(v, A, times, method=method))] t = timeit.default_timer()-t err = np.array(err) ax1.plot(err, s, label=method) ax2.plot(err, s, label=method) print(f'Method {method} - error = {np.max(err)} - time = {t}s') ax1.legend() ax1.set_yscale('log') ax2.legend() test_evolve(4) test_evolve(50) ```
github_jupyter
# Creating the Database File ## Create the tables Note: this is done using the sqlite3 library 1. Create the db file by connecting to it (if nonexistant it will create the file) 2. Load the SQL table creation script saved in the DatabaseDesign folder 3. Only one table can be created at a time, so split the table creation script into a list of strings, each string create one table 4. Cycle through the list and execute each table creation statement 5. Commit ``` import sqlite3 connection = sqlite3.connect("CoW.db") c = connection.cursor() infile = open("../DatabaseDesign/IRDBtablecreation.sql", "r") orig_creation_script = infile.read() infile.close() create_tables = orig_creation_script.split("\n\n") for table in create_tables: print("New Table:") print(table) print() for table in create_tables: c.execute(table) print("success") connection.commit() ``` ## Load the data Note: this is done using the Pandas library's to_sql() function 1. Load CSVs for each table into a dataframe 2. Export dataframes to the right SQL table 3. 
Commit the changes & close the connection ``` !ls ../FinalData import pandas as pd polity = pd.read_csv("../FinalData/polity.csv") polity.to_sql("POLITY", connection, if_exists='append', index=False) statedates = pd.read_csv("../FinalData/state_dates.csv") statedates.to_sql("STATE_DATES", connection, if_exists='append', index=False) statecontiguity = pd.read_csv("../FinalData/state_contiguity.csv") statecontiguity.to_sql("STATE_CONTIGUITY", connection, if_exists='append', index=False) terrdates = pd.read_csv("../FinalData/territory_dates.csv") terrdates.to_sql("TERRITORY_DATES", connection, if_exists='append', index=False) terrchange = pd.read_csv("../FinalData/territorialchange.csv") terrchange.to_sql("TERRITORIALCHANGE", connection, if_exists='append', index=False) stateresource = pd.read_csv("../FinalData/state_resource.csv") stateresource.to_sql("STATE_RESOURCE", connection, if_exists='append', index=False) igo = pd.read_csv("../FinalData/igo.csv") igo.to_sql("IGO", connection, if_exists='append', index=False) igomem = pd.read_csv("../FinalData/igo_membership.csv") igomem.to_sql("IGO_MEMBERSHIP", connection, if_exists='append', index=False) statealliance = pd.read_csv("../FinalData/state_alliance.csv") statealliance.to_sql("STATE_ALLIANCE", connection, if_exists='append', index=False) alliancemem = pd.read_csv("../FinalData/alliance_membership.csv") alliancemem.to_sql("ALLIANCE_MEMBERSHIP", connection, if_exists='append', index=False) alliancetraits = pd.read_csv("../FinalData/alliance_traits.csv") alliancetraits.to_sql("ALLIANCE_TRAITS", connection, if_exists='append', index=False) war = pd.read_csv("../FinalData/war.csv") war.to_sql("WAR", connection, if_exists='append', index=False) warloc = pd.read_csv("../FinalData/war_locations.csv") warloc.to_sql("WAR_LOCATIONS", connection, if_exists='append', index=False) warpar = pd.read_csv("../FinalData/war_participants.csv") warpar.to_sql("WAR_PARTICIPANTS", connection, if_exists='append', index=False) wartrans = 
pd.read_csv("../FinalData/war_transitions.csv") wartrans.to_sql("WAR_TRANSITIONS", connection, if_exists='append', index=False) connection.commit() connection.close() ```
github_jupyter
# NGC 205 ``` import numpy as np import matplotlib.pyplot as plt import scipy.optimize import scipy.stats %matplotlib inline ``` ## B filter ``` #mean sky value, standard deviation, etc from AIJ mean_sky = 6404.186 sky_std = 67.506 area = 12070 #relatively small size of standard deviation in mean sky value means it's negligible compared to standard deviation of the galaxy sky_std_mean = sky_std / np.sqrt(area) #loading NGC205 B data (just semimajor axis cuts) and subtracting mean sky value to get just galaxy counts data_b = np.loadtxt('ngc205B.txt') pix = data_b[:,0] cut_1 = data_b[:,1] - mean_sky cut_2 = data_b[:,2] - mean_sky mean = data_b[:,3] - mean_sky std = data_b[:,4] fig = plt.figure(figsize=(15,10)) plt.plot(pix, cut_1, "b-", alpha=0.4) plt.plot(pix, cut_2, "b-", alpha=0.4) plt.plot(pix, mean, "r-") plt.xlabel('radius (pixels)') plt.ylabel('DN counts') plt.title('NGC 205 cuts in B filter along semimajor axis with calculated average') ``` The spike in one of the cuts is interesting- most likely due to a star either in front of or behind NGC 205 along that part of the cut. Otherwise, it seems to be exponentially decaying at a slower rate than the spiral galaxies' profiles have, which is what we'd expect! ``` #calculating standard deviation of the mean std_mean = std / np.sqrt(2) fig = plt.figure(figsize=(15,10)) plt.plot(pix, mean, "r-", linewidth=2.0) plt.errorbar(pix, mean, yerr = std_mean, alpha=0.5) plt.xlabel('radius (pixels)') plt.ylabel('DN counts') plt.title('NGC 205 average B counts') ``` The standard deviation of the mean here isn't much smaller than the standard deviation of the data generally here because we only used two cuts. 
``` #ref star 2MASS J00400279+4144452 located at FITS(879,326) in combined NGC205 B image ref_bmag = 14.6 #using source radius of 12pix, inner sky annulus radius 20pix, outer sky annulus radius 30pix ref_DN = 7.2e5 zeropoint = ref_bmag + 2.5 * np.log10(ref_DN) #converting DN counts to magnitudes using zeropoint calculated above mean_mags = zeropoint - 2.5 * np.log10(mean) #using NGC205's distance away from us to calculate size of radii dist = 824 #kpc angles = (pix / 3600) * (np.pi / 180) #converting from arcsec to degrees to radians r_kpc = dist * np.tan(angles) #defining linear model to calculate surface brightness profile best fit #(magnitudes are a log of the counts - the function for surface brightness becomes linear instead of exponential) def linear_model(constant, A, R): return A + R*constant #finding the best fit parameters for the linear region of the plot above #using r^.25 this time because we're looking at an elliptical galaxy! #we're also not excluding a "bulge" section of the data because there isn't one popt, pcov = scipy.optimize.curve_fit(linear_model, (r_kpc)**.25, mean_mags) print'linear best-fit parameters:', popt popt_uncertainties = np.sqrt(np.diag(pcov)) print'uncertainties in best-fit parameters:', popt_uncertainties #calculating best fit line using measured radii and parameters above best_fit_lin = linear_model((r_kpc)**.25, popt[0], popt[1]) fig = plt.figure(figsize=(15,10)) plt.plot((r_kpc)**.25, mean_mags, "r-", linewidth=2.0) plt.plot((r_kpc)**.25, best_fit_lin) plt.xlabel('(radius (kpc))^.25') plt.ylabel('magnitudes/arcsec^2') plt.title('NGC 205 B magnitude as a function of radius^.25') ``` As expected, it looks like the changes in brightness of NGC205 are linearly related to changes in radius^.25 - the brightness is decreasing more slowly than it did for any of the spirals we measured! 
## V filter ``` #mean sky value, standard deviation, etc from AIJ mean_sky = 6357.220 sky_std = 55.346 area = 12710 sky_std_mean = sky_std / np.sqrt(area) data_v = np.loadtxt('ngc205B.txt') pix = data_v[:,0] cut_1 = data_v[:,1] - mean_sky cut_2 = data_v[:,2] - mean_sky mean = data_v[:,3] - mean_sky std = data_v[:,4] fig = plt.figure(figsize=(15,10)) plt.plot(pix, cut_1, "b-", alpha=0.4) plt.plot(pix, cut_2, "b-", alpha=0.4) plt.plot(pix, mean, "r-") plt.xlabel('radius (pixels)') plt.ylabel('DN counts') plt.title('NGC 205 cuts in V filter along semimajor axis with calculated average') std_mean = std / np.sqrt(2) fig = plt.figure(figsize=(15,10)) plt.plot(pix, mean, "r-", linewidth=2.0) plt.errorbar(pix, mean, yerr = std_mean, alpha=0.5) plt.xlabel('radius (pixels)') plt.ylabel('DN counts') plt.title('NGC 205 average V counts') ``` ## R filter ``` #mean sky value, standard deviation, etc from AIJ mean_sky = 7455.730 sky_std = 62.187 area = 10880 sky_std_mean = sky_std / np.sqrt(area) data_r = np.loadtxt('ngc205B.txt') pix = data_r[:,0] cut_1 = data_r[:,1] - mean_sky cut_2 = data_r[:,2] - mean_sky mean = data_r[:,3] - mean_sky std = data_r[:,4] fig = plt.figure(figsize=(15,10)) plt.plot(pix, cut_1, "b-", alpha=0.4) plt.plot(pix, cut_2, "b-", alpha=0.4) plt.plot(pix, mean, "r-") plt.xlabel('radius (pixels)') plt.ylabel('DN counts') plt.title('NGC 205 cuts in R filter along semimajor axis with calculated average') std_mean = std / np.sqrt(2) fig = plt.figure(figsize=(15,10)) plt.plot(pix, mean, "r-", linewidth=2.0) plt.errorbar(pix, mean, yerr = std_mean, alpha=0.5) plt.xlabel('radius (pixels)') plt.ylabel('DN counts') plt.title('NGC 205 average R counts') ```
github_jupyter
# Notation This notebook shows the notation used in this project. ``` %load_ext autoreload %autoreload 2 %config IPCompleter.greedy=True ``` ## Show Pre-defined Notation Collection ``` from solara.utils.notation import NOTATION from IPython.display import Markdown Markdown(NOTATION.get_mrkdwn_table_str()) ``` ## Setting Up Power Notation Collections ``` import solara.utils.notation import solara.envs.wiring power_flow = solara.envs.wiring.PowerFlow(['solar','battery','load','grid'], fully_connected=False) power_flow.add_connection('solar','battery') power_flow.add_connection('solar','load') power_flow.add_connection('solar','grid') power_flow.add_connection('grid','battery') power_flow.add_connection('grid','load') power_flow.add_connection('battery','load') power_flow.add_connection('battery','grid') power_notation = solara.utils.notation.NotationCollection( solara.utils.notation.create_power_variables(power_flow, include_in_out_vars=True) ) #Markdown(power_notation.get_mrkdwn_table_str()) import solara.utils.notation import solara.envs.wiring power_flow = solara.envs.wiring.PowerFlow(['x','y'], fully_connected=True) #power_flow.add_connection('x','y') ex_power_notation = solara.utils.notation.NotationCollection( solara.utils.notation.create_power_variables(power_flow) ) #Markdown(ex_power_notation.get_mrkdwn_table_str()) # Creating separate notation collections for variables and parameters complete_notation_list = NOTATION.notation_list + power_notation.notation_list cp_variables = [var_def for var_def in complete_notation_list if var_def.cp_type == "variable"] cp_parameters = [var_def for var_def in complete_notation_list if var_def.cp_type == "parameter"] cp_var_collection = solara.utils.notation.NotationCollection(cp_variables) cp_par_collection = solara.utils.notation.NotationCollection(cp_parameters) # Create summary commands listing notation cp_var_cmds = [var_def.latex_cmd for var_def in cp_variables] cp_par_cmds = [var_def.latex_cmd for var_def in 
cp_parameters] def get_text_list_cmd(cmd_list): return '$' + '$, $'.join(cmd_list[:-1]) + '$, and $' + cmd_list[-1] + "$" def get_math_list_cmd(cmd_list): out_str = "\\substack{" for i, cmd in enumerate(cmd_list): out_str += cmd if i < len(cmd_list)-1: out_str += ", " if i % 4 == 3: out_str += "\\\\ " out_str += "}" return out_str get_text_list_cmd(cp_par_cmds) print(get_math_list_cmd(cp_var_cmds)) ``` ## Creating Latex Style File Content ``` # Print output to be copied in latex style file print(r""" %%% AUTOMATICALLY GENERATED NOTATION %%% \NeedsTeXFormat{LaTeX2e} \ProvidesPackage{autonotation} """) print("\n%%% Main Notation") NOTATION.print_notation_style() print("\n%%% Power Notation") power_notation.print_notation_style() print("\n%%% Example Power Notation") ex_power_notation.print_notation_style() print("\n%%% Convex Problem Commands") print("\\newcommand{{\\allcpvariablesmath}}{{{}}}".format(get_math_list_cmd(cp_var_cmds))) print("\\newcommand{{\\allcpparameterstext}}{{{}}}".format(get_text_list_cmd(cp_par_cmds))) ``` ## Creating Tables for Report ``` display(Markdown(cp_var_collection.get_latex_table_str())) display(Markdown(cp_par_collection.get_latex_table_str())) # Equivalent Markdown Tables display(Markdown(cp_var_collection.get_mrkdwn_table_str())) display(Markdown(cp_par_collection.get_mrkdwn_table_str())) ```
github_jupyter
# rigidregistration: Quick walk through The code below provides a quick walk-through demonstrating use of the rigidregistration python package. If you find this code useful in your own research, please cite the associated publication: "Image registration of low signal-to-noise cryo-STEM data", Ultramicroscopy (2018), DOI: 10.1016/j.ultramic.2018.04.008 ### Getting started In this example, data which is formatted as .tif files are loaded using the tifffile package. For other file formats common to electron microscopy data (e.g., .dm3, .ser...) we recommend the excellent hyperspy package for i/o handling. See hyperspy.org. ``` # Import libraries and functions import numpy as np import matplotlib.pyplot as plt from tifffile import imread import rigidregistration import os %matplotlib inline # Load data and instantiate imstack object datadir = "../../data/20210602_CrSBr/stacks/" if not (os.path.isdir( datadir+"registered")): os.mkdir( datadir+"registered") fname = "1854_300keV_7C_12cm_10Mx_22mrad_1p2us_40frames" f= datadir+fname + ".tif" # Filepath to data stack=np.rollaxis(imread(f),0,3)/float(2**16) # Rearrange axes and normalize data s=rigidregistration.stackregistration.imstack(stack) # Instantiage imstack object. s.getFFTs() # Inspect data in preparation for registration for i in range(5,6): # Select which images from the stack to display fig,(ax1,ax2)=plt.subplots(1,2) ax1.matshow(stack[:,:,i],cmap='gray') ax2.matshow(np.log(np.abs(np.fft.fftshift(np.fft.fft2(stack[:,:,i])))),cmap='gray',vmin=np.average(np.log(np.abs(np.fft.fft2(stack[:,:,i]))))) ax1.grid(False) ax2.grid(False) plt.show() ``` ### Fourier masking A Fourier mask is used to avoid incorrect cross correlations, by weighting more trustworthy information in frequency space more heavily. 
``` # Choose best mask masktype="gaussian" n=5 s.makeFourierMask(mask=masktype,n=n) s.show_Fourier_mask(i=i,j=j) ``` ### Calculate image shifts Calculate the relative shifts between all pairs of images from their cross correlations. ``` s.findImageShifts(findMaxima='gf',verbose=True); # Find shifts. ``` ### Find and correct outliers in shift matrix The previous step determines the relative shifts between all pairs of images. Here, any incorrectly calculated shifts -- which may result from noisy, low SNR data -- are identified and corrected. First, the shift matrix is displayed and inspected. Next, outliers are identified. Outliers are then corrected. ``` # Show Xij and Yij matrices s.show_Rij() # Identify outliers s.get_outliers(threshold=30) # Set outlier threshhold s.show_Rij() # Correct outliers s.make_corrected_Rij() # Correct outliers using the transitivity relations s.show_Rij_c() # Display the corrected shift matrix ``` ### Calculate average image To obtain the average image, each image in the stack is shifted by an amount which is calculated from the shift matrix. The entire, shifted image stack is then averaged. Several functions are available for displaying and saving the resulting average image, and for summarizing the processing that's been applied to the data for quick review. ``` # Create registered image stack and average s.get_averaged_image() # Save the registered stack s.save_registered_stack( datadir+"registered/"+ fname +".tif") ```
github_jupyter
# 📃 Solution for Exercise M2.01 The aim of this exercise is to make the following experiments: * train and test a support vector machine classifier through cross-validation; * study the effect of the parameter gamma of this classifier using a validation curve; * use a learning curve to determine the usefulness of adding new samples in the dataset when building a classifier. To make these experiments we will first load the blood transfusion dataset. <div class="admonition note alert alert-info"> <p class="first admonition-title" style="font-weight: bold;">Note</p> <p class="last">If you want a deeper overview regarding this dataset, you can refer to the Appendix - Datasets description section at the end of this MOOC.</p> </div> ``` import pandas as pd blood_transfusion = pd.read_csv("../datasets/blood_transfusion.csv") data = blood_transfusion.drop(columns="Class") target = blood_transfusion["Class"] ``` We will use a support vector machine classifier (SVM). In its most simple form, a SVM classifier is a linear classifier behaving similarly to a logistic regression. Indeed, the optimization used to find the optimal weights of the linear model are different but we don't need to know these details for the exercise. Also, this classifier can become more flexible/expressive by using a so-called kernel that makes the model become non-linear. Again, no requirement regarding the mathematics is required to accomplish this exercise. We will use an RBF kernel where a parameter `gamma` allows to tune the flexibility of the model. First let's create a predictive pipeline made of: * a [`sklearn.preprocessing.StandardScaler`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html) with default parameter; * a [`sklearn.svm.SVC`](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html) where the parameter `kernel` could be set to `"rbf"`. Note that this is the default. 
``` # solution from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC model = make_pipeline(StandardScaler(), SVC()) ``` Evaluate the generalization performance of your model by cross-validation with a `ShuffleSplit` scheme. Thus, you can use [`sklearn.model_selection.cross_validate`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html) and pass a [`sklearn.model_selection.ShuffleSplit`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ShuffleSplit.html) to the `cv` parameter. Only fix the `random_state=0` in the `ShuffleSplit` and let the other parameters to the default. ``` # solution from sklearn.model_selection import cross_validate, ShuffleSplit cv = ShuffleSplit(random_state=0) cv_results = cross_validate(model, data, target, cv=cv, n_jobs=2) cv_results = pd.DataFrame(cv_results) cv_results print( f"Accuracy score of our model:\n" f"{cv_results['test_score'].mean():.3f} +/- " f"{cv_results['test_score'].std():.3f}" ) ``` As previously mentioned, the parameter `gamma` is one of the parameters controlling under/over-fitting in support vector machine with an RBF kernel. Evaluate the effect of the parameter `gamma` by using the [`sklearn.model_selection.validation_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.validation_curve.html) function. You can leave the default `scoring=None` which is equivalent to `scoring="accuracy"` for classification problems. You can vary `gamma` between `10e-3` and `10e2` by generating samples on a logarithmic scale with the help of `np.logspace(-3, 2, num=30)`. Since we are manipulating a `Pipeline` the parameter name will be set to `svc__gamma` instead of only `gamma`. You can retrieve the parameter name using `model.get_params().keys()`. We will go more into detail regarding accessing and setting hyperparameter in the next section. 
``` # solution import numpy as np from sklearn.model_selection import validation_curve gammas = np.logspace(-3, 2, num=30) param_name = "svc__gamma" train_scores, test_scores = validation_curve( model, data, target, param_name=param_name, param_range=gammas, cv=cv, n_jobs=2) ``` Plot the validation curve for the train and test scores. ``` # solution import matplotlib.pyplot as plt plt.errorbar(gammas, train_scores.mean(axis=1), yerr=train_scores.std(axis=1), label='Training score') plt.errorbar(gammas, test_scores.mean(axis=1), yerr=test_scores.std(axis=1), label='Testing score') plt.legend() plt.xscale("log") plt.xlabel(r"Value of hyperparameter $\gamma$") plt.ylabel("Accuracy score") _ = plt.title("Validation score of support vector machine") ``` Looking at the curve, we can clearly identify the over-fitting regime of the SVC classifier when `gamma > 1`. The best setting is around `gamma = 1` while for `gamma < 1`, it is not very clear if the classifier is under-fitting but the testing score is worse than for `gamma = 1`. Now, you can perform an analysis to check whether adding new samples to the dataset could help our model to better generalize. Compute the learning curve (using [`sklearn.model_selection.learning_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.learning_curve.html)) by computing the train and test scores for different training dataset size. Plot the train and test scores with respect to the number of samples. 
``` # solution from sklearn.model_selection import learning_curve train_sizes = np.linspace(0.1, 1, num=10) results = learning_curve( model, data, target, train_sizes=train_sizes, cv=cv, n_jobs=2) train_size, train_scores, test_scores = results[:3] plt.errorbar(train_size, train_scores.mean(axis=1), yerr=train_scores.std(axis=1), label='Training score') plt.errorbar(train_size, test_scores.mean(axis=1), yerr=test_scores.std(axis=1), label='Testing score') plt.legend() plt.xlabel("Number of samples in the training set") plt.ylabel("Accuracy") _ = plt.title("Learning curve for support vector machine") ``` We observe that adding new samples in the dataset does not improve the testing score. We can only conclude that the standard deviation of the training error is decreasing when adding more samples which is not a surprise.
github_jupyter
### Checklist for submission It is extremely important to make sure that: 1. Everything runs as expected (no bugs when running cells); 2. The output from each cell corresponds to its code (don't change any cell's contents without rerunning it afterwards); 3. All outputs are present (don't delete any of the outputs); 4. Fill in all the places that say `# YOUR CODE HERE`, or "**Your answer:** (fill in here)". 5. Never copy/paste any notebook cells. Inserting new cells is allowed, but it should not be necessary. 6. The notebook contains some hidden metadata which is important during our grading process. **Make sure not to corrupt any of this metadata!** The metadata may for example be corrupted if you copy/paste any notebook cells, or if you perform an unsuccessful git merge / git pull. It may also be pruned completely if using Google Colab, so watch out for this. Searching for "nbgrader" when opening the notebook in a text editor should take you to the important metadata entries. 7. Although we will try our very best to avoid this, it may happen that bugs are found after an assignment is released, and that we will push an updated version of the assignment to GitHub. If this happens, it is important that you update to the new version, while making sure the notebook metadata is properly updated as well. The safest way to make sure nothing gets messed up is to start from scratch on a clean updated version of the notebook, copy/pasting your code from the cells of the previous version into the cells of the new version. 8. If you need to have multiple parallel versions of this notebook, make sure not to move them to another directory. 9. Although not forced to work exclusively in the course `conda` environment, you need to make sure that the notebook will run in that environment, i.e. that you have not added any additional dependencies. 
**FOR HA1, HA2, HA3 ONLY:** Failing to meet any of these requirements might lead to either a subtraction of POEs (at best) or a request for resubmission (at worst). We advise you to perform the following steps before submission to ensure that requirements 1, 2, and 3 are always met: **Restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All). This might require a bit of time, so plan ahead for this (and possibly use Google Cloud's GPU in HA1 and HA2 for this step). Finally press the "Save and Checkout" button before handing in, to make sure that all your changes are saved to this .ipynb file. ### Fill in name of notebook file This might seem silly, but the version check below needs to know the filename of the current notebook, which is not trivial to find out programmatically. You might want to have several parallel versions of the notebook, and it is fine to rename the notebook as long as it stays in the same directory. **However**, if you do rename it, you also need to update its own filename below: ``` # nb_fname = "XXX.ipynb" ``` ### Fill in group number and member names (use NAME2 and GROUP only for HA1, HA2 and HA3): ``` NAME1 = "" NAME2 = "" GROUP = "" ``` ### Check Python version ``` from platform import python_version_tuple assert python_version_tuple()[:2] == ('3','7'), "You are not running Python 3.7. Make sure to run Python through the course Conda environment." 
``` ### Check that notebook server has access to all required resources, and that notebook has not moved ``` import os nb_dirname = os.path.abspath('') assignment_name = os.path.basename(nb_dirname) assert assignment_name in ['IHA1', 'IHA2', 'HA1', 'HA2', 'HA3'], \ '[ERROR] The notebook appears to have been moved from its original directory' ``` ### Verify correct nb_fname ``` from IPython.display import display, HTML try: display(HTML(r'<script>if("{nb_fname}" != IPython.notebook.notebook_name) {{ alert("You have filled in nb_fname = \"{nb_fname}\", but this does not seem to match the notebook filename \"" + IPython.notebook.notebook_name + "\"."); }}</script>'.format(nb_fname=nb_fname))) except NameError: assert False, 'Make sure to fill in the nb_fname variable above!' ``` ### Verify that your notebook is up-to-date and not corrupted in any way ``` import sys sys.path.append('..') from ha_utils import check_notebook_uptodate_and_not_corrupted check_notebook_uptodate_and_not_corrupted(nb_dirname, nb_fname) ``` # HA1 - Cats and dogs <img src="https://cdn.pixabay.com/photo/2015/05/20/10/03/cat-and-dog-775116_960_720.jpg" alt="Image of cats and dogs" style="width: 500px;"/> For this home assignment, we'll use the Kaggle dataset for the [Dogs vs. Cats competition](https://www.kaggle.com/c/dogs-vs-cats). It is comprised of 25k colour images of dogs and cats. Our goal with this assignment will be to create a classifier that can discriminate between cats or dogs. The goal is to make sure that you all can independently create, train and evaluate a model using a popular deep learning framework. A secondary goal is also to expose you to GPU computing, either your own or via a cloud computing service. The focus is on implementing the models, and much of the surrounding code is provided for you. You are expected to understand the provided code. 
## Using your cloud GPU ### Strong recommendation: In order to make the most out of your GPU hours, first try solving the initial part of this notebook (tasks 0-3) in your own computer (these tasks can be solved on the CPU), and leave most of the available hours for solving tasks 4-5, and refining your best model further (and, if you have the spare hours, experiment a bit!). ### Working efficiently: Training for several epochs just to have your code break at the last validation step is incredibly frustrating and inefficient. Good practice is to first test long training runs with a much simpler dry-run: a single epoch, a few batches et c. Requirements: - Whenever we ask you to plot anything, be sure to add a title and label the axes. If you're plotting more than one curve in the same plot, also add a legend. - When we ask you to train an architecture, train it for a reasonable number of epochs. "Reasonable" here means you should be fairly confident that training for a higher number of epochs wouldn't impact your conclusions regarding the model's performance. When experimenting, a single epoch is often enough to tell whether your model setup has improved or not. Hints: - If you get errors saying you've exhausted the GPU resources, well, then you've exhausted the GPU resources. However, sometimes that's because Pytorch didn't release a part of the GPU's memory. If you think your CNN should fit in your memory during training, try restarting the kernel and directly training only that architecture. - Every group has enough cloud credits to complete this assignment. However, this statement assumes you'll use your resources judiciously (e.g. always try the code first in your machine and make sure everything works properly before starting your instances) and **won't forget to stop your instance after using it,** otherwise you might run out of credits. - Before starting, take a look at the images we'll be using. 
This is a hard task, don't get discouraged if your first models perform poorly (several participants in the original competition didn't achieve an accuracy higher than 60%). - Solving the computer labs and individual home assignments is a good way to get prepared for this assignment. --- ## 0. Imports In the following cell, add all the imports you'll use in this assignment. ``` # YOUR CODE HERE ``` --- ## 1. Loading the data and preprocessing In this part we will set up the data used in this assignment. You need to download it, then we'll walk you through how to make a custom Pytorch dataset abstraction. The abstraction enables you to visualise and play around with the image data and to finally create data loaders, which are necessary for the training. The first step is to head to the [Kaggle website for the cats and dogs competition](https://www.kaggle.com/c/dogs-vs-cats/data) and download the data from there. You should download both the test and train folders together in one zip file (there is a `Download all` button at the bottom of the page). Unfortunately, you need to create a Kaggle account for this. **Only necessary for tasks 4-6**: Downloading the data to your local computer is quite straight-forward. Sooner or later you will have to upload the data to the cloud instance and that is a bit more tricky. There are a few ways to do it: - Jupyter Notebook upload function. When starting the notebook server with the command `jupyter notebook` you are directed to a main page. In the top right corner there is an upload button. - Using [`scp`](https://linuxize.com/post/how-to-use-scp-command-to-securely-transfer-files/) to copy files via an ssh connection. - Using the [Kaggle CLI](https://github.com/Kaggle/kaggle-api). We have added it to the conda environment. 
To begin with, download the data to your local computer and create a folder structure that resembles the following (obviously, the folder names are up to you): small_train small_val train val | | | | | | | | ------------- ------------- ------------- ------------- | | | | | | | | | | | | | | | | cats dogs cats dogs cats dogs cats dogs The `small_train` and `small_val` folders have the training and validation samples for your smaller subset of the data, while the `train` and `val` folders contain all the samples you extracted from Kaggle's `train.zip`. This is just a convenient way of having a smaller dataset to play with for faster prototyping. We provide you a notebook that shows how to achieve this folder structure (`create_project_notebook_structure.ipynb`), starting from the original `dogs-vs-cats.zip` file that you download from Kaggle. If you do use that notebook, we encourage you to understand how each step is being done, so you can generalize this knowledge to new datasets you'll encounter. **(2 points)** For the smaller dataset, we advise you to use 70% of the data as training data (and thereby the remaining 30% for validation data). However, for the larger dataset, you should decide how to split between training and validation. **What percentage of the larger dataset did you decide to use for training?** **Did you decide to keep the same ratio split between train and validation sets for the larger dataset? Motivate your decision!** **Your answer:** (fill in here) Fill in the dataset paths (to be used later by your data loaders): ``` # TODO: Change the directories accordingly train_path = "/your/path" val_path = "/your/path" small_train_path = "/your/path" small_val_path = "/your/path" # YOUR CODE HERE ``` ### 1.1 Dataset To create data loaders we first need to create a dataset abstraction class. The purpose of a data loader is to efficiently provide the CPU/GPU with mini-batches of data. 
We now work with data complex enough to actually warrant the use of data loaders. In particular, we don't want to load all images into memory at once. Like before, the data loader is an instance of the Pytorch [`DataLoader`](https://pytorch.org/docs/stable/data.html?highlight=dataloader#torch.utils.data.DataLoader) which wraps a class that inherits from [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset), that we create ourselves. Image classification is such a common task that Pytorch provides a ready-to-use dataset class for this task, called [`ImageFolder`](https://pytorch.org/vision/stable/datasets.html?highlight=imagefolder#imagefolder). Using this class however, is rather opaque so for your understanding we will show you how to construct a custom dataset class. If you know this method, you will be able to create a data loader for any dataset you may encounter. We construct a class `DogsCatsData` as a subclass of `Dataset`. The dataset subclass holds the actual data, or at least provides access to it. To make it work with the `DataLoader` class we need to implement two methods: - `__getitem__(self, index)`: return the `index`'th sample, i.e. a single pair of (image, label) - `__len__(self)`: simply return the total number of samples $N$ in the dataset. These methods are so called Python "magic" methods, signified by the leading and closing double underscores. 
They typically enable special syntax for a class: `__getitem__` enables indexing of a class, and `__len__` enables calling the `len` function: ```python # Consider an instance `data` of a class `MyDataset` implementing `__getitem__` and `__len__` data[10] # returns the item with index 10 in `data` len(data) # returns the length/size of `data` ``` We will return to why these are needed in the `DataLoader` wrapping class Now, to the actual implementation: The idea is to have the dataset class only store the filenames of the images (and the corresponding label), not the images themselves. We will find and store the filenames in the constructor. The `__getitem__` method will use the index to look up the correct filename and load it into memory. The `__len__` method is left for you to implement. Being able to use and understand code you have not written is an important ability. Below you are required to interact with the dataset class with a simple completion of the implementation and by extracting some data from the class. This is partly to ensure that you understand this specific class and partly to show you some tools for exploring new code. ``` from torch.utils.data import Dataset from itertools import chain from PIL import Image class DogsCatsData(Dataset): def __init__(self, root, transform, dog_label=1, cat_label=0): """Constructor Args: root (Path/str): Filepath to the data root, e.g. './small_train' transform (Compose): A composition of image transforms, see below. """ root = Path(root) if not (root.exists() and root.is_dir()): raise ValueError(f"Data root '{root}' is invalid") self.root = root self.transform = transform self._dog_label = dog_label self._cat_label = cat_label # Collect samples, both cat and dog and store pairs of (filepath, label) in a simple list. 
self._samples = self._collect_samples() def __getitem__(self, index): """Get sample by index Args: index (int) Returns: The index'th sample (Tensor, int) """ # Access the stored path and label for the correct index path, label = self._samples[index] # Load the image into memory img = Image.open(path) # Perform transforms, if any. if self.transform is not None: img = self.transform(img) return img, label def __len__(self): """Total number of samples""" # YOUR CODE HERE def _collect_samples(self): """Collect all paths and labels Helper method for the constructor """ # Iterator over dog filepaths dog_paths = self._collect_imgs_sub_dir(self.root / "dogs") # Iterator of pairs (path, dog label) # Again, we use the `map` function to create an iterator. Its use is not as common as the so called # 'list comprehension' you've previously seen, but a good alternative to have seen. dog_paths_and_labels = map(lambda path: (path, self._dog_label), dog_paths) # Same for cats cat_paths = self._collect_imgs_sub_dir(self.root / "cats") cat_paths_and_labels = map(lambda path: (path, self._cat_label), cat_paths) # Sorting is not strictly necessary, but filesystem globbing (wildcard search) is not deterministic, # and consistency is nice when debugging. return sorted(list(chain(dog_paths_and_labels, cat_paths_and_labels)), key=lambda x: x[0].stem) @staticmethod def _collect_imgs_sub_dir(sub_dir: Path): """Collect image paths in a directory Helper method for the constructor """ if not sub_dir.exists(): raise ValueError(f"Data root '{sub_dir.parent}' must contain sub dir '{sub_dir.name}'") return sub_dir.glob("*.jpg") def get_sample_by_id(self, id_): """Get sample by image id Convenience method for exploration. The indices do not correspond to the image id's in the filenames. Here is a (rather inefficient) way of inspecting a specific image. Args: id_ (str): Image id, e.g. 
`dog.321` """ id_index = [path.stem for (path, _) in self._samples].index(id_) return self[id_index] ``` **(2 points)** Show that you understand the implementation by creating an instance called `example_dataset` of it. Create it from the small training set. Use the instance to 1. Print the number of samples in it 2. Print the label of the second sample, note that this is a number (0 or 1) ``` # The Dataset constructor has a transform attribute, we will cover it below. Just use this for now: from torchvision.transforms import Compose, ToTensor transform = Compose([ToTensor()]) # YOUR CODE HERE ``` It is vital to explore your data, but it can be tricky to deal with images in the tensor format. To aid you, use the below helper function to visually inspect your images. ``` def display_image(axis, image_tensor): """Display a tensor as an image Args: axis (pyplot axis) image_tensor (torch.Tensor): tensor with shape (num_channels=3, width, height) """ # See hint above if not isinstance(image_tensor, torch.Tensor): raise TypeError("The `display_image` function expects a `torch.Tensor` " + "use the `ToTensor` transformation to convert the images to tensors.") # The imshow command expects a `numpy array` with shape (width, height, 3) # We rearrange the dimensions with `permute` and then convert it to `numpy` image_data = image_tensor.permute(1, 2, 0).numpy() height, width, _ = image_data.shape axis.imshow(image_data) axis.set_xlim(0, width) # By convention when working with images, the origin is at the top left corner. # Therefore, we switch the order of the y limits. axis.set_ylim(height, 0) # Example usage _, axis = plt.subplots() some_random_index = 453 # Here we use the __getitem__ method as a "magic" method. # Implementing it for a class, enables square bracket '[]' indexing image_tensor, label = example_dataset[some_random_index] display_image(axis, image_tensor) ``` ### 1.2 Preprocessing The `DogsCatsData` class constructor has an argument called `transform`. 
It allows us to transform or preprocess all the images in a batch, from the raw image data to a more suitable format. There are multiple motivations for preprocessing: - Some transformations might be needed to actually make the data work with our network (reshaping, permuting dimensions et c.). - Make the training more efficient by making the input dimensions smaller, e.g. resizing, cropping. - Artificially expanding the training data through [data augmentation](https://cartesianfaith.com/2016/10/06/what-you-need-to-know-about-data-augmentation-for-machine-learning/) - We have some clever idea of how to change the data to create a simpler optimisation problem. We do not expect you to do data augmentation, but feel free to preprocess the data as you see fit. Use the [documentation](https://pytorch.org/vision/stable/transforms.html#torchvision-transforms) to view available transforms. Extra important is the `Compose` transformation, which is a meta-transformation which composes actual ones, and the `ToTensor` transformation which is the simplest way to go from image to tensor format. Hints: - Revisit the `DogsCatsData` example usage to see how to use the `Compose` and `ToTensor` transformations. - When feeding the images to your CNN, you'll probably want all of them to have the same spatial size, even though the .jpeg files differ in this. Resizing the images can be done using the previously mentioned Pytorch Transforms. - Resizing the images to a smaller size while loading them can be beneficial as it speeds up training. The CNN's do surprisingly well on 64x64 or even 32x32 images. Shorter training cycles give you more time to experiment! Note: The VGG network used later in this assignment is specialised for images that are 224x224. We encourage you to explore the data and choose transformations that you believe to be useful. 
For exploration we provide you with a helper function to visually compare transformations side by side: ``` def compare_transforms(transformations, index): """Visually compare transformations side by side. Takes a list of DogsCatsData datasets with different compositions of transformations. It then display the `index`th image of the dataset for each transformed dataset in the list. Example usage: compare_transforms([dataset_with_transform_1, dataset_with_transform_2], 0) Args: transformations (list(DogsCatsData)): list of dataset instances with different transformations index (int): Index of the sample in the dataset you wish to compare. """ # Here we combine two functions from basic python to validate the input to the function: # - `all` takes an iterable (something we can loop over, e.g. a list) of booleans # and returns True if every element is True, otherwise it returns False. # - `isinstance` checks whether a variable is an instance of a particular type (class) if not all(isinstance(transf, Dataset) for transf in transformations): raise TypeError("All elements in the `transformations` list need to be of type Dataset") num_transformations = len(transformations) fig, axes = plt.subplots(1, num_transformations) # This is just a hack to make sure that `axes` is a list of the same length as `transformations`. # If we only have one element in the list, `plt.subplots` will not create a list of a single axis # but rather just an axis without a list. if num_transformations == 1: axes = [axes] for counter, (axis, transf) in enumerate(zip(axes, transformations)): axis.set_title(f"transf: {counter}") image_tensor = transf[index][0] display_image(axis, image_tensor) plt.show() # Explore your dataset in this cell, you do not need to produce any results. # YOUR CODE HERE ``` **(2 points)** Normalisation of the training data is popular in pre-processing. What is the argument or intuition for why this is a beneficial transformation? 
**Your answer:** (fill in here) ### 1.3 Data loaders With our dataset class implementation in place, creating a `DataLoader` instance is simple. The data loader class wraps the dataset and provides a way to iterate over batches in the training loop. To produce batches, it gets the total number of samples $N$ with the dataset's `__len__` method. It divides the indices $1, \dots, N$ into equally sized index batches with $B$ (batch size) elements. A particular batch with pairs of image and label is created by calling the dataset's `__getitem__` method with the indices in the batch. NB: the last batch in an epoch might be smaller if $N$ is not divisible by $B$. Create the data loaders needed for training (use the small version of the data), in the cell below. The `DataLoader` class is documented [here](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader), but there's not that much to it. You simply create a data loader with a dataset instance and some other (self-explanatory) settings: ```python train_dataloader = DataLoader(example_dataset, batch_size=batch_size, shuffle=True) ``` **(1 point)** Create data loaders required for training and validation. Hints: - The specified `batch_size` should be chosen so that you train fast but don't run out of memory. You need to figure this out empirically; start small and increase the batch size until you run out of memory. Beyond this pragmatic approach, feel free to contribute to the highly contested scientific debate about the relation between batch size and generalisation. - The `DataLoader` constructor takes an optional argument `num_workers`, which defaults to `0` if not provided. Setting a higher number creates multiple threads which load batches concurrently. This can speed up training considerably. ``` # YOUR CODE HERE ``` --- ## 2. Training ### 2.1 The first model **(3 points)** Now, it's time to create a model called `FirstCnn`. To begin with, you have to create a CNN to an exact specification. 
After that, you will get the chance to be more creative. For the first model, create a network that: - Inherits from [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module) - Implements a constructor `__init__(self, img_size)`, a `forward(self, input_batch)` method and whatever other helper methods you deem necessary. Note that a (square) image size should be a parameter in the model's constructor. While not strictly necessary, it is an acceptable way of handling varying input dim's and it is convenient for testing. - Can handle square images of arbitrary size and arbitrary large mini-batch sizes (within memory limits, of course). You may assume that there are always three colour channels, i.e., a mini-batch will have the shape `(batch size = B, num channels = 3, img size = D, D)` - Has layers: 1. Two convolutional layers, each with 10 filters, kernel size = 3, stride = 1, padding = 0 2. A single fully connected layer. - Related layers such as a pooling operation are optional. - Choose suitable activation functions - Outputs the probability of the image belonging to the class 'dog'. Technically the output should consist of `B` probabilities, one for each image in the mini-batch and so have the shape `(B,)`. Hints: - The subpage for [`torch.nn`](https://pytorch.org/docs/stable/nn.html) is a good place to find the layer specifics. - Going from the last CNN layer to the final fully connected layer is not trivial. The convolutions produces feature maps which we can think of as an image with many channels, while the fully connected layer expects a row vector as input. Calculate how many output neurons the convolutions produce and use `.reshape` to make your tensor fit the fully connected layer. It is also common to see the `.view` and `.squeeze` methods to do the same thing. They basically do the same thing (apart from some differences in internal memory management) but are less transparent. 
*Hint within the hint:* remember that the fully connected layers expects a *batch* of 1D tensors. ``` # YOUR CODE HERE def test_model(model_class): assert issubclass(model_class, nn.Module), "Model class should inherit from nn.Module" assert getattr(model_class, "forward", None) is not None, "Model class should have a 'forward' method" _test_output(model_class, some_img_size=224, some_batch_size=64) _test_output(model_class, some_img_size=32, some_batch_size=8) print("Test passed.") def _test_output(model_class, some_img_size, some_batch_size): random_input = torch.rand(some_batch_size, 3, some_img_size, some_img_size) model_instance = model_class(img_size=some_img_size) output = model_instance.forward(random_input) output_shape = list(output.shape) assert output_shape == [some_batch_size], f"Expected output size [{some_batch_size}], got {output_shape}" # Note that the test takes the actual class, not an instance of it, as input. # Here, we assume that the model class is named 'FirstCnn' test_model(FirstCnn) ``` **(3 points)** You have been told that one of the benefits of CNN is that it can handle input of different sizes. Yet, you needed to know the image size in the constructor. Explain how you made your model handle different input sizes and why it is necessary, despite it being a CNN. **Your answer:** (fill in here) ### 2.2 The training loop **(1 point)** You have already seen quite a few training loops in the preparations. Below we provide you with an example of a basic one that you can use if you please. If you do use it, you need to provide an implementation that maps network outputs (probabilites) to hard labels. ``` def output_to_label(z): """Map network output z to a hard label {0, 1} Args: z (Tensor): Probabilities for each sample in a batch. 
""" # YOUR CODE HERE def test_output_to_label(fn): batch_size = torch.randint(1, 64, (1,)) random_logits = torch.rand(batch_size) random_probs = random_logits / random_logits.sum() labels = fn(random_probs) assert labels.shape == random_logits.shape, "The element-wise function should preserve the shape" assert labels.dtype == torch.long, "Incorrect datatype, should be torch.long" fixed_logits = torch.tensor([0.1, 0.9, 0.51, 0.49, 0.7]) fixed_labels = torch.tensor([0, 1, 1, 0, 1], dtype=torch.long) assert all(fixed_labels == fn(fixed_logits)), "Incorrect fixed output" assert fixed_logits.device == fixed_labels.device, "Make sure to specify tensor device" # <- Pseudo code for device unit test test_output_to_label(output_to_label) def training_loop(model, optimizer, loss_fn, train_loader, val_loader, num_epochs, print_every): print("Starting training") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.to(device) train_losses, train_accs, val_losses, val_accs = [], [], [], [] for epoch in range(1, num_epochs+1): model, train_loss, train_acc = train_epoch(model, optimizer, loss_fn, train_loader, val_loader, device, print_every) val_loss, val_acc = validate(model, loss_fn, val_loader, device) print(f"Epoch {epoch}/{num_epochs}: " f"Train loss: {sum(train_loss)/len(train_loss):.3f}, " f"Train acc.: {sum(train_acc)/len(train_acc):.3f}, " f"Val. loss: {val_loss:.3f}, " f"Val. 
acc.: {val_acc:.3f}") train_losses.extend(train_loss) train_accs.extend(train_acc) val_losses.append(val_loss) val_accs.append(val_acc) return model, train_losses, train_accs, val_losses, val_accs def train_epoch(model, optimizer, loss_fn, train_loader, val_loader, device, print_every): # Train: model.train() train_loss_batches, train_acc_batches = [], [] num_batches = len(train_loader) for batch_index, (x, y) in enumerate(train_loader, 1): inputs, labels = x.to(device), y.to(device) optimizer.zero_grad() z = model.forward(inputs) loss = loss_fn(z, labels.float()) loss.backward() optimizer.step() train_loss_batches.append(loss.item()) hard_preds = output_to_label(z) acc_batch_avg = (hard_preds == labels).float().mean().item() train_acc_batches.append(acc_batch_avg) # If you want to print your progress more often than every epoch you can # set `print_every` to the number of batches you want between every status update. # Note that the print out will trigger a full validation on the full val. set => slows down training if print_every is not None and batch_index % print_every == 0: val_loss, val_acc = validate(model, loss_fn, val_loader, device) model.train() print(f"\tBatch {batch_index}/{num_batches}: " f"\tTrain loss: {sum(train_loss_batches[-print_every:])/print_every:.3f}, " f"\tTrain acc.: {sum(train_acc_batches[-print_every:])/print_every:.3f}, " f"\tVal. loss: {val_loss:.3f}, " f"\tVal. 
acc.: {val_acc:.3f}") return model, train_loss_batches, train_acc_batches def validate(model, loss_fn, val_loader, device): val_loss_cum = 0 val_acc_cum = 0 model.eval() with torch.no_grad(): for batch_index, (x, y) in enumerate(val_loader, 1): inputs, labels = x.to(device), y.to(device) z = model.forward(inputs) batch_loss = loss_fn(z, labels.float()) val_loss_cum += batch_loss.item() hard_preds = output_to_label(z) acc_batch_avg = (hard_preds == labels).float().mean().item() val_acc_cum += acc_batch_avg return val_loss_cum/len(val_loader), val_acc_cum/len(val_loader) ``` **(1 point)** Use the training loop to train your model, using the two data loaders you created earlier. Train for a reasonable number of epochs, so as to get a good sense of how well this architecture performs. Hints: - Training on a CPU is slow and in the beginning you just want to verify that your architecture actually produces a prediction with the correct shape. Do everything you can to speed up the prototyping phase, e.g. train only for a single epoch and make the images ridiculously small. ``` # YOUR CODE HERE ``` ### 2.3 Visualisation **(1 point)** Create two plots. In one of them, plot the loss in the training and the validation datasets. In the other one, plot the accuracy in the training and validation datasets. Note that the given training loop produces metrics at different intervals for training and validation; make sure that you align your metrics in a way that makes sense. ``` # YOUR CODE HERE ``` **(2 points)** Based on these, what would you suggest for improving your model? Why? **Your answer:** (fill in here) --- ## 3. Improving your model **(3 points)** Now you are free to create whichever model you want. A simple improvement based on your analysis of the above results is fine, or you can do something more ambitious. When you're happy with one architecture, copy it in the cell below and train it here. Save the training and validation losses and accuracies. 
You'll use this later to compare your best model with the one using transfer learning. **Note**: When trying different ideas, you'll end up with several different models. However, when submitting your solutions to Canvas, the cell below must contain only the definition and training of *one* model. Remove all code related to the models that were not chosen. ``` # YOUR CODE HERE ``` **(1 point)** Create two plots. In one of them, plot the loss in the training and the validation datasets. In the other one, plot the accuracy in the training and validation datasets. ``` # YOUR CODE HERE ``` **(3 points)** Did your results improve? What problems did your improvements fix? Explain why, or why not. **Your answer:** (fill in here) [Save your model](https://pytorch.org/tutorials/beginner/saving_loading_models.html) to disk (the architecture, weights and optimizer state). This is simply so you can use it again easily in the later parts of the notebook, without having to keep it in memory or re-training it. The actual file you create is not relevant to your submission. The code to save the model is given in the cell below. ``` # Assuming that you called your model "improved_model" torch.save(improved_model.state_dict(), "improved_model") # Create and initialise model with a previously saved state dict: saved_model = FirstCnn(img_size=224) saved_model.load_state_dict(torch.load("improved_model")) ``` --- ## 4. Transfer Learning **From now, training on CPU will not be feasible. If your computer has a GPU, try it out! Otherwise, now is the time to connect to your cloud instance** Now, instead of trying to come up with a good architecture for this task, we'll use the VGG16 architecture, but with the top layers removed (the fully connected layers + softmax). We'll substitute them with our own top network, designed for dog/cat classification. 
However, this model has a very high capacity, and will probably suffer a lot from overfitting if we try to train it from scratch, using only our small subset of data. Instead, we'll start the optimization with the weights obtained after training VGG16 on the ImageNet dataset. Start by loading the *pretrained* VGG16 model, from the [torchvision.models](https://pytorch.org/vision/stable/models.html?highlight=vgg#torchvision.models.vgg16). ``` from torchvision import models1 vgg_model = models.vgg16(pretrained=True) print(vgg_model.classifier) ``` **(1 point)** Create a new model with the layers you want to add on top of VGG. *Hint:* - You can access and modify the top layers of the VGG model with `vgg_model.classifier`, and the remaining layers with `vgg_model.features`. - You can get the number of output features of `vgg_model.features` with `vgg_model.classifier[0].in_features` ``` # YOUR CODE HERE ``` **(2 points)** Now add the new model on top of VGG. ``` # YOUR CODE HERE ``` ### 4.1 Using VGG features **(1 point)** Now we're almost ready to train the new model. For transfer learning we want to freeze all but the top layers in your architecture (i.e. signal to the optimizer that the bottom layers should not be changed during optimization) by setting the attribute `requires_grad` for all parameters `vgg_model.features` to `False`. ``` # YOUR CODE HERE ``` **(1 point)** Perform the transfer learning by training the top layers of your model. ``` # YOUR CODE HERE ``` **(1 point)** Create two plots. In one of them, plot the loss in the training and the validation datasets. In the other one, plot the accuracy in the training and validation datasets. ``` # YOUR CODE HERE ``` **(2 points)** How does the model perform, compared to the model obtained in step 3? Create one plot with the training accuracy and another with the validation accuracy of the two scenarios. ``` # YOUR CODE HERE ``` **(3 points)** Compare these results. 
Which approach worked best, starting from scratch or doing transfer learning? Reflect on whether your comparison is fair or not: **Your answer:** (fill in here) **(2 points)** What are the main differences between the ImageNet dataset and the Dogs vs Cats dataset we used? **Your answer:** (fill in here) **(2 points)** Even though there are considerable differences between these datasets, why is it that transfer learning is still a good idea? **Your answer:** (fill in here) **(1 point)** In which scenario would transfer learning be unsuitable? **Your answer:** (fill in here) Save the model to a file. ``` # YOUR CODE HERE ``` ### 4.2 Fine-tuning Now that we have a better starting point for the top layers, we can train the entire network. Unfreeze the bottom layers by resetting the `requires_grad` attribute to `True`. ``` # YOUR CODE HERE ``` **(1 point)** Fine tune the model by training all the layers. ``` # YOUR CODE HERE ``` **(1 point)** How does the model perform, compared to the model trained with frozen layers? Create one plot with the training accuracy and another with the validation accuracy of the two scenarios. ``` # YOUR CODE HERE ``` **(2 points)** Why is it a good idea to use a very small learning rate when doing fine tuning? **Your answer:** (fill in here) Save the model to file. ``` torch.save(vgg_model.state_dict(), "fine_tuned") ``` ### 4.3 Improving the top model (optional) Improve the architecture for the layers you add on top of VGG16. Try different ideas! When you're happy with one architecture, copy it in the cell below and train it here. ``` # YOUR CODE HERE ``` **(1 point)** How does the model perform, compared to the model trained in step 4.2? Create one plot with the training accuracy and another with the validation accuracy of the two scenarios. ``` # YOUR CODE HERE ``` Save the model to a file. ``` # YOUR CODE HERE ``` ## 5. Final training Now we'll train the model that achieved the best performance so far using the entire dataset. 
**Note**: start the optimization with the weights you obtained training in the smaller subset, i.e. *not* from scratch.

First, create two new data loaders, one for training samples and one for validation samples. This time, they'll load data from the folders for the entire dataset.

```
# YOUR CODE HERE
```

**(1 point)** Train your model using the full data. This optimization might take a long time.

```
# YOUR CODE HERE
```

**(1 point)** How does the model perform now when trained on the entire dataset, compared to when only trained on the smaller subset of data? Create one plot with the training accuracy and another with the validation accuracy of the two scenarios.

```
# YOUR CODE HERE
```

**(2 points)** What can you conclude from these plots? Did you expect what you observe in the plots? Explain!

**Your answer:** (fill in here)

## 6. Evaluation on test set (optional)

Now we'll evaluate your final model, obtained in step 5, on the test set. As mentioned before, the samples in the test set are not labelled, so we can't compute any supervised performance metrics ourselves. As a bit of fun and to inspire some friendly competition you may instead submit it to Kaggle for evaluation. Compute the predictions for all samples in the test set according to your best model, and save it in a .csv file with the format expected by the competition.

For the test data we need a slightly different dataset class, due to the lack of labels in the data. A more proper way to implement it would be to make a common class which handles both the train and test settings. Here, we'll just copy the train dataset class and make some modifications to ignore the labels.

Hints:
- There is a `sampleSubmission.csv` file included in the zip data. Take a look at it to better understand what the expected format is here.
- If you don't know how to create and write to files with Python, it's a well-behaved Google search.
``` from pathlib import Path from PIL import Image from time import time class TestData(Dataset): def __init__(self, root: Path, transform): root = Path(root) if not (root.exists() and root.is_dir()): raise ValueError(f"Data root '{root}' is invalid") self.root = root self.transform = transform self._samples = self._collect_samples() def __getitem__(self, index): path = self._samples[index] num_id = int(path.stem) img = Image.open(path) if self.transform is not None: img = self.transform(img) return img, num_id def __len__(self): return len(self._samples) def _collect_samples(self): test_paths = self._collect_imgs_sub_dir(self.root) return sorted(list(test_paths), key=lambda path: int(path.stem)) @staticmethod def _collect_imgs_sub_dir(sub_dir: Path): if not sub_dir.exists(): raise ValueError(f"Data root '{self.root}' must contain sub dir '{sub_dir.name}'") return sub_dir.glob("*.jpg") def get_sample_by_id(self, id_): id_index = self._samples.index(id_) return self[id_index] # YOUR CODE HERE ``` Now that you created your submission file, submit it to Kaggle for evaluation. The [old competition](https://www.kaggle.com/c/dogs-vs-cats) does not allow submissions any more, but you can submit your file to the [new one](https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition) via the "Late submission" button (they use the same data). The Kaggle CLI can be used as well. Kaggle evaluates your submission according to your log-loss score. Which score did you obtain? **Your answer:** (fill in here) What was the username you used for this submission? **Your answer:** (fill in here)
github_jupyter
<a href="https://colab.research.google.com/github/Aggregate-Intellect/original-handson-packages/blob/main/MLOps/Module1-Model_Packaging/Common_Serialization_Methods/Serializing_example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Model Serialization Example This is a simple model used for illustrating the fragile nature of serializing object with Python's Pickle format. ``` import numpy as np import pandas as pd import torch import torch.nn as nn import torch.optim as optim from torch.nn import functional as F from sklearn import preprocessing from sklearn.model_selection import train_test_split import warnings warnings.filterwarnings('ignore') # Download the csv to the content directory in colab ! wget https://gist.githubusercontent.com/netj/8836201/raw/6f9306ad21398ea43cba4f7d537619d0e07d5ae3/iris.csv ``` # Data Prep ``` iris_df = pd.read_csv('/content/iris.csv') # Change label strings to ints - for an actual model, a label_encoder would be used here species = {'Setosa': 0,'Versicolor': 1, 'Virginica': 2} iris_df['variety'] = [species[item] for item in iris_df['variety']] iris_df['variety'].value_counts() print(iris_df.shape) iris_df.head() # Create features / labels and train / test splits iris_x = iris_df.drop('variety', axis = 1) iris_y = iris_df[['variety']] X_train, x_test, Y_train, y_test = train_test_split(iris_x, iris_y, test_size=0.3, random_state=0) # Convert to tensors X_train = torch.from_numpy(X_train.values).float() X_test = torch.from_numpy(x_test.values).float() y_train = torch.from_numpy(Y_train.values).view(1,-1)[0] y_test = torch.from_numpy(y_test.values).view(1,-1)[0] ``` # Model ``` # Simple MLP for demonstration serialization input_size = 4 output_size = 3 hidden_size = 30 class IrisNet(nn.Module): def __init__(self): super(IrisNet, self).__init__() self.fc1 = nn.Linear(input_size, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.fc3 = 
nn.Linear(hidden_size, output_size) def forward(self, X): X = torch.sigmoid((self.fc1(X))) X = torch.sigmoid(self.fc2(X)) X = self.fc3(X) return F.log_softmax(X, dim=-1) # initialize the network and define the optimizer and loss function model = IrisNet() optimizer = optim.Adam(model.parameters(), lr = 0.03) loss_fn = nn.NLLLoss() # Train the model epochs = 500 for epoch in range(epochs): optimizer.zero_grad() y_pred = model(X_train) loss = loss_fn(y_pred , y_train) loss.backward() optimizer.step() if epoch % 100 == 0: print(f'Epoch: {epoch} loss: {loss.item()}') def inference(model, input): """Conduct inference for a model""" return torch.argmax(model(input)) example = torch.tensor([5.1, 3.5, 1.4, 0.2]) pred = inference(model, example) print(pred) ``` # Serialize the model ``` # Mount to google drive in order to save there from google.colab import drive drive.mount('/content/gdrive', force_remount=True) model_name = 'iris_model.pt' model_path = f"/content/gdrive/My Drive/MLOPS/hands_on/serialization/models/{model_name}" # Save the model torch.save(model, model_path) # Ensure the model was saved ! ls /content/gdrive/My\ Drive/MLOPS/hands_on/serialization/models/ # Load the model new_model = torch.load(model_path) new_model example = torch.tensor([5.1, 3.5, 1.4, 0.2]) pred = inference(new_model, example) print(pred) ``` # state_dict ``` model.state_dict() model_name = 'iris_model_state_dict.pt' model_path = f"/content/gdrive/My Drive/MLOPS/hands_on/serialization/models/{model_name}" # Save the models state_dict torch.save(model.state_dict(), model_path) ! ls /content/gdrive/My\ Drive/MLOPS/hands_on/serialization/models/ model_name = 'iris_model_state_dict.pt' model_path = f"/content/gdrive/My Drive/MLOPS/hands_on/serialization/models/{model_name}" model.load_state_dict(torch.load(model_path)) example = torch.tensor([5.1, 3.5, 1.4, 0.2]) pred = inference(new_model, example) print(pred) ```
github_jupyter
```
from citylearn import CityLearn, building_loader, auto_size
from energy_models import HeatPump, EnergyStorage, Building
import matplotlib.pyplot as plt
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import collections
import gym
from gym.utils import seeding
from gym import core, spaces
import os
import ptan
import time
import argparse
import model, common
from matplotlib.pyplot import figure
import numpy as np


class AgentD4PG(ptan.agent.BaseAgent):
    """
    Deterministic-policy agent with additive Gaussian exploration noise.

    `epsilon` scales the noise added to the actor's output; actions are
    clipped to [-1, 1] after the noise is applied.
    """
    def __init__(self, net, device="cpu", epsilon=1.0):
        self.net = net
        self.device = device
        self.epsilon = epsilon

    def __call__(self, states, agent_states):
        # Forward the batch of states through the actor network.
        states_v = ptan.agent.float32_preprocessor(states).to(self.device)
        mu_v = self.net(states_v)
        actions = mu_v.data.cpu().numpy()
        # Exploration: perturb the deterministic action with scaled noise,
        # then clip back into the valid action range.
        actions += self.epsilon * np.random.normal(size=actions.shape)
        actions = np.clip(actions, -1, 1)
        return actions, agent_states


class DDPGActor(nn.Module):
    """Actor network: maps an observation to an action in (-1, 1) via Tanh."""
    def __init__(self, obs_size, act_size):
        super(DDPGActor, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(obs_size, 4),
            nn.ReLU(),
            nn.Linear(4, 4),
            nn.ReLU(),
            nn.Linear(4, act_size),
            nn.Tanh()
        )

    def forward(self, x):
        return self.net(x)


class DDPGCritic(nn.Module):
    """Critic network: Q(s, a) — the action is concatenated after the
    observation sub-network, following the standard DDPG architecture."""
    def __init__(self, obs_size, act_size):
        super(DDPGCritic, self).__init__()
        # Observation branch processed first, on its own.
        self.obs_net = nn.Sequential(
            nn.Linear(obs_size, 8),
            nn.BatchNorm1d(8),
            nn.ReLU(),
        )
        # Joint branch: observation features concatenated with the action.
        self.out_net = nn.Sequential(
            nn.Linear(8 + act_size, 6),
            nn.BatchNorm1d(6),
            nn.ReLU(),
            nn.Linear(6, 1)
        )

    def forward(self, x, a):
        obs = self.obs_net(x)
        return self.out_net(torch.cat([obs, a], dim=1))


from pathlib import Path
# Input data: per-building demand and the matching weather trace.
data_folder = Path("data/")
demand_file = data_folder / "AustinResidential_TH.csv"
weather_file = data_folder / 'Austin_Airp_TX-hour.csv'

building_ids = [4, 5, 9, 16, 21, 26, 33, 36, 49, 59]

heat_pump, heat_tank, cooling_tank = {}, {}, {}

#Ref: Assessment of energy efficiency in electric storage water heaters (2008 Energy and Buildings)
loss_factor = 0.19/24
buildings = {} for uid in building_ids: heat_pump[uid] = HeatPump(nominal_power = 9e12, eta_tech = 0.22, t_target_heating = 45, t_target_cooling = 10) heat_tank[uid] = EnergyStorage(capacity = 9e12, loss_coeff = loss_factor) cooling_tank[uid] = EnergyStorage(capacity = 9e12, loss_coeff = loss_factor) buildings[uid] = Building(uid, heating_storage = heat_tank[uid], cooling_storage = cooling_tank[uid], heating_device = heat_pump[uid], cooling_device = heat_pump[uid]) buildings[uid].state_action_space(np.array([24.0, 40.0, 1.001]), np.array([1.0, 17.0, -0.001]), np.array([0.5]), np.array([-0.5])) building_loader(demand_file, weather_file, buildings) auto_size(buildings, t_target_heating = 45, t_target_cooling = 10) env = {} for uid in building_ids: env[uid] = CityLearn(demand_file, weather_file, buildings = {uid: buildings[uid]}, time_resolution = 1, simulation_period = (3500,6000)) env[uid](uid) if __name__ == "__main__": N_AGENTS = 2 GAMMA = 0.99 BATCH_SIZE = 5000 LEARNING_RATE_ACTOR = 1e-4 LEARNING_RATE_CRITIC = 1e-3 REPLAY_SIZE = 5000 REPLAY_INITIAL = 100 TEST_ITERS = 120 EPSILON_DECAY_LAST_FRAME = 1000 EPSILON_START = 1.2 EPSILON_FINAL = 0.02 device = torch.device("cpu") act_net, crt_net, tgt_act_net, tgt_crt_net, agent, exp_source, buffer, act_opt, crt_opt, frame_idx = {}, {}, {}, {}, {}, {}, {}, {}, {}, {} rew_last_1000, rew, track_loss_critic, track_loss_actor = {}, {}, {}, {} # for uid in buildings: # env[uid].reset() for uid in building_ids: #Create as many actor and critic nets as number of agents #Actor: states_agent_i -> actions_agent_i act_net[uid] = DDPGActor(buildings[uid].observation_spaces.shape[0], buildings[uid].action_spaces.shape[0]).to(device) #Critic: states_all_agents + actions_all_agents -> Q-value_agent_i [1] crt_net[uid] = DDPGCritic(buildings[uid].observation_spaces.shape[0], buildings[uid].action_spaces.shape[0]).to(device) tgt_act_net[uid] = ptan.agent.TargetNet(act_net[uid]) tgt_crt_net[uid] = ptan.agent.TargetNet(crt_net[uid]) 
agent[uid] = model.AgentD4PG(act_net[uid], device=device) exp_source[uid] = ptan.experience.ExperienceSourceFirstLast(env[uid], agent[uid], gamma=GAMMA, steps_count=1) buffer[uid] = ptan.experience.ExperienceReplayBuffer(exp_source[uid], buffer_size=REPLAY_SIZE) act_opt[uid] = optim.Adam(act_net[uid].parameters(), lr=LEARNING_RATE_ACTOR) crt_opt[uid] = optim.Adam(crt_net[uid].parameters(), lr=LEARNING_RATE_CRITIC) frame_idx[uid] = 0 rew_last_1000[uid], rew[uid], track_loss_critic[uid], track_loss_actor[uid] = [], [], [], [] batch, states_v, actions_v, rewards_v, dones_mask, last_states_v, q_v, last_act_v, q_last_v, q_ref_v, critic_loss_v, cur_actions_v, actor_loss_v = {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {} cost, price_list, buffer_reward = {},{},{} for uid in buildings: cost[uid] = [] price_list[uid] = [] buffer_reward[uid] = [] while not env[building_ids[-1]]._terminal(): if frame_idx[4]%100 == 0: print(frame_idx[uid]) for uid in buildings: # print(env[uid].time_step) agent[uid].epsilon = max(EPSILON_FINAL, EPSILON_START - frame_idx[uid] / EPSILON_DECAY_LAST_FRAME) frame_idx[uid] += 1 buffer[uid].populate(1) # print(buffer[uid].buffer[-1]) # print(env[uid].buildings[uid].time_step) price = env[uid].total_electric_consumption[-1]*3e-5 + 0.045 price_list[uid].append(price) for uid in buildings: buffer_reward[uid].append(buffer[uid].buffer[-1].reward) electricity_cost = buffer[uid].buffer[-1].reward*price cost[uid].append(-electricity_cost) buffer[uid].buffer[-1] = buffer[uid].buffer[-1]._replace(reward=electricity_cost) if len(buffer[uid]) < REPLAY_INITIAL: continue for uid in buildings: for k in range(6): batch[uid] = buffer[uid].sample(BATCH_SIZE) states_v[uid], actions_v[uid], rewards_v[uid], dones_mask[uid], last_states_v[uid] = common.unpack_batch_ddqn(batch[uid], device) # TRAIN CRITIC crt_opt[uid].zero_grad() #Obtaining Q' using critic net with parameters teta_Q' q_v[uid] = crt_net[uid](states_v[uid], actions_v[uid]) #Obtaining estimated optimal 
actions a|teta_mu from target actor net and from s_i+1. last_act_v[uid] = tgt_act_net[uid].target_model(last_states_v[uid]) #<----- Actor to train Critic #Obtaining Q'(s_i+1, a|teta_mu) from critic net Q' q_last_v[uid] = tgt_crt_net[uid].target_model(last_states_v[uid], last_act_v[uid]) q_last_v[uid][dones_mask[uid]] = 0.0 #Q_target used to train critic net Q' q_ref_v[uid] = rewards_v[uid].unsqueeze(dim=-1) + q_last_v[uid] * GAMMA critic_loss_v[uid] = F.mse_loss(q_v[uid], q_ref_v[uid].detach()) critic_loss_v[uid].backward() crt_opt[uid].step() # TRAIN ACTOR act_opt[uid].zero_grad() #Obtaining estimated optimal current actions a|teta_mu from actor net and from s_i cur_actions_v[uid] = act_net[uid](states_v[uid]) #Actor loss = mean{ -Q_i'(s_i, a|teta_mu) } actor_loss_v[uid] = -crt_net[uid](states_v[uid], cur_actions_v[uid]) #<----- Critic to train Actor actor_loss_v[uid] = actor_loss_v[uid].mean() #Find gradient of the loss and backpropagate to perform the updates of teta_mu actor_loss_v[uid].backward() act_opt[uid].step() if frame_idx[uid] % 1 == 0: tgt_act_net[uid].alpha_sync(alpha=1 - 0.1) tgt_crt_net[uid].alpha_sync(alpha=1 - 0.1) from matplotlib.pyplot import figure #Plotting all the individual actions figure(figsize=(18, 6)) for uid in buildings: plt.plot(env[uid].action_track[uid][2300:2500]) plt.show() ```
github_jupyter
# Uniform NSOLT dictionary learning Requirements: Python 3.7.x, PyTorch 1.7.x/1.8.x, Graphviz Contact address: Shogo MURAMATSU Faculty of Engineering, Niigata University, 8050 2-no-cho Ikarashi, Nishi-ku, Niigata, 950-2181, JAPAN http://msiplab.eng.niigata-u.ac.jp Copyright (c) 2021, Shogo MURAMATSU, All rights reserved. ## Preparation ``` import math ``` ## Parameter settings * Decimation factor * Number of channels * Sparsity ratio * Number of iterations * Standard deviation of initial angles * Patch size for training * Number of patches ``` # Decimation factor (Strides) decFactor = [2, 2] # [My Mx] nDecs = decFactor[0]*decFactor[1] # prod(decFactor) # Number of channels ( sum(nChannels) >= prod(decFactors) ) nChannels = [3, 3] # [Ps Pa] (Ps=Pa) redundancyRatio = sum(nChannels)/nDecs # Polyphase order ppOrder = [2, 2] # Sparsity ratio sparsityRatio = 1/3 # Number of iterations nIters = 3 # Standard deviation of initial angles stdInitAng = math.pi/6 # Patch size for training szPatchTrn = [32, 32] # > [ (Ny+1)My (Nx+1)Mx ] # Number of patchs per image nSubImgs = 64 # No DC-leakage noDcLeakage = True # Setting of dictionary update step learning_rate = 1e-3 batch_size = 8 epochs = 8 momentum = 0.9 ``` ## Convolutional dictionary learning ### Problem setting: $\{\hat{\mathbf{\theta}},\{ \hat{\mathbf{s}}_n \}\}=\arg\min_{\{\mathbf{\theta},\{\mathbf{s}_n\}\}}\frac{1}{2S}\sum_{n=1}^{S}\|\mathbf{v}_n-\mathbf{D}_{\mathbf{\theta}}\hat{\mathbf{s}}_n\|_2^2,\ \quad\mathrm{s.t.}\ \forall n, \|\mathbf{s}_n\|_0\leq K,$ where $\mathbf{D}_{\mathbf{\theta}}$ is a convolutional dictionary with the design parameter vector $\mathbf{\theta}$. ### Algorithm: Iterate the sparse approximation step and the dictionary update step. 
* Sparse approximation step $\hat{\mathbf{s}}_n=\arg\min_{\mathbf{s}_n}\frac{1}{2} \|\mathbf{v}_n-\hat{\mathbf{D}}\mathbf{s}_n\|_2^2\ \quad \mathrm{s.t.}\ \|\mathbf{s}_n\|_0\leq K$ * Dictionary update step $\hat{\mathbf{\theta}}=\arg\min_{\mathbf{\theta}}\frac{1}{2S}\sum_{n=1}^{S}\|\mathbf{v}_n-\mathbf{D}_{\mathbf{\theta}}\hat{\mathbf{s}}_n\|_2^2$ $\hat{\mathbf{D}}=\mathbf{D}_{\hat{\mathbf{\theta}}}$ ## Bivariate lattice-structure oversampled filter banks As an example, let us adopt a non-separable oversampled lapped transform (NSOLT) of type-I with the number of channels (the numbers of even and odd symmetric channels are identical to each other) and polyphase order (even): $\mathbf{E}(z_\mathrm{v},z_\mathbf{h})=\left(\prod_{k_\mathrm{h}=1}^{N_\mathrm{h}/2}{\mathbf{V}_{2k_\mathrm{h}}^{\{\mathrm{h}\}}}\bar{\mathbf{Q}}(z_\mathrm{h}){\mathbf{V}_{2k_\mathrm{h}-1}^{\{\mathrm{h}\}}}{\mathbf{Q}}(z_\mathrm{h})\right) \left(\prod_{k_{\mathrm{v}}=1}^{N_\mathrm{v}/2}{\mathbf{V}_{2k_\mathrm{v}}^{\{\mathrm{v}\}}}\bar{\mathbf{Q}}(z_\mathrm{v}){\mathbf{V}_{2k_\mathrm{v}-1}^{\{\mathrm{v}\}}}{\mathbf{Q}}(z_\mathrm{v})\right)\mathbf{V}_0\mathbf{E}_0,$ $\mathbf{R}(z_\mathrm{v},z_\mathbf{h})=\mathbf{E}^T(z_\mathrm{v}^{-1},z_\mathrm{h}^{-1}),$ where * $\mathbf{E}(z_\mathrm{v},z_\mathrm{h})$: Type-I polyphase matrix of the analysis filter bank * $\mathbf{R}(z_\mathrm{v},z_\mathrm{h})$: Type-II polyphase matrix in the synthesis filter bank * $z_d\in\mathbb{C}, d\in\{\mathrm{v},\mathrm{h}\}$: The parameter of Z-transformation direction * $N_d\in \mathbb{N}, d\in\{\mathrm{v},\mathrm{h}\}$: Polyphase order in direction $d$ (number of overlapping blocks) * $\mathbf{V}_0=\left(\begin{array}{cc}\mathbf{W}_{0} & \mathbf{O} \\ \mathbf{O} & \mathbf{U}_0\end{array}\right) \left(\begin{array}{c}\mathbf{I}_{M/2} \\ \mathbf{O} \\\mathbf{I}_{M/2} \\\mathbf{O}\end{array}\right)\in\mathbb{R}^{P\times M}$, $\mathbf{V}_n^{\{d\}}=\left(\begin{array}{cc}\mathbf{I}_{P/2} & \mathbf{O} \\\mathbf{O} & 
\mathbf{U}_n^{\{d\}}\end{array}\right)\in\mathbb{R}^{P\times P}, d\in\{\mathrm{v},\mathrm{h}\}$, where$\mathbf{W}_0, \mathbf{U}_0,\mathbf{U}_n^{\{d\}}\in\mathbb{R}^{P/2\times P/2}$ are orthonromal matrices. * $\mathbf{Q}(z)=\mathbf{B}_{P}\left(\begin{array}{cc} \mathbf{I}_{P/2} & \mathbf{O} \\ \mathbf{O} & z^{-1}\mathbf{I}_{P/2}\end{array}\right)\mathbf{B}_{P}$, $\bar{\mathbf{Q}}(z)=\mathbf{B}_{P}\left(\begin{array}{cc} z\mathbf{I}_{P/2} & \mathbf{O} \\ \mathbf{O} & \mathbf{I}_{P/2}\end{array}\right)\mathbf{B}_{P}$, $\mathbf{B}_{P}=\frac{1}{\sqrt{2}}\left(\begin{array}{cc} \mathbf{I}_{P/2} & \mathbf{I}_{P/2} \\ \mathbf{I}_{P/2} & -\mathbf{I}_{P/2}\end{array}\right)$ 【Example】For $P/2=3$, a parametric orthonormal matrix $\mathbf{U}(\mathbf{\theta},\mathbf{\mu})$ can be constructed by $\mathbf{U}(\mathbf{\theta},\mathbf{\mu}) \colon = \left(\begin{array}{cc} \mu_1 & 0& 0\\ 0 & \mu_1 & 0 \\ 0 & 0 & \mu_2 \end{array}\right)\left(\begin{array}{ccc} 1 & 0 & 0 \\0 & \cos\theta_2& -\sin\theta_2 \\ 0 & \sin\theta_2 & \cos\theta_2 \end{array}\right)\left(\begin{array}{ccc} \cos\theta_1& 0 & -\sin\theta_1 \\ 0 & 1 & 0 \\\sin\theta_1 & 0 & \cos\theta_1 \end{array}\right)\left(\begin{array}{ccc} \cos\theta_0& -\sin\theta_0 & 0 \\ \sin\theta_0 & \cos\theta_0 & 0 \\ 0 & 0 & 1 \end{array}\right),$ ${\mathbf{U}(\mathbf{\theta},\mathbf{\mu})}^T = \left(\begin{array}{ccc} \cos\theta_0& \sin\theta_0 & 0 \\ -\sin\theta_0 & \cos\theta_0 & 0 \\ 0 & 0 & 1 \end{array}\right)\left(\begin{array}{ccc} \cos\theta_1& 0 & \sin\theta_1 \\ 0 & 1 & 0 \\-\sin\theta_1 & 0 & \cos\theta_1 \end{array}\right)\left(\begin{array}{ccc} 1 & 0 & 0 \\0 & \cos\theta_2& \sin\theta_2 \\ 0 & -\sin\theta_2 & \cos\theta_2 \end{array}\right)\left(\begin{array}{cc} \mu_0 & 0& 0\\ 0 & \mu_1 & 0 \\ 0 & 0 & \mu_2 \end{array}\right),$ where $\mathbf{\theta}\in\mathbb{R}^{(P-2)P/8}$ and $\mathbf{\mu}=\{-1,1\}^{P/2}$. 
For the sake of simplification, the sign parameters $\mu_k$ are fixed to $-1$ for $\mathbf{U}_n^{\{d\}}$ witn odd $n$, otherwise they are fixed to $+1$. Partial differentiation can be, for examle, conducted as $\frac{\partial}{\partial \theta_1}{\mathbf{U}(\mathbf{\theta},\mathbf{\mu})}^T = \left(\begin{array}{ccc} \cos\theta_0& \sin\theta_0 & 0 \\ -\sin\theta_0 & \cos\theta_0 & 0 \\ 0 & 0 & 1 \end{array}\right)\left(\begin{array}{ccc} -\sin\theta_1& 0 & \cos\theta_1 \\ 0 & 0 & 0 \\-\cos\theta_1 & 0 & -\sin\theta_1 \end{array}\right)\left(\begin{array}{ccc} 1 & 0 & 0 \\0 & \cos\theta_2& \sin\theta_2 \\ 0 & -\sin\theta_2 & \cos\theta_2 \end{array}\right)\left(\begin{array}{cc} \mu_0 & 0& 0\\ 0 & \mu_1 & 0 \\ 0 & 0 & \mu_2 \end{array}\right).$ ## Definition of custom layers and networks Use a custom layer of PyTorch to implement Synthesis NSOLT (Synthesis NSOLT). Definition of layers w/ Learnable properties * Final rotation: $\mathbf{V}_0^T$ (torch_nsolt.nsoltFinalRotation2dLayer) * Intermediate rotation: ${\mathbf{V}_n^{\{d\}}}^T$ (torch_nsolt.nsoltIntermediateRotation2dLayer) Definition of layers w/o Learnable properties * Bivariate inverese DCT (2-D IDCT): $\mathbf{E}_0^T=\mathbf{E}_0^{-1}$ (torch_nsolt.nsoltBlockDct2dLayer) * Vertical up extension: $\mathbf{Q}^T(z_\mathrm{v}^{-1})$ (torch_nsolt.nsoltAtomExtension2dLayer) * Vertical down extension: $\bar{\mathbf{Q}}^T(z_\mathrm{v}^{-1})$ (torch_nsolt.nsoltAtomExtension2dLayer) * Horizontal left extension: $\mathbf{Q}^T(z_\mathrm{h}^{-1})$ (torch_nsolt.nsoltAtomExtension2dLayer) * Horizontal right extension: $\bar{\mathbf{Q}}^T(z_\mathrm{h}^{-1})$ (torch_nsolt.nsoltAtomExtension2dLayer) 【References】 * MATLAB SaivDr Package: https://github.com/msiplab/SaivDr * S. Muramatsu, K. Furuya and N. Yuki, "Multidimensional Nonseparable Oversampled Lapped Transforms: Theory and Design," in IEEE Transactions on Signal Processing, vol. 65, no. 5, pp. 1251-1264, 1 March1, 2017, doi: 10.1109/TSP.2016.2633240. * S. Muramatsu, T. 
Kobayashi, M. Hiki and H. Kikuchi, "Boundary Operation of 2-D Nonseparable Linear-Phase Paraunitary Filter Banks," in IEEE Transactions on Image Processing, vol. 21, no. 4, pp. 2314-2318, April 2012, doi: 10.1109/TIP.2011.2181527. * S. Muramatsu, M. Ishii and Z. Chen, "Efficient parameter optimization for example-based design of nonseparable oversampled lapped transform," 2016 IEEE International Conference on Image Processing (ICIP), Phoenix, AZ, 2016, pp. 3618-3622, doi: 10.1109/ICIP.2016.7533034. * Furuya, K., Hara, S., Seino, K., & Muramatsu, S. (2016). Boundary operation of 2D non-separable oversampled lapped transforms. _APSIPA Transactions on Signal and Information Processing, 5_, E9. doi:10.1017/ATSIP.2016.3. ``` import sys sys.path.append('../../appendix/') sys.path.append('../../appendix/torch_nsolt/') import torch import torch.nn import torch_nsolt as nsolt from torchviz import make_dot from torch_nsolt.orthonormalTransform import OrthonormalTransform from itertools import chain # Construction of deep learning network analysisnet = nsolt.NsoltAnalysis2dNetwork( number_of_channels = nChannels, decimation_factor = decFactor, polyphase_order = ppOrder, number_of_vanishing_moments = noDcLeakage ) # Initialize def init_angles(m): if type(m) == OrthonormalTransform: torch.nn.init.normal_(m.angles,mean=0.0,std=stdInitAng) analysisnet.apply(init_angles) synthesisnet = analysisnet.T # Visualize height, width = szPatchTrn torch.manual_seed(0) x = torch.randn(1,1,height,width,dtype=torch.float,requires_grad=False) y = synthesisnet(analysisnet(x)) params_dict = dict(chain(analysisnet.named_parameters(), synthesisnet.named_parameters())) make_dot(y,params=params_dict) #optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum) ```
github_jupyter